'Class::Data::Accessor' => 0.01,
'Carp::Clan' => 0,
'DBI' => 1.40,
+ 'Module::Find' => 0,
+ 'Class::Inspector' => 0,
},
build_requires => {
'DBD::SQLite' => 1.11,
},
- recommends => {
- 'Data::UUID' => 0,
- 'Module::Find' => 0,
- 'Class::Inspector' => 0,
- },
create_makefile_pl => 'passthrough',
create_readme => 1,
- test_files => [ glob('t/*.t'), glob('t/*/*.t') ]
+ test_files => [ glob('t/*.t'), glob('t/*/*.t') ],
+ script_files => [ glob('script/*') ],
);
Module::Build->new(%arguments)->create_build_script;
Revision history for DBIx::Class
+ - marked DB.pm as deprecated and noted it will be removed by 1.0
+ - add ResultSetColumn
+ - refactor ResultSet code to resolve attrs as late as possible
+ - merge prefetch attrs into join attrs
+ - add +select and +as attributes to ResultSet
+ - added InflateColumn::DateTime component
+ - refactor debugging to allow for profiling using Storage::Statistics
+ - removed Data::UUID from deps, made other optionals required
+ - modified SQLT parser to skip dupe table names
+ - added remove_column(s) to ResultSource/ResultSourceProxy
+ - added add_column alias to ResultSourceProxy
+ - added source_name to ResultSource
+ - load_classes now uses source_name and sets it if necessary
+ - add update_or_create_related to Relationship::Base
+ - add find_or_new to ResultSet/ResultSetProxy and find_or_new_related
+ to Relationship::Base
+ - add accessors for unique constraint names and columns to
+ ResultSource/ResultSourceProxy
+ - rework ResultSet::find() to search unique constraints
+ - CDBICompat: modify retrieve to fix column casing when ColumnCase is
+ loaded
+ - CDBICompat: override find_or_create to fix column casing when
+ ColumnCase is loaded
+
0.06003
- make find_or_create_related check defined() instead of truth
- don't unnecessarily fetch rels for cascade_update
- remove build dependency on version.pm
0.05004 2006-02-13 20:59:00
- - allow specification of related columns via cols attr when primary
+ - allow specification of related columns via cols attr when primary
keys of the related table are not fetched
- fix count for group_by as scalar
- add horrific fix to make Oracle's retarded limit syntax work
+2005-04-16 by mst
+ - set_from_related should take undef
+ - ResultSource objects caching ->resultset causes interesting problems
+ - find why XSUB dumper kills schema in Catalyst (may be Pg only?)
+
+2006-04-11 by castaway
+ - using PK::Auto should set is_auto_increment for the PK columns, so that copy() "just works"
+ - docs of copy() should say that is_auto_increment is essential for auto_incrementing keys
+
+2006-03-25 by mst
+ - Refactor ResultSet::new to be less hairy
+ - we should move the setup of select, as, and from out of here
+ - these should be local rs attrs, not main attrs, and extra joins
+ provided on search should be merged
+ - find a way to un-wantarray search without breaking compat
+ - audit logging component
+ - delay relationship setup if done via ->load_classes
+ - double-sided relationships
+ - incremental deploy
+ - make short form of class specifier in relationships work
2006-01-31 by bluefeet
- Create a DBIx::Class::FilterColumn to replace inflate/deflate.
We should still support the old inflate/deflate syntax, but this new
way should be recommended.
-2006-02-07 by JR
+2006-02-07 by castaway
- Extract DBIC::SQL::Abstract into a separate module for CPAN
- Chop PK::Auto::Foo up to have PK::Auto refer to an appropriate
DBIx::Storage::DBI::Foo, which will be loaded on connect from Driver info?
+(done -> 0.06001!)
- Add deploy method to Schema, which will create DB tables from Schema, via
SQLT
+(sorta done)
2006-03-18 by bluefeet
- Support table locking.
+2006-03-21 by bluefeet
+ - When subclassing a dbic class make it so you don't have to do
+ __PACKAGE__->table(__PACKAGE__->table()); for the result set to
+ return the correct object type.
+
+2006-03-27 by mst
+ Add the ability for deploy to be given a directory and grab <dbname>.sql
+ out of there if available. Try SQL::Translator if not. If none of the above,
+ cry (and die()). Then you can have a script that pre-gens for all available
+ SQLT modules so an app can do its own deploy without SQLT on the target
+ system
+
+2006-05-25 by mst (TODOed by bluefeet)
+ Add the search attributes "limit" and "rows_per_page".
+ limit: work as expected just like offset does
+ rows_per_page: only be used if you used the page attr or called $rs->page
+ rows: modify to be an alias that gets used to populate either as appropriate,
+ if you haven't specified one of the others
+
--- /dev/null
+Schema versioning/deployment ideas from Jess (with input from theorbtwo and mst):
+1) Add a method to storage to:
+ - take args of DB type, version, and optional file/pathname
+ - create an SQL file, via SQLT, for the current schema
+ - passing prev. version + version will create an sqlt-diff'ed upgrade file, such as
+ - $preversion->$currentversion-$dbtype.sql, which contains ALTER foo statements.
+2) Make deploy/deploy_statements able to load from the appropriate file, for the current DB, or on the fly? - Compare against current schema version.
+3) Add an on_connect_cb (callback) thingy to storage.
+4) create a component to deploy version/updates:
+ - it hooks itself into on_connect_cb ?
+ - when run it:
+ - Attempts or prompts a backup of the database. (commands for these per-rdbms can be stored in storage::dbi::<dbtype> ?)
+ - Checks the version of the current schema being used
+ - Compares it to some schema table containing the installed version
+ - If no such table exists, we can attempt to sqlt-diff the DB structure with the schema
+ - If version does exist, we use an array of user-defined upgrade paths,
+ eg: version = '3.x'; schema = '1.x', upgrade paths = ('1.x->2.x', '2.x->3.x')
+ - Find the appropriate upgrade-path file, parse into two chunks:
+ a) the commands which do not contain "DROP"
+ b) the ones that do
+ - Calls user callbacks for "pre-upgrade"
+ - Runs the first set of commands on the DB
+ - Calls user callbacks for "post-alter"
+ - Runs drop commands
+ - Calls user callbacks for "post-drop"
+ - The user will need to define (or ignore) the following callbacks:
+ - "pre-upgrade", any code to be run before the upgrade, called with schema object, version-from, version-to, db-type .. bear in mind that here any new fields in the schema will not work, but can be used via scalarrefs.
+ - "post-alter", this is the main callback, at this stage, all old and new fields will be available, to allow data migration.
+ - "post-drop", this is the clean-up stage, now only new fields are available.
+
jguenther: Justin Guenther <guentherj@agr.gc.ca>
+captainL: Luke Saunders <luke.saunders@gmail.com>
+
draven: Marcus Ramberg <mramberg@cpan.org>
nigel: Nigel Metheringham <nigelm@cpan.org>
sszabo: Stephan Szabo <sszabo@bigpanda.com>
-captainL: Luke Saunders <luke.saunders@gmail.com>
-
Todd Lipcon
wdh: Will Hawes
+gphat: Cory G Watson <gphat@cpan.org>
+
=head1 LICENSE
You may distribute this code under the same terms as Perl itself.
return $class->next::method(lc($col));
}
+# _build_query
+#
+# Build a query hash for find, et al. Overrides Retrieve::_build_query.
+
+sub _build_query {
+ my ($self, $query) = @_;
+
+ my %new_query;
+ $new_query{lc $_} = $query->{$_} for keys %$query;
+
+ return \%new_query;
+}
+
sub _mk_group_accessors {
my ($class, $type, $group, @fields) = @_;
#warn join(', ', map { ref $_ ? (@$_) : ($_) } @fields);
sub has_a {
my ($self, $col, $f_class, %args) = @_;
$self->throw_exception( "No such column ${col}" ) unless $self->has_column($col);
- eval "require $f_class";
+ $self->ensure_class_loaded($f_class);
if ($args{'inflate'} || $args{'deflate'}) { # Non-database has_a
if (!ref $args{'inflate'}) {
my $meth = $args{'inflate'};
use warnings FATAL => 'all';
-sub retrieve {
- die "No args to retrieve" unless @_ > 1;
- shift->find(@_);
+sub retrieve {
+ my $self = shift;
+ die "No args to retrieve" unless @_ > 0;
+
+ my @cols = $self->primary_columns;
+
+ my $query;
+ if (ref $_[0] eq 'HASH') {
+ $query = { %{$_[0]} };
+ }
+ elsif (@_ == @cols) {
+ $query = {};
+ @{$query}{@cols} = @_;
+ }
+ else {
+ $query = {@_};
+ }
+
+ $query = $self->_build_query($query);
+ $self->find($query);
+}
+
+sub find_or_create {
+ my $self = shift;
+ my $query = ref $_[0] eq 'HASH' ? shift : {@_};
+
+ $query = $self->_build_query($query);
+ $self->next::method($query);
+}
+
+# _build_query
+#
+# Build a query hash. Defaults to a no-op; ColumnCase overrides.
+
+sub _build_query {
+ my ($self, $query) = @_;
+
+ return $query;
}
sub retrieve_from_sql {
use warnings;
use Class::C3;
+use Class::Inspector;
sub inject_base {
my ($class, $target, @to_inject) = @_;
{
no strict 'refs';
- my %seen;
- unshift( @{"${target}::ISA"},
- grep { !$seen{ $_ }++ && $target ne $_ && !$target->isa($_) }
- @to_inject
- );
+ foreach my $to (reverse @to_inject) {
+ unshift( @{"${target}::ISA"}, $to )
+ unless ($target eq $to || $target->isa($to));
+ }
}
# Yes, this is a hack. But it *does* work. Please don't submit tickets about
sub _load_components {
my ($class, @comp) = @_;
foreach my $comp (@comp) {
- eval "use $comp";
- die $@ if $@;
+ $class->ensure_class_loaded($comp);
}
$class->inject_base($class => @comp);
}
+# TODO: handle ->has_many('rel', 'Class'...) instead of
+# ->has_many('rel', 'Some::Schema::Class'...)
+sub ensure_class_loaded {
+ my ($class, $f_class) = @_;
+ eval "require $f_class";
+ my $err = $@;
+ Class::Inspector->loaded($f_class)
+ or die $err || "require $f_class was successful but the package ".
+ "is not defined";
+}
+
1;
Serialize::Storable
InflateColumn
Relationship
+ PK::Auto
PK
Row
ResultSourceProxy::Table
=head1 NAME
-DBIx::Class::DB - Non-recommended classdata schema component
+DBIx::Class::DB - (DEPRECATED) classdata schema component
=head1 SYNOPSIS
This class is designed to support the Class::DBI connection-as-classdata style
for DBIx::Class. You are *strongly* recommended to use a DBIx::Class::Schema
-instead; DBIx::Class::DB will continue to be supported but new development
-will be focused on Schema-based DBIx::Class setups.
+instead; DBIx::Class::DB will not undergo new development and will be moved
+to being a CDBICompat-only component before 1.0.
=head1 METHODS
--- /dev/null
+package DBIx::Class::InflateColumn::DateTime;
+
+use strict;
+use warnings;
+use base qw/DBIx::Class/;
+
+__PACKAGE__->load_components(qw/InflateColumn/);
+
+__PACKAGE__->mk_group_accessors('simple' => '__datetime_parser');
+
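+# For any column registered with data_type => 'datetime', register_column
+# (below) installs inflate/deflate closures that convert between the raw
+# column value and a DateTime object, using the parser supplied by the
+# storage layer via datetime_parser().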
+sub register_column {
+ my ($self, $column, $info, @rest) = @_;
+ $self->next::method($column, $info, @rest);
+ if ($info->{data_type} =~ /^datetime$/i) {
+ $self->inflate_column(
+ $column =>
+ {
+ inflate => sub {
+ my ($value, $obj) = @_;
+ $obj->_datetime_parser->parse_datetime($value);
+ },
+ deflate => sub {
+ my ($value, $obj) = @_;
+ $obj->_datetime_parser->format_datetime($value);
+ },
+ }
+ );
+ }
+}
+
+sub _datetime_parser {
+ my $self = shift;
+ if (my $parser = $self->__datetime_parser) {
+ return $parser;
+ }
+ my $parser = $self->result_source->storage->datetime_parser(@_);
+ return $self->__datetime_parser($parser);
+}
+
+1;
L<DBIx::Class::HTMLWidget> - Like FromForm but with DBIx::Class and HTML::Widget.
+L<DBIx::Class::Ordered> - Modify the position of objects in an ordered list.
+
L<DBIx::Class::PK::Auto> - Retrieve automatically created primary keys upon insert.
L<DBIx::Class::QueriesTime> - Display the amount of time it takes to run queries.
This can be useful when you don't want to pass around a Schema object to every
method.
+=head2 Profiling
+
+When you enable L<DBIx::Class::Storage::DBI>'s debugging it prints the SQL
+executed as well as notifications of query completion and transaction
+begin/commit. If you'd like to profile the SQL you can subclass the
+L<DBIx::Class::Storage::Statistics> class and write your own profiling
+mechanism:
+
+ package My::Profiler;
+ use strict;
+
+ use base 'DBIx::Class::Storage::Statistics';
+
+ use Time::HiRes qw(time);
+
+ my $start;
+
+ sub query_start {
+ my $self = shift();
+ my $sql = shift();
+ my @params = @_;
+
+ print "Executing $sql: ".join(', ', @params)."\n";
+ $start = time();
+ }
+
+ sub query_end {
+ my $self = shift();
+ my $sql = shift();
+ my @params = @_;
+
+ printf("Execution took %0.4f seconds.\n", time() - $start);
+ $start = undef;
+ }
+
+ 1;
+
+You can then install that class as the debugging object:
+
+ __PACKAGE__->storage()->debugobj(My::Profiler->new());
+ __PACKAGE__->storage()->debug(1);
+
+A more complicated example might involve storing each execution of SQL in a
+hash of arrays keyed by statement (here C<%calls> is assumed to be a
+package-level hash, declared alongside C<$start>):
+
+ sub query_end {
+ my $self = shift();
+ my $sql = shift();
+ my @params = @_;
+
+ my $elapsed = time() - $start;
+ push(@{ $calls{$sql} }, {
+ params => \@params,
+ elapsed => $elapsed
+ });
+ }
+
+You could then create average, high and low execution times for an SQL
+statement and dig down to see if certain parameters cause aberrant behavior.
+
=head2 Getting the value of the primary key for the last database insert
AKA getting last_insert_id
--- /dev/null
+# vim: ts=8:sw=4:sts=4:et
+package DBIx::Class::Ordered;
+use strict;
+use warnings;
+use base qw( DBIx::Class );
+
+=head1 NAME
+
+DBIx::Class::Ordered - Modify the position of objects in an ordered list.
+
+=head1 SYNOPSIS
+
+Create a table for your ordered data.
+
+ CREATE TABLE items (
+ item_id INTEGER PRIMARY KEY AUTOINCREMENT,
+ name TEXT NOT NULL,
+ position INTEGER NOT NULL
+ );
+ # Optional: group_id INTEGER NOT NULL
+
+In your Schema or DB class add Ordered to the top
+of the component list.
+
+ __PACKAGE__->load_components(qw( Ordered ... ));
+
+Specify the column that stores the position number for
+each row.
+
+ package My::Item;
+ __PACKAGE__->position_column('position');
+ __PACKAGE__->grouping_column('group_id'); # optional
+
+That's it; now you can change the position of your objects.
+
+ #!/usr/bin/perl
+ use My::Item;
+
+ my $item = My::Item->create({ name=>'Matt S. Trout' });
+ # If using grouping_column:
+ my $item = My::Item->create({ name=>'Matt S. Trout', group_id=>1 });
+
+ my $rs = $item->siblings();
+ my @siblings = $item->siblings();
+
+ my $sibling;
+ $sibling = $item->first_sibling();
+ $sibling = $item->last_sibling();
+ $sibling = $item->previous_sibling();
+ $sibling = $item->next_sibling();
+
+ $item->move_previous();
+ $item->move_next();
+ $item->move_first();
+ $item->move_last();
+ $item->move_to( $position );
+
+=head1 DESCRIPTION
+
+This module provides a simple interface for modifying the ordered
+position of DBIx::Class objects.
+
+=head1 AUTO UPDATE
+
+All of the move_* methods automatically update the rows involved in
+the query. This is not configurable and is due to the fact that if you
+move a record it always causes other records in the list to be updated.
+
+=head1 METHODS
+
+=head2 position_column
+
+ __PACKAGE__->position_column('position');
+
+Sets and retrieves the name of the column that stores the
+positional value of each record. Defaults to "position".
+
+=cut
+
+__PACKAGE__->mk_classdata( 'position_column' => 'position' );
+
+=head2 grouping_column
+
+ __PACKAGE__->grouping_column('group_id');
+
+This method specifies a column by which all queries in this
+module are limited. This effectively allows you to have multiple
+ordered lists within the same table.
+
+=cut
+
+__PACKAGE__->mk_classdata( 'grouping_column' );
+
+=head2 siblings
+
+ my $rs = $item->siblings();
+ my @siblings = $item->siblings();
+
+Returns either a resultset or an array of all other objects in the
+same list, excluding the one you called it on.
+
+=cut
+
+sub siblings {
+ my( $self ) = @_;
+ my $position_column = $self->position_column;
+ my $rs = $self->result_source->resultset->search(
+ {
+ $position_column => { '!=' => $self->get_column($position_column) },
+ $self->_grouping_clause(),
+ },
+ { order_by => $self->position_column },
+ );
+ return $rs->all() if (wantarray());
+ return $rs;
+}
+
+=head2 first_sibling
+
+ my $sibling = $item->first_sibling();
+
+Returns the first sibling object, or 0 if the first sibling
+is this sibling.
+
+=cut
+
+sub first_sibling {
+ my( $self ) = @_;
+ return 0 if ($self->get_column($self->position_column())==1);
+ return ($self->result_source->resultset->search(
+ {
+ $self->position_column => 1,
+ $self->_grouping_clause(),
+ },
+ )->all())[0];
+}
+
+=head2 last_sibling
+
+ my $sibling = $item->last_sibling();
+
+Returns the last sibling, or 0 if the last sibling is this
+sibling.
+
+=cut
+
+sub last_sibling {
+ my( $self ) = @_;
+ my $count = $self->result_source->resultset->search({$self->_grouping_clause()})->count();
+ return 0 if ($self->get_column($self->position_column())==$count);
+ return ($self->result_source->resultset->search(
+ {
+ $self->position_column => $count,
+ $self->_grouping_clause(),
+ },
+ )->all())[0];
+}
+
+=head2 previous_sibling
+
+ my $sibling = $item->previous_sibling();
+
+Returns the sibling that resides one position back. 0 is returned
+if the current object is the first one.
+
+=cut
+
+sub previous_sibling {
+ my( $self ) = @_;
+ my $position_column = $self->position_column;
+ my $position = $self->get_column( $position_column );
+ return 0 if ($position==1);
+ return ($self->result_source->resultset->search(
+ {
+ $position_column => $position - 1,
+ $self->_grouping_clause(),
+ }
+ )->all())[0];
+}
+
+=head2 next_sibling
+
+ my $sibling = $item->next_sibling();
+
+Returns the sibling that resides one position forward. 0 is returned
+if the current object is the last one.
+
+=cut
+
+sub next_sibling {
+ my( $self ) = @_;
+ my $position_column = $self->position_column;
+ my $position = $self->get_column( $position_column );
+ my $count = $self->result_source->resultset->search({$self->_grouping_clause()})->count();
+ return 0 if ($position==$count);
+ return ($self->result_source->resultset->search(
+ {
+ $position_column => $position + 1,
+ $self->_grouping_clause(),
+ },
+ )->all())[0];
+}
+
+=head2 move_previous
+
+ $item->move_previous();
+
+Swaps position with the sibling in the previous position. 1 is returned
+on success, and 0 is returned if the object is already
+the first one.
+
+=cut
+
+sub move_previous {
+ my( $self ) = @_;
+ my $position = $self->get_column( $self->position_column() );
+ return $self->move_to( $position - 1 );
+}
+
+=head2 move_next
+
+ $item->move_next();
+
+Swaps position with the sibling in the next position. 1 is returned on
+success, and 0 is returned if the object is already the last in the list.
+
+=cut
+
+sub move_next {
+ my( $self ) = @_;
+ my $position = $self->get_column( $self->position_column() );
+ my $count = $self->result_source->resultset->search({$self->_grouping_clause()})->count();
+ return 0 if ($position==$count);
+ return $self->move_to( $position + 1 );
+}
+
+=head2 move_first
+
+ $item->move_first();
+
+Moves the object to the first position. 1 is returned on
+success, and 0 is returned if the object is already the first.
+
+=cut
+
+sub move_first {
+ my( $self ) = @_;
+ return $self->move_to( 1 );
+}
+
+=head2 move_last
+
+ $item->move_last();
+
+Moves the object to the very last position. 1 is returned on
+success, and 0 is returned if the object is already the last one.
+
+=cut
+
+sub move_last {
+ my( $self ) = @_;
+ my $count = $self->result_source->resultset->search({$self->_grouping_clause()})->count();
+ return $self->move_to( $count );
+}
+
+=head2 move_to
+
+ $item->move_to( $position );
+
+Moves the object to the specified position. 1 is returned on
+success, and 0 is returned if the object is already at the
+specified position.
+
+=cut
+
+sub move_to {
+ my( $self, $to_position ) = @_;
+ my $position_column = $self->position_column;
+ my $from_position = $self->get_column( $position_column );
+ return 0 if ( $to_position < 1 );
+ return 0 if ( $from_position==$to_position );
+ my @between = (
+ ( $from_position < $to_position )
+ ? ( $from_position+1, $to_position )
+ : ( $to_position, $from_position-1 )
+ );
+ my $rs = $self->result_source->resultset->search({
+ $position_column => { -between => [ @between ] },
+ $self->_grouping_clause(),
+ });
+ my $op = ($from_position>$to_position) ? '+' : '-';
+ $rs->update({ $position_column => \"$position_column $op 1" });
+ $self->update({ $position_column => $to_position });
+ return 1;
+}
+
+=head2 insert
+
+Overrides the DBIC insert() method by providing a default
+position number. The default will be the number of rows in
+the table +1, thus positioning the new record at the last position.
+
+=cut
+
+sub insert {
+ my $self = shift;
+ my $position_column = $self->position_column;
+ $self->set_column( $position_column => $self->result_source->resultset->search( {$self->_grouping_clause()} )->count()+1 )
+ if (!$self->get_column($position_column));
+ return $self->next::method( @_ );
+}
+
+=head2 delete
+
+Overrides the DBIC delete() method by first moving the object
+to the last position, then deleting it, thus ensuring the
+integrity of the positions.
+
+=cut
+
+sub delete {
+ my $self = shift;
+ $self->move_last;
+ return $self->next::method( @_ );
+}
+
+=head1 PRIVATE METHODS
+
+These methods are used internally. You should never need
+to use them.
+
+=head2 _grouping_clause
+
+This method returns a name=>value pair for limiting a search
+by the grouping column. If the grouping column is not
+defined then this will return an empty list.
+
+=cut
+
+sub _grouping_clause {
+ my( $self ) = @_;
+ my $col = $self->grouping_column();
+ if ($col) {
+ return ( $col => $self->get_column($col) );
+ }
+ return ();
+}
+
+1;
+__END__
+
+=head1 BUGS
+
+=head2 Unique Constraints
+
+Unique indexes and constraints on the position column are not
+supported at this time. It would make sense to support them,
+but there are some unexpected database issues that make this
+hard to do. The main problem from the author's view is that
+SQLite (the DB engine that we use for testing) does not support
+ORDER BY on updates.
+
+=head2 Race Condition on Insert
+
+If a position is not specified for an insert then a position
+will be chosen based on COUNT(*)+1. But, it first selects the
+count then inserts the record. The space of time between select
+and insert introduces a race condition. To fix this we need the
+ability to lock tables in DBIC. I've added an entry in the TODO
+about this.
+
+=head2 Multiple Moves
+
+Be careful when issuing move_* methods to multiple objects. If
+you've pre-loaded the objects, then when you move one of them
+the positions of the other objects will not reflect their new values
+until you reload them from the database.
+
+There are times when you will want to move objects as groups, such
+as changing the parent of several objects at once - this directly
+conflicts with this problem. One solution is for us to write a
+ResultSet class that supports a parent() method, for example. Another
+solution is to somehow automagically modify the objects that exist
+in the current object's result set to have the new position value.
+
+=head1 AUTHOR
+
+Aran Deltac <bluefeet@cpan.org>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
{ prefetch => [qw/book/],
});
my @book_objs = $obj->books;
+ my $books_rs = $obj->books;
+ ( $books_rs ) = $obj->books_rs;
$obj->add_to_books(\%col_data);
columns. You should pass the name of the column in the foreign class as the
$cond argument, or specify a complete join condition.
-As well as the accessor method, a method named C<< add_to_<relname> >>
-will also be added to your Row items, this allows you to insert new
-related items, using the same mechanism as in L<DBIx::Class::Relationship::Base/"create_related">.
+Three methods are created when you create a has_many relationship. The first
+method is the expected accessor method. The second is almost exactly the same
+as the accessor method but "_rs" is added to the end of the method name. This
+method works just like the normal accessor, except that it returns a resultset
+no matter what, even in list context. The third method, named
+C<< add_to_<relname> >>, will also be added to your Row items; it allows
+you to insert new related items, using the same mechanism as in
+L<DBIx::Class::Relationship::Base/"create_related">.
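+
+For example, using the C<books> relationship from the synopsis above (a
+sketch; the C<title> column is illustrative):
+
+  my @books    = $obj->books;                              # accessor
+  my $books_rs = $obj->books_rs;                           # always a resultset
+  my $new_book = $obj->add_to_books({ title => 'Untitled' });
+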
If you delete an object in a class with a C<has_many> relationship, all
the related objects will be deleted as well. However, any database-level
);
} elsif ($acc_type eq 'multi') {
$meth{$rel} = sub { shift->search_related($rel, @_) };
+ $meth{"${rel}_rs"} = sub { shift->search_related_rs($rel, @_) };
$meth{"add_to_${rel}"} = sub { shift->create_related($rel, @_); };
} else {
$class->throw_exception("No such relationship accessor type $acc_type");
=head2 search_related
- $rs->search_related('relname', $cond, $attrs);
+ @objects = $rs->search_related('relname', $cond, $attrs);
+ $objects_rs = $rs->search_related('relname', $cond, $attrs);
Run a search on a related resultset. The search will be restricted to the
item or items represented by the L<DBIx::Class::ResultSet> it was called
return shift->related_resultset(shift)->search(@_);
}
+=head2 search_related_rs
+
+ ( $objects_rs ) = $rs->search_related_rs('relname', $cond, $attrs);
+
+This method works exactly the same as search_related, except that
+it guarantees a resultset, even in list context.
+
+=cut
+
+sub search_related_rs {
+ return shift->related_resultset(shift)->search_rs(@_);
+}
+
=head2 count_related
$obj->count_related('relname', $cond, $attrs);
return $self->search_related($rel)->find(@_);
}
+=head2 find_or_new_related
+
+ my $new_obj = $obj->find_or_new_related('relname', \%col_data);
+
+Find an item of a related class. If none exists, instantiate a new item of the
+related class. The object will not be saved into your storage until you call
+L<DBIx::Class::Row/insert> on it.
+
+=cut
+
+sub find_or_new_related {
+ my $self = shift;
+ return $self->find_related(@_) || $self->new_related(@_);
+}
+
=head2 find_or_create_related
my $new_obj = $obj->find_or_create_related('relname', \%col_data);
Find or create an item of a related class. See
-L<DBIx::Class::ResultSet/"find_or_create"> for details.
+L<DBIx::Class::ResultSet/find_or_create> for details.
=cut
return (defined($obj) ? $obj : $self->create_related(@_));
}
+=head2 update_or_create_related
+
+ my $updated_item = $obj->update_or_create_related('relname', \%col_data, \%attrs?);
+
+Update or create an item of a related class. See
+L<DBIx::Class::ResultSet/update_or_create> for details.
+
+=cut
+
+sub update_or_create_related {
+ my $self = shift;
+ my $rel = shift;
+ return $self->related_resultset($rel)->update_or_create(@_);
+}
+
=head2 set_from_related
$book->set_from_related('author', $author_obj);
sub belongs_to {
my ($class, $rel, $f_class, $cond, $attrs) = @_;
- eval "require $f_class";
- if ($@) {
- $class->throw_exception($@) unless $@ =~ /Can't locate/;
- }
-
+ $class->ensure_class_loaded($f_class);
# no join condition or just a column name
if (!ref $cond) {
my %f_primaries = map { $_ => 1 } eval { $f_class->primary_columns };
sub has_many {
my ($class, $rel, $f_class, $cond, $attrs) = @_;
-
- eval "require $f_class";
- if ($@) {
- $class->throw_exception($@) unless $@ =~ /Can't locate/;
- }
+
+ $class->ensure_class_loaded($f_class);
unless (ref $cond) {
my ($pri, $too_many) = $class->primary_columns;
sub _has_one {
my ($class, $join_type, $rel, $f_class, $cond, $attrs) = @_;
- eval "require $f_class";
- if ($@) {
- $class->throw_exception($@) unless $@ =~ /Can't locate/;
- }
-
+ $class->ensure_class_loaded($f_class);
unless (ref $cond) {
my ($pri, $too_many) = $class->primary_columns;
$class->throw_exception( "might_have/has_one can only infer join for a single primary key; ${class} has more" )
fallback => 1;
use Data::Page;
use Storable;
+use Data::Dumper;
use Scalar::Util qw/weaken/;
+use DBIx::Class::ResultSetColumn;
use base qw/DBIx::Class/;
__PACKAGE__->load_components(qw/AccessorGroup/);
__PACKAGE__->mk_group_accessors('simple' => qw/result_source result_class/);
my ($source, $attrs) = @_;
weaken $source;
- $attrs = Storable::dclone($attrs || {}); # { %{ $attrs || {} } };
- #use Data::Dumper; warn Dumper($attrs);
- my $alias = ($attrs->{alias} ||= 'me');
-
- $attrs->{columns} ||= delete $attrs->{cols} if $attrs->{cols};
- delete $attrs->{as} if $attrs->{columns};
- $attrs->{columns} ||= [ $source->columns ] unless $attrs->{select};
- $attrs->{select} = [
- map { m/\./ ? $_ : "${alias}.$_" } @{delete $attrs->{columns}}
- ] if $attrs->{columns};
- $attrs->{as} ||= [
- map { m/^\Q$alias.\E(.+)$/ ? $1 : $_ } @{$attrs->{select}}
- ];
- if (my $include = delete $attrs->{include_columns}) {
- push(@{$attrs->{select}}, @$include);
- push(@{$attrs->{as}}, map { m/([^.]+)$/; $1; } @$include);
- }
- #use Data::Dumper; warn Dumper(@{$attrs}{qw/select as/});
-
- $attrs->{from} ||= [ { $alias => $source->from } ];
- $attrs->{seen_join} ||= {};
- my %seen;
- if (my $join = delete $attrs->{join}) {
- foreach my $j (ref $join eq 'ARRAY' ? @$join : ($join)) {
- if (ref $j eq 'HASH') {
- $seen{$_} = 1 foreach keys %$j;
- } else {
- $seen{$j} = 1;
- }
- }
- push(@{$attrs->{from}}, $source->resolve_join(
- $join, $attrs->{alias}, $attrs->{seen_join})
- );
- }
-
- $attrs->{group_by} ||= $attrs->{select} if delete $attrs->{distinct};
- $attrs->{order_by} = [ $attrs->{order_by} ] if
- $attrs->{order_by} and !ref($attrs->{order_by});
- $attrs->{order_by} ||= [];
-
- my $collapse = $attrs->{collapse} || {};
- if (my $prefetch = delete $attrs->{prefetch}) {
- my @pre_order;
- foreach my $p (ref $prefetch eq 'ARRAY' ? @$prefetch : ($prefetch)) {
- if ( ref $p eq 'HASH' ) {
- foreach my $key (keys %$p) {
- push(@{$attrs->{from}}, $source->resolve_join($p, $attrs->{alias}))
- unless $seen{$key};
- }
- } else {
- push(@{$attrs->{from}}, $source->resolve_join($p, $attrs->{alias}))
- unless $seen{$p};
- }
- my @prefetch = $source->resolve_prefetch(
- $p, $attrs->{alias}, {}, \@pre_order, $collapse);
- push(@{$attrs->{select}}, map { $_->[0] } @prefetch);
- push(@{$attrs->{as}}, map { $_->[1] } @prefetch);
- }
- push(@{$attrs->{order_by}}, @pre_order);
- }
- $attrs->{collapse} = $collapse;
-# use Data::Dumper; warn Dumper($collapse) if keys %{$collapse};
if ($attrs->{page}) {
$attrs->{rows} ||= 10;
$attrs->{offset} += ($attrs->{rows} * ($attrs->{page} - 1));
}
+ $attrs->{alias} ||= 'me';
+
bless {
result_source => $source,
result_class => $attrs->{result_class} || $source->result_class,
cond => $attrs->{where},
- from => $attrs->{from},
- collapse => $collapse,
+# from => $attrs->{from},
+# collapse => $collapse,
count => undef,
page => delete $attrs->{page},
pager => undef,
sub search {
my $self = shift;
-
- my $attrs = { %{$self->{attrs}} };
- my $having = delete $attrs->{having};
- $attrs = { %$attrs, %{ pop(@_) } } if @_ > 1 and ref $_[$#_] eq 'HASH';
+ my $rs = $self->search_rs( @_ );
+ return (wantarray ? $rs->all : $rs);
+}
+
+=head2 search_rs
+
+=over 4
+
+=item Arguments: $cond, \%attrs?
+
+=item Return Value: $resultset
+
+=back
+
+This method does exactly the same thing as search() except it will
+always return a resultset, even in list context.
+
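+For example (a sketch; a CD source with a C<year> column is assumed):
+
+  my $rs  = $schema->resultset('CD')->search_rs({ year => 2005 });
+  my @all = $rs->all;   # plain search() in list context would return rows directly
+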
+=cut
+
+sub search_rs {
+ my $self = shift;
+
+ my $our_attrs = { %{$self->{attrs}} };
+ my $having = delete $our_attrs->{having};
+ my $attrs = {};
+ $attrs = pop(@_) if @_ > 1 and ref $_[$#_] eq 'HASH';
+
+ # merge new attrs into old
+ foreach my $key (qw/join prefetch/) {
+ next unless (exists $attrs->{$key});
+ if (exists $our_attrs->{$key}) {
+ $our_attrs->{$key} = $self->_merge_attr($our_attrs->{$key}, $attrs->{$key});
+ } else {
+ $our_attrs->{$key} = $attrs->{$key};
+ }
+ delete $attrs->{$key};
+ }
+
+ if (exists $our_attrs->{prefetch}) {
+ $our_attrs->{join} = $self->_merge_attr($our_attrs->{join}, $our_attrs->{prefetch}, 1);
+ }
+
+ my $new_attrs = { %{$our_attrs}, %{$attrs} };
+ # merge new where and having into old
my $where = (@_
? ((@_ == 1 || ref $_[0] eq "HASH")
? shift
: {@_}))
: undef());
if (defined $where) {
- $attrs->{where} = (defined $attrs->{where}
+ $new_attrs->{where} = (defined $new_attrs->{where}
? { '-and' =>
[ map { ref $_ eq 'ARRAY' ? [ -or => $_ ] : $_ }
- $where, $attrs->{where} ] }
+ $where, $new_attrs->{where} ] }
: $where);
}
if (defined $having) {
- $attrs->{having} = (defined $attrs->{having}
+ $new_attrs->{having} = (defined $new_attrs->{having}
? { '-and' =>
[ map { ref $_ eq 'ARRAY' ? [ -or => $_ ] : $_ }
- $having, $attrs->{having} ] }
+ $having, $new_attrs->{having} ] }
: $having);
}
- my $rs = (ref $self)->new($self->result_source, $attrs);
+ my $rs = (ref $self)->new($self->result_source, $new_attrs);
+ $rs->{_parent_rs} = $self->{_parent_rs} if ($self->{_parent_rs}); #XXX - hack to pass through parent of related resultsets
unless (@_) { # no search, effectively just a clone
my $rows = $self->get_cache;
}
}
- return (wantarray ? $rs->all : $rs);
+ return $rs;
}
=head2 search_literal
=back
-Finds a row based on its primary key or unique constraint. For example:
+Finds a row based on its primary key or unique constraint. For example, to find
+a row by its primary key:
my $cd = $schema->resultset('CD')->find(5);
-Also takes an optional C<key> attribute, to search by a specific key or unique
-constraint. For example:
+You can also find a row by a specific unique constraint using the C<key>
+attribute. For example:
+
+ my $cd = $schema->resultset('CD')->find('Massive Attack', 'Mezzanine', { key => 'artist_title' });
+
+Additionally, you can specify the columns explicitly by name:
my $cd = $schema->resultset('CD')->find(
{
{ key => 'artist_title' }
);
-See also L</find_or_create> and L</update_or_create>.
+If no C<key> is specified and you explicitly name columns, it searches on all
+unique constraints defined on the source, including the primary key.
+
+If the C<key> is specified as C<primary>, it searches only on the primary key.
+
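+For example, with no C<key> specified the following searches all of the
+source's unique constraints, using the columns from the C<artist_title>
+example above:
+
+  my $cd = $schema->resultset('CD')->find(
+    {
+      artist => 'Massive Attack',
+      title  => 'Mezzanine',
+    }
+  );
+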
+See also L</find_or_create> and L</update_or_create>. For information on how to
+declare unique constraints, see
+L<DBIx::Class::ResultSource/add_unique_constraint>.
=cut
sub find {
- my ($self, @vals) = @_;
- my $attrs = (@vals > 1 && ref $vals[$#vals] eq 'HASH' ? pop(@vals) : {});
+ my $self = shift;
+ my $attrs = (@_ > 1 && ref $_[$#_] eq 'HASH' ? pop(@_) : {});
+
+ # Parse out a hash from input
+ my @cols = exists $attrs->{key}
+ ? $self->result_source->unique_constraint_columns($attrs->{key})
+ : $self->result_source->primary_columns;
- my @cols = $self->result_source->primary_columns;
- if (exists $attrs->{key}) {
- my %uniq = $self->result_source->unique_constraints;
+ my $hash;
+ if (ref $_[0] eq 'HASH') {
+ $hash = { %{$_[0]} };
+ }
+ elsif (@_ == @cols) {
+ $hash = {};
+ @{$hash}{@cols} = @_;
+ }
+ elsif (@_) {
+ # For backwards compatibility
+ $hash = {@_};
+ }
+ else {
$self->throw_exception(
- "Unknown key $attrs->{key} on '" . $self->result_source->name . "'"
- ) unless exists $uniq{$attrs->{key}};
- @cols = @{ $uniq{$attrs->{key}} };
+ "Arguments to find must be a hashref or match the number of columns in the "
+ . (exists $attrs->{key} ? "$attrs->{key} unique constraint" : "primary key")
+ );
}
- #use Data::Dumper; warn Dumper($attrs, @vals, @cols);
+
+ # Check the hash we just parsed against our source's unique constraints
+ my @constraint_names = exists $attrs->{key}
+ ? ($attrs->{key})
+ : $self->result_source->unique_constraint_names;
$self->throw_exception(
"Can't find unless a primary key or unique constraint is defined"
- ) unless @cols;
-
- my $query;
- if (ref $vals[0] eq 'HASH') {
- $query = { %{$vals[0]} };
- } elsif (@cols == @vals) {
- $query = {};
- @{$query}{@cols} = @vals;
- } else {
- $query = {@vals};
- }
- foreach my $key (grep { ! m/\./ } keys %$query) {
- $query->{"$self->{attrs}{alias}.$key"} = delete $query->{$key};
+ ) unless @constraint_names;
+
+ my @unique_queries;
+ foreach my $name (@constraint_names) {
+ my @unique_cols = $self->result_source->unique_constraint_columns($name);
+ my $unique_query = $self->_build_unique_query($hash, \@unique_cols);
+
+ # Add the ResultSet's alias
+ foreach my $key (grep { ! m/\./ } keys %$unique_query) {
+ my $alias = $self->{attrs}->{alias};
+ $unique_query->{"$alias.$key"} = delete $unique_query->{$key};
+ }
+
+ push @unique_queries, $unique_query if %$unique_query;
}
- #warn Dumper($query);
-
+
+ # Handle cases where the ResultSet already defines the query
+ my $query = @unique_queries ? \@unique_queries : undef;
+
+ # Run the query
if (keys %$attrs) {
- my $rs = $self->search($query,$attrs);
- return keys %{$rs->{collapse}} ? $rs->next : $rs->single;
- } else {
- return keys %{$self->{collapse}} ?
- $self->search($query)->next :
- $self->single($query);
+ my $rs = $self->search($query, $attrs);
+ $rs->_resolve;
+ return keys %{$rs->{_attrs}->{collapse}} ? $rs->next : $rs->single;
+ }
+ else {
+ $self->_resolve;
+ return (keys %{$self->{_attrs}->{collapse}})
+ ? $self->search($query)->next
+ : $self->single($query);
}
}
+# _build_unique_query
+#
+# Constrain the specified query hash based on the specified column names.
+
+sub _build_unique_query {
+ my ($self, $query, $unique_cols) = @_;
+
+ my %unique_query =
+ map { $_ => $query->{$_} }
+ grep { exists $query->{$_} }
+ @$unique_cols;
+
+ return \%unique_query;
+}
+
=head2 search_related
=over 4
sub cursor {
my ($self) = @_;
- my $attrs = { %{$self->{attrs}} };
+
+ $self->_resolve;
+ my $attrs = { %{$self->{_attrs}} };
return $self->{cursor}
- ||= $self->result_source->storage->select($self->{from}, $attrs->{select},
+ ||= $self->result_source->storage->select($attrs->{from}, $attrs->{select},
$attrs->{where},$attrs);
}
my $cd = $schema->resultset('CD')->single({ year => 2001 });
Inflates the first result without creating a cursor if the resultset has
-any records in it; if not returns nothing. Used by find() as an optimisation.
+any records in it; if not, returns nothing. Used by L</find> as an optimisation.
Can optionally take an additional condition *only* - this is a fast-code-path
method; if you need to add extra joins or similar call ->search and then
sub single {
my ($self, $where) = @_;
- my $attrs = { %{$self->{attrs}} };
+ $self->_resolve;
+ my $attrs = { %{$self->{_attrs}} };
if ($where) {
if (defined $attrs->{where}) {
$attrs->{where} = {
$attrs->{where} = $where;
}
}
+
my @data = $self->result_source->storage->select_single(
- $self->{from}, $attrs->{select},
+ $attrs->{from}, $attrs->{select},
$attrs->{where},$attrs);
return (@data ? $self->_construct_object(@data) : ());
}
+=head2 get_column
+
+=over 4
+
+=item Arguments: $cond?
+
+=item Return Value: $resultsetcolumn
+
+=back
+
+ my $max_length = $rs->get_column('length')->max;
+
+Returns a ResultSetColumn instance for $column based on $self
+
+=cut
+
+sub get_column {
+ my ($self, $column) = @_;
+
+ my $new = DBIx::Class::ResultSetColumn->new($self, $column);
+ return $new;
+}
=head2 search_like
@{delete $self->{stashed_row}} :
$self->cursor->next
);
-# warn Dumper(\@row); use Data::Dumper;
return unless (@row);
return $self->_construct_object(@row);
}
+sub _resolve {
+ my $self = shift;
+
+ return if(exists $self->{_attrs}); #return if _resolve has already been called
+
+ my $attrs = $self->{attrs};
+ my $source = ($self->{_parent_rs}) ? $self->{_parent_rs} : $self->{result_source};
+
+ # XXX - lose storable dclone
+ my $record_filter = delete $attrs->{record_filter};
+ $attrs = Storable::dclone($attrs || {}); # { %{ $attrs || {} } };
+ $attrs->{record_filter} = $record_filter if ($record_filter);
+ $self->{attrs}->{record_filter} = $record_filter if ($record_filter);
+
+ my $alias = $attrs->{alias};
+
+ $attrs->{columns} ||= delete $attrs->{cols} if $attrs->{cols};
+ delete $attrs->{as} if $attrs->{columns};
+ $attrs->{columns} ||= [ $self->{result_source}->columns ] unless $attrs->{select};
+ my $select_alias = ($self->{_parent_rs}) ? $self->{attrs}->{_live_join} : $alias;
+ $attrs->{select} = [
+ map { m/\./ ? $_ : "${select_alias}.$_" } @{delete $attrs->{columns}}
+ ] if $attrs->{columns};
+ $attrs->{as} ||= [
+ map { m/^\Q$alias.\E(.+)$/ ? $1 : $_ } @{$attrs->{select}}
+ ];
+ if (my $include = delete $attrs->{include_columns}) {
+ push(@{$attrs->{select}}, @$include);
+ push(@{$attrs->{as}}, map { m/([^.]+)$/; $1; } @$include);
+ }
+
+ $attrs->{from} ||= [ { $alias => $source->from } ];
+ $attrs->{seen_join} ||= {};
+ my %seen;
+ if (my $join = delete $attrs->{join}) {
+ foreach my $j (ref $join eq 'ARRAY' ? @$join : ($join)) {
+ if (ref $j eq 'HASH') {
+ $seen{$_} = 1 foreach keys %$j;
+ } else {
+ $seen{$j} = 1;
+ }
+ }
+
+ push(@{$attrs->{from}}, $source->resolve_join($join, $attrs->{alias}, $attrs->{seen_join}));
+ }
+ $attrs->{group_by} ||= $attrs->{select} if delete $attrs->{distinct};
+ $attrs->{order_by} = [ $attrs->{order_by} ] if
+ $attrs->{order_by} and !ref($attrs->{order_by});
+ $attrs->{order_by} ||= [];
+
+ if(my $seladds = delete($attrs->{'+select'})) {
+ my @seladds = (ref($seladds) eq 'ARRAY' ? @$seladds : ($seladds));
+ $attrs->{select} = [
+ @{ $attrs->{select} },
+ map { (m/\./ || ref($_)) ? $_ : "${alias}.$_" } @seladds
+ ];
+ }
+ if(my $asadds = delete($attrs->{'+as'})) {
+ my @asadds = (ref($asadds) eq 'ARRAY' ? @$asadds : ($asadds));
+ $attrs->{as} = [ @{ $attrs->{as} }, @asadds ];
+ }
+
+ my $collapse = $attrs->{collapse} || {};
+ if (my $prefetch = delete $attrs->{prefetch}) {
+ my @pre_order;
+ foreach my $p (ref $prefetch eq 'ARRAY' ? @$prefetch : ($prefetch)) {
+ if ( ref $p eq 'HASH' ) {
+ foreach my $key (keys %$p) {
+ push(@{$attrs->{from}}, $source->resolve_join($p, $attrs->{alias}))
+ unless $seen{$key};
+ }
+ } else {
+ push(@{$attrs->{from}}, $source->resolve_join($p, $attrs->{alias}))
+ unless $seen{$p};
+ }
+ my @prefetch = $source->resolve_prefetch(
+ $p, $attrs->{alias}, {}, \@pre_order, $collapse);
+ push(@{$attrs->{select}}, map { $_->[0] } @prefetch);
+ push(@{$attrs->{as}}, map { $_->[1] } @prefetch);
+ }
+ push(@{$attrs->{order_by}}, @pre_order);
+ }
+ $attrs->{collapse} = $collapse;
+ $self->{_attrs} = $attrs;
+}
+
+sub _merge_attr {
+ my ($self, $a, $b, $is_prefetch) = @_;
+
+ return $b unless $a;
+ if (ref $b eq 'HASH' && ref $a eq 'HASH') {
+ foreach my $key (keys %{$b}) {
+ if (exists $a->{$key}) {
+ $a->{$key} = $self->_merge_attr($a->{$key}, $b->{$key}, $is_prefetch);
+ } else {
+ $a->{$key} = delete $b->{$key};
+ }
+ }
+ return $a;
+ } else {
+ $a = [$a] unless (ref $a eq 'ARRAY');
+ $b = [$b] unless (ref $b eq 'ARRAY');
+
+ my $hash = {};
+ my $array = [];
+ foreach ($a, $b) {
+ foreach my $element (@{$_}) {
+ if (ref $element eq 'HASH') {
+ $hash = $self->_merge_attr($hash, $element, $is_prefetch);
+ } elsif (ref $element eq 'ARRAY') {
+ $array = [@{$array}, @{$element}];
+ } else {
+ if (($b == $_) && $is_prefetch) {
+ $self->_merge_array($array, $element, $is_prefetch);
+ } else {
+ push(@{$array}, $element);
+ }
+ }
+ }
+ }
+
+ if ((keys %{$hash}) && (scalar(@{$array}) > 0)) {
+ return [$hash, @{$array}];
+ } else {
+ return (keys %{$hash}) ? $hash : $array;
+ }
+ }
+}
+
+sub _merge_array {
+ my ($self, $a, $b) = @_;
+
+ $b = [$b] unless (ref $b eq 'ARRAY');
+ # add elements from @{$b} to @{$a} which aren't already in @{$a}
+ foreach my $b_element (@{$b}) {
+ push(@{$a}, $b_element) unless grep {$b_element eq $_} @{$a};
+ }
+}
+
sub _construct_object {
my ($self, @row) = @_;
- my @as = @{ $self->{attrs}{as} };
-
+ my @as = @{ $self->{_attrs}{as} };
+
my $info = $self->_collapse_result(\@as, \@row);
-
my $new = $self->result_class->inflate_result($self->result_source, @$info);
-
- $new = $self->{attrs}{record_filter}->($new)
- if exists $self->{attrs}{record_filter};
+ $new = $self->{_attrs}{record_filter}->($new)
+ if exists $self->{_attrs}{record_filter};
return $new;
}
sub _collapse_result {
my ($self, $as, $row, $prefix) = @_;
+ my $live_join = $self->{attrs}->{_live_join} ||="";
my %const;
my @copy = @$row;
my $info = [ {}, {} ];
foreach my $key (keys %const) {
- if (length $key) {
+ if (length $key && $key ne $live_join) {
my $target = $info;
my @parts = split(/\./, $key);
foreach my $p (@parts) {
if (defined $prefix) {
@collapse = map {
m/^\Q${prefix}.\E(.+)$/ ? ($1) : ()
- } keys %{$self->{collapse}}
+ } keys %{$self->{_attrs}->{collapse}}
} else {
- @collapse = keys %{$self->{collapse}};
+ @collapse = keys %{$self->{_attrs}->{collapse}};
};
if (@collapse) {
$target = $target->[1]->{$p} ||= [];
}
my $c_prefix = (defined($prefix) ? "${prefix}.${c}" : $c);
- my @co_key = @{$self->{collapse}{$c_prefix}};
+ my @co_key = @{$self->{_attrs}->{collapse}{$c_prefix}};
my %co_check = map { ($_, $target->[0]->{$_}); } @co_key;
my $tree = $self->_collapse_result($as, $row, $c_prefix);
my (@final, @raw);
$row = $self->{stashed_row} = \@raw;
$tree = $self->_collapse_result($as, $row, $c_prefix);
}
- @$target = (@final ? @final : [ {}, {} ]);
+ @$target = (@final ? @final : [ {}, {} ]);
# single empty result to indicate an empty prefetched has_many
}
-
return $info;
}
sub _count { # Separated out so pager can get the full count
my $self = shift;
my $select = { count => '*' };
- my $attrs = { %{ $self->{attrs} } };
+
+ $self->_resolve;
+ my $attrs = { %{ $self->{_attrs} } };
if (my $group_by = delete $attrs->{group_by}) {
delete $attrs->{having};
my @distinct = (ref $group_by ? @$group_by : ($group_by));
}
$select = { count => { distinct => \@distinct } };
- #use Data::Dumper; die Dumper $select;
}
$attrs->{select} = $select;
# offset, order by and page are not needed to count. record_filter is cdbi
delete $attrs->{$_} for qw/rows offset order_by page pager record_filter/;
-
my ($count) = (ref $self)->new($self->result_source, $attrs)->cursor->next;
return $count;
}
my @obj;
- if (keys %{$self->{collapse}}) {
+ # TODO: don't call resolve here
+ $self->_resolve;
+ if (keys %{$self->{_attrs}->{collapse}}) {
+# if ($self->{attrs}->{prefetch}) {
# Using $self->cursor->all is really just an optimisation.
# If we're collapsing has_many prefetches it probably makes
# very little difference, and this is cleaner than hacking
# _construct_object to survive the approach
- $self->cursor->reset;
my @row = $self->cursor->next;
while (@row) {
push(@obj, $self->_construct_object(@row));
sub reset {
my ($self) = @_;
+ delete $self->{_attrs} if (exists $self->{_attrs});
+
$self->{all_cache_position} = 0;
$self->cursor->reset;
return $self;
$cond->{-and} = [];
my @cond = @{$self->{cond}{-and}};
- for (my $i = 0; $i < @cond - 1; $i++) {
+ for (my $i = 0; $i <= @cond - 1; $i++) {
my $entry = $cond[$i];
my %hash;
}
else {
$entry =~ /([^.]+)$/;
- $hash{$entry} = $cond[++$i];
+ $hash{$1} = $cond[++$i];
}
push @{$cond->{-and}}, \%hash;
return $obj;
}
+=head2 find_or_new
+
+=over 4
+
+=item Arguments: \%vals, \%attrs?
+
+=item Return Value: $object
+
+=back
+
+Find an existing record from this resultset. If none exists, instantiate a new
+result object and return it. The object will not be saved into your storage
+until you call L<DBIx::Class::Row/insert> on it.
+
+If you want objects to be saved immediately, use L</find_or_create> instead.
+
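+A minimal sketch (an Artist source with a C<name> column is assumed):
+
+  my $artist = $schema->resultset('Artist')->find_or_new({ name => 'Shiny New Band' });
+  $artist->insert unless $artist->in_storage;
+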
+=cut
+
+sub find_or_new {
+ my $self = shift;
+ my $attrs = (@_ > 1 && ref $_[$#_] eq 'HASH' ? pop(@_) : {});
+ my $hash = ref $_[0] eq 'HASH' ? shift : {@_};
+ my $exists = $self->find($hash, $attrs);
+ return defined $exists ? $exists : $self->new_result($hash);
+}
+
=head2 create
=over 4
{ key => 'artist_title' }
);
-See also L</find> and L</update_or_create>.
+See also L</find> and L</update_or_create>. For information on how to declare
+unique constraints, see L<DBIx::Class::ResultSource/add_unique_constraint>.
=cut
If the C<key> is specified as C<primary>, it searches only on the primary key.
-See also L</find> and L</find_or_create>.
+See also L</find> and L</find_or_create>. For information on how to declare
+unique constraints, see L<DBIx::Class::ResultSource/add_unique_constraint>.
=cut
my $attrs = (@_ > 1 && ref $_[$#_] eq 'HASH' ? pop(@_) : {});
my $hash = ref $_[0] eq 'HASH' ? shift : {@_};
- my %unique_constraints = $self->result_source->unique_constraints;
- my @constraint_names = (exists $attrs->{key}
- ? ($attrs->{key})
- : keys %unique_constraints);
-
- my @unique_hashes;
- foreach my $name (@constraint_names) {
- my @unique_cols = @{ $unique_constraints{$name} };
- my %unique_hash =
- map { $_ => $hash->{$_} }
- grep { exists $hash->{$_} }
- @unique_cols;
-
- push @unique_hashes, \%unique_hash
- if (scalar keys %unique_hash == scalar @unique_cols);
- }
-
- if (@unique_hashes) {
- my $row = $self->single(\@unique_hashes);
- if (defined $row) {
- $row->update($hash);
- return $row;
- }
+ my $row = $self->find($hash, $attrs);
+ if (defined $row) {
+ $row->update($hash);
+ return $row;
}
return $self->create($hash);
sub set_cache {
my ( $self, $data ) = @_;
$self->throw_exception("set_cache requires an arrayref")
- if defined($data) && (ref $data ne 'ARRAY');
+ if defined($data) && (ref $data ne 'ARRAY');
$self->{all_cache} = $data;
}
sub related_resultset {
my ( $self, $rel ) = @_;
+
$self->{related_resultsets} ||= {};
return $self->{related_resultsets}{$rel} ||= do {
- #warn "fetching related resultset for rel '$rel'";
+ #warn "fetching related resultset for rel '$rel' " . $self->result_source->{name};
my $rel_obj = $self->result_source->relationship_info($rel);
$self->throw_exception(
"search_related: result source '" . $self->result_source->name .
"' has no such relationship ${rel}")
unless $rel_obj; #die Dumper $self->{attrs};
- my $rs = $self->search(undef, { join => $rel });
- my $alias = defined $rs->{attrs}{seen_join}{$rel}
- && $rs->{attrs}{seen_join}{$rel} > 1
- ? join('_', $rel, $rs->{attrs}{seen_join}{$rel})
- : $rel;
-
- $self->result_source->schema->resultset($rel_obj->{class}
+ my $rs = $self->result_source->schema->resultset($rel_obj->{class}
)->search( undef,
- { %{$rs->{attrs}},
- alias => $alias,
+ { %{$self->{attrs}},
select => undef,
- as => undef }
+ as => undef,
+ join => $rel,
+ _live_join => $rel }
);
+
+ # keep reference of the original resultset
+ $rs->{_parent_rs} = $self->result_source;
+ return $rs;
};
}
attribute, the column names returned are storage-dependent. E.g. MySQL would
return a column named C<count(employeeid)> in the above example.
+=head2 +select
+
+=over 4
+
+Indicates additional columns to be selected from storage. Works the same as
+L</select> but adds columns to the selection.
+
+=back
+
+=head2 +as
+
+=over 4
+
+Indicates additional column names for those added via L</+select>.
+
+=back
+
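+For example (a sketch; the CD source and its C<title> column are assumed, as
+in the examples elsewhere in this document):
+
+  my $rs = $schema->resultset('CD')->search(undef, {
+    '+select' => [ { length => 'title' } ],
+    '+as'     => [ 'title_length' ],
+  });
+
+Each row then exposes the extra value via C<< $row->get_column('title_length') >>.
+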
=head2 as
=over 4
--- /dev/null
+package DBIx::Class::ResultSetColumn;
+use strict;
+use warnings;
+use base 'DBIx::Class';
+
+=head1 NAME
+
+DBIx::Class::ResultSetColumn - helpful methods for messing with a single
+column of the resultset
+
+=head1 SYNOPSIS
+
+ $rs = $schema->resultset('CD')->search({ artist => 'Tool' });
+ $rs_column = $rs->get_column('year');
+ $max_year = $rs_column->max; #returns latest year
+
+=head1 DESCRIPTION
+
+A convenience class used to perform operations on a specific column of a resultset.
+
+=cut
+
+=head1 METHODS
+
+=head2 new
+
+ my $obj = DBIx::Class::ResultSetColumn->new($rs, $column);
+
+Creates a new resultset column object from the resultset and column passed as params
+
+=cut
+
+sub new {
+ my ($class, $rs, $column) = @_;
+ $class = ref $class if ref $class;
+
+ my $object_ref = { _column => $column,
+ _parent_resultset => $rs };
+
+ my $new = bless $object_ref, $class;
+ $new->throw_exception("column must be supplied") unless ($column);
+ return $new;
+}
+
+=head2 next
+
+=over 4
+
+=item Arguments: none
+
+=item Return Value: $value
+
+=back
+
+Returns the next value of the column in the resultset (C<undef> if there is none).
+
+Much like $rs->next but just returning the one value
+
+=cut
+
+sub next {
+ my $self = shift;
+
+ $self->{_resultset} = $self->{_parent_resultset}->search(undef, {select => [$self->{_column}], as => [$self->{_column}]}) unless ($self->{_resultset});
+ my ($row) = $self->{_resultset}->cursor->next;
+ return $row;
+}
+
+=head2 all
+
+=over 4
+
+=item Arguments: none
+
+=item Return Value: @values
+
+=back
+
+Returns all values of the column in the resultset (or an empty list if there are none).
+
+Much like $rs->all but returns values rather than row objects
+
+=cut
+
+sub all {
+ my $self = shift;
+ return map {$_->[0]} $self->{_parent_resultset}->search(undef, {select => [$self->{_column}], as => [$self->{_column}]})->cursor->all;
+}
+
+=head2 min
+
+=over 4
+
+=item Arguments: none
+
+=item Return Value: $lowest_value
+
+=back
+
+Wrapper for ->func. Returns the lowest value of the column in the resultset (C<undef> if there are none).
+
+=cut
+
+sub min {
+ my $self = shift;
+ return $self->func('MIN');
+}
+
+=head2 max
+
+=over 4
+
+=item Arguments: none
+
+=item Return Value: $highest_value
+
+=back
+
+Wrapper for ->func. Returns the highest value of the column in the resultset (C<undef> if there are none).
+
+=cut
+
+sub max {
+ my $self = shift;
+ return $self->func('MAX');
+}
+
+=head2 sum
+
+=over 4
+
+=item Arguments: none
+
+=item Return Value: $sum_of_values
+
+=back
+
+Wrapper for ->func. Returns the sum of all the values in the column of the resultset. Use on varchar-like columns at your own risk.
+
+=cut
+
+sub sum {
+ my $self = shift;
+ return $self->func('SUM');
+}
+
+=head2 func
+
+=over 4
+
+=item Arguments: $function
+
+=item Return Value: $function_return_value
+
+=back
+
+Runs a query using the function on the column and returns the value. For
+example:
+
+  $rs = $schema->resultset("CD")->search({});
+  $length = $rs->get_column('title')->func('LENGTH');
+
+Produces the following SQL:
+
+  SELECT LENGTH( title ) from cd me
+
+=cut
+
+sub func {
+ my $self = shift;
+ my $function = shift;
+
+ my ($row) = $self->{_parent_resultset}->search(undef, {select => {$function => $self->{_column}}, as => [$self->{_column}]})->cursor->next;
+ return $row;
+}
+
+1;
+
+=head1 AUTHORS
+
+Luke Saunders <luke.saunders@gmail.com>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
sub find { shift->resultset_instance->find(@_); }
sub create { shift->resultset_instance->create(@_); }
sub find_or_create { shift->resultset_instance->find_or_create(@_); }
+sub find_or_new { shift->resultset_instance->find_or_new(@_); }
sub update_or_create { shift->resultset_instance->update_or_create(@_); }
1;
schema from _relationships/);
__PACKAGE__->mk_group_accessors('component_class' => qw/resultset_class
- result_class/);
+ result_class source_name/);
=head1 NAME
sub add_columns {
my ($self, @cols) = @_;
$self->_ordered_columns(\@cols) unless $self->_ordered_columns;
-
+
my @added;
my $columns = $self->_columns;
while (my $col = shift @cols) {
{
$self->{_columns_info_loaded}++;
my $info;
+ my $lc_info;
# eval for the case of storage without table
- eval { $info = $self->storage->columns_info_for($self->from) };
+ eval { $info = $self->storage->columns_info_for( $self->from, keys %{$self->_columns} ) };
unless ($@) {
+ for my $realcol ( keys %{$info} ) {
+ $lc_info->{lc $realcol} = $info->{$realcol};
+ }
foreach my $col ( keys %{$self->_columns} ) {
- foreach my $i ( keys %{$info->{$col}} ) {
- $self->_columns->{$col}{$i} = $info->{$col}{$i};
- }
+ $self->_columns->{$col} = $info->{$col} || $lc_info->{lc $col};
}
}
}
return @{$self->{_ordered_columns}||[]};
}
+=head2 remove_columns
+
+ $table->remove_columns(qw/col1 col2 col3/);
+
+Removes columns from the result source.
+
+=head2 remove_column
+
+ $table->remove_column('col');
+
+Convenience alias to remove_columns.
+
+=cut
+
+sub remove_columns {
+ my ($self, @cols) = @_;
+
+ return unless $self->_ordered_columns;
+
+ my $columns = $self->_columns;
+ my @remaining;
+
+ foreach my $col (@{$self->_ordered_columns}) {
+ push @remaining, $col unless grep { $_ eq $col } @cols;
+ }
+
+ foreach (@cols) {
+ delete $columns->{$_};
+ };
+
+ $self->_ordered_columns(\@remaining);
+}
+
+*remove_column = \&remove_columns;
+
=head2 set_primary_key
=over 4
=head2 add_unique_constraint
Declare a unique constraint on this source. Call once for each unique
-constraint. Unique constraints are used when you call C<find> on a
-L<DBIx::Class::ResultSet>. Only columns in the constraint are searched,
-for example:
+constraint.
# For UNIQUE (column1, column2)
__PACKAGE__->add_unique_constraint(
constraint_name => [ qw/column1 column2/ ],
);
+Unique constraints are used, for example, when you call
+L<DBIx::Class::ResultSet/find>. Only columns in the constraint are searched.
+
=cut
sub add_unique_constraint {
return %{shift->_unique_constraints||{}};
}
+=head2 unique_constraint_names
+
+Returns the list of unique constraint names defined on this source.
+
+=cut
+
+sub unique_constraint_names {
+ my ($self) = @_;
+
+ my %unique_constraints = $self->unique_constraints;
+
+ return keys %unique_constraints;
+}
+
+=head2 unique_constraint_columns
+
+Returns the list of columns that make up the specified unique constraint.
+
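+A short sketch, where C<$source> is the result source and C<constraint_name>
+is the constraint declared under L</add_unique_constraint> above:
+
+  my @names = $source->unique_constraint_names;                      # includes 'constraint_name'
+  my @cols  = $source->unique_constraint_columns('constraint_name'); # ('column1', 'column2')
+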
+=cut
+
+sub unique_constraint_columns {
+ my ($self, $constraint_name) = @_;
+
+ my %unique_constraints = $self->unique_constraints;
+
+ $self->throw_exception(
+ "Unknown unique constraint $constraint_name on '" . $self->name . "'"
+ ) unless exists $unique_constraints{$constraint_name};
+
+ return @{ $unique_constraints{$constraint_name} };
+}
+
=head2 from
Returns an expression of the source to be supplied to storage to specify
An arrayref containing a list of accessors in the foreign class to proxy in
the main class. If, for example, you do the following:
-
+
CD->might_have(liner_notes => 'LinerNotes', undef, {
proxy => [ qw/notes/ ],
});
-
+
Then, assuming LinerNotes has an accessor named notes, you can do:
my $cd = CD->find(1);
my $f_source = $self->schema->source($f_source_name);
unless ($f_source) {
- eval "require $f_source_name;";
- if ($@) {
- die $@ unless $@ =~ /Can't locate/;
- }
+ $self->ensure_class_loaded($f_source_name);
$f_source = $f_source_name->result_source;
#my $s_class = ref($self->schema);
#$f_source_name =~ m/^${s_class}::(.*)$/;
return exists $self->_relationships->{$rel};
}
+=head2 reverse_relationship_info
+
+=over 4
+
+=item Arguments: $relname
+
+=back
+
+Returns a hash reference of relationship information for the other side of
+the specified relationship name, keyed by the remote relationship name.
+
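+For example, assuming a hypothetical CD source whose C<artist> relationship
+points at an Artist source that declares a C<cds> relationship back to CD:
+
+  my $info = $cd_source->reverse_relationship_info('artist');
+  # $info is e.g. { cds => { ...relationship info for Artist's 'cds'... } }
+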
+=cut
+
+sub reverse_relationship_info {
+ my ($self, $rel) = @_;
+ my $rel_info = $self->relationship_info($rel);
+ my $ret = {};
+
+ return $ret unless ((ref $rel_info->{cond}) eq 'HASH');
+
+ my @cond = keys(%{$rel_info->{cond}});
+ my @refkeys = map {/^\w+\.(\w+)$/} @cond;
+ my @keys = map {$rel_info->{cond}->{$_} =~ /^\w+\.(\w+)$/} @cond;
+
+ # Get the related result source for this relationship
+ my $othertable = $self->related_source($rel);
+
+ # Get all the relationships for that source that related to this source
+ # whose foreign column set are our self columns on $rel and whose self
+ # columns are our foreign columns on $rel.
+ my @otherrels = $othertable->relationships();
+ my $otherrelationship;
+ foreach my $otherrel (@otherrels) {
+ my $otherrel_info = $othertable->relationship_info($otherrel);
+
+ my $back = $othertable->related_source($otherrel);
+ next unless $back->name eq $self->name;
+
+ my @othertestconds;
+
+ if (ref $otherrel_info->{cond} eq 'HASH') {
+ @othertestconds = ($otherrel_info->{cond});
+ }
+ elsif (ref $otherrel_info->{cond} eq 'ARRAY') {
+ @othertestconds = @{$otherrel_info->{cond}};
+ }
+ else {
+ next;
+ }
+
+ foreach my $othercond (@othertestconds) {
+ my @other_cond = keys(%$othercond);
+ my @other_refkeys = map {/^\w+\.(\w+)$/} @other_cond;
+ my @other_keys = map {$othercond->{$_} =~ /^\w+\.(\w+)$/} @other_cond;
+ next if (!$self->compare_relationship_keys(\@refkeys, \@other_keys) ||
+ !$self->compare_relationship_keys(\@other_refkeys, \@keys));
+ $ret->{$otherrel} = $otherrel_info;
+ }
+ }
+ return $ret;
+}
+
+=head2 compare_relationship_keys
+
+=over 4
+
+=item Arguments: $keys1, $keys2
+
+=back
+
+Returns true if both sets of keynames are the same, false otherwise.
+
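+For example (the order of the key names does not matter):
+
+  $source->compare_relationship_keys([qw/id1 id2/], [qw/id2 id1/]); # true
+  $source->compare_relationship_keys([qw/id1/], [qw/id1 id2/]);     # false
+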
+=cut
+
+sub compare_relationship_keys {
+ my ($self, $keys1, $keys2) = @_;
+
+ # Make sure every keys1 is in keys2
+ my $found;
+ foreach my $key (@$keys1) {
+ $found = 0;
+ foreach my $prim (@$keys2) {
+ if ($prim eq $key) {
+ $found = 1;
+ last;
+ }
+ }
+ last unless $found;
+ }
+
+ # Make sure every key2 is in key1
+ if ($found) {
+ foreach my $prim (@$keys2) {
+ $found = 0;
+ foreach my $key (@$keys1) {
+ if ($prim eq $key) {
+ $found = 1;
+ last;
+ }
+ }
+ last unless $found;
+ }
+ }
+
+ return $found;
+}
+
=head2 resolve_join
=over 4
);
}
+=head2 source_name
+
+=over 4
+
+=item Arguments: $source_name
+
+=back
+
+Set the name of the result source when it is loaded into a schema.
+This is useful if you want to refer to a result source by a name other than
+its class name.
+
+ package ArchivedBooks;
+ use base qw/DBIx::Class/;
+ __PACKAGE__->table('books_archive');
+ __PACKAGE__->source_name('Books');
+
+ # from your schema...
+ $schema->resultset('Books')->find(1);
+
=head2 throw_exception
See L<DBIx::Class::Schema/"throw_exception">.
sub iterator_class { shift->result_source_instance->resultset_class(@_) }
sub resultset_class { shift->result_source_instance->resultset_class(@_) }
+sub source_name { shift->result_source_instance->source_name(@_) }
sub resultset_attributes {
shift->result_source_instance->resultset_attributes(@_);
}
}
+*add_column = \&add_columns;
+
sub has_column {
my ($self, $column) = @_;
return $self->result_source_instance->has_column($column);
return $self->result_source_instance->column_info($column);
}
-
+
sub columns {
return shift->result_source_instance->columns(@_);
}
-
+
+sub remove_columns {
+ return shift->result_source_instance->remove_columns(@_);
+}
+
+*remove_column = \&remove_columns;
+
sub set_primary_key {
shift->result_source_instance->set_primary_key(@_);
}
shift->result_source_instance->unique_constraints(@_);
}
+sub unique_constraint_names {
+ shift->result_source_instance->unique_constraint_names(@_);
+}
+
+sub unique_constraint_columns {
+ shift->result_source_instance->unique_constraint_columns(@_);
+}
+
sub add_relationship {
my ($class, $rel, @rest) = @_;
my $source = $class->result_source_instance;
=head2 is_changed
- my @changed_col_names = $obj->is_changed
+ my @changed_col_names = $obj->is_changed();
+ if ($obj->is_changed()) { ... }
=cut
return keys %{shift->{_dirty_columns} || {}};
}
+=head2 is_column_changed
+
+ if ($obj->is_column_changed('col')) { ... }
+
+=cut
+
+sub is_column_changed {
+ my( $self, $col ) = @_;
+ return exists $self->{_dirty_columns}->{$col};
+}
+
=head2 result_source
Accessor to the ResultSource this object was created from
package Library::Schema;
use base qw/DBIx::Class::Schema/;
-
+
# load Library::Schema::CD, Library::Schema::Book, Library::Schema::DVD
__PACKAGE__->load_classes(qw/CD Book DVD/);
$password,
{ AutoCommit => 0 },
);
-
+
my $schema2 = Library::Schema->connect($coderef_returning_dbh);
# fetch objects using Library::Schema::DVD
sub load_classes {
my ($class, @params) = @_;
-
+
my %comps_for;
-
+
if (@params) {
foreach my $param (@params) {
if (ref $param eq 'ARRAY') {
# filter out commented entries
my @modules = grep { $_ !~ /^#/ } @$param;
-
+
push (@{$comps_for{$class}}, @modules);
}
elsif (ref $param eq 'HASH') {
foreach my $prefix (keys %comps_for) {
foreach my $comp (@{$comps_for{$prefix}||[]}) {
my $comp_class = "${prefix}::${comp}";
- eval "use $comp_class"; # If it fails, assume the user fixed it
- if ($@) {
- $comp_class =~ s/::/\//g;
- die $@ unless $@ =~ /Can't locate.+$comp_class\.pm\sin\s\@INC/;
- warn $@ if $@;
- }
- push(@to_register, [ $comp, $comp_class ]);
+ $class->ensure_class_loaded($comp_class);
+ $comp_class->source_name($comp) unless $comp_class->source_name;
+
+ push(@to_register, [ $comp_class->source_name, $comp_class ]);
}
}
}
$self->storage->deploy($self, undef, $sqltargs);
}
+=head2 create_ddl_dir (EXPERIMENTAL)
+
+=over 4
+
+=item Arguments: \@databases, $version, $directory, $sqlt_args
+
+=back
+
+Creates an SQL file based on the Schema, for each of the specified
+database types, in the given directory.
+
+Note that this feature is currently EXPERIMENTAL and may not work correctly
+across all databases, or fully handle complex relationships.
+
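+A minimal sketch (the version string and directory here are arbitrary):
+
+  $schema->create_ddl_dir(['MySQL', 'SQLite', 'PostgreSQL'], '0.1', './sql/');
+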
+=cut
+
+sub create_ddl_dir
+{
+ my $self = shift;
+
+ $self->throw_exception("Can't create_ddl_dir without storage") unless $self->storage;
+ $self->storage->create_ddl_dir($self, @_);
+}
+
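+# Default name for a generated DDL file: <dir><SchemaClass>-<version>-<type>.sql
+# (note that $dir is used verbatim, so it should end in a path separator)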
+sub ddl_filename
+{
+ my ($self, $type, $dir, $version) = @_;
+
+ my $filename = ref($self);
+ $filename =~ s/^.*:://;
+ $filename = "$dir$filename-$version-$type.sql";
+
+ return $filename;
+}
+
1;
=head1 AUTHORS
package DBIx::Class::Storage::DBI;
+# -*- mode: cperl; cperl-indent-level: 2 -*-
use base 'DBIx::Class::Storage';
use DBI;
use SQL::Abstract::Limit;
use DBIx::Class::Storage::DBI::Cursor;
+use DBIx::Class::Storage::Statistics;
use IO::File;
use Carp::Clan qw/DBIx::Class/;
-
BEGIN {
package DBIC::SQL::Abstract; # Would merge upstream, but nate doesn't reply :(
return $self->{name_sep};
}
-
-
-
-package DBIx::Class::Storage::DBI::DebugCallback;
-
-sub print {
- my ($self, $string) = @_;
- $string =~ m/^(\w+)/;
- ${$self}->($1, $string);
-}
-
} # End of BEGIN block
use base qw/DBIx::Class/;
__PACKAGE__->load_components(qw/AccessorGroup/);
__PACKAGE__->mk_group_accessors('simple' =>
- qw/connect_info _dbh _sql_maker _conn_pid _conn_tid debug debugfh
+ qw/_connect_info _dbh _sql_maker _conn_pid _conn_tid debug debugobj
cursor on_connect_do transaction_depth/);
sub new {
my $new = bless({}, ref $_[0] || $_[0]);
$new->cursor("DBIx::Class::Storage::DBI::Cursor");
$new->transaction_depth(0);
+
+  $new->debugobj(DBIx::Class::Storage::Statistics->new());
+
+ my $fh;
if (defined($ENV{DBIX_CLASS_STORAGE_DBI_DEBUG}) &&
($ENV{DBIX_CLASS_STORAGE_DBI_DEBUG} =~ /=(.+)$/)) {
- $new->debugfh(IO::File->new($1, 'w'))
+ $fh = IO::File->new($1, 'w')
or $new->throw_exception("Cannot open trace file $1");
} else {
- $new->debugfh(IO::File->new('>&STDERR'));
+ $fh = IO::File->new('>&STDERR');
}
+ $new->debugobj->debugfh($fh);
$new->debug(1) if $ENV{DBIX_CLASS_STORAGE_DBI_DEBUG};
return $new;
}
=cut
+=head2 connect_info
+
+Connection information arrayref. Can either be the same arguments
+one would pass to DBI->connect, or a code-reference which returns
+a connected database handle. In either case, there is an optional
+final element in the arrayref, which can hold a hashref of
+connection-specific Storage::DBI options. These include
+C<on_connect_do>, and the sql_maker options C<limit_dialect>,
+C<quote_char>, and C<name_sep>. Examples:
+
+ ->connect_info([ 'dbi:SQLite:./foo.db' ]);
+ ->connect_info(sub { DBI->connect(...) });
+ ->connect_info([ 'dbi:Pg:dbname=foo',
+ 'postgres',
+ '',
+ { AutoCommit => 0 },
+ { quote_char => q{`}, name_sep => q{@} },
+ ]);
+
=head2 on_connect_do
Executes the sql statements given as a listref on every db connect.
=head2 debug
-Causes SQL trace information to be emitted on C<debugfh> filehandle
-(or C<STDERR> if C<debugfh> has not specifically been set).
+Causes SQL trace information to be emitted on the C<debugobj> object
+(or C<STDERR> if C<debugobj> has not specifically been set).
=head2 debugfh
-Sets or retrieves the filehandle used for trace/debug output. This
-should be an IO::Handle compatible object (only the C<print> method is
-used). Initially set to be STDERR - although see information on the
+Set or retrieve the filehandle used for trace/debug output. This should be
+an IO::Handle compatible object (only the C<print> method is used). Initially
+set to be STDERR - although see information on the
L<DBIX_CLASS_STORAGE_DBI_DEBUG> environment variable.
+=head2 debugobj
+
+Sets or retrieves the object used for metric collection. Defaults to an instance
+of L<DBIx::Class::Storage::Statistics> that is compatible with the original
+method of using a coderef as a callback. See the aforementioned Statistics
+class for more information.
+
=head2 debugcb
Sets a callback to be executed each time a statement is run; takes a sub
-reference. Overrides debugfh. Callback is executed as $sub->($op, $info)
-where $op is SELECT/INSERT/UPDATE/DELETE and $info is what would normally
-be printed.
+reference. Callback is executed as $sub->($op, $info) where $op is
+SELECT/INSERT/UPDATE/DELETE and $info is what would normally be printed.
-=cut
+See L</debugobj> for a better way.
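+
+For example (a minimal sketch):
+
+  $schema->storage->debugcb(sub {
+    my ($op, $info) = @_;     # $op is e.g. SELECT, $info the statement text
+    warn "[$op] $info\n";
+  });
+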
+=cut
sub debugcb {
- my ($self, $cb) = @_;
- my $cb_obj = bless(\$cb, 'DBIx::Class::Storage::DBI::DebugCallback');
- $self->debugfh($cb_obj);
+ my $self = shift();
+
+ if($self->debugobj()->can('callback')) {
+ $self->debugobj()->callback(shift());
+ }
}
sub disconnect {
return $self->_dbh;
}
+sub _sql_maker_args {
+ my ($self) = @_;
+
+ return ( limit_dialect => $self->dbh );
+}
+
sub sql_maker {
my ($self) = @_;
unless ($self->_sql_maker) {
- $self->_sql_maker(new DBIC::SQL::Abstract( limit_dialect => $self->dbh ));
+ $self->_sql_maker(new DBIC::SQL::Abstract( $self->_sql_maker_args ));
}
return $self->_sql_maker;
}
+sub connect_info {
+ my ($self, $info_arg) = @_;
+
+ if($info_arg) {
+ my $info = [ @$info_arg ]; # copy because we can alter it
+ my $last_info = $info->[-1];
+ if(ref $last_info eq 'HASH') {
+ my $used;
+ if(my $on_connect_do = $last_info->{on_connect_do}) {
+ $used = 1;
+ $self->on_connect_do($on_connect_do);
+ }
+ for my $sql_maker_opt (qw/limit_dialect quote_char name_sep/) {
+ if(my $opt_val = $last_info->{$sql_maker_opt}) {
+ $used = 1;
+ $self->sql_maker->$sql_maker_opt($opt_val);
+ }
+ }
+
+ # remove our options hashref if it was there, to avoid confusing
+ # DBI in the case the user didn't use all 4 DBI options, as in:
+ # [ 'dbi:SQLite:foo.db', { quote_char => q{`} } ]
+ pop(@$info) if $used;
+ }
+
+ $self->_connect_info($info);
+ }
+
+ $self->_connect_info;
+}
+
sub _populate_dbh {
my ($self) = @_;
- my @info = @{$self->connect_info || []};
+ my @info = @{$self->_connect_info || []};
$self->_dbh($self->_connect(@info));
my $driver = $self->_dbh->{Driver}->{Name};
eval "require DBIx::Class::Storage::DBI::${driver}";
unless ($@) {
bless $self, "DBIx::Class::Storage::DBI::${driver}";
+ $self->_rebless() if $self->can('_rebless');
}
# if on-connect sql statements are given execute them
foreach my $sql_statement (@{$self->on_connect_do || []}) {
+ $self->debugobj->query_start($sql_statement) if $self->debug();
$self->_dbh->do($sql_statement);
+ $self->debugobj->query_end($sql_statement) if $self->debug();
}
$self->_conn_pid($$);
if ($self->{transaction_depth}++ == 0) {
my $dbh = $self->dbh;
if ($dbh->{AutoCommit}) {
- $self->debugfh->print("BEGIN WORK\n")
+ $self->debugobj->txn_begin()
if ($self->debug);
$dbh->begin_work;
}
my $dbh = $self->dbh;
if ($self->{transaction_depth} == 0) {
unless ($dbh->{AutoCommit}) {
- $self->debugfh->print("COMMIT\n")
+ $self->debugobj->txn_commit()
if ($self->debug);
$dbh->commit;
}
}
else {
if (--$self->{transaction_depth} == 0) {
- $self->debugfh->print("COMMIT\n")
+ $self->debugobj->txn_commit()
if ($self->debug);
$dbh->commit;
}
my $dbh = $self->dbh;
if ($self->{transaction_depth} == 0) {
unless ($dbh->{AutoCommit}) {
- $self->debugfh->print("ROLLBACK\n")
+ $self->debugobj->txn_rollback()
if ($self->debug);
$dbh->rollback;
}
}
else {
if (--$self->{transaction_depth} == 0) {
- $self->debugfh->print("ROLLBACK\n")
+ $self->debugobj->txn_rollback()
if ($self->debug);
$dbh->rollback;
}
my ($sql, @bind) = $self->sql_maker->$op($ident, @args);
unshift(@bind, @$extra_bind) if $extra_bind;
if ($self->debug) {
- my $bind_str = join(', ', map {
- defined $_ ? qq{`$_'} : q{`NULL'}
- } @bind);
- $self->debugfh->print("$sql ($bind_str)\n");
+ my @debug_bind = map { defined $_ ? qq{'$_'} : q{'NULL'} } @bind;
+ $self->debugobj->query_start($sql, @debug_bind);
}
my $sth = eval { $self->sth($sql,$op) };
);
}
@bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args
- my $rv = eval { $sth->execute(@bind) };
- if ($@ || !$rv) {
- my $bind_str = join(', ', map {
- defined $_ ? qq{`$_'} : q{`NULL'}
- } @bind);
- $self->throw_exception(
- "Error executing '$sql' ($bind_str): ".($@ || $sth->errstr)
- );
+ my $rv;
+ if ($sth) {
+ my $time = time();
+ $rv = eval { $sth->execute(@bind) };
+
+ if ($@ || !$rv) {
+ $self->throw_exception("Error executing '$sql': ".($@ || $sth->errstr));
+ }
+ } else {
+ $self->throw_exception("'$sql' did not generate a statement.");
+ }
+ if ($self->debug) {
+    my @debug_bind = map { defined $_ ? qq{'$_'} : q{'NULL'} } @bind;
+ $self->debugobj->query_end($sql, @debug_bind);
}
return (wantarray ? ($rv, $sth, @bind) : $rv);
}
$dbh->{RaiseError} = 1;
$dbh->{PrintError} = 0;
eval {
- my $sth = $dbh->column_info( undef, undef, $table, '%' );
+ my ($schema,$tab) = $table =~ /^(.+?)\.(.+)$/ ? ($1,$2) : (undef,$table);
+ my $sth = $dbh->column_info( undef,$schema, $tab, '%' );
$sth->execute();
while ( my $info = $sth->fetchrow_hashref() ){
my %column_info;
$column_info{size} = $info->{COLUMN_SIZE};
$column_info{is_nullable} = $info->{NULLABLE} ? 1 : 0;
$column_info{default_value} = $info->{COLUMN_DEF};
+ my $col_name = $info->{COLUMN_NAME};
+ $col_name =~ s/^\"(.*)\"$/$1/;
- $result{$info->{COLUMN_NAME}} = \%column_info;
+ $result{$col_name} = \%column_info;
}
};
$dbh->{RaiseError} = $old_raise_err;
sub sqlt_type { shift->dbh->{Driver}->{Name} }
+sub create_ddl_dir
+{
+ my ($self, $schema, $databases, $version, $dir, $sqltargs) = @_;
+
+ if(!$dir || !-d $dir)
+ {
+ warn "No directory given, using ./\n";
+ $dir = "./";
+ }
+ $databases ||= ['MySQL', 'SQLite', 'PostgreSQL'];
+ $databases = [ $databases ] if(ref($databases) ne 'ARRAY');
+ $version ||= $schema->VERSION || '1.x';
+
+ eval "use SQL::Translator";
+ $self->throw_exception("Can't deploy without SQL::Translator: $@") if $@;
+
+ my $sqlt = SQL::Translator->new({
+# debug => 1,
+ add_drop_table => 1,
+ });
+ foreach my $db (@$databases)
+ {
+ $sqlt->reset();
+ $sqlt->parser('SQL::Translator::Parser::DBIx::Class');
+# $sqlt->parser_args({'DBIx::Class' => $schema);
+ $sqlt->data($schema);
+ $sqlt->producer($db);
+
+ my $file;
+ my $filename = $schema->ddl_filename($db, $dir, $version);
+ if(-e $filename)
+ {
+      warn "$filename already exists, skipping $db\n";
+ next;
+ }
+ open($file, ">$filename")
+ or $self->throw_exception("Can't open $filename for writing ($!)");
+ my $output = $sqlt->translate;
+#use Data::Dumper;
+# print join(":", keys %{$schema->source_registrations});
+# print Dumper($sqlt->schema);
+ if(!$output)
+ {
+ $self->throw_exception("Failed to translate to $db. (" . $sqlt->error . ")");
+ next;
+ }
+ print $file $output;
+ close($file);
+ }
+
+}
+
sub deployment_statements {
- my ($self, $schema, $type, $sqltargs) = @_;
+ my ($self, $schema, $type, $version, $dir, $sqltargs) = @_;
$type ||= $self->sqlt_type;
+ $version ||= $schema->VERSION || '1.x';
+ $dir ||= './';
eval "use SQL::Translator";
- $self->throw_exception("Can't deploy without SQL::Translator: $@") if $@;
- eval "use SQL::Translator::Parser::DBIx::Class;";
- $self->throw_exception($@) if $@;
- eval "use SQL::Translator::Producer::${type};";
- $self->throw_exception($@) if $@;
- my $tr = SQL::Translator->new(%$sqltargs);
- SQL::Translator::Parser::DBIx::Class::parse( $tr, $schema );
- return "SQL::Translator::Producer::${type}"->can('produce')->($tr);
+ if(!$@)
+ {
+ eval "use SQL::Translator::Parser::DBIx::Class;";
+ $self->throw_exception($@) if $@;
+ eval "use SQL::Translator::Producer::${type};";
+ $self->throw_exception($@) if $@;
+ my $tr = SQL::Translator->new(%$sqltargs);
+ SQL::Translator::Parser::DBIx::Class::parse( $tr, $schema );
+ return "SQL::Translator::Producer::${type}"->can('produce')->($tr);
+ }
+
+ my $filename = $schema->ddl_filename($type, $dir, $version);
+ if(!-f $filename)
+ {
+# $schema->create_ddl_dir([ $type ], $version, $dir, $sqltargs);
+ $self->throw_exception("No SQL::Translator, and no Schema file found, aborting deploy");
+ return;
+ }
+ my $file;
+ open($file, "<$filename")
+ or $self->throw_exception("Can't open $filename ($!)");
+ my @rows = <$file>;
+ close($file);
+
+ return join('', @rows);
+
}
sub deploy {
my ($self, $schema, $type, $sqltargs) = @_;
- foreach my $statement ( $self->deployment_statements($schema, $type, $sqltargs) ) {
+ foreach my $statement ( $self->deployment_statements($schema, $type, undef, undef, $sqltargs) ) {
for ( split(";\n", $statement)) {
- $self->debugfh->print("$_\n") if $self->debug;
+ next if($_ =~ /^--/);
+ next if(!$_);
+# next if($_ =~ /^DROP/m);
+ next if($_ =~ /^BEGIN TRANSACTION/m);
+ next if($_ =~ /^COMMIT/m);
+      $self->debugobj->query_start($_) if $self->debug;
$self->dbh->do($_) or warn "SQL was:\n $_";
+ $self->debugobj->query_end($_) if $self->debug;
}
}
}
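+# Return (and cache) the DateTime::Format::* class used to parse and format
+# datetime values for this storage; subclasses override datetime_parser_type
+# to select the formatter appropriate for their database.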
+sub datetime_parser {
+ my $self = shift;
+ return $self->{datetime_parser} ||= $self->build_datetime_parser(@_);
+}
+
+sub datetime_parser_type { "DateTime::Format::MySQL"; }
+
+sub build_datetime_parser {
+ my $self = shift;
+ my $type = $self->datetime_parser_type(@_);
+ eval "use ${type}";
+ $self->throw_exception("Couldn't load ${type}: $@") if $@;
+ return $type;
+}
+
sub DESTROY { shift->disconnect }
1;
}
+sub datetime_parser_type { "DateTime::Format::DB2"; }
+
1;
=head1 NAME
my( $id ) = $_[0]->_dbh->selectrow_array('SELECT @@IDENTITY' );
return $id;
}
+
+sub build_datetime_parser {
+ my $self = shift;
+ my $type = "DateTime::Format::Strptime";
+ eval "use ${type}";
+ $self->throw_exception("Couldn't load ${type}: $@") if $@;
+ return $type->new( pattern => '%m/%d/%Y %H:%M:%S' );
+}
1;
--- /dev/null
+package DBIx::Class::Storage::DBI::ODBC;
+use strict;
+use warnings;
+
+use base qw/DBIx::Class::Storage::DBI/;
+
+sub _rebless {
+ my ($self) = @_;
+
+ my $dbh = $self->_dbh;
+ my $dbtype = eval { $dbh->get_info(17) };
+ unless ( $@ ) {
+ # Translate the backend name into a perl identifier
+ $dbtype =~ s/\W/_/gi;
+ my $class = "DBIx::Class::Storage::DBI::ODBC::${dbtype}";
+ eval "require $class";
+ bless $self, $class unless $@;
+ }
+}
+
+
+1;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::ODBC - Base class for ODBC drivers
+
+=head1 SYNOPSIS
+
+ # In your table classes
+ __PACKAGE__->load_components(qw/Core/);
+
+
+=head1 DESCRIPTION
+
+This class simply provides a mechanism for discovering and loading a sub-class
+for a specific ODBC backend. It should be transparent to the user.
+
+
+=head1 AUTHORS
+
+Marc Mims C<< <marc@sssonline.com> >>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
--- /dev/null
+package DBIx::Class::Storage::DBI::ODBC::DB2_400_SQL;
+use strict;
+use warnings;
+
+use base qw/DBIx::Class::Storage::DBI::ODBC/;
+
+sub last_insert_id
+{
+ my ($self) = @_;
+
+ my $dbh = $self->_dbh;
+
+ # get the schema/table separator:
+ # '.' when SQL naming is active
+ # '/' when system naming is active
+ my $sep = $dbh->get_info(41);
+ my $sth = $dbh->prepare_cached(
+ "SELECT IDENTITY_VAL_LOCAL() FROM SYSIBM${sep}SYSDUMMY1", {}, 3);
+ $sth->execute();
+
+ my @res = $sth->fetchrow_array();
+
+ return @res ? $res[0] : undef;
+}
+
+sub _sql_maker_args {
+ my ($self) = @_;
+
+ return (
+ limit_dialect => 'FetchFirst',
+ name_sep => $self->_dbh->get_info(41)
+ );
+}
+
+1;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::ODBC::DB2_400_SQL - Support specific to DB2/400
+over ODBC
+
+=head1 SYNOPSIS
+
+ # In your table classes
+ __PACKAGE__->load_components(qw/PK::Auto Core/);
+ __PACKAGE__->set_primary_key('id');
+
+
+=head1 DESCRIPTION
+
+This class implements support specific to DB2/400 over ODBC, including
+auto-increment primary keys, SQL::Abstract::Limit dialect, and name separator
+for connections using either SQL naming or System naming.
+
+
+=head1 AUTHORS
+
+Marc Mims C<< <marc@sssonline.com> >>
+
+Based on DBIx::Class::Storage::DBI::DB2 by Jess Robinson.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
--- /dev/null
+package DBIx::Class::Storage::DBI::ODBC400;
+use strict;
+use warnings;
+
+use base qw/DBIx::Class::Storage::DBI/;
+
+sub last_insert_id
+{
+ my ($self) = @_;
+
+ my $dbh = $self->_dbh;
+
+ # get the schema/table separator:
+ # '.' when SQL naming is active
+ # '/' when system naming is active
+ my $sep = $dbh->get_info(41);
+ my $sth = $dbh->prepare_cached(
+ "SELECT IDENTITY_VAL_LOCAL() FROM SYSIBM${sep}SYSDUMMY1", {}, 3);
+ $sth->execute();
+
+ my @res = $sth->fetchrow_array();
+
+ return @res ? $res[0] : undef;
+}
+
+1;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::ODBC400 - Automatic primary key class for DB2/400
+over ODBC
+
+=head1 SYNOPSIS
+
+ # In your table classes
+ __PACKAGE__->load_components(qw/PK::Auto Core/);
+ __PACKAGE__->set_primary_key('id');
+
+
+=head1 DESCRIPTION
+
+This class implements autoincrements for DB2/400 over ODBC.
+
+
+=head1 AUTHORS
+
+Marc Mims C<< <marc@questright.com> >>
+
+Based on DBIx::Class::Storage::DBI::DB2 by Jess Robinson.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
my ($schema,$table) = $source->name =~ /^(.+)\.(.+)$/ ? ($1,$2)
: (undef,$source->name);
while (my $col = shift @pri) {
- my $info = $dbh->column_info(undef,$schema,$table,$col)->fetchrow_arrayref;
- if (defined $info->[12] and $info->[12] =~
+ my $info = $dbh->column_info(undef,$schema,$table,$col)->fetchrow_hashref;
+ if (defined $info->{COLUMN_DEF} and $info->{COLUMN_DEF} =~
/^nextval\(+'([^']+)'::(?:text|regclass)\)/)
{
- return $1; # may need to strip quotes -- see if this works
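+      # qualify the sequence with the table's schema unless it already is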
+ my $seq = $1;
+ return $seq =~ /\./ ? $seq : $info->{TABLE_SCHEM} . "." . $seq; # may need to strip quotes -- see if this works
}
}
}
return 'PostgreSQL';
}
+sub datetime_parser_type { return "DateTime::Format::Pg"; }
+
1;
=head1 NAME
--- /dev/null
+package DBIx::Class::Storage::Statistics;
+use strict;
+
+use base qw/DBIx::Class::AccessorGroup Class::Data::Accessor/;
+__PACKAGE__->mk_group_accessors(simple => qw/callback debugfh/);
+
+=head1 NAME
+
+DBIx::Class::Storage::Statistics - SQL Statistics
+
+=head1 SYNOPSIS
+
+=head1 DESCRIPTION
+
+This class is called by DBIx::Class::Storage::DBI as a means of collecting
+statistics on its actions. Using this class alone merely prints the SQL
+executed, the fact that it completed, and begin/end notifications for
+transactions.
+
+To really use this class you should subclass it and create your own method
+for collecting the statistics as discussed in L<DBIx::Class::Manual::Cookbook>.
+
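+A minimal sketch of such a subclass (the package name and timing logic here
+are hypothetical):
+
+  package My::Profiler;
+  use strict;
+  use base qw/DBIx::Class::Storage::Statistics/;
+
+  sub query_start {
+    my ($self, $sql, @params) = @_;
+    $self->{_start} = time;           # remember when the query began
+  }
+
+  sub query_end {
+    my ($self, $sql, @params) = @_;
+    my $elapsed = time - $self->{_start};
+    warn "took ${elapsed}s: $sql\n";  # report duration and statement
+  }
+
+  1;
+
+Install it with C<< $schema->storage->debugobj(My::Profiler->new) >> and turn
+tracing on with C<< $schema->storage->debug(1) >>.
+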
+=head1 METHODS
+
+=cut
+
+=head2 new
+
+Returns a new L<DBIx::Class::Storage::Statistics> object.
+
+=cut
+sub new {
+ my $self = bless({}, ref($_[0]) || $_[0]);
+
+ return $self;
+}
+
+=head2 debugfh
+
+Sets or retrieves the filehandle used for trace/debug output. This should
+be an IO::Handle compatible object (only the C<print> method is used). Initially
+this is set to STDERR - although see information on the
+L<DBIX_CLASS_STORAGE_DBI_DEBUG> environment variable.
+
+=head2 txn_begin
+
+Called when a transaction begins.
+
+=cut
+sub txn_begin {
+ my $self = shift();
+}
+
+=head2 txn_rollback
+
+Called when a transaction is rolled back.
+
+=cut
+sub txn_rollback {
+ my $self = shift();
+}
+
+=head2 txn_commit
+
+Called when a transaction is committed.
+
+=cut
+sub txn_commit {
+ my $self = shift();
+}
+
+=head2 query_start
+
+Called before a query is executed. The first argument is the SQL string being
+executed and subsequent arguments are the parameters used for the query.
+
+=cut
+sub query_start {
+ my $self = shift();
+ my $string = shift();
+
+ if(defined($self->callback())) {
+ $string =~ m/^(\w+)/;
+ $self->callback()->($1, $string);
+ return;
+ }
+
+ $self->debugfh->print("$string: " . join(', ', @_) . "\n");
+}
+
+=head2 query_end
+
+Called when a query finishes executing. Has the same arguments as query_start.
+
+=cut
+sub query_end {
+ my $self = shift();
+ my $string = shift();
+}
+
+1;
+
+=head1 AUTHORS
+
+Cory G. Watson <gphat@cpan.org>
+
+=head1 LICENSE
+
+You may distribute this code under the same license as Perl itself.
+
+=cut
use base qw/DBIx::Class/;
-__PACKAGE__->load_components(qw/PK::Auto CDBICompat Core DB/);
+__PACKAGE__->load_components(qw/CDBICompat Core DB/);
use File::Temp qw/tempfile/;
my (undef, $DB) = tempfile();
+++ /dev/null
-package DBIx::Class::UUIDColumns;
-
-use strict;
-use warnings;
-
-use base qw/DBIx::Class/;
-
-__PACKAGE__->mk_classdata( 'uuid_auto_columns' => [] );
-__PACKAGE__->mk_classdata( 'uuid_maker' );
-__PACKAGE__->uuid_class( __PACKAGE__->_find_uuid_module );
-
-# be compatible with Class::DBI::UUID
-sub uuid_columns {
- my $self = shift;
- for (@_) {
- $self->throw_exception("column $_ doesn't exist") unless $self->has_column($_);
- }
- $self->uuid_auto_columns(\@_);
-}
-
-sub uuid_class {
- my ($self, $class) = @_;
-
- if ($class) {
- $class = "DBIx::Class::UUIDMaker$class" if $class =~ /^::/;
-
- if (!eval "require $class") {
- $self->throw_exception("$class could not be loaded: $@");
- } elsif (!$class->isa('DBIx::Class::UUIDMaker')) {
- $self->throw_exception("$class is not a UUIDMaker subclass");
- } else {
- $self->uuid_maker($class->new);
- };
- };
-
- return ref $self->uuid_maker;
-};
-
-sub insert {
- my $self = shift;
- for my $column (@{$self->uuid_auto_columns}) {
- $self->store_column( $column, $self->get_uuid )
- unless defined $self->get_column( $column );
- }
- $self->next::method(@_);
-}
-
-sub get_uuid {
- return shift->uuid_maker->as_string;
-}
-
-sub _find_uuid_module {
- if (eval{require Data::UUID}) {
- return '::Data::UUID';
- } elsif ($^O ne 'openbsd' && eval{require APR::UUID}) {
- # APR::UUID on openbsd causes some as yet unfound nastiness for XS
- return '::APR::UUID';
- } elsif (eval{require UUID}) {
- return '::UUID';
- } elsif (eval{
- # squelch the 'too late for INIT' warning in Win32::API::Type
- local $^W = 0;
- require Win32::Guidgen;
- }) {
- return '::Win32::Guidgen';
- } elsif (eval{require Win32API::GUID}) {
- return '::Win32API::GUID';
- } else {
- shift->throw_exception('no suitable uuid module could be found')
- };
-};
-
-1;
-__END__
-
-=head1 NAME
-
-DBIx::Class::UUIDColumns - Implicit uuid columns
-
-=head1 SYNOPSIS
-
- package Artist;
- __PACKAGE__->load_components(qw/UUIDColumns Core DB/);
- __PACKAGE__->uuid_columns( 'artist_id' );
-
-=head1 DESCRIPTION
-
-This L<DBIx::Class> component resembles the behaviour of
-L<Class::DBI::UUID>, to make some columns implicitly created as uuid.
-
-When loaded, C<UUIDColumns> will search for a suitable uuid generation module
-from the following list of supported modules:
-
- Data::UUID
- APR::UUID*
- UUID
- Win32::Guidgen
- Win32API::GUID
-
-If no supporting module can be found, an exception will be thrown.
-
-*APR::UUID will not be loaded under OpenBSD due to an as yet unidentified XS
-issue.
-
-If you would like to use a specific module, you can set C<uuid_class>:
-
- __PACKAGE__->uuid_class('::Data::UUID');
- __PACKAGE__->uuid_class('MyUUIDGenerator');
-
-Note that the component needs to be loaded before Core.
-
-=head1 METHODS
-
-=head2 uuid_columns(@columns)
-
-Takes a list of columns to be filled with uuids during insert.
-
- __PACKAGE__->uuid_columns('id');
-
-=head2 uuid_class($classname)
-
-Takes the name of a UUIDMaker subclass to be used for uuid value generation.
-This can be a fully qualified class name, or a shortcut name starting with ::
-that matches one of the available DBIx::Class::UUIDMaker subclasses:
-
- __PACKAGE__->uuid_class('CustomUUIDGenerator');
- # loads CustomeUUIDGenerator
-
- __PACKAGE->uuid_class('::Data::UUID');
- # loads DBIx::Class::UUIDMaker::Data::UUID;
-
-Note that C<uuid_class> chacks to see that the specified class isa
-DBIx::Class::UUIDMaker subbclass and throws and exception if it isn't.
-
-=head2 uuid_maker
-
-Returns the current UUIDMaker instance for the given module.
-
- my $uuid = __PACKAGE__->uuid_maker->as_string;
-
-=head1 SEE ALSO
-
-L<DBIx::Class::UUIDMaker>
-
-=head1 AUTHORS
-
-Chia-liang Kao <clkao@clkao.org>
-Chris Laco <claco@chrislaco.com>
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
+++ /dev/null
-package DBIx::Class::UUIDMaker;
-
-use strict;
-use warnings;
-
-sub new {
- return bless {}, shift;
-};
-
-sub as_string {
- return undef;
-};
-
-1;
-__END__
-
-=head1 NAME
-
-DBIx::Class::UUIDMaker - UUID wrapper module
-
-=head1 SYNOPSIS
-
- package CustomUUIDMaker;
- use base qw/DBIx::Class::/;
-
- sub as_string {
- my $uuid;
- ...magic incantations...
- return $uuid;
- };
-
-=head1 DESCRIPTION
-
-DBIx::Class::UUIDMaker is a base class used by the various uuid generation
-subclasses.
-
-=head1 METHODS
-
-=head2 as_string
-
-Returns the new uuid as a string.
-
-=head1 SEE ALSO
-
-L<DBIx::Class::UUIDMaker>,
-L<DBIx::Class::UUIDMaker::UUID>,
-L<DBIx::Class::UUIDMaker::APR::UUID>,
-L<DBIx::Class::UUIDMaker::Data::UUID>,
-L<DBIx::Class::UUIDMaker::Win32::Guidgen>,
-L<DBIx::Class::UUIDMaker::Win32API::GUID>,
-L<DBIx::Class::UUIDMaker::Data::Uniqid>
-
-=head1 AUTHOR
-
-Chris Laco <claco@chrislaco.com>
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
+++ /dev/null
-package DBIx::Class::UUIDMaker::APR::UUID;
-
-use strict;
-use warnings;
-
-use base qw/DBIx::Class::UUIDMaker/;
-use APR::UUID ();
-
-sub as_string {
- return APR::UUID->new->format;
-};
-
-1;
-__END__
-
-=head1 NAME
-
-DBIx::Class::UUIDMaker::APR::UUID - Create uuids using APR::UUID
-
-=head1 SYNOPSIS
-
- package Artist;
- __PACKAGE__->load_components(qw/UUIDColumns Core DB/);
- __PACKAGE__->uuid_columns( 'artist_id' );
- __PACKAGE__->uuid_class('::APR::UUID');
-
-=head1 DESCRIPTION
-
-This DBIx::Class::UUIDMaker subclass uses APR::UUID to generate uuid
-strings in the following format:
-
- 098f2470-bae0-11cd-b579-08002b30bfeb
-
-=head1 METHODS
-
-=head2 as_string
-
-Returns the new uuid as a string.
-
-=head1 SEE ALSO
-
-L<APR::UUID>
-
-=head1 AUTHOR
-
-Chris Laco <claco@chrislaco.com>
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
+++ /dev/null
-package DBIx::Class::UUIDMaker::Data::UUID;
-
-use strict;
-use warnings;
-
-use base qw/DBIx::Class::UUIDMaker/;
-use Data::UUID ();
-
-sub as_string {
- return Data::UUID->new->to_string(Data::UUID->new->create);
-};
-
-1;
-__END__
-
-=head1 NAME
-
-DBIx::Class::UUIDMaker::Data::UUID - Create uuids using Data::UUID
-
-=head1 SYNOPSIS
-
- package Artist;
- __PACKAGE__->load_components(qw/UUIDColumns Core DB/);
- __PACKAGE__->uuid_columns( 'artist_id' );
- __PACKAGE__->uuid_class('::Data::UUID');
-
-=head1 DESCRIPTION
-
-This DBIx::Class::UUIDMaker subclass uses Data::UUID to generate uuid
-strings in the following format:
-
- 098f2470-bae0-11cd-b579-08002b30bfeb
-
-=head1 METHODS
-
-=head2 as_string
-
-Returns the new uuid as a string.
-
-=head1 SEE ALSO
-
-L<Data::UUID>
-
-=head1 AUTHOR
-
-Chris Laco <claco@chrislaco.com>
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
+++ /dev/null
-package DBIx::Class::UUIDMaker::Data::Uniqid;
-
-use strict;
-use warnings;
-
-use base qw/DBIx::Class::UUIDMaker/;
-use Data::Uniqid ();
-
-sub as_string {
- return Data::Uniqid->luniqid;
-};
-
-1;
-__END__
-
-=head1 NAME
-
-DBIx::Class::UUIDMaker::Data::Uniqid - Create uuids using Data::Uniqid
-
-=head1 SYNOPSIS
-
- package Artist;
- __PACKAGE__->load_components(qw/UUIDColumns Core DB/);
- __PACKAGE__->uuid_columns( 'artist_id' );
- __PACKAGE__->uuid_class('::Data::Uniqid');
-
-=head1 DESCRIPTION
-
-This DBIx::Class::UUIDMaker subclass uses Data::Uniqid to generate uuid
-strings using Data::Uniqid::luniqid.
-
-=head1 METHODS
-
-=head2 as_string
-
-Returns the new uuid as a string.
-
-=head1 SEE ALSO
-
-L<Data::Data::Uniqid>
-
-=head1 AUTHOR
-
-Chris Laco <claco@chrislaco.com>
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
+++ /dev/null
-package DBIx::Class::UUIDMaker::UUID;
-
-use strict;
-use warnings;
-
-use base qw/DBIx::Class::UUIDMaker/;
-use UUID ();
-
-sub as_string {
- my ($uuid, $uuidstring);
- UUID::generate($uuid);
- UUID::unparse($uuid, $uuidstring);
-
- return $uuidstring;
-};
-
-1;
-__END__
-
-=head1 NAME
-
-DBIx::Class::UUIDMaker::UUID - Create uuids using UUID
-
-=head1 SYNOPSIS
-
- package Artist;
- __PACKAGE__->load_components(qw/UUIDColumns Core DB/);
- __PACKAGE__->uuid_columns( 'artist_id' );
- __PACKAGE__->uuid_class('::UUID');
-
-=head1 DESCRIPTION
-
-This DBIx::Class::UUIDMaker subclass uses UUID to generate uuid
-strings in the following format:
-
- 098f2470-bae0-11cd-b579-08002b30bfeb
-
-=head1 METHODS
-
-=head2 as_string
-
-Returns the new uuid as a string.
-
-=head1 SEE ALSO
-
-L<UUID>
-
-=head1 AUTHOR
-
-Chris Laco <claco@chrislaco.com>
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
+++ /dev/null
-package DBIx::Class::UUIDMaker::Win32::Guidgen;
-
-use strict;
-use warnings;
-
-use base qw/DBIx::Class::UUIDMaker/;
-use Win32::Guidgen ();
-
-sub as_string {
- my $uuid = Win32::Guidgen::create();
- $uuid =~ s/(^\{|\}$)//g;
-
- return $uuid;
-};
-
-1;
-__END__
-
-=head1 NAME
-
-DBIx::Class::UUIDMaker::Win32::Guidgen - Create uuids using Win32::Guidgen
-
-=head1 SYNOPSIS
-
- package Artist;
- __PACKAGE__->load_components(qw/UUIDColumns Core DB/);
- __PACKAGE__->uuid_columns( 'artist_id' );
- __PACKAGE__->uuid_class('::Win32::Guidgen');
-
-=head1 DESCRIPTION
-
-This DBIx::Class::UUIDMaker subclass uses Win32::Guidgen to generate uuid
-strings in the following format:
-
- 098f2470-bae0-11cd-b579-08002b30bfeb
-
-=head1 METHODS
-
-=head2 as_string
-
-Returns the new uuid as a string.
-
-=head1 SEE ALSO
-
-L<Win32::Guidgen>
-
-=head1 AUTHOR
-
-Chris Laco <claco@chrislaco.com>
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
+++ /dev/null
-package DBIx::Class::UUIDMaker::Win32API::GUID;
-
-use strict;
-use warnings;
-
-use base qw/DBIx::Class::UUIDMaker/;
-use Win32API::GUID ();
-
-sub as_string {
- return Win32API::GUID::CreateGuid();
-};
-
-1;
-__END__
-
-=head1 NAME
-
-DBIx::Class::UUIDMaker::Win32API::GUID - Create uuids using Win32API::GUID
-
-=head1 SYNOPSIS
-
- package Artist;
- __PACKAGE__->load_components(qw/UUIDColumns Core DB/);
- __PACKAGE__->uuid_columns( 'artist_id' );
- __PACKAGE__->uuid_class('::Win32API::GUID');
-
-=head1 DESCRIPTION
-
-This DBIx::Class::UUIDMaker subclass uses Win32API::GUID to generate uuid
-strings in the following format:
-
- 098f2470-bae0-11cd-b579-08002b30bfeb
-
-=head1 METHODS
-
-=head2 as_string
-
-Returns the new uuid as a string.
-
-=head1 SEE ALSO
-
-L<Win32API::GUID>
-
-=head1 AUTHOR
-
-Chris Laco <claco@chrislaco.com>
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
# print Dumper($dbixschema->registered_classes);
#foreach my $tableclass ($dbixschema->registered_classes)
+
+ my %seen_tables;
+
foreach my $moniker ($dbixschema->sources)
{
#eval "use $tableclass";
#print("Can't load $tableclass"), next if($@);
my $source = $dbixschema->source($moniker);
+ next if $seen_tables{$source->name}++;
+
my $table = $schema->add_table(
name => $source->name,
type => 'TABLE',
}
$table->primary_key($source->primary_columns);
+ my @primary = $source->primary_columns;
+ my %unique_constraints = $source->unique_constraints;
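+  # add unique constraints, skipping any that merely duplicate the primary key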
+ foreach my $uniq (keys %unique_constraints) {
+ if (!$source->compare_relationship_keys($unique_constraints{$uniq}, \@primary)) {
+ $table->add_constraint(
+ type => 'unique',
+ name => "$uniq",
+ fields => $unique_constraints{$uniq}
+ );
+ }
+ }
+
my @rels = $source->relationships();
foreach my $rel (@rels)
{
my $rel_info = $source->relationship_info($rel);
- my $rel_table = $source->related_source($rel)->name;
-
# Ignore any rel cond that isn't a straight hash
next unless ref $rel_info->{cond} eq 'HASH';
+ my $othertable = $source->related_source($rel);
+ my $rel_table = $othertable->name;
+
# Get the key information, mapping off the foreign/self markers
my @cond = keys(%{$rel_info->{cond}});
my @refkeys = map {/^\w+\.(\w+)$/} @cond;
if($rel_table)
{
- #Decide if this is a foreign key based on whether the self
- #items are our primary columns.
+ my $reverse_rels = $source->reverse_relationship_info($rel);
+ my ($otherrelname, $otherrelationship) = each %{$reverse_rels};
- # Make sure every self key is in the primary key list
- my $found;
- foreach my $key (@keys) {
- $found = 0;
- foreach my $prim ($source->primary_columns) {
- if ($prim eq $key) {
- $found = 1;
- last;
- }
- }
- last unless $found;
- }
+ my $on_delete = '';
+ my $on_update = '';
- # Make sure every primary key column is in the self keys
- if ($found) {
- foreach my $prim ($source->primary_columns) {
- $found = 0;
- foreach my $key (@keys) {
- if ($prim eq $key) {
- $found = 1;
- last;
- }
- }
- last unless $found;
- }
+ if (defined $otherrelationship) {
+ $on_delete = $otherrelationship->{'attrs'}->{cascade_delete} ? 'CASCADE' : '';
+ $on_update = $otherrelationship->{'attrs'}->{cascade_copy} ? 'CASCADE' : '';
}
- # if $found then the two sets are equal.
+ #Decide if this is a foreign key based on whether the self
+ #items are our primary columns.
# If the sets are different, then we assume it's a foreign key from
# us to another table.
- if (!$found) {
+ if (!$source->compare_relationship_keys(\@keys, \@primary)) {
$table->add_constraint(
type => 'foreign_key',
name => "fk_$keys[0]",
fields => \@keys,
reference_fields => \@refkeys,
reference_table => $rel_table,
+ on_delete => $on_delete,
+ on_update => $on_update
);
}
}
}
1;
+
--- /dev/null
+#!/usr/bin/perl
+use strict;
+use warnings;
+use lib qw(lib t/lib);
+
+# USAGE:
+# maint/inheritance_pod.pl Some::Module
+
+my $module = $ARGV[0];
+eval(" require $module; ") or die "Could not load $module: $@";
+
+my @modules = Class::C3::calculateMRO($module);
+shift( @modules );
+
+print "=head1 INHERITED METHODS\n\n";
+
+foreach my $module (@modules) {
+ print "=head2 $module\n\n";
+ print "=over 4\n\n";
+ my $file = $module;
+ $file =~ s/::/\//g;
+ $file .= '.pm';
+ foreach my $path (@INC){
+ if (-e "$path/$file") {
+ open(MODULE,"<$path/$file");
+ while (my $line = <MODULE>) {
+ if ($line=~/^\s*sub ([a-z][a-z_]+) \{/) {
+ my $method = $1;
+ print "=item *\n\n";
+ print "L<$method|$module/$method>\n\n";
+ }
+ }
+ close(MODULE);
+ last;
+ }
+ }
+ print "=back\n\n";
+}
+
+1;
--- /dev/null
+#!/usr/bin/perl
+use strict;
+use warnings;
+
+use Getopt::Long;
+use Pod::Usage;
+use JSON qw( jsonToObj );
+
+$JSON::BareKey = 1;
+$JSON::QuotApos = 1;
+
+GetOptions(
+ 'schema=s' => \my $schema_class,
+ 'class=s' => \my $resultset_class,
+ 'connect=s' => \my $connect,
+ 'op=s' => \my $op,
+ 'set=s' => \my $set,
+ 'where=s' => \my $where,
+ 'attrs=s' => \my $attrs,
+ 'format=s' => \my $format,
+ 'force' => \my $force,
+ 'trace' => \my $trace,
+ 'quiet' => \my $quiet,
+ 'help' => \my $help,
+ 'tlibs' => \my $t_libs,
+);
+
+if ($t_libs) {
+ unshift( @INC, 't/lib', 'lib' );
+}
+
+pod2usage(1) if ($help);
+$ENV{DBIX_CLASS_STORAGE_DBI_DEBUG} = 1 if ($trace);
+
+die('No op specified') if(!$op);
+die('Invalid op') if ($op!~/^(?:insert|update|delete|select)$/s);
+my $csv_class;
+if ($op eq 'select') {
+ $format ||= 'tsv';
+  die('Invalid format') if ($format!~/^(?:tsv|csv)$/s);
+ $csv_class = 'Text::CSV_XS';
+ eval{ require Text::CSV_XS };
+ if ($@) {
+ $csv_class = 'Text::CSV_PP';
+ eval{ require Text::CSV_PP };
+ die('The select op requires either the Text::CSV_XS or the Text::CSV_PP module') if ($@);
+ }
+}
+
+die('No schema specified') if(!$schema_class);
+eval("require $schema_class");
+die('Unable to load schema') if ($@);
+$connect = jsonToObj( $connect ) if ($connect);
+my $schema = $schema_class->connect(
+ ( $connect ? @$connect : () )
+);
+
+die('No class specified') if(!$resultset_class);
+my $resultset = eval{ $schema->resultset($resultset_class) };
+die('Unable to load the class with the schema') if ($@);
+
+$set = jsonToObj( $set ) if ($set);
+$where = jsonToObj( $where ) if ($where);
+$attrs = jsonToObj( $attrs ) if ($attrs);
+
+if ($op eq 'insert') {
+ die('Do not use the where option with the insert op') if ($where);
+ die('Do not use the attrs option with the insert op') if ($attrs);
+ my $obj = $resultset->create( $set );
+ print ''.ref($resultset).' ID: '.join(',',$obj->id())."\n";
+}
+elsif ($op eq 'update') {
+ $resultset = $resultset->search( ($where||{}) );
+ my $count = $resultset->count();
+ print "This action will modify $count ".ref($resultset)." records.\n" if (!$quiet);
+ if ( $force || confirm() ) {
+ $resultset->update_all( $set );
+ }
+}
+elsif ($op eq 'delete') {
+ die('Do not use the set option with the delete op') if ($set);
+ $resultset = $resultset->search( ($where||{}), ($attrs||()) );
+ my $count = $resultset->count();
+ print "This action will delete $count ".ref($resultset)." records.\n" if (!$quiet);
+ if ( $force || confirm() ) {
+ $resultset->delete_all();
+ }
+}
+elsif ($op eq 'select') {
+ die('Do not use the set option with the select op') if ($set);
+ my $csv = $csv_class->new({
+ sep_char => ( $format eq 'tsv' ? "\t" : ',' ),
+ });
+ $resultset = $resultset->search( ($where||{}), ($attrs||()) );
+ my @columns = $resultset->result_source->columns();
+ $csv->combine( @columns );
+ print $csv->string()."\n";
+ while (my $row = $resultset->next()) {
+ my @fields;
+ foreach my $column (@columns) {
+ push( @fields, $row->get_column($column) );
+ }
+ $csv->combine( @fields );
+ print $csv->string()."\n";
+ }
+}
+
+sub confirm {
+ print "Are you sure you want to do this? (type YES to confirm) ";
+ my $response = <STDIN>;
+ return 1 if ($response=~/^YES/);
+ return;
+}
+
+__END__
+
+=head1 NAME
+
+dbicadmin - Execute operations upon DBIx::Class objects.
+
+=head1 SYNOPSIS
+
+ dbicadmin --op=insert --schema=My::Schema --class=Class --set=JSON
+ dbicadmin --op=update --schema=My::Schema --class=Class --set=JSON --where=JSON
+ dbicadmin --op=delete --schema=My::Schema --class=Class --where=JSON
+ dbicadmin --op=select --schema=My::Schema --class=Class --where=JSON --format=tsv
+
+=head1 DESCRIPTION
+
+This utility provides the ability to run INSERTs, UPDATEs,
+DELETEs, and SELECTs on any DBIx::Class object.
+
+=head1 OPTIONS
+
+=head2 op
+
+The type of operation. Valid values are insert, update, delete,
+and select.
+
+=head2 schema
+
+The name of your schema class.
+
+=head2 class
+
+The name of the class, within your schema, that you want to run
+the operation on.
+
+=head2 connect
+
+A JSON array to be passed to your schema class upon connecting.
+The array will need to be compatible with whatever the DBIC
+->connect() method requires.
+
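+For example (a hypothetical SQLite database):
+
+  --connect='["dbi:SQLite:my.db", "", ""]'
+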
+=head2 set
+
+This option must be a valid JSON data string and is passed in to
+the DBIC update() method. Use this option with the update
+and insert ops.
+
+=head2 where
+
+This option must be a valid JSON data string and is passed in as
+the first argument to the DBIC search() method. Use this
+option with the update, delete, and select ops.
+
+=head2 attrs
+
+This option must be a valid JSON data string and is passed in as
+the second argument to the DBIC search() method. Use this
+option with the update, delete, and select ops.
+
+=head2 help
+
+Display this help page.
+
+=head2 force
+
+Suppresses the confirmation dialogues that are usually displayed
+when someone runs a DELETE or UPDATE action.
+
+=head2 quiet
+
+Do not display status messages.
+
+=head2 trace
+
+Turns on tracing on the DBI storage, thus printing SQL as it is
+executed.
+
+=head2 tlibs
+
+This option is purely for testing during the DBIC installation. Do
+not use it.
+
+=head1 JSON
+
+JSON is a lightweight data-interchange format. It allows you
+to express complex data structures for use in the where and
+set options.
+
+This module turns on L<JSON>'s BareKey and QuotApos options so
+that your data can look a bit more readable.
+
+ --where={"this":"that"} # generic JSON
+ --where={this:'that'} # with BareKey and QuoteApos
+
+Consider wrapping your JSON in outer quotes so that you don't
+have to escape your inner quotes.
+
+ --where={this:\"that\"} # no outer quote
+ --where='{this:"that"}' # outer quoted
+
+=head1 AUTHOR
+
+Aran Deltac <bluefeet@cpan.org>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
# Test for inject_base to filter out duplicates
{ package DBICTest::_InjectBaseTest;
use base qw/ DBIx::Class /;
+ package DBICTest::_InjectBaseTest::A;
+ package DBICTest::_InjectBaseTest::B;
+ package DBICTest::_InjectBaseTest::C;
}
DBICTest::_InjectBaseTest->inject_base( 'DBICTest::_InjectBaseTest', qw/
DBICTest::_InjectBaseTest::A
--- /dev/null
+#!/usr/bin/perl
+
+use strict;
+use warnings;
+use Test::More;
+
+BEGIN {
+ eval "use DBD::SQLite";
+ plan $@
+ ? ( skip_all => 'needs DBD::SQLite for testing' )
+ : ( tests => 13 );
+}
+
+use lib qw(t/lib);
+
+use_ok('DBICTest');
+use_ok('DBICTest::HelperRels');
+
+my $cbworks = 0;
+
+DBICTest->schema->storage->debugcb(sub { $cbworks = 1; });
+DBICTest->schema->storage->debug(0);
+my $rs = DBICTest::CD->search({});
+$rs->count();
+ok(!$cbworks, 'Callback not called with debug disabled');
+
+DBICTest->schema->storage->debug(1);
+
+$rs->count();
+ok($cbworks, 'Debug callback worked.');
+
+my $prof = DBIx::Test::Profiler->new();
+DBICTest->schema->storage->debugobj($prof);
+
+# Test non-transaction calls.
+$rs->count();
+ok($prof->{'query_start'}, 'query_start called');
+ok($prof->{'query_end'}, 'query_end called');
+ok(!$prof->{'txn_begin'}, 'txn_begin not called');
+ok(!$prof->{'txn_commit'}, 'txn_commit not called');
+
+$prof->reset();
+
+# Test transaction calls
+DBICTest->schema->txn_begin();
+ok($prof->{'txn_begin'}, 'txn_begin called');
+
+$rs = DBICTest::CD->search({});
+$rs->count();
+ok($prof->{'query_start'}, 'query_start called');
+ok($prof->{'query_end'}, 'query_end called');
+
+DBICTest->schema->txn_commit();
+ok($prof->{'txn_commit'}, 'txn_commit called');
+
+$prof->reset();
+
+# Test a rollback
+DBICTest->schema->txn_begin();
+$rs = DBICTest::CD->search({});
+$rs->count();
+DBICTest->schema->txn_rollback();
+ok($prof->{'txn_rollback'}, 'txn_rollback called');
+
+DBICTest->schema->storage->debug(0);
+
+package DBIx::Test::Profiler;
+use strict;
+
+sub new {
+    return bless({}, shift);
+}
+
+sub query_start {
+ my $self = shift();
+ $self->{'query_start'} = 1;
+}
+
+sub query_end {
+ my $self = shift();
+ $self->{'query_end'} = 1;
+}
+
+sub txn_begin {
+ my $self = shift();
+ $self->{'txn_begin'} = 1;
+}
+
+sub txn_rollback {
+ my $self = shift();
+ $self->{'txn_rollback'} = 1;
+}
+
+sub txn_commit {
+ my $self = shift();
+ $self->{'txn_commit'} = 1;
+}
+
+sub reset {
+ my $self = shift();
+
+ $self->{'query_start'} = 0;
+ $self->{'query_end'} = 0;
+ $self->{'txn_begin'} = 0;
+ $self->{'txn_rollback'} = 0;
+    $self->{'txn_commit'} = 0;
+}
+
+1;
--- /dev/null
+use Test::More;
+use strict;
+use warnings;
+use lib qw(t/lib);
+use DBICTest;
+use DBICTest::BasicRels;
+
+plan tests => 7;
+
+my $schema = DBICTest->schema;
+my $total_cds = $schema->resultset('CD')->count;
+cmp_ok($total_cds, '>', 0, 'need cd records');
+
+# test that delete_related w/o conditions deletes all related records only
+my $artist = $schema->resultset("Artist")->find(3);
+my $artist_cds = $artist->cds->count;
+cmp_ok($artist_cds, '<', $total_cds, 'need more cds than just related cds');
+
+ok($artist->delete_related('cds'));
+cmp_ok($schema->resultset('CD')->count, '==', ($total_cds - $artist_cds), 'only related cds were deleted');
+
+$total_cds -= $artist_cds;
+
+# test that delete_related w/conditions deletes just the matched related records only
+my $artist2 = $schema->resultset("Artist")->find(2);
+my $artist2_cds = $artist2->search_related('cds')->count;
+cmp_ok($artist2_cds, '<', $total_cds, 'need more cds than related cds');
+
+ok($artist2->delete_related('cds', {title => {like => '%'}}));
+cmp_ok($schema->resultset('CD')->count, '==', ($total_cds - $artist2_cds), 'only matching related cds were deleted');
--- /dev/null
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBICTest::BasicRels;
+
+require "t/run/146db2_400.tl";
+run_tests(DBICTest->schema);
--- /dev/null
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBICTest::BasicRels;
+
+require "t/run/27ordered.tl";
+run_tests(DBICTest->schema);
--- /dev/null
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBICTest::BasicRels;
+
+require "t/run/28result_set_column.tl";
+run_tests(DBICTest->schema);
--- /dev/null
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBICTest::BasicRels;
+
+require "t/run/29inflate_datetime.tl";
+run_tests(DBICTest->schema);
--- /dev/null
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBICTest::BasicRels;
+
+require "t/run/30ensure_class_loaded.tl";
+run_tests(DBICTest->schema);
--- /dev/null
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBICTest::BasicRels;
+
+require "t/run/30join_torture.tl";
+run_tests(DBICTest->schema);
--- /dev/null
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBICTest::HelperRels;
+
+require "t/run/146db2_400.tl";
+run_tests(DBICTest->schema);
eval "use SQL::Translator";
plan skip_all => 'SQL::Translator required' if $@;
+# do not taunt happy dave ball
+
my $schema = DBICTest::Schema;
-plan tests => 29;
+plan tests => 31;
my $translator = SQL::Translator->new(
parser_args => {
my $output = $translator->translate();
-my @constraints =
+my @fk_constraints =
(
{'display' => 'twokeys->cd',
'selftable' => 'twokeys', 'foreigntable' => 'cd',
{'display' => 'twokeys->artist',
'selftable' => 'twokeys', 'foreigntable' => 'artist',
'selfcols' => ['artist'], 'foreigncols' => ['artistid'],
- 'needed' => 1, on_delete => '', on_update => ''},
+ 'needed' => 1, on_delete => 'CASCADE', on_update => 'CASCADE'},
{'display' => 'cd_to_producer->cd',
'selftable' => 'cd_to_producer', 'foreigntable' => 'cd',
'selfcols' => ['cd'], 'foreigncols' => ['cdid'],
- 'needed' => 1, on_delete => '', on_update => ''},
+ 'needed' => 1, on_delete => 'CASCADE', on_update => 'CASCADE'},
{'display' => 'cd_to_producer->producer',
'selftable' => 'cd_to_producer', 'foreigntable' => 'producer',
'selfcols' => ['producer'], 'foreigncols' => ['producerid'],
{'display' => 'self_ref_alias -> self_ref for self_ref',
'selftable' => 'self_ref_alias', 'foreigntable' => 'self_ref',
'selfcols' => ['self_ref'], 'foreigncols' => ['id'],
- 'needed' => 1, on_delete => '', on_update => ''},
+ 'needed' => 1, on_delete => 'CASCADE', on_update => 'CASCADE'},
{'display' => 'self_ref_alias -> self_ref for alias',
'selftable' => 'self_ref_alias', 'foreigntable' => 'self_ref',
'selfcols' => ['alias'], 'foreigncols' => ['id'],
{'display' => 'cd -> artist',
'selftable' => 'cd', 'foreigntable' => 'artist',
'selfcols' => ['artist'], 'foreigncols' => ['artistid'],
- 'needed' => 1, on_delete => '', on_update => ''},
+ 'needed' => 1, on_delete => 'CASCADE', on_update => 'CASCADE'},
{'display' => 'artist_undirected_map -> artist for id1',
'selftable' => 'artist_undirected_map', 'foreigntable' => 'artist',
'selfcols' => ['id1'], 'foreigncols' => ['artistid'],
- 'needed' => 1, on_delete => '', on_update => ''},
+ 'needed' => 1, on_delete => 'CASCADE', on_update => ''},
{'display' => 'artist_undirected_map -> artist for id2',
'selftable' => 'artist_undirected_map', 'foreigntable' => 'artist',
'selfcols' => ['id2'], 'foreigncols' => ['artistid'],
- 'needed' => 1, on_delete => '', on_update => ''},
+ 'needed' => 1, on_delete => 'CASCADE', on_update => ''},
{'display' => 'track->cd',
'selftable' => 'track', 'foreigntable' => 'cd',
'selfcols' => ['cd'], 'foreigncols' => ['cdid'],
- 'needed' => 2, on_delete => '', on_update => ''},
+ 'needed' => 2, on_delete => 'CASCADE', on_update => 'CASCADE'},
{'display' => 'treelike -> treelike for parent',
'selftable' => 'treelike', 'foreigntable' => 'treelike',
'selfcols' => ['parent'], 'foreigncols' => ['id'],
'needed' => 1, on_delete => '', on_update => ''},
- {'display' => 'twokeytreelike -> twokeytreelike for parent1,parent2',
- 'selftable' => 'twokeytreelike', 'foreigntable' => 'twokeytreelike',
- 'selfcols' => ['parent1', 'parent2'], 'foreigncols' => ['id1','id2'],
- 'needed' => 1, on_delete => '', on_update => ''},
+
+ # shouldn't this be generated?
+ #
+ #{'display' => 'twokeytreelike -> twokeytreelike for parent1,parent2',
+ # 'selftable' => 'twokeytreelike', 'foreigntable' => 'twokeytreelike',
+ # 'selfcols' => ['parent1', 'parent2'], 'foreigncols' => ['id1','id2'],
+ # 'needed' => 1, on_delete => '', on_update => ''},
+
{'display' => 'tags -> cd',
'selftable' => 'tags', 'foreigntable' => 'cd',
'selfcols' => ['cd'], 'foreigncols' => ['cdid'],
- 'needed' => 1, on_delete => '', on_update => ''},
+ 'needed' => 1, on_delete => 'CASCADE', on_update => 'CASCADE'},
{'display' => 'bookmark -> link',
'selftable' => 'bookmark', 'foreigntable' => 'link',
'selfcols' => ['link'], 'foreigncols' => ['id'],
'needed' => 1, on_delete => '', on_update => ''},
);
+my @unique_constraints = (
+ {'display' => 'cd artist and title unique',
+ 'table' => 'cd', 'cols' => ['artist', 'title'],
+ 'needed' => 1},
+ {'display' => 'twokeytreelike name unique',
+ 'table' => 'twokeytreelike', 'cols' => ['name'],
+ 'needed' => 1},
+# {'display' => 'employee position and group_id unique',
+# 'table' => 'employee', cols => ['position', 'group_id'],
+# 'needed' => 1},
+);
+
my $tschema = $translator->schema();
for my $table ($tschema->get_tables) {
my $table_name = $table->name;
for my $c ( $table->get_constraints ) {
- next unless $c->type eq 'FOREIGN KEY';
-
- ok(check($table_name, scalar $c->fields,
- $c->reference_table, scalar $c->reference_fields,
- $c->on_delete, $c->on_update), "Constraint on $table_name matches an expected constraint");
+ if ($c->type eq 'FOREIGN KEY') {
+ ok(check_fk($table_name, scalar $c->fields,
+ $c->reference_table, scalar $c->reference_fields,
+ $c->on_delete, $c->on_update), "Foreign key constraint on $table_name matches an expected constraint");
+ }
+ elsif ($c->type eq 'UNIQUE') {
+ ok(check_unique($table_name, scalar $c->fields),
+ "Unique constraint on $table_name matches an expected constraint");
+ }
}
}
+# Make sure all the foreign keys are done.
my $i;
-for ($i = 0; $i <= $#constraints; ++$i) {
- ok(!$constraints[$i]->{'needed'}, "Constraint $constraints[$i]->{display}");
+for ($i = 0; $i <= $#fk_constraints; ++$i) {
+ ok(!$fk_constraints[$i]->{'needed'}, "Constraint $fk_constraints[$i]->{display}");
+}
+# Make sure all the uniques are done.
+for ($i = 0; $i <= $#unique_constraints; ++$i) {
+ ok(!$unique_constraints[$i]->{'needed'}, "Constraint $unique_constraints[$i]->{display}");
}
-sub check {
+sub check_fk {
my ($selftable, $selfcol, $foreigntable, $foreigncol, $ondel, $onupd) = @_;
$ondel = '' if (!defined($ondel));
$onupd = '' if (!defined($onupd));
my $i;
- for ($i = 0; $i <= $#constraints; ++$i) {
- if ($selftable eq $constraints[$i]->{'selftable'} &&
- $foreigntable eq $constraints[$i]->{'foreigntable'} &&
- ($ondel eq $constraints[$i]->{on_delete}) &&
- ($onupd eq $constraints[$i]->{on_update})) {
+ for ($i = 0; $i <= $#fk_constraints; ++$i) {
+ if ($selftable eq $fk_constraints[$i]->{'selftable'} &&
+ $foreigntable eq $fk_constraints[$i]->{'foreigntable'} &&
+ ($ondel eq $fk_constraints[$i]->{on_delete}) &&
+ ($onupd eq $fk_constraints[$i]->{on_update})) {
# check columns
my $found = 0;
for (my $j = 0; $j <= $#$selfcol; ++$j) {
$found = 0;
- for (my $k = 0; $k <= $#{$constraints[$i]->{'selfcols'}}; ++$k) {
- if ($selfcol->[$j] eq $constraints[$i]->{'selfcols'}->[$k] &&
- $foreigncol->[$j] eq $constraints[$i]->{'foreigncols'}->[$k]) {
+ for (my $k = 0; $k <= $#{$fk_constraints[$i]->{'selfcols'}}; ++$k) {
+ if ($selfcol->[$j] eq $fk_constraints[$i]->{'selfcols'}->[$k] &&
+ $foreigncol->[$j] eq $fk_constraints[$i]->{'foreigncols'}->[$k]) {
+ $found = 1;
+ last;
+ }
+ }
+ last unless $found;
+ }
+
+ if ($found) {
+ for (my $j = 0; $j <= $#{$fk_constraints[$i]->{'selfcols'}}; ++$j) {
+ $found = 0;
+ for (my $k = 0; $k <= $#$selfcol; ++$k) {
+ if ($selfcol->[$k] eq $fk_constraints[$i]->{'selfcols'}->[$j] &&
+ $foreigncol->[$k] eq $fk_constraints[$i]->{'foreigncols'}->[$j]) {
+ $found = 1;
+ last;
+ }
+ }
+ last unless $found;
+ }
+ }
+
+ if ($found) {
+ --$fk_constraints[$i]->{needed};
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+sub check_unique {
+ my ($selftable, $selfcol) = @_;
+
+ my $i;
+ for ($i = 0; $i <= $#unique_constraints; ++$i) {
+ if ($selftable eq $unique_constraints[$i]->{'table'}) {
+
+ my $found = 0;
+ for (my $j = 0; $j <= $#$selfcol; ++$j) {
+ $found = 0;
+ for (my $k = 0; $k <= $#{$unique_constraints[$i]->{'cols'}}; ++$k) {
+ if ($selfcol->[$j] eq $unique_constraints[$i]->{'cols'}->[$k]) {
$found = 1;
last;
}
}
if ($found) {
- for (my $j = 0; $j <= $#{$constraints[$i]->{'selfcols'}}; ++$j) {
+ for (my $j = 0; $j <= $#{$unique_constraints[$i]->{'cols'}}; ++$j) {
$found = 0;
for (my $k = 0; $k <= $#$selfcol; ++$k) {
- if ($selfcol->[$k] eq $constraints[$i]->{'selfcols'}->[$j] &&
- $foreigncol->[$k] eq $constraints[$i]->{'foreigncols'}->[$j]) {
+ if ($selfcol->[$k] eq $unique_constraints[$i]->{'cols'}->[$j]) {
$found = 1;
last;
}
}
if ($found) {
- --$constraints[$i]->{needed};
+ --$unique_constraints[$i]->{needed};
return 1;
}
}
--- /dev/null
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBICTest::HelperRels;
+
+require "t/run/27ordered.tl";
+run_tests(DBICTest->schema);
--- /dev/null
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBICTest::HelperRels;
+
+require "t/run/28result_set_column.tl";
+run_tests(DBICTest->schema);
--- /dev/null
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBICTest::HelperRels;
+
+require "t/run/29dbicadmin.tl";
+run_tests(DBICTest->schema);
--- /dev/null
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBICTest::HelperRels;
+
+require "t/run/29inflate_datetime.tl";
+run_tests(DBICTest->schema);
--- /dev/null
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBICTest::HelperRels;
+
+require "t/run/30ensure_class_loaded.tl";
+run_tests(DBICTest->schema);
--- /dev/null
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBICTest::HelperRels;
+
+require "t/run/30join_torture.tl";
+run_tests(DBICTest->schema);
unlink($db_file . "-journal") if -e $db_file . "-journal";
mkdir("t/var") unless -d "t/var";
- my $dsn = "dbi:SQLite:${db_file}";
+ my $dsn = $ENV{"DBICTEST_DSN"} || "dbi:SQLite:${db_file}";
+ my $dbuser = $ENV{"DBICTEST_DBUSER"} || '';
+ my $dbpass = $ENV{"DBICTEST_DBPASS"} || '';
+
- return DBICTest::Schema->compose_connection('DBICTest' => $dsn);
+ return DBICTest::Schema->compose_connection('DBICTest' => $dsn, $dbuser, $dbpass);
}
1;
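For anyone reading the patch, here is a minimal sketch (not part of the diff) of how the new environment hooks in DBICTest's initialise could be used to point the test suite at a different database; the DSN, user and password values below are placeholders, only the env var names come from the change above.

    # Hypothetical values -- illustrative only.
    BEGIN {
        $ENV{DBICTEST_DSN}    = 'dbi:Pg:dbname=dbictest';
        $ENV{DBICTEST_DBUSER} = 'dbictest_user';
        $ENV{DBICTEST_DBPASS} = 'secret';
    }
    use lib qw(t/lib);
    use DBICTest;
    my $schema = DBICTest->initialise;   # now connects with the env-supplied DSN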
--- /dev/null
+# belongs to t/run/30ensure_class_loaded.tl
+package # hide from PAUSE
+ DBICTest::FakeComponent;
+use warnings;
+use strict;
+
+1;
-package # hide from PAUSE
+package # hide from PAUSE
DBICTest::Schema;
use base qw/DBIx::Class::Schema/;
__PACKAGE__->load_classes(qw/
Artist
+ Employee
CD
Link
Bookmark
+ #Casecheck
#dummy
Track
Tag
'#dummy',
'SelfRef',
'ArtistUndirectedMap',
+ 'ArtistSourceName',
'Producer',
'CD_to_Producer',
),
- qw/SelfRefAlias TreeLike TwoKeyTreeLike/
+ qw/SelfRefAlias TreeLike TwoKeyTreeLike Event/
);
1;
--- /dev/null
+package # hide from PAUSE
+ DBICTest::Schema::ArtistSourceName;
+
+use base 'DBICTest::Schema::Artist';
+
+__PACKAGE__->source_name('SourceNameArtists');
+
+1;
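As an aside, a small usage sketch (not part of the patch) of what the explicit source_name above buys you: the class stays DBICTest::Schema::ArtistSourceName, but the schema registers it under the chosen moniker, mirroring the source_name tests added to run/01core.tl further down. The deployed, populated DBICTest schema is assumed.

    # Illustrative only.
    my $source = $schema->source('SourceNameArtists');      # looked up by moniker
    my @artsn  = $schema->resultset('SourceNameArtists')
                        ->search({}, { order_by => 'name DESC' });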
--- /dev/null
+package # hide from PAUSE
+ DBICTest::Schema::Employee;
+
+use base 'DBIx::Class';
+
+__PACKAGE__->load_components(qw( Ordered PK::Auto Core ));
+
+__PACKAGE__->table('employee');
+
+__PACKAGE__->add_columns(
+ employee_id => {
+ data_type => 'integer',
+ is_auto_increment => 1
+ },
+ position => {
+ data_type => 'integer',
+ },
+ group_id => {
+ data_type => 'integer',
+ is_nullable => 1,
+ },
+ name => {
+ data_type => 'varchar',
+ size => 100,
+ is_nullable => 1,
+ },
+);
+
+__PACKAGE__->set_primary_key('employee_id');
+__PACKAGE__->position_column('position');
+
+#__PACKAGE__->add_unique_constraint(position_group => [ qw/position group_id/ ]);
+
+__PACKAGE__->mk_classdata('field_name_for', {
+ employee_id => 'primary key',
+ position => 'list position',
+ group_id => 'collection column',
+ name => 'employee name',
+});
+
+1;
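For orientation, a brief sketch (not part of the patch) of the Ordered behaviour this new Employee class is set up to exercise; the method names match the ones driven by t/run/27ordered.tl below, and the surrounding schema/table context is assumed.

    # Illustrative only; assumes the employee table from the SQLite DDL below.
    my $employees = $schema->resultset('Employee');
    my $emp = $employees->create({ name => 'temp' });  # position assigned automatically
    $emp->move_first;                 # shuffle within the list
    $emp->move_to(3);                 # or jump to an explicit position
    my $next = $emp->next_sibling;    # undef once $emp is last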
--- /dev/null
+package DBICTest::Schema::Event;
+
+use strict;
+use warnings;
+use base qw/DBIx::Class/;
+
+__PACKAGE__->load_components(qw/InflateColumn::DateTime PK::Auto Core/);
+
+__PACKAGE__->table('event');
+
+__PACKAGE__->add_columns(
+ id => { data_type => 'integer', is_auto_increment => 1 },
+ starts_at => { data_type => 'datetime' }
+);
+
+__PACKAGE__->set_primary_key('id');
+
+1;
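Similarly, a short sketch (not part of the patch) of the InflateColumn::DateTime behaviour the Event class enables; it assumes DateTime::Format::MySQL is installed, which is exactly what the 29inflate_datetime.tl test below skips without.

    # Illustrative only; starts_at is inflated to a DateTime object on read
    # and deflated back to the column's string format on write.
    use DateTime;
    my $event = $schema->resultset('Event')->find(1);
    print $event->starts_at->ymd;        # 2006-04-25 for the populated row
    $event->starts_at(DateTime->now);    # store a DateTime, deflation is automatic
    $event->update;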
},
);
__PACKAGE__->set_primary_key(qw/id/);
-__PACKAGE__->belongs_to('parent', 'TreeLike',
+__PACKAGE__->belongs_to('parent', 'DBICTest::Schema::TreeLike',
{ 'foreign.id' => 'self.parent' });
1;
},
);
__PACKAGE__->set_primary_key(qw/id1 id2/);
+__PACKAGE__->add_unique_constraint('tktlnameunique' => ['name']);
__PACKAGE__->belongs_to('parent', 'TwoKeyTreeLike',
{ 'foreign.id1' => 'self.parent1', 'foreign.id2' => 'self.parent2'});
my $schema = DBICTest->initialise;
-$schema->storage->on_connect_do([ "PRAGMA synchronous = OFF" ]);
+# $schema->storage->on_connect_do([ "PRAGMA synchronous = OFF" ]);
my $dbh = $schema->storage->dbh;
close IN;
- $dbh->do($_) for split(/\n\n/, $sql);
+ $dbh->do($_) for split(/;\n/, $sql);
}
$schema->storage->dbh->do("PRAGMA synchronous = OFF");
[ 18, 1, 3, "Beehind You"],
]);
+$schema->populate('Event', [
+ [ qw/id starts_at/ ],
+ [ 1, '2006-04-25 22:24:33' ],
+]);
+
$schema->populate('Link', [
[ qw/id title/ ],
[ 1, 'aaa' ]
--
-- Created by SQL::Translator::Producer::SQLite
--- Created on Fri May 12 01:09:57 2006
+-- Created on Tue May 23 21:10:54 2006
--
BEGIN TRANSACTION;
--
+-- Table: employee
+--
+CREATE TABLE employee (
+ employee_id INTEGER PRIMARY KEY NOT NULL,
+ position integer NOT NULL,
+ group_id integer,
+ name varchar(100)
+);
+
+--
-- Table: serialized
--
CREATE TABLE serialized (
);
--
--- Table: link
+-- Table: self_ref
--
-CREATE TABLE link (
+CREATE TABLE self_ref (
id INTEGER PRIMARY KEY NOT NULL,
- url varchar(100),
- title varchar(100)
+ name varchar(100) NOT NULL
);
--
--- Table: self_ref
+-- Table: tags
--
-CREATE TABLE self_ref (
- id INTEGER PRIMARY KEY NOT NULL,
- name varchar(100) NOT NULL
+CREATE TABLE tags (
+ tagid INTEGER PRIMARY KEY NOT NULL,
+ cd integer NOT NULL,
+ tag varchar(100) NOT NULL
);
--
);
--
--- Table: tags
+-- Table: link
--
-CREATE TABLE tags (
- tagid INTEGER PRIMARY KEY NOT NULL,
- cd integer NOT NULL,
- tag varchar(100) NOT NULL
+CREATE TABLE link (
+ id INTEGER PRIMARY KEY NOT NULL,
+ url varchar(100),
+ title varchar(100)
+);
+
+--
+-- Table: event
+--
+CREATE TABLE event (
+ id INTEGER PRIMARY KEY NOT NULL,
+ starts_at datetime NOT NULL
);
--
);
--
--- Table: producer
---
-CREATE TABLE producer (
- producerid INTEGER PRIMARY KEY NOT NULL,
- name varchar(100) NOT NULL
-);
-
---
-- Table: onekey
--
CREATE TABLE onekey (
cd integer NOT NULL
);
+--
+-- Table: producer
+--
+CREATE TABLE producer (
+ producerid INTEGER PRIMARY KEY NOT NULL,
+ name varchar(100) NOT NULL
+);
+
+CREATE UNIQUE INDEX tktlnameunique_twokeytreelike on twokeytreelike (name);
+CREATE UNIQUE INDEX artist_title_cd on cd (artist, title);
COMMIT;
sub run_tests {
my $schema = shift;
-plan tests => 47;
+plan tests => 60;
# figure out if we've got a version of sqlite that is older than 3.2.6, in
# which case COUNT(DISTINCT()) doesn't work
ok($art->update, 'Update run');
+my $record_jp = $schema->resultset("Artist")->search(undef, { join => 'cds' })->search(undef, { prefetch => 'cds' })->next;
+
+ok($record_jp, "prefetch on same rel okay");
+
+my $record_fn = $schema->resultset("Artist")->search(undef, { join => 'cds' })->search({'cds.cdid' => '1'}, {join => 'artist_undirected_maps'})->next;
+
+ok($record_fn, "funny join is okay");
+
@art = $schema->resultset("Artist")->search({ name => 'We Are In Rehab' });
cmp_ok(@art, '==', 1, "Changed artist returned by search");
is($new_again->ID, 'DBICTest::Artist|artist|artistid=4', 'unique object id generated correctly');
+# Test backwards compatibility
+{
+ my $artist_by_hash = $schema->resultset('Artist')->find(artistid => 4);
+ is($artist_by_hash->name, 'Man With A Spoon', 'Retrieved correctly');
+ is($artist_by_hash->ID, 'DBICTest::Artist|artist|artistid=4', 'unique object id generated correctly');
+}
+
is($schema->resultset("Artist")->count, 4, 'count ok');
+# test find_or_new
+{
+ my $existing_obj = $schema->resultset('Artist')->find_or_new({
+ artistid => 4,
+ });
+
+ is($existing_obj->name, 'Man With A Spoon', 'find_or_new: found existing artist');
+ ok($existing_obj->in_storage, 'existing artist is in storage');
+
+ my $new_obj = $schema->resultset('Artist')->find_or_new({
+ artistid => 5,
+ name => 'find_or_new',
+ });
+
+ is($new_obj->name, 'find_or_new', 'find_or_new: instantiated a new artist');
+ ok(! $new_obj->in_storage, 'new artist is not in storage');
+}
+
my $cd = $schema->resultset("CD")->find(1);
my %cols = $cd->get_columns;
my $search = [ { 'tags.tag' => 'Cheesy' }, { 'tags.tag' => 'Blue' } ];
-my $or_rs = $schema->resultset("CD")->search($search, { join => 'tags',
+my( $or_rs ) = $schema->resultset("CD")->search_rs($search, { join => 'tags',
order_by => 'cdid' });
cmp_ok($or_rs->count, '==', 5, 'Search with OR ok');
is($art->name, 'Test _cond_for_update_delete', 'updated second artist name');
}
-#test cascade_delete thru many_many relations
-my $art_del = $schema->resultset("Artist")->find({ artistid => 1 });
-$art_del->delete;
-cmp_ok( $schema->resultset("CD")->search({artist => 1}), '==', 0, 'Cascading through has_many top level.');
-cmp_ok( $schema->resultset("CD_to_Producer")->search({cd => 1}), '==', 0, 'Cascading through has_many children.');
+# test source_name
+{
+ # source_name should be set for normal modules
+ is($schema->source('CD')->source_name, 'CD', 'source_name is set to moniker');
-$schema->source("Artist")->{_columns}{'artistid'} = {};
+ # test the result source that sets source_name explicitly
+ ok($schema->source('SourceNameArtists'), 'SourceNameArtists result source exists');
-my $typeinfo = $schema->source("Artist")->column_info('artistid');
-is($typeinfo->{data_type}, 'INTEGER', 'column_info ok');
-$schema->source("Artist")->column_info('artistid');
-ok($schema->source("Artist")->{_columns_info_loaded} == 1, 'Columns info flag set');
+ my @artsn = $schema->resultset('SourceNameArtists')->search({}, { order_by => 'name DESC' });
+ cmp_ok(@artsn, '==', 4, "Four artists returned");
+}
my $newbook = $schema->resultset( 'Bookmark' )->find(1);
};
ok(!$@, "stringify to false value doesn't cause error");
+# test cascade_delete through many_to_many relations
+{
+ my $art_del = $schema->resultset("Artist")->find({ artistid => 1 });
+ $art_del->delete;
+ cmp_ok( $schema->resultset("CD")->search({artist => 1}), '==', 0, 'Cascading through has_many top level.');
+ cmp_ok( $schema->resultset("CD_to_Producer")->search({cd => 1}), '==', 0, 'Cascading through has_many children.');
+}
+
+# test column_info
+{
+ $schema->source("Artist")->{_columns}{'artistid'} = {};
+
+ my $typeinfo = $schema->source("Artist")->column_info('artistid');
+ is($typeinfo->{data_type}, 'INTEGER', 'column_info ok');
+ $schema->source("Artist")->column_info('artistid');
+ ok($schema->source("Artist")->{_columns_info_loaded} == 1, 'Columns info flag set');
+}
+
+# test remove_columns
+{
+ is_deeply([$schema->source('CD')->columns], [qw/cdid artist title year/], 'CD source columns before remove_columns');
+ $schema->source('CD')->remove_columns('year');
+ is_deeply([$schema->source('CD')->columns], [qw/cdid artist title/], 'year removed from CD source columns');
+}
+
}
1;
'name' => {
'data_type' => 'varchar',
'is_nullable' => 0,
- }
+ },
};
is_deeply($type_info, $test_type_info, 'columns_info_for - column data types');
use strict;
use warnings;
-plan tests => 26;
+plan tests => 32;
# has_a test
my $cd = $schema->resultset("CD")->find(4);
is( ($artist->search_related('cds'))[3]->title, 'Big Flop', 'create_related ok' );
+my( $rs_from_list ) = $artist->search_related_rs('cds');
+is( ref($rs_from_list), 'DBIx::Class::ResultSet', 'search_related_rs in list context returns rs' );
+
+( $rs_from_list ) = $artist->cds_rs();
+is( ref($rs_from_list), 'DBIx::Class::ResultSet', 'relation_rs in list context returns rs' );
+
# count_related
is( $artist->count_related('cds'), 4, 'count_related ok' );
$artist->delete_related( cds => { title => 'Greatest Hits' });
cmp_ok( $schema->resultset("CD")->search( title => 'Greatest Hits' ), '==', 0, 'delete_related ok' );
+# find_or_new_related with an existing record
+$cd = $artist->find_or_new_related( 'cds', { title => 'Big Flop' } );
+is( $cd->year, 2005, 'find_or_new_related on existing record ok' );
+ok( $cd->in_storage, 'find_or_new_related on existing record: is in_storage' );
+
+# find_or_new_related instantiating a new record
+$cd = $artist->find_or_new_related( 'cds', {
+ title => 'Greatest Hits 2: Louder Than Ever',
+ year => 2007,
+} );
+is( $cd->title, 'Greatest Hits 2: Louder Than Ever', 'find_or_new_related new record ok' );
+ok( ! $cd->in_storage, 'find_or_new_related on a new record: not in_storage' );
+
SKIP: {
skip "relationship checking needs fixing", 1;
# try to add a bogus relationship using the wrong cols
sub run_tests {
my $schema = shift;
-
my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_PG_${_}" } qw/DSN USER PASS/};
#warn "$dsn $user $pass";
plan skip_all, 'Set $ENV{DBICTEST_PG_DSN}, _USER and _PASS to run this test'
- . ' (note: creates and drops a table named artist!)' unless ($dsn && $user);
+ . ' (note: creates and drops tables named artist and casecheck!)' unless ($dsn && $user);
-plan tests => 4;
+plan tests => 8;
DBICTest::Schema->compose_connection('PgTest' => $dsn, $user, $pass);
my $dbh = PgTest->schema->storage->dbh;
+PgTest->schema->source("Artist")->name("testschema.artist");
+$dbh->do("CREATE SCHEMA testschema;");
-$dbh->do("CREATE TABLE artist (artistid serial PRIMARY KEY, name VARCHAR(255), charfield CHAR(10));");
+$dbh->do("CREATE TABLE testschema.artist (artistid serial PRIMARY KEY, name VARCHAR(100), charfield CHAR(10));");
+ok ( $dbh->do('CREATE TABLE testschema.casecheck (id serial PRIMARY KEY, "name" VARCHAR(1), "NAME" VARCHAR(2), "UC_NAME" VARCHAR(3));'), 'Creation of casecheck table');
PgTest::Artist->load_components('PK::Auto');
'name' => {
'data_type' => 'character varying',
'is_nullable' => 1,
- 'size' => 255,
+ 'size' => 100,
'default_value' => undef,
},
'charfield' => {
};
-my $type_info = PgTest->schema->storage->columns_info_for('artist');
+my $type_info = PgTest->schema->storage->columns_info_for('testschema.artist');
my $artistid_defval = delete $type_info->{artistid}->{default_value};
like($artistid_defval,
- qr/^nextval\('public\.artist_artistid_seq'::(?:text|regclass)\)/,
+ qr/^nextval\('([^\.]*\.){0,1}artist_artistid_seq'::(?:text|regclass)\)/,
'columns_info_for - sequence matches Pg get_autoinc_seq expectations');
is_deeply($type_info, $test_type_info,
'columns_info_for - column data types');
-$dbh->do("DROP TABLE artist;");
+my $name_info = PgTest::Casecheck->column_info( 'name' );
+is( $name_info->{size}, 1, "Case sensitive matching info for 'name'" );
+
+my $NAME_info = PgTest::Casecheck->column_info( 'NAME' );
+is( $NAME_info->{size}, 2, "Case sensitive matching info for 'NAME'" );
+
+my $uc_name_info = PgTest::Casecheck->column_info( 'uc_name' );
+is( $uc_name_info->{size}, 3, "Case insensitive matching info for 'uc_name'" );
+
+$dbh->do("DROP TABLE testschema.artist;");
+$dbh->do("DROP TABLE testschema.casecheck;");
+$dbh->do("DROP SCHEMA testschema;");
}
my $dbh = DB2Test->schema->storage->dbh;
-{
- local $SIG{__WARN__} = sub {};
- $dbh->do("DROP TABLE artist;");
-}
+$dbh->do("DROP TABLE artist", { RaiseError => 0, PrintError => 0 });
$dbh->do("CREATE TABLE artist (artistid INTEGER GENERATED BY DEFAULT AS IDENTITY (START WITH 1, INCREMENT BY 1), name VARCHAR(255), charfield CHAR(10));");
--- /dev/null
+sub run_tests {
+my $schema = shift;
+
+my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_DB2_400_${_}" } qw/DSN USER PASS/};
+
+#warn "$dsn $user $pass";
+
+# Probably best to pass the DBQ option in the DSN to specify a specific
+# libray. Something like:
+# DBICTEST_DB2_400_DSN='dbi:ODBC:dsn=MyAS400;DBQ=MYLIB'
+plan skip_all, 'Set $ENV{DBICTEST_DB2_400_DSN}, _USER and _PASS to run this test'
+ unless ($dsn && $user);
+
+plan tests => 6;
+
+DBICTest::Schema->compose_connection('DB2Test' => $dsn, $user, $pass);
+
+my $dbh = DB2Test->schema->storage->dbh;
+
+$dbh->do("DROP TABLE artist", { RaiseError => 0, PrintError => 0 });
+
+$dbh->do("CREATE TABLE artist (artistid INTEGER GENERATED BY DEFAULT AS IDENTITY (START WITH 1, INCREMENT BY 1), name VARCHAR(255), charfield CHAR(10))");
+
+DB2Test::Artist->load_components('PK::Auto');
+
+# test primary key handling
+my $new = DB2Test::Artist->create({ name => 'foo' });
+ok($new->artistid, "Auto-PK worked");
+
+# test LIMIT support
+for (1..6) {
+ DB2Test::Artist->create({ name => 'Artist ' . $_ });
+}
+my $it = DB2Test::Artist->search( {},
+ { rows => 3,
+ order_by => 'artistid'
+ }
+);
+is( $it->count, 3, "LIMIT count ok" );
+is( $it->next->name, "foo", "iterator->next ok" );
+$it->next;
+is( $it->next->name, "Artist 2", "iterator->next ok" );
+is( $it->next, undef, "next past end of resultset ok" );
+
+my $test_type_info = {
+ 'artistid' => {
+ 'data_type' => 'INTEGER',
+ 'is_nullable' => 0,
+ 'size' => 10
+ },
+ 'name' => {
+ 'data_type' => 'VARCHAR',
+ 'is_nullable' => 1,
+ 'size' => 255
+ },
+ 'charfield' => {
+ 'data_type' => 'CHAR',
+ 'is_nullable' => 1,
+ 'size' => 10
+ },
+};
+
+
+my $type_info = DB2Test->schema->storage->columns_info_for('artist');
+is_deeply($type_info, $test_type_info, 'columns_info_for - column data types');
+
+
+
+# clean up our mess
+$dbh->do("DROP TABLE artist");
+
+}
+
+1;
eval "use DBD::SQLite";
plan $@
? ( skip_all => 'needs DBD::SQLite for testing' )
- : ( tests => 44 );
+ : ( tests => 42 );
}
# figure out if we've got a version of sqlite that is older than 3.2.6, in
);
cmp_ok( scalar $rs->all, '==', scalar $rs->slice(0, $rs->count - 1), 'slice() with join has same count as all()' );
-eval { $rs->search(undef, { rows => 0, offset => 3 })->all; };
-
-ok($@, "rows => 0 errors: $@");
-
$rs = $schema->resultset("Artist")->search(
{ 'liner_notes.notes' => 'Kill Yourself!' },
{ join => { 'cds' => 'liner_notes' } });
cmp_ok($queries, '==', 1, 'Only one query run');
-# has_many resulting in an additional select if no records available despite prefetch
-my $track = $schema->resultset("Artist")->create( {
- artistid => 4,
- name => 'Artist without CDs',
-} );
-
-$queries = 0;
-$schema->storage->debug(1);
-
-my $artist_without_cds = $schema->resultset("Artist")->find(4, {
- join => [qw/ cds /],
- prefetch => [qw/ cds /],
-});
-my @no_cds = $artist_without_cds->cds;
-
-is($queries, 1, 'prefetch ran only 1 sql statement');
-
-$schema->storage->debug(0);
-
} # end run_tests
1;
sub run_tests {
my $schema = shift;
-plan tests => 18;
+plan tests => 34;
my $artistid = 1;
my $title = 'UNIQUE Constraint';
is($cd2->title, $cd1->title, 'title is correct');
is($cd2->year, $cd1->year, 'year is correct');
-my $cd3 = $schema->resultset('CD')->update_or_create(
+my $cd3 = $schema->resultset('CD')->find($artistid, $title, { key => 'artist_title' });
+
+is($cd3->get_column('artist'), $cd1->get_column('artist'), 'find by specific key, ordered columns: artist is correct');
+is($cd3->title, $cd1->title, 'title is correct');
+is($cd3->year, $cd1->year, 'year is correct');
+
+my $cd4 = $schema->resultset('CD')->update_or_create(
{
artist => $artistid,
title => $title,
},
);
-ok(! $cd3->is_changed, 'update_or_create without key: row is clean');
-is($cd3->cdid, $cd2->cdid, 'cdid is correct');
-is($cd3->get_column('artist'), $cd2->get_column('artist'), 'artist is correct');
-is($cd3->title, $cd2->title, 'title is correct');
-is($cd3->year, 2007, 'updated year is correct');
+ok(! $cd4->is_changed, 'update_or_create without key: row is clean');
+is($cd4->cdid, $cd2->cdid, 'cdid is correct');
+is($cd4->get_column('artist'), $cd2->get_column('artist'), 'artist is correct');
+is($cd4->title, $cd2->title, 'title is correct');
+is($cd4->year, 2007, 'updated year is correct');
-my $cd4 = $schema->resultset('CD')->update_or_create(
+my $cd5 = $schema->resultset('CD')->update_or_create(
{
artist => $artistid,
title => $title,
{ key => 'artist_title' }
);
-ok(! $cd4->is_changed, 'update_or_create by specific key: row is clean');
-is($cd4->cdid, $cd2->cdid, 'cdid is correct');
-is($cd4->get_column('artist'), $cd2->get_column('artist'), 'artist is correct');
-is($cd4->title, $cd2->title, 'title is correct');
-is($cd4->year, 2007, 'updated year is correct');
+ok(! $cd5->is_changed, 'update_or_create by specific key: row is clean');
+is($cd5->cdid, $cd2->cdid, 'cdid is correct');
+is($cd5->get_column('artist'), $cd2->get_column('artist'), 'artist is correct');
+is($cd5->title, $cd2->title, 'title is correct');
+is($cd5->year, 2007, 'updated year is correct');
-my $cd5 = $schema->resultset('CD')->update_or_create(
+my $cd6 = $schema->resultset('CD')->update_or_create(
{
cdid => $cd2->cdid,
artist => 1,
{ key => 'primary' }
);
-ok(! $cd5->is_changed, 'update_or_create by PK: row is clean');
-is($cd5->cdid, $cd2->cdid, 'cdid is correct');
-is($cd5->get_column('artist'), $cd2->get_column('artist'), 'artist is correct');
-is($cd5->title, $cd2->title, 'title is correct');
-is($cd5->year, 2005, 'updated year is correct');
+ok(! $cd6->is_changed, 'update_or_create by PK: row is clean');
+is($cd6->cdid, $cd2->cdid, 'cdid is correct');
+is($cd6->get_column('artist'), $cd2->get_column('artist'), 'artist is correct');
+is($cd6->title, $cd2->title, 'title is correct');
+is($cd6->year, 2005, 'updated year is correct');
+
+my $cd7 = $schema->resultset('CD')->find_or_create(
+ {
+ artist => $artistid,
+ title => $title,
+ year => 2010,
+ },
+ { key => 'artist_title' }
+);
+
+is($cd7->cdid, $cd1->cdid, 'find_or_create by specific key: cdid is correct');
+is($cd7->get_column('artist'), $cd1->get_column('artist'), 'artist is correct');
+is($cd7->title, $cd1->title, 'title is correct');
+is($cd7->year, $cd1->year, 'year is correct');
+
+my $artist = $schema->resultset('Artist')->find($artistid);
+my $cd8 = $artist->find_or_create_related('cds',
+ {
+ artist => $artistid,
+ title => $title,
+ year => 2020,
+ },
+ { key => 'artist_title' }
+);
+
+is($cd8->cdid, $cd1->cdid, 'find_or_create related by specific key: cdid is correct');
+is($cd8->get_column('artist'), $cd1->get_column('artist'), 'artist is correct');
+is($cd8->title, $cd1->title, 'title is correct');
+is($cd8->year, $cd1->year, 'year is correct');
+
+my $cd9 = $artist->update_or_create_related('cds',
+ {
+ artist => $artistid,
+ title => $title,
+ year => 2021,
+ },
+ { key => 'artist_title' }
+);
+
+ok(! $cd9->is_changed, 'update_or_create by specific key: row is clean');
+is($cd9->cdid, $cd1->cdid, 'cdid is correct');
+is($cd9->get_column('artist'), $cd1->get_column('artist'), 'artist is correct');
+is($cd9->title, $cd1->title, 'title is correct');
+is($cd9->year, 2021, 'year is correct');
}
--- /dev/null
+# vim: filetype=perl
+
+sub run_tests {
+
+ plan tests => 321;
+ my $schema = shift;
+
+ my $employees = $schema->resultset('Employee');
+ $employees->delete();
+
+ foreach (1..5) {
+ $employees->create({ name=>'temp' });
+ }
+ $employees = $employees->search(undef,{order_by=>'position'});
+ ok( check_rs($employees), "intial positions" );
+
+ hammer_rs( $employees );
+
+ #return;
+
+ DBICTest::Employee->grouping_column('group_id');
+ $employees->delete();
+ foreach my $group_id (1..3) {
+ foreach (1..6) {
+ $employees->create({ name=>'temp', group_id=>$group_id });
+ }
+ }
+ $employees = $employees->search(undef,{order_by=>'group_id,position'});
+
+ foreach my $group_id (1..3) {
+ my $group_employees = $employees->search({group_id=>$group_id});
+ $group_employees->all();
+ ok( check_rs($group_employees), "group initial positions" );
+ hammer_rs( $group_employees );
+ }
+
+}
+
+sub hammer_rs {
+ my $rs = shift;
+ my $row;
+ my $count = $rs->count();
+ my $position_column = $rs->result_class->position_column();
+
+ foreach my $position (1..$count) {
+
+ ($row) = $rs->search({ $position_column=>$position })->all();
+ $row->move_previous();
+ ok( check_rs($rs), "move_previous( $position )" );
+
+ ($row) = $rs->search({ $position_column=>$position })->all();
+ $row->move_next();
+ ok( check_rs($rs), "move_next( $position )" );
+
+ ($row) = $rs->search({ $position_column=>$position })->all();
+ $row->move_first();
+ ok( check_rs($rs), "move_first( $position )" );
+
+ ($row) = $rs->search({ $position_column=>$position })->all();
+ $row->move_last();
+ ok( check_rs($rs), "move_last( $position )" );
+
+ foreach my $to_position (1..$count) {
+ ($row) = $rs->search({ $position_column=>$position })->all();
+ $row->move_to($to_position);
+ ok( check_rs($rs), "move_to( $position => $to_position )" );
+ }
+
+ ($row) = $rs->search({ $position_column=>$position })->all();
+ if ($position==1) {
+ ok( !$row->previous_sibling(), 'no previous sibling' );
+ ok( !$row->first_sibling(), 'no first sibling' );
+ }
+ else {
+ ok( $row->previous_sibling(), 'previous sibling' );
+ ok( $row->first_sibling(), 'first sibling' );
+ }
+ if ($position==$count) {
+ ok( !$row->next_sibling(), 'no next sibling' );
+ ok( !$row->last_sibling(), 'no last sibling' );
+ }
+ else {
+ ok( $row->next_sibling(), 'next sibling' );
+ ok( $row->last_sibling(), 'last sibling' );
+ }
+
+ }
+}
+
+sub check_rs {
+ my( $rs ) = @_;
+ $rs->reset();
+ my $position_column = $rs->result_class->position_column();
+ my $expected_position = 0;
+ while (my $row = $rs->next()) {
+ $expected_position ++;
+ if ($row->get_column($position_column)!=$expected_position) {
+ return 0;
+ }
+ }
+ return 1;
+}
+
+1;
--- /dev/null
+sub run_tests {
+my $schema = shift;
+
+plan tests => 8;
+
+my $rs = $schema->resultset("CD")->search({});
+
+my $rs_title = $rs->get_column('title');
+my $rs_year = $rs->get_column('year');
+
+is($rs_title->next, 'Spoonful of bees', "next okay");
+
+my @all = $rs_title->all;
+cmp_ok(scalar @all, '==', 5, "five titles returned");
+
+cmp_ok($rs_year->max, '==', 2001, "max okay for year");
+is($rs_title->min, 'Caterwaulin\' Blues', "min okay for title");
+
+cmp_ok($rs_year->sum, '==', 9996, "sum okay for year");
+
+my $psrs = $schema->resultset('CD')->search({},
+ {
+ '+select' => \'COUNT(*)',
+ '+as' => 'count'
+ }
+);
+ok(defined($psrs->get_column('count')), '+select/+as count');
+
+$psrs = $schema->resultset('CD')->search({},
+ {
+ '+select' => [ \'COUNT(*)', 'title' ],
+ '+as' => [ 'count', 'addedtitle' ]
+ }
+);
+ok(defined($psrs->get_column('count')), '+select/+as arrayref count');
+ok(defined($psrs->get_column('addedtitle')), '+select/+as title');
+}
+
+1;
--- /dev/null
+# vim: filetype=perl
+
+sub run_tests {
+
+ eval 'require JSON';
+ plan skip_all, 'Install JSON to run this test' if ($@);
+
+ eval 'require Text::CSV_XS';
+ if ($@) {
+ eval 'require Text::CSV_PP';
+ plan skip_all, 'Install Text::CSV_XS or Text::CSV_PP to run this test' if ($@);
+ }
+
+ plan tests => 5;
+ my $schema = shift;
+
+ my $employees = $schema->resultset('Employee');
+ my $cmd = qq|script/dbicadmin --schema=DBICTest::Schema --class=Employee --tlibs --connect='["dbi:SQLite:dbname=t/var/DBIxClass.db","",""]' --force|;
+
+ `$cmd --op=insert --set='{name:"Matt"}'`;
+ ok( ($employees->count()==1), 'insert count' );
+
+ my $employee = $employees->find(1);
+ ok( ($employee->name() eq 'Matt'), 'insert valid' );
+
+ `$cmd --op=update --set='{name:"Trout"}'`;
+ $employee = $employees->find(1);
+ ok( ($employee->name() eq 'Trout'), 'update' );
+
+ `$cmd --op=insert --set='{name:"Aran"}'`;
+ my $data = `$cmd --op=select --attrs='{order_by:"name"}'`;
+ ok( ($data=~/Aran.*Trout/s), 'select with attrs' );
+
+ `$cmd --op=delete --where='{name:"Trout"}'`;
+ ok( ($employees->count()==1), 'delete' );
+}
+
+1;
--- /dev/null
+sub run_tests {
+my $schema = shift;
+
+eval { require DateTime::Format::MySQL };
+plan skip_all => "Need DateTime::Format::MySQL for inflation tests" if $@;
+
+plan tests => 2;
+
+# inflation test
+my $event = $schema->resultset("Event")->find(1);
+
+isa_ok($event->starts_at, 'DateTime', 'DateTime returned');
+
+is($event->starts_at, '2006-04-25T22:24:33', 'Correct date/time');
+
+}
+
+1;
--- /dev/null
+use Class::Inspector;
+
+BEGIN {
+ package TestPackage::A;
+ sub some_method {}
+}
+
+sub run_tests {
+
+my $schema = shift;
+plan tests => 6;
+
+ok(Class::Inspector->loaded('TestPackage::A'),
+ 'anon. package exists');
+eval {
+ $schema->ensure_class_loaded('TestPackage::A');
+};
+
+ok(!$@, 'ensure_class_loaded detected an anon. class');
+
+eval {
+ $schema->ensure_class_loaded('FakePackage::B');
+};
+
+like($@, qr/Can't locate/,
+ 'ensure_class_loaded threw exception for nonexistent class');
+
+ok(!Class::Inspector->loaded('DBICTest::FakeComponent'),
+ 'DBICTest::FakeComponent not loaded yet');
+
+eval {
+ $schema->ensure_class_loaded('DBICTest::FakeComponent');
+};
+
+ok(!$@, 'ensure_class_loaded detected an existing but non-loaded class');
+ok(Class::Inspector->loaded('DBICTest::FakeComponent'),
+ 'DBICTest::FakeComponent now loaded');
+}
+
+1;
--- /dev/null
+sub run_tests {
+my $schema = shift;
+
+plan tests => 4;
+
+my $rs1 = $schema->resultset("Artist")->search({ 'tags.tag' => 'Blue' }, { join => {'cds' => 'tracks'}, prefetch => {'cds' => 'tags'} });
+my @artists = $rs1->all;
+cmp_ok(@artists, '==', 1, "One artist returned");
+
+my $rs2 = $rs1->search({ artistid => '1' }, { join => {'cds' => {'cd_to_producer' => 'producer'} } });
+my $rs3 = $rs2->search_related('cds')->search({'cds.title' => 'Forkful of bees'});
+cmp_ok($rs3->count, '==', 3, "search_related with extra join returns three rows");
+
+my $rs4 = $schema->resultset("CD")->search({ 'artist.artistid' => '1' }, { join => ['tracks', 'artist'], prefetch => 'artist' });
+my @rs4_results = $rs4->all;
+
+
+is($rs4_results[0]->cdid, 1, "correct CD returned");
+
+my $rs5 = $rs4->search({'tracks.title' => 'Sticky Honey'});
+is($rs5->count, 1, "search without using previous joins okay");
+
+}
+
+1;