From: Matt S Trout
Date: Wed, 17 May 2006 00:22:33 +0000 (+0000)
Subject: Merge 'DBIx-Class-current' into 'datetime'
X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=commitdiff_plain;h=4f0bd3e78e96cf74378f791bb290b329abc48098;hp=445e5e31324c54ce3ffe96199e4e308efaa0823e;p=dbsrgits%2FDBIx-Class-Historic.git

Merge 'DBIx-Class-current' into 'datetime'

 r10262@obrien (orig r1521):  bluefeet | 2006-04-27 02:36:52 +0100
 New dbicadmin script for bringing dbic objects to the unix command line.
 r10332@obrien (orig r1529):  bluefeet | 2006-04-29 02:31:20 +0100
 Initial JSON support for the dbicadmin script.
 r10333@obrien (orig r1530):  bluefeet | 2006-04-29 03:22:45 +0100
 Fixes to dbicadmin as well as the ability to support SELECTs.
 r10334@obrien (orig r1531):  bluefeet | 2006-04-29 03:44:47 +0100
 Newlines after each CSV line. Add trace option.
 r10335@obrien (orig r1532):  bluefeet | 2006-04-29 04:25:15 +0100
 Documented JSON usage and added support for the attrs option.
 r10343@obrien (orig r1540):  castaway | 2006-04-29 18:47:24 +0100
 add create_ddl_dir for creating versioned sql statements from schema, and make DBICTest use it
 r10344@obrien (orig r1541):  castaway | 2006-04-29 19:06:51 +0100
 Document create_ddl_dir method
 r10345@obrien (orig r1542):  castaway | 2006-04-29 20:38:43 +0100
 Default to using sqlt on deploy, if available
 r10350@obrien (orig r1543):  bluefeet | 2006-04-30 14:52:04 +0100
 Fix some errors with using unique constraints with Ordered.
 r10351@obrien (orig r1544):  bluefeet | 2006-04-30 15:41:53 +0100
 No longer support unique constraints in Ordered.
 r10367@obrien (orig r1545):  bluefeet | 2006-04-30 16:37:09 +0100
 Tests for dbicadmin.
 r10369@obrien (orig r1546):  bluefeet | 2006-04-30 16:37:41 +0100
 Moved scripts to script/ per what other CPAN modules do.
 r10370@obrien (orig r1547):  bluefeet | 2006-04-30 16:38:19 +0100
 Call script/, not scripts/, in the dbicadmin tests.
 r10371@obrien (orig r1548):  dwc | 2006-04-30 18:27:50 +0100
 bluefeet disabled the unique constraint in [1544]
 r10372@obrien (orig r1549):  dwc | 2006-04-30 18:38:26 +0100
 Fix evals for skipping test
 r10387@obrien (orig r1564):  matthewt | 2006-05-03 15:19:00 +0100
  r1642@thor (orig r1505):  matthewt | 2006-04-22 16:29:28 +0000
  cycle tests and a weaken call
  r1657@thor (orig r1520):  bluefeet | 2006-04-26 22:15:41 +0000
  Document the existence of the DBIx::Class::ResultSource::schema() accessor.
  r1660@thor (orig r1523):  matthewt | 2006-04-27 20:43:45 +0000
  pod patch from ted
  r1698@thor (orig r1561):  dwc | 2006-05-01 19:29:37 +0000
  Add example of multi-column foreign keys
  r1699@thor (orig r1562):  dwc | 2006-05-01 19:31:19 +0000
  Add missing comma in example ;)
 r10388@obrien (orig r1565):  semifor | 2006-05-03 21:41:54 +0100
 Automatic primary key class for DB2/400 over ODBC
 r10389@obrien (orig r1566):  semifor | 2006-05-03 22:07:59 +0100
 Load ::DBI::ODBC400 when a DB2/400 ODBC backend is detected.
 r10390@obrien (orig r1567):  semifor | 2006-05-03 22:20:08 +0100
 - Corrected a minor typo in a comment.
 - Added an attribution in the documentation to the code the module is based on.
 r10392@obrien (orig r1569):  jesper | 2006-05-05 18:25:54 +0100
 Fix to make the PostgreSQL code handle schemas. This should be non-intrusive to non-schema users.
 r10402@obrien (orig r1579):  semifor | 2006-05-08 18:41:54 +0100
 Generalized the loading of subclasses for specific ODBC backends.
 r10405@obrien (orig r1582):  semifor | 2006-05-09 01:02:54 +0100
 - Factored out sql_maker arguments so they can be customized by derived ::Storage::DBI::* modules.
- Customized sql_maker arguments for DB2/400 over ODBC. r10413@obrien (orig r1590): matthewt | 2006-05-09 14:45:52 +0100 r5828@cain (orig r1581): matthewt | 2006-05-08 23:03:00 +0000 couple bugfixes r10416@obrien (orig r1593): matthewt | 2006-05-09 19:03:41 +0100 r5838@cain (orig r1591): bluefeet | 2006-05-09 15:00:56 +0000 Comment to DBIX_CLASS_STORAGE_DBI_DEBUG stating that it is read on storage creation. r10418@obrien (orig r1595): bluefeet | 2006-05-09 23:02:44 +0100 Add search_rs to ResultSet and a new {$rel}_rs accessor to has_many. r10419@obrien (orig r1596): bluefeet | 2006-05-09 23:17:38 +0100 Fixes to _rs related docs. r10420@obrien (orig r1597): semifor | 2006-05-10 00:21:39 +0100 Test case for DB2/400 over ODBC. r10421@obrien (orig r1598): semifor | 2006-05-10 00:37:16 +0100 Test case for DB2/400 over ODBC. r10424@obrien (orig r1601): dwc | 2006-05-10 16:02:14 +0100 Row::update encapsulates this when passed a hashref; no point in duplication r10425@obrien (orig r1602): dwc | 2006-05-10 16:55:35 +0100 Revert previous bugfix; will apply to trunk r10427@obrien (orig r1604): dwc | 2006-05-10 17:01:46 +0100 r8956@fortuna (orig r1603): dwc | 2006-05-10 12:00:11 -0400 Row::update encapsulates this when passed a hashref; using set_columns bypasses deflation r10428@obrien (orig r1605): dwc | 2006-05-10 21:46:16 +0100 - Fix error message for bad find usage - Restore backwards compatibility for e.g. $rs->find(id => $val) - Add a test for the $rs->find(id => $val) backwards compatibility r10429@obrien (orig r1606): bluefeet | 2006-05-11 02:49:58 +0100 dbicadmin now works when not specifying the where clause. r10442@obrien (orig r1619): matthewt | 2006-05-12 15:16:48 +0100 r5900@cain (orig r1613): jguenther | 2006-05-11 19:20:59 +0000 Added a couple examples to the cookbook r5901@cain (orig r1614): jguenther | 2006-05-11 21:53:25 +0000 Fixed cookbook example to actually work r5902@cain (orig r1615): matthewt | 2006-05-12 00:56:54 +0000 performance fix for cascade_update r5903@cain (orig r1616): matthewt | 2006-05-12 01:04:37 +0000 fixup to gen-schema.pl r5904@cain (orig r1617): matthewt | 2006-05-12 02:17:18 +0000 fixup for stringify that can be false in find_or_create_related r10443@obrien (orig r1620): bluefeet | 2006-05-12 21:49:30 +0100 Testing commit. 
 r10447@obrien (orig r1624):  matthewt | 2006-05-14 19:27:01 +0100
  r5922@cain (orig r1623):  matthewt | 2006-05-14 18:25:56 +0000
  tweaked might_have test for -current
 r10448@obrien (orig r1625):  castaway | 2006-05-14 20:11:48 +0100
 Add foreign key constraint for new bookmark table
 r10449@obrien (orig r1626):  matthewt | 2006-05-15 02:33:12 +0100
 don't ask
 r10450@obrien (orig r1627):  matthewt | 2006-05-15 02:34:00 +0100
 don't ask
 r10451@obrien (orig r1628):  matthewt | 2006-05-15 05:19:23 +0100
 dumped options from Build.PL
 r10452@obrien (orig r1629):  gphat | 2006-05-15 18:46:01 +0100
 Add profiling support
---

diff --git a/Build.PL b/Build.PL
index f1d2ad8..2ab62b9 100644
--- a/Build.PL
+++ b/Build.PL
@@ -15,18 +15,16 @@
 my %arguments = (
     'Class::Data::Accessor'     => 0.01,
     'Carp::Clan'                => 0,
     'DBI'                       => 1.40,
+    'Module::Find'              => 0,
+    'Class::Inspector'          => 0,
   },
   build_requires => {
     'DBD::SQLite'               => 1.11,
   },
-  recommends => {
-    'Data::UUID'                => 0,
-    'Module::Find'              => 0,
-    'Class::Inspector'          => 0,
-  },
   create_makefile_pl => 'passthrough',
   create_readme => 1,
-  test_files => [ glob('t/*.t'), glob('t/*/*.t') ]
+  test_files => [ glob('t/*.t'), glob('t/*/*.t') ],
+  script_files => [ glob('script/*') ],
 );
 
 Module::Build->new(%arguments)->create_build_script;
diff --git a/Changes b/Changes
index e8b7378..3a51cb7 100644
--- a/Changes
+++ b/Changes
@@ -1,5 +1,7 @@
 Revision history for DBIx::Class
 
+        - refactor debugging to allow for profiling using Storage::Statistics
+        - removed Data::UUID from deps, made other optionals required
         - modified SQLT parser to skip dupe table names
         - added remove_column(s) to ResultSource/ResultSourceProxy
         - added add_column alias to ResultSourceProxy
@@ -16,6 +18,15 @@
         - CDBICompat: override find_or_create to fix column casing when
           ColumnCase is loaded
 
+0.06003
+        - make find_or_create_related check defined() instead of truth
+        - don't unnecessarily fetch rels for cascade_update
+        - don't set_columns explicitly in update_or_create; instead use
+          update($hashref) so InflateColumn works
+        - fix for has_many prefetch with 0 related rows
+        - make limit error if rows => 0
+        - added memory cycle tests and a long-needed weaken call
+
 0.06002 2006-04-20 00:42:41
         - fix set_from_related to accept undef
         - fix to Dumper-induced hash iteration bug
diff --git a/TODO b/TODO
index 4380aca..136e01a 100644
--- a/TODO
+++ b/TODO
@@ -51,3 +51,4 @@
 SQLT modules so an app can do its own deploy without SQLT on the target system
+
diff --git a/lib/DBIx/Class.pm b/lib/DBIx/Class.pm
index f141676..75b87d6 100644
--- a/lib/DBIx/Class.pm
+++ b/lib/DBIx/Class.pm
@@ -228,6 +228,8 @@
 Todd Lipcon
 
 wdh: Will Hawes
 
+gphat: Cory G Watson
+
 =head1 LICENSE
 
 You may distribute this code under the same terms as Perl itself.
diff --git a/lib/DBIx/Class/Manual/Cookbook.pod b/lib/DBIx/Class/Manual/Cookbook.pod
index 35b7d40..081a4d0 100644
--- a/lib/DBIx/Class/Manual/Cookbook.pod
+++ b/lib/DBIx/Class/Manual/Cookbook.pod
@@ -690,4 +690,160 @@
 is enough. If the left quote differs from the right quote, the first notation
 should be used. name_sep needs to be set to allow the SQL generator to put the
 quotes in the correct place.
 
+=head2 Overloading methods
+
+L<DBIx::Class> uses the L<Class::C3> package, which provides for redispatch of
+method calls. You have to use calls to C<next::method> to overload methods.
+More information on using L<Class::C3> with L<DBIx::Class> can be found in
+L<DBIx::Class::Manual::Component>.
+
+=head3 Changing one field whenever another changes
+
+For example, say that you have three columns, C<id>, C<number>, and
+C<squared>.
You would like to make changes to C<number> and have
+C<squared> be automagically set to the value of C<number> squared.
+You can accomplish this by overriding C<store_column>:
+
+    sub store_column {
+        my ( $self, $name, $value ) = @_;
+        if ($name eq 'number') {
+            $self->squared($value * $value);
+        }
+        $self->next::method($name, $value);
+    }
+
+Note that the hard work is done by the call to C<next::method>, which
+redispatches your call to store_column to the superclass(es).
+
+=head3 Automatically creating related objects
+
+You might have a class C<Artist> which has many C<CD>s. Further, you
+want to create a C<CD> object every time you insert an C<Artist> object.
+You can accomplish this by overriding C<insert>:
+
+    sub insert {
+        my ( $class, $args_ref ) = @_;
+        my $self = $class->next::method($args_ref);
+        $self->cds->new({})->fill_from_artist($self)->insert;
+        return $self;
+    }
+
+where C<fill_from_artist> is a method you specify in C<CD> which sets
+values in C<CD> based on the data in the C<Artist> object you pass in.
+
+=head2 Debugging DBIx::Class objects with Data::Dumper
+
+L<Data::Dumper> can be a very useful tool for debugging, but sometimes it can
+be hard to find the pertinent data in all the data it can generate.
+Specifically, if one naively tries to use it like so,
+
+    use Data::Dumper;
+
+    my $cd = $schema->resultset('CD')->find(1);
+    print Dumper($cd);
+
+several pages worth of data from the CD object's schema and result source will
+be dumped to the screen. Since usually one is only interested in a few column
+values of the object, this is not very helpful.
+
+Luckily, it is possible to modify the data before L<Data::Dumper> outputs
+it. Simply define a hook that L<Data::Dumper> will call on the object before
+dumping it. For example,
+
+    package My::DB::CD;
+
+    sub _dumper_hook {
+        $_[0] = bless {
+            %{ $_[0] },
+            result_source => undef,
+        }, ref($_[0]);
+    }
+
+    [...]
+
+    use Data::Dumper;
+
+    $Data::Dumper::Freezer = '_dumper_hook';
+
+    my $cd = $schema->resultset('CD')->find(1);
+    print Dumper($cd);
+        # dumps $cd without its ResultSource
+
+If the structure of your schema is such that there is a common base class for
+all your table classes, simply put a method similar to C<_dumper_hook> in the
+base class and set C<$Data::Dumper::Freezer> to its name and L<Data::Dumper>
+will automagically clean up your data before printing it. See
+L<Data::Dumper> for more information.
+
+=head2 Retrieving a row object's Schema
+
+It is possible to get a Schema object from a row object like so,
+
+    my $schema = $cd->result_source->schema;
+    my $artist_rs = $schema->resultset('Artist');
+        # for example
+
+This can be useful when you don't want to pass around a Schema object to every
+method.
+
+=head2 Profiling
+
+When you enable L<DBIx::Class::Storage::DBI>'s debugging it prints the SQL
+executed as well as notifications of query completion and transaction
+begin/commit.
If you'd like to profile the SQL you can subclass the
+L<DBIx::Class::Storage::Statistics> class and write your own profiling
+mechanism:
+
+    package My::Profiler;
+    use strict;
+
+    use base 'DBIx::Class::Storage::Statistics';
+
+    use Time::HiRes qw(time);
+
+    my $start;
+
+    sub query_start {
+        my $self = shift();
+        my $sql = shift();
+        my @params = @_;
+
+        print "Executing $sql: ".join(', ', @params)."\n";
+        $start = time();
+    }
+
+    sub query_end {
+        my $self = shift();
+        my $sql = shift();
+        my @params = @_;
+
+        printf("Execution took %0.4f seconds.\n", time() - $start);
+        $start = undef;
+    }
+
+    1;
+
+You can then install that class as the debugging object:
+
+    __PACKAGE__->storage()->debugobj(new My::Profiler());
+    __PACKAGE__->storage()->debug(1);
+
+A more complicated example might involve storing each execution of SQL in an
+array:
+
+    sub query_end {
+        my $self = shift();
+        my $sql = shift();
+        my @params = @_;
+
+        my $elapsed = time() - $start;
+        push(@{ $calls{$sql} }, {
+            params => \@params,
+            elapsed => $elapsed
+        });
+    }
+
+You could then create average, high and low execution times for an SQL
+statement and dig down to see if certain parameters cause aberrant behavior.
+
 =cut
diff --git a/lib/DBIx/Class/Ordered.pm b/lib/DBIx/Class/Ordered.pm
index f721c69..8e2c74d 100644
--- a/lib/DBIx/Class/Ordered.pm
+++ b/lib/DBIx/Class/Ordered.pm
@@ -279,20 +279,18 @@
 sub move_to {
     my $from_position = $self->get_column( $position_column );
     return 0 if ( $to_position < 1 );
     return 0 if ( $from_position==$to_position );
+    my @between = (
+        ( $from_position < $to_position )
+        ? ( $from_position+1, $to_position )
+        : ( $to_position, $from_position-1 )
+    );
     my $rs = $self->result_source->resultset->search({
-        -and => [
-            $position_column =>
-                { -between => [ $from_position, $to_position ] },
-        ],
+        $position_column => { -between => [ @between ] },
         $self->_grouping_clause(),
     });
     my $op = ($from_position>$to_position) ? '+' : '-';
-    my $case_stmt = "CASE $position_column \n".
-                    "  WHEN $from_position THEN $to_position\n".
-                    "  ELSE $position_column $op 1\n".
-                    "END";
-    $rs->update({ $position_column => \$case_stmt });
-    $self->store_column( $position_column => $to_position );
+    $rs->update({ $position_column => \"$position_column $op 1" });
+    $self->update({ $position_column => $to_position });
     return 1;
 }
@@ -353,6 +351,15 @@
 __END__
 
 =head1 BUGS
 
+=head2 Unique Constraints
+
+Unique indexes and constraints on the position column are not
+supported at this time.  It would make sense to support them,
+but there are some unexpected database issues that make this
+hard to do.  The main problem from the author's view is that
+SQLite (the DB engine that we use for testing) does not support
+ORDER BY on updates.
+
 =head2 Race Condition on Insert
 
 If a position is not specified for an insert then a position
@@ -369,7 +376,7 @@
 you've pre-loaded the objects then when you move one of the objects
 the position of the other object will not reflect their new value
 until you reload them from the database.
 
-The are times when you will want to move objects as groups, such
+There are times when you will want to move objects as groups, such
 as changing the parent of several objects at once - this directly
 conflicts with this problem.  One solution is for us to write a
 ResultSet class that supports a parent() method, for example.  Another
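For readers following the C<move_to> hunk above: it replaces the old CASE-statement renumbering with a single UPDATE that shifts only the rows strictly between the old and new positions. A minimal usage sketch (hypothetical C<Item> result class using L<DBIx::Class::Ordered> with the default C<position> column):

    # Move the row at position 5 up to position 2; rows at positions
    # 2..4 are renumbered 3..5 by one UPDATE, then the moved row itself
    # is updated to position 2.
    my $item = $schema->resultset('Item')->find({ position => 5 });
    $item->move_to(2);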
diff --git a/lib/DBIx/Class/Relationship.pm b/lib/DBIx/Class/Relationship.pm
index 44ed65b..b5d6932 100644
--- a/lib/DBIx/Class/Relationship.pm
+++ b/lib/DBIx/Class/Relationship.pm
@@ -131,6 +131,8 @@ of C.
     { prefetch => [qw/book/], });
   my @book_objs = $obj->books;
+  my $books_rs = $obj->books;
+  ( $books_rs ) = $obj->books_rs;
 
   $obj->add_to_books(\%col_data);
@@ -139,9 +141,14 @@
 foreign class store the calling class's primary key in one (or more) of
 its columns. You should pass the name of the column in the foreign class
 as the $cond argument, or specify a complete join condition.
 
-As well as the accessor method, a method named C<< add_to_<relname> >>
-will also be added to your Row items, this allows you to insert new
-related items, using the same mechanism as in
-L<DBIx::Class::Relationship::Base/"create_related">.
+Three methods are created when you create a has_many relationship.  The first
+method is the expected accessor method.  The second is almost exactly the same
+as the accessor method but "_rs" is added to the end of the method name.  This
+method works just like the normal accessor, except that it returns a resultset
+no matter what, even in list context.  The third method, named
+C<< add_to_<relname> >>, will also be added to your Row items; this allows
+you to insert new related items, using the same mechanism as in
+L<DBIx::Class::Relationship::Base/"create_related">.
 
 If you delete an object in a class with a C<has_many> relationship, all
 related objects will be deleted as well.  However, any database-level
diff --git a/lib/DBIx/Class/Relationship/Accessor.pm b/lib/DBIx/Class/Relationship/Accessor.pm
index 035661a..b20eb16 100644
--- a/lib/DBIx/Class/Relationship/Accessor.pm
+++ b/lib/DBIx/Class/Relationship/Accessor.pm
@@ -48,6 +48,7 @@
 sub add_relationship_accessor {
     );
   } elsif ($acc_type eq 'multi') {
     $meth{$rel} = sub { shift->search_related($rel, @_) };
+    $meth{"${rel}_rs"} = sub { shift->search_related_rs($rel, @_) };
     $meth{"add_to_${rel}"} = sub { shift->create_related($rel, @_); };
   } else {
     $class->throw_exception("No such relationship accessor type $acc_type");
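The net effect of the two hunks above, sketched with the C<books> relationship used in these docs (hypothetical row object C<$obj>):

    my @book_objs = $obj->books;     # accessor: row objects in list context
    my $books_rs  = $obj->books;     # scalar context: a resultset
    my ($rs)      = $obj->books_rs;  # new "_rs" accessor: a resultset even
                                     # in list context
    $obj->add_to_books(\%col_data);  # insert a related row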
diff --git a/lib/DBIx/Class/Relationship/Base.pm b/lib/DBIx/Class/Relationship/Base.pm
index 2e3cd89..0401c0a 100644
--- a/lib/DBIx/Class/Relationship/Base.pm
+++ b/lib/DBIx/Class/Relationship/Base.pm
@@ -29,27 +29,42 @@
 methods, for predefined ones, look in L<DBIx::Class::Relationship>.
 
   __PACKAGE__->add_relationship('relname', 'Foreign::Class', $cond, $attrs);
 
-The condition needs to be an SQL::Abstract-style representation of the
-join between the tables. When resolving the condition for use in a JOIN,
-keys using the pseudo-table I<foreign> are resolved to mean "the Table on the
-other side of the relationship", and values using the pseudo-table I<self>
+The condition needs to be an L<SQL::Abstract>-style representation of the
+join between the tables. When resolving the condition for use in a C<JOIN>,
+keys using the pseudo-table C<foreign> are resolved to mean "the Table on the
+other side of the relationship", and values using the pseudo-table C<self>
 are resolved to mean "the Table this class is representing". Other
 restrictions, such as by value, sub-select and other tables, may also be
-used. Please check your database for JOIN parameter support.
+used. Please check your database for C<JOIN> parameter support.
 
-For example, if you're creating a rel from Author to Book, where the Book
-table has a column author_id containing the ID of the Author row:
+For example, if you're creating a relationship from C<Author> to C<Book>, where
+the C<Book> table has a column C<author_id> containing the ID of the C<Author>
+row:
 
   { 'foreign.author_id' => 'self.id' }
 
-will result in the JOIN clause
+will result in the C<JOIN> clause
 
-  author me JOIN book book ON bar.author_id = me.id
+  author me JOIN book book ON book.author_id = me.id
 
-You can specify as many foreign => self mappings as necessary. Each key/value
-pair provided in a hashref will be used as ANDed conditions, to add an ORed
-condition, use an arrayref of hashrefs. See the L<SQL::Abstract> documentation
-for more details.
+For multi-column foreign keys, you will need to specify a C<foreign>-to-C<self>
+mapping for each column in the key. For example, if you're creating a
+relationship from C<Book> to C<Edition>, where the C<Edition> table refers to a
+publisher and a type (e.g. "paperback"):
+
+  {
+    'foreign.publisher_id' => 'self.publisher_id',
+    'foreign.type_id'      => 'self.type_id',
+  }
+
+This will result in the C<JOIN> clause:
+
+  book me JOIN edition edition ON edition.publisher_id = me.publisher_id
+    AND edition.type_id = me.type_id
+
+Each key-value pair provided in a hashref will be used as C<AND>ed conditions.
+To add an C<OR>ed condition, use an arrayref of hashrefs. See the
+L<SQL::Abstract> documentation for more details.
 
 Valid attributes are as follows:
@@ -160,7 +175,8 @@
 sub related_resultset {
 
 =head2 search_related
 
-  $rs->search_related('relname', $cond, $attrs);
+  @objects = $rs->search_related('relname', $cond, $attrs);
+  $objects_rs = $rs->search_related('relname', $cond, $attrs);
 
 Run a search on a related resultset. The search will be restricted to the
 item or items represented by the L<DBIx::Class::ResultSet> it was called
@@ -172,6 +188,19 @@
 sub search_related {
   return shift->related_resultset(shift)->search(@_);
 }
 
+=head2 search_related_rs
+
+  ( $objects_rs ) = $rs->search_related_rs('relname', $cond, $attrs);
+
+This method works exactly the same as search_related, except that
+it guarantees a resultset, even in list context.
+
+=cut
+
+sub search_related_rs {
+  return shift->related_resultset(shift)->search_rs(@_);
+}
+
 =head2 count_related
 
   $obj->count_related('relname', $cond, $attrs);
@@ -264,7 +293,8 @@
 L<DBIx::Class::ResultSet/"find_or_create"> for details.
 
 sub find_or_create_related {
   my $self = shift;
-  return $self->find_related(@_) || $self->create_related(@_);
+  my $obj = $self->find_related(@_);
+  return (defined($obj) ? $obj : $self->create_related(@_));
 }
 
 =head2 update_or_create_related
diff --git a/lib/DBIx/Class/Relationship/CascadeActions.pm b/lib/DBIx/Class/Relationship/CascadeActions.pm
index aa88043..3d5da76 100644
--- a/lib/DBIx/Class/Relationship/CascadeActions.pm
+++ b/lib/DBIx/Class/Relationship/CascadeActions.pm
@@ -33,6 +33,10 @@
 sub update {
   my %rels = map { $_ => $source->relationship_info($_) } $source->relationships;
   my @cascade = grep { $rels{$_}{attrs}{cascade_update} } keys %rels;
   foreach my $rel (@cascade) {
+    next if (
+      $rels{$rel}{attrs}{accessor} eq 'single'
+      && !exists($self->{_relationship_data}{$rel})
+    );
     $_->update for grep defined, $self->$rel;
   }
   return $ret;
diff --git a/lib/DBIx/Class/ResultSet.pm b/lib/DBIx/Class/ResultSet.pm
index 2b347ed..d6f0dd2 100644
--- a/lib/DBIx/Class/ResultSet.pm
+++ b/lib/DBIx/Class/ResultSet.pm
@@ -196,7 +196,28 @@
 call it as C<search(undef, \%attrs)>.
 
 sub search {
   my $self = shift;
-
+  my $rs = $self->search_rs( @_ );
+  return (wantarray ? $rs->all : $rs);
+}
+
+=head2 search_rs
+
+=over 4
+
+=item Arguments: $cond, \%attrs?
+
+=item Return Value: $resultset
+
+=back
+
+This method does exactly the same thing as search() except it will
+always return a resultset, even in list context.
+
+=cut
+
+sub search_rs {
+  my $self = shift;
+
   my $attrs = { %{$self->{attrs}} };
   my $having = delete $attrs->{having};
   $attrs = { %$attrs, %{ pop(@_) } } if @_ > 1 and ref $_[$#_] eq 'HASH';
@@ -229,12 +250,12 @@
 sub search {
   unless (@_) { # no search, effectively just a clone
     my $rows = $self->get_cache;
-    if( @{$rows} ) {
+    if ($rows) {
      $rs->set_cache($rows);
     }
   }
 
-  return (wantarray ? 
$rs->all : $rs); + return $rs; } =head2 search_literal @@ -320,10 +341,14 @@ sub find { $hash = {}; @{$hash}{@cols} = @_; } + elsif (@_) { + # For backwards compatibility + $hash = {@_}; + } else { $self->throw_exception( "Arguments to find must be a hashref or match the number of columns in the " - . exists $attrs->{key} ? "$attrs->{key} unique constraint" : "primary key" + . (exists $attrs->{key} ? "$attrs->{key} unique constraint" : "primary key") ); } @@ -439,6 +464,10 @@ sub cursor { Inflates the first result without creating a cursor if the resultset has any records in it; if not returns nothing. Used by L as an optimisation. +Can optionally take an additional condition *only* - this is a fast-code-path +method; if you need to add extra joins or similar call ->search and then +->single without a condition on the $rs returned from that. + =cut sub single { @@ -569,9 +598,9 @@ first record from the resultset. sub next { my ($self) = @_; - if (@{$self->{all_cache} || []}) { + if (my $cache = $self->get_cache) { $self->{all_cache_position} ||= 0; - return $self->{all_cache}->[$self->{all_cache_position}++]; + return $cache->[$self->{all_cache_position}++]; } if ($self->{attrs}{cache}) { $self->{all_cache_position} = 1; @@ -661,9 +690,9 @@ sub _collapse_result { last unless (@raw = $self->cursor->next); $row = $self->{stashed_row} = \@raw; $tree = $self->_collapse_result($as, $row, $c_prefix); - #warn Data::Dumper::Dumper($tree, $row); } - @$target = @final; + @$target = (@final ? @final : [ {}, {} ]); + # single empty result to indicate an empty prefetched has_many } return $info; @@ -710,7 +739,7 @@ clause. sub count { my $self = shift; return $self->search(@_)->count if @_ and defined $_[0]; - return scalar @{ $self->get_cache } if @{ $self->get_cache }; + return scalar @{ $self->get_cache } if $self->get_cache; my $count = $self->_count; return 0 unless $count; @@ -787,7 +816,7 @@ is returned in list context. sub all { my ($self) = @_; - return @{ $self->get_cache } if @{ $self->get_cache }; + return @{ $self->get_cache } if $self->get_cache; my @obj; @@ -1238,8 +1267,7 @@ sub update_or_create { my $row = $self->find($hash, $attrs); if (defined $row) { - $row->set_columns($hash); - $row->update; + $row->update($hash); return $row; } @@ -1261,7 +1289,7 @@ Gets the contents of the cache for the resultset, if the cache is set. =cut sub get_cache { - shift->{all_cache} || []; + shift->{all_cache}; } =head2 set_cache @@ -1284,13 +1312,7 @@ than re-querying the database even if the cache attr is not set. sub set_cache { my ( $self, $data ) = @_; $self->throw_exception("set_cache requires an arrayref") - if ref $data ne 'ARRAY'; - my $result_class = $self->result_class; - foreach( @$data ) { - $self->throw_exception( - "cannot cache object of type '$_', expected '$result_class'" - ) if ref $_ ne $result_class; - } + if defined($data) && (ref $data ne 'ARRAY'); $self->{all_cache} = $data; } @@ -1309,7 +1331,7 @@ Clears the cache for the resultset. =cut sub clear_cache { - shift->set_cache([]); + shift->set_cache(undef); } =head2 related_resultset @@ -1570,6 +1592,83 @@ C can be used with the following relationship types: C, C (or if you're using C, any relationship declared with an accessor type of 'single' or 'filter'). +=head2 page + +=over 4 + +=item Value: $page + +=back + +Makes the resultset paged and specifies the page to retrieve. Effectively +identical to creating a non-pages resultset and then calling ->page($page) +on it. 
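A short sketch combining C<page> with the C<rows> attribute documented next (assuming a C<CD> source):

    # 10 rows per page; fetch the second page (rows 11..20)
    my $page2 = $schema->resultset('CD')->search(
        undef,
        { rows => 10, page => 2 },
    );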
+
+=head2 rows
+
+=over 4
+
+=item Value: $rows
+
+=back
+
+Specifies the maximum number of rows for direct retrieval or the number of
+rows per page if the page attribute or method is used.
+
+=head2 group_by
+
+=over 4
+
+=item Value: \@columns
+
+=back
+
+An arrayref of columns to group by. Can include columns of joined tables.
+
+  group_by => [qw/ column1 column2 ... /]
+
+=head2 having
+
+=over 4
+
+=item Value: $condition
+
+=back
+
+HAVING is a select statement attribute that is applied between GROUP BY and
+ORDER BY. It is applied after the grouping calculations have been
+done.
+
+  having => { 'count(employee)' => { '>=', 100 } }
+
+=head2 distinct
+
+=over 4
+
+=item Value: (0 | 1)
+
+=back
+
+Set to 1 to group by all columns.
+
+=head2 cache
+
+Set to 1 to cache search results. This prevents extra SQL queries if you
+revisit rows in your ResultSet:
+
+  my $resultset = $schema->resultset('Artist')->search( undef, { cache => 1 } );
+
+  while( my $artist = $resultset->next ) {
+    ... do stuff ...
+  }
+
+  $rs->first; # without cache, this would issue a query
+
+By default, searches are not cached.
+
+For more examples of using these attributes, see
+L<DBIx::Class::Manual::Cookbook>.
+
 =head2 from
 
 =over 4
 
 =item Value: \@from_clause
 
 =back
 
 statements generated by L<DBIx::Class>, allowing you to express custom C<JOIN>
 clauses.
 
 NOTE: Use this at your own risk. This allows you to shoot yourself in the foot!
+
 C<join> will usually do what you need and it is strongly recommended that you
 avoid using C<from> unless you cannot achieve the desired result using C<join>.
+And we really do mean "cannot", not just tried and failed. Attempting to use
+this because you're having problems with C<join> is like trying to use x86
+ASM because you've got a syntax error in your C. Trust us on this.
+
+Now, if you're still really, really sure you need to use this (and if you're
+not 100% sure, ask the mailing list first), here's an explanation of how this
+works.
 
-In simple terms, C<from> works as follows:
+The syntax is as follows -
 
+  [
+    { <alias1> => <table1> },
     [
-      { <alias> => <table>, -join_type => 'inner|left|right' }
-      [] # nested JOIN (optional)
-      { <table.column> => <foreign_table.column> }
-    ]
+      { <alias2> => <table2>, -join_type => 'inner|left|right' },
+      [], # nested JOIN (optional)
+      { <table1.column1> => <table2.column2>, ... (more conditions) },
+    ],
+    # More of the above [ ... ] may follow for additional joins
+  ]
 
-  JOIN
-    <alias> <table>
- [JOIN ...] - ON = + + JOIN + + [JOIN ...] + ON = + An easy way to follow the examples below is to remember the following: @@ -1663,83 +1776,6 @@ with a father in the person table, we could explicitly use C: # SELECT child.* FROM person child # INNER JOIN person father ON child.father_id = father.id -=head2 page - -=over 4 - -=item Value: $page - -=back - -Makes the resultset paged and specifies the page to retrieve. Effectively -identical to creating a non-pages resultset and then calling ->page($page) -on it. - -=head2 rows - -=over 4 - -=item Value: $rows - -=back - -Specifes the maximum number of rows for direct retrieval or the number of -rows per page if the page attribute or method is used. - -=head2 group_by - -=over 4 - -=item Value: \@columns - -=back - -A arrayref of columns to group by. Can include columns of joined tables. - - group_by => [qw/ column1 column2 ... /] - -=head2 having - -=over 4 - -=item Value: $condition - -=back - -HAVING is a select statement attribute that is applied between GROUP BY and -ORDER BY. It is applied to the after the grouping calculations have been -done. - - having => { 'count(employee)' => { '>=', 100 } } - -=head2 distinct - -=over 4 - -=item Value: (0 | 1) - -=back - -Set to 1 to group by all columns. - -=head2 cache - -Set to 1 to cache search results. This prevents extra SQL queries if you -revisit rows in your ResultSet: - - my $resultset = $schema->resultset('Artist')->search( undef, { cache => 1 } ); - - while( my $artist = $resultset->next ) { - ... do stuff ... - } - - $rs->first; # without cache, this would issue a query - -By default, searches are not cached. - -For more examples of using these attributes, see -L. - =cut 1; diff --git a/lib/DBIx/Class/ResultSource.pm b/lib/DBIx/Class/ResultSource.pm index ee55dbc..4ce8e08 100644 --- a/lib/DBIx/Class/ResultSource.pm +++ b/lib/DBIx/Class/ResultSource.pm @@ -356,7 +356,10 @@ Returns an expression of the source to be supplied to storage to specify retrieval from this source. In the case of a database, the required FROM clause contents. -=cut +=head2 schema + +Returns the L object that this result source +belongs too. =head2 storage diff --git a/lib/DBIx/Class/Schema.pm b/lib/DBIx/Class/Schema.pm index 5bd741e..c1ea074 100644 --- a/lib/DBIx/Class/Schema.pm +++ b/lib/DBIx/Class/Schema.pm @@ -4,6 +4,7 @@ use strict; use warnings; use Carp::Clan qw/^DBIx::Class/; +use Scalar::Util qw/weaken/; use base qw/DBIx::Class/; @@ -94,6 +95,7 @@ sub register_source { $reg{$moniker} = $source; $self->source_registrations(\%reg); $source->schema($self); + weaken($source->{schema}) if ref($self); if ($source->result_class) { my %map = %{$self->class_mappings}; $map{$source->result_class} = $moniker; @@ -714,6 +716,41 @@ sub deploy { $self->storage->deploy($self, undef, $sqltargs); } +=head2 create_ddl_dir (EXPERIMENTAL) + +=over 4 + +=item Arguments: \@databases, $version, $directory, $sqlt_args + +=back + +Creates an SQL file based on the Schema, for each of the specified +database types, in the given directory. + +Note that this feature is currently EXPERIMENTAL and may not work correctly +across all databases, or fully handle complex relationships. 
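A usage sketch (names are illustrative; the argument order follows the documentation above):

    # Write MySQL and SQLite DDL for schema version 0.1 into ./sql/
    My::Schema->connect(@connect_info)->create_ddl_dir(
        ['MySQL', 'SQLite'],   # database types
        '0.1',                 # schema version
        './sql/',              # output directory
    );

Note that C<ddl_filename> below builds the file name by plain string concatenation ("$dir$filename-$version-$type.sql"), so the directory argument should end with a slash.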
+
+=cut
+
+sub create_ddl_dir
+{
+  my $self = shift;
+
+  $self->throw_exception("Can't create_ddl_dir without storage") unless $self->storage;
+  $self->storage->create_ddl_dir($self, @_);
+}
+
+sub ddl_filename
+{
+    my ($self, $type, $dir, $version) = @_;
+
+    my $filename = ref($self);
+    $filename =~ s/^.*:://;
+    $filename = "$dir$filename-$version-$type.sql";
+
+    return $filename;
+}
+
 1;
 
 =head1 AUTHORS
diff --git a/lib/DBIx/Class/Storage/DBI.pm b/lib/DBIx/Class/Storage/DBI.pm
index 7eab86f..918d876 100644
--- a/lib/DBIx/Class/Storage/DBI.pm
+++ b/lib/DBIx/Class/Storage/DBI.pm
@@ -1,4 +1,5 @@
 package DBIx::Class::Storage::DBI;
+# -*- mode: cperl; cperl-indent-level: 2 -*-
 
 use base 'DBIx::Class::Storage';
@@ -7,9 +8,9 @@
 use warnings;
 use DBI;
 use SQL::Abstract::Limit;
 use DBIx::Class::Storage::DBI::Cursor;
+use DBIx::Class::Storage::Statistics;
 use IO::File;
 use Carp::Clan qw/DBIx::Class/;
-
 BEGIN {
 
 package DBIC::SQL::Abstract; # Would merge upstream, but nate doesn't reply :(
@@ -20,6 +21,8 @@
 sub select {
   my ($self, $table, $fields, $where, $order, @rest) = @_;
   $table = $self->_quote($table) unless ref($table);
   @rest = (-1) unless defined $rest[0];
+  die "LIMIT 0 Does Not Compute" if $rest[0] == 0;
+    # and anyway, SQL::Abstract::Limit will cause a barf if we don't first
   local $self->{having_bind} = [];
   my ($sql, @ret) = $self->SUPER::select(
     $table, $self->_recurse_fields($fields), $where, $order, @rest
@@ -222,17 +225,6 @@
 sub name_sep {
   return $self->{name_sep};
 }
 
-
-
-
-package DBIx::Class::Storage::DBI::DebugCallback;
-
-sub print {
-  my ($self, $string) = @_;
-  $string =~ m/^(\w+)/;
-  ${$self}->($1, $string);
-}
-
 } # End of BEGIN block
 
 use base qw/DBIx::Class/;
@@ -240,20 +232,25 @@
 __PACKAGE__->load_components(qw/AccessorGroup/);
 
 __PACKAGE__->mk_group_accessors('simple' =>
-  qw/_connect_info _dbh _sql_maker _conn_pid _conn_tid debug debugfh
+  qw/_connect_info _dbh _sql_maker _conn_pid _conn_tid debug debugobj
      cursor on_connect_do transaction_depth/);
 
 sub new {
   my $new = bless({}, ref $_[0] || $_[0]);
   $new->cursor("DBIx::Class::Storage::DBI::Cursor");
   $new->transaction_depth(0);
+
+  $new->debugobj(new DBIx::Class::Storage::Statistics());
+
+  my $fh;
   if (defined($ENV{DBIX_CLASS_STORAGE_DBI_DEBUG}) &&
      ($ENV{DBIX_CLASS_STORAGE_DBI_DEBUG} =~ /=(.+)$/)) {
-    $new->debugfh(IO::File->new($1, 'w'))
+    $fh = IO::File->new($1, 'w')
       or $new->throw_exception("Cannot open trace file $1");
   } else {
-    $new->debugfh(IO::File->new('>&STDERR'));
+    $fh = IO::File->new('>&STDERR');
   }
+  $new->debugobj->debugfh($fh);
   $new->debug(1) if $ENV{DBIX_CLASS_STORAGE_DBI_DEBUG};
   return $new;
 }
@@ -302,29 +299,38 @@
 Executes the sql statements given as a listref on every db connect.
 
 =head2 debug
 
-Causes SQL trace information to be emitted on C<debugfh> filehandle
-(or C<STDERR> if C<debugfh> has not specifically been set).
+Causes SQL trace information to be emitted on the C<debugobj> object
+(or C<STDERR> if C<debugobj> has not specifically been set).
 
 =head2 debugfh
 
-Sets or retrieves the filehandle used for trace/debug output. This
-should be an IO::Handle compatible object (only the C<print> method is
-used). Initially set to be STDERR - although see information on the
+Set or retrieve the filehandle used for trace/debug output. This should be
+an IO::Handle compatible object (only the C<print> method is used). Initially
+set to be STDERR - although see information on the
 L<DBIX_CLASS_STORAGE_DBI_DEBUG> environment variable.
 
+=head2 debugobj
+
+Sets or retrieves the object used for metric collection. Defaults to an instance
+of L<DBIx::Class::Storage::Statistics> that is compatible with the original
+method of using a coderef as a callback. See the aforementioned Statistics
+class for more information.
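For example, to route the default trace output to a file rather than C<STDERR> (a sketch; C<$schema> is assumed to be a connected schema, and the filehandle is set on the Statistics object that C<debugobj> returns):

    use IO::File;

    $schema->storage->debugobj->debugfh(IO::File->new('dbic_trace.log', 'w'));
    $schema->storage->debug(1);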
 =head2 debugcb
 
 Sets a callback to be executed each time a statement is run; takes a sub
-reference. Overrides debugfh. Callback is executed as $sub->($op, $info)
-where $op is SELECT/INSERT/UPDATE/DELETE and $info is what would normally
-be printed.
+reference. Callback is executed as $sub->($op, $info) where $op is
+SELECT/INSERT/UPDATE/DELETE and $info is what would normally be printed.
 
-=cut
+See L<debugobj> for a better way.
 
+=cut
 sub debugcb {
-  my ($self, $cb) = @_;
-  my $cb_obj = bless(\$cb, 'DBIx::Class::Storage::DBI::DebugCallback');
-  $self->debugfh($cb_obj);
+  my $self = shift();
+
+  if($self->debugobj()->can('callback')) {
+    $self->debugobj()->callback(shift());
+  }
 }
 
 sub disconnect {
@@ -377,10 +383,16 @@
 sub dbh {
   return $self->_dbh;
 }
 
+sub _sql_maker_args {
+    my ($self) = @_;
+
+    return ( limit_dialect => $self->dbh );
+}
+
 sub sql_maker {
   my ($self) = @_;
   unless ($self->_sql_maker) {
-    $self->_sql_maker(new DBIC::SQL::Abstract( limit_dialect => $self->dbh ));
+    $self->_sql_maker(new DBIC::SQL::Abstract( $self->_sql_maker_args ));
   }
   return $self->_sql_maker;
 }
@@ -424,10 +436,13 @@
 sub _populate_dbh {
     eval "require DBIx::Class::Storage::DBI::${driver}";
     unless ($@) {
       bless $self, "DBIx::Class::Storage::DBI::${driver}";
+      $self->_rebless() if $self->can('_rebless');
     }
   # if on-connect sql statements are given execute them
   foreach my $sql_statement (@{$self->on_connect_do || []}) {
+    $self->debugobj->query_start($sql_statement) if $self->debug();
     $self->_dbh->do($sql_statement);
+    $self->debugobj->query_end($sql_statement) if $self->debug();
   }
 
   $self->_conn_pid($$);
@@ -479,7 +494,7 @@
 sub txn_begin {
   if ($self->{transaction_depth}++ == 0) {
     my $dbh = $self->dbh;
     if ($dbh->{AutoCommit}) {
-      $self->debugfh->print("BEGIN WORK\n")
+      $self->debugobj->txn_begin()
         if ($self->debug);
       $dbh->begin_work;
     }
@@ -497,14 +512,14 @@
 sub txn_commit {
   if ($self->{transaction_depth} == 0) {
     my $dbh = $self->dbh;
     unless ($dbh->{AutoCommit}) {
-      $self->debugfh->print("COMMIT\n")
+      $self->debugobj->txn_commit()
         if ($self->debug);
       $dbh->commit;
     }
   }
   else {
     if (--$self->{transaction_depth} == 0) {
-      $self->debugfh->print("COMMIT\n")
+      $self->debugobj->txn_commit()
         if ($self->debug);
       $self->dbh->commit;
     }
@@ -526,14 +541,14 @@
 sub txn_rollback {
   if ($self->{transaction_depth} == 0) {
     my $dbh = $self->dbh;
     unless ($dbh->{AutoCommit}) {
-      $self->debugfh->print("ROLLBACK\n")
+      $self->debugobj->txn_rollback()
         if ($self->debug);
       $dbh->rollback;
     }
   }
   else {
     if (--$self->{transaction_depth} == 0) {
-      $self->debugfh->print("ROLLBACK\n")
+      $self->debugobj->txn_rollback()
         if ($self->debug);
       $self->dbh->rollback;
     }
@@ -557,8 +572,8 @@
 sub _execute {
   my ($sql, @bind) = $self->sql_maker->$op($ident, @args);
   unshift(@bind, @$extra_bind) if $extra_bind;
   if ($self->debug) {
-    my @debug_bind = map { defined $_ ? qq{`$_'} : q{`NULL'} } @bind;
-    $self->debugfh->print("$sql: " . join(', ', @debug_bind) . "\n");
+    my @debug_bind = map { defined $_ ? qq{'$_'} : q{'NULL'} } @bind;
+    $self->debugobj->query_start($sql, @debug_bind);
   }
   my $sth = eval { $self->sth($sql,$op) };
@@ -569,6 +584,7 @@
   @bind = map { ref $_ ? 
''.$_ : $_ } @bind; # stringify args my $rv; if ($sth) { + my $time = time(); $rv = eval { $sth->execute(@bind) }; if ($@ || !$rv) { @@ -577,6 +593,10 @@ sub _execute { } else { $self->throw_exception("'$sql' did not generate a statement."); } + if ($self->debug) { + my @debug_bind = map { defined $_ ? qq{`$_'} : q{`NULL'} } @bind; + $self->debugobj->query_end($sql, @debug_bind); + } return (wantarray ? ($rv, $sth, @bind) : $rv); } @@ -616,6 +636,8 @@ sub _select { $self->sql_maker->_default_limit_syntax eq "GenericSubQ") { $attrs->{software_limit} = 1; } else { + $self->throw_exception("rows attribute must be positive if present") + if (defined($attrs->{rows}) && !($attrs->{rows} > 0)); push @args, $attrs->{rows}, $attrs->{offset}; } return $self->_execute(@args); @@ -661,7 +683,8 @@ sub columns_info_for { $dbh->{RaiseError} = 1; $dbh->{PrintError} = 0; eval { - my $sth = $dbh->column_info( undef, undef, $table, '%' ); + my ($schema,$tab) = $table =~ /^(.+?)\.(.+)$/ ? ($1,$2) : (undef,$table); + my $sth = $dbh->column_info( undef,$schema, $tab, '%' ); $sth->execute(); while ( my $info = $sth->fetchrow_hashref() ){ my %column_info; @@ -714,26 +737,104 @@ sub last_insert_id { sub sqlt_type { shift->dbh->{Driver}->{Name} } +sub create_ddl_dir +{ + my ($self, $schema, $databases, $version, $dir, $sqltargs) = @_; + + if(!$dir || !-d $dir) + { + warn "No directory given, using ./\n"; + $dir = "./"; + } + $databases ||= ['MySQL', 'SQLite', 'PostgreSQL']; + $databases = [ $databases ] if(ref($databases) ne 'ARRAY'); + $version ||= $schema->VERSION || '1.x'; + + eval "use SQL::Translator"; + $self->throw_exception("Can't deploy without SQL::Translator: $@") if $@; + + my $sqlt = SQL::Translator->new({ +# debug => 1, + add_drop_table => 1, + }); + foreach my $db (@$databases) + { + $sqlt->reset(); + $sqlt->parser('SQL::Translator::Parser::DBIx::Class'); +# $sqlt->parser_args({'DBIx::Class' => $schema); + $sqlt->data($schema); + $sqlt->producer($db); + + my $file; + my $filename = $schema->ddl_filename($db, $dir, $version); + if(-e $filename) + { + $self->throw_exception("$filename already exists, skipping $db"); + next; + } + open($file, ">$filename") + or $self->throw_exception("Can't open $filename for writing ($!)"); + my $output = $sqlt->translate; +#use Data::Dumper; +# print join(":", keys %{$schema->source_registrations}); +# print Dumper($sqlt->schema); + if(!$output) + { + $self->throw_exception("Failed to translate to $db. (" . $sqlt->error . 
")"); + next; + } + print $file $output; + close($file); + } + +} + sub deployment_statements { - my ($self, $schema, $type, $sqltargs) = @_; + my ($self, $schema, $type, $version, $dir, $sqltargs) = @_; $type ||= $self->sqlt_type; + $version ||= $schema->VERSION || '1.x'; + $dir ||= './'; eval "use SQL::Translator"; - $self->throw_exception("Can't deploy without SQL::Translator: $@") if $@; - eval "use SQL::Translator::Parser::DBIx::Class;"; - $self->throw_exception($@) if $@; - eval "use SQL::Translator::Producer::${type};"; - $self->throw_exception($@) if $@; - my $tr = SQL::Translator->new(%$sqltargs); - SQL::Translator::Parser::DBIx::Class::parse( $tr, $schema ); - return "SQL::Translator::Producer::${type}"->can('produce')->($tr); + if(!$@) + { + eval "use SQL::Translator::Parser::DBIx::Class;"; + $self->throw_exception($@) if $@; + eval "use SQL::Translator::Producer::${type};"; + $self->throw_exception($@) if $@; + my $tr = SQL::Translator->new(%$sqltargs); + SQL::Translator::Parser::DBIx::Class::parse( $tr, $schema ); + return "SQL::Translator::Producer::${type}"->can('produce')->($tr); + } + + my $filename = $schema->ddl_filename($type, $dir, $version); + if(!-f $filename) + { +# $schema->create_ddl_dir([ $type ], $version, $dir, $sqltargs); + $self->throw_exception("No SQL::Translator, and no Schema file found, aborting deploy"); + return; + } + my $file; + open($file, "<$filename") + or $self->throw_exception("Can't open $filename ($!)"); + my @rows = <$file>; + close($file); + + return join('', @rows); + } sub deploy { my ($self, $schema, $type, $sqltargs) = @_; - foreach my $statement ( $self->deployment_statements($schema, $type, $sqltargs) ) { + foreach my $statement ( $self->deployment_statements($schema, $type, undef, undef, $sqltargs) ) { for ( split(";\n", $statement)) { - $self->debugfh->print("$_\n") if $self->debug; + next if($_ =~ /^--/); + next if(!$_); +# next if($_ =~ /^DROP/m); + next if($_ =~ /^BEGIN TRANSACTION/m); + next if($_ =~ /^COMMIT/m); + $self->debugobj->query_begin($_) if $self->debug; $self->dbh->do($_) or warn "SQL was:\n $_"; + $self->debugobj->query_end($_) if $self->debug; } } } @@ -767,6 +868,11 @@ is produced (as when the L method is set). If the value is of the form C<1=/path/name> then the trace output is written to the file C. +This environment variable is checked when the storage object is first +created (when you call connect on your schema). So, run-time changes +to this environment variable will not take effect unless you also +re-connect on your schema. + =head1 AUTHORS Matt S. Trout diff --git a/lib/DBIx/Class/Storage/DBI/ODBC.pm b/lib/DBIx/Class/Storage/DBI/ODBC.pm new file mode 100644 index 0000000..f33100c --- /dev/null +++ b/lib/DBIx/Class/Storage/DBI/ODBC.pm @@ -0,0 +1,48 @@ +package DBIx::Class::Storage::DBI::ODBC; +use strict; +use warnings; + +use base qw/DBIx::Class::Storage::DBI/; + +sub _rebless { + my ($self) = @_; + + my $dbh = $self->_dbh; + my $dbtype = eval { $dbh->get_info(17) }; + unless ( $@ ) { + # Translate the backend name into a perl identifier + $dbtype =~ s/\W/_/gi; + my $class = "DBIx::Class::Storage::DBI::ODBC::${dbtype}"; + eval "require $class"; + bless $self, $class unless $@; + } +} + + +1; + +=head1 NAME + +DBIx::Class::Storage::DBI::ODBC - Base class for ODBC drivers + +=head1 SYNOPSIS + + # In your table classes + __PACKAGE__->load_components(qw/Core/); + + +=head1 DESCRIPTION + +This class simply provides a mechanism for discovering and loading a sub-class +for a specific ODBC backend. 
It should be transparent to the user. + + +=head1 AUTHORS + +Marc Mims C<< >> + +=head1 LICENSE + +You may distribute this code under the same terms as Perl itself. + +=cut diff --git a/lib/DBIx/Class/Storage/DBI/ODBC/DB2_400_SQL.pm b/lib/DBIx/Class/Storage/DBI/ODBC/DB2_400_SQL.pm new file mode 100644 index 0000000..d4e6218 --- /dev/null +++ b/lib/DBIx/Class/Storage/DBI/ODBC/DB2_400_SQL.pm @@ -0,0 +1,66 @@ +package DBIx::Class::Storage::DBI::ODBC::DB2_400_SQL; +use strict; +use warnings; + +use base qw/DBIx::Class::Storage::DBI::ODBC/; + +sub last_insert_id +{ + my ($self) = @_; + + my $dbh = $self->_dbh; + + # get the schema/table separator: + # '.' when SQL naming is active + # '/' when system naming is active + my $sep = $dbh->get_info(41); + my $sth = $dbh->prepare_cached( + "SELECT IDENTITY_VAL_LOCAL() FROM SYSIBM${sep}SYSDUMMY1", {}, 3); + $sth->execute(); + + my @res = $sth->fetchrow_array(); + + return @res ? $res[0] : undef; +} + +sub _sql_maker_args { + my ($self) = @_; + + return ( + limit_dialect => 'FetchFirst', + name_sep => $self->_dbh->get_info(41) + ); +} + +1; + +=head1 NAME + +DBIx::Class::Storage::DBI::ODBC::DB2_400_SQL - Support specific to DB2/400 +over ODBC + +=head1 SYNOPSIS + + # In your table classes + __PACKAGE__->load_components(qw/PK::Auto Core/); + __PACKAGE__->set_primary_key('id'); + + +=head1 DESCRIPTION + +This class implements support specific to DB2/400 over ODBC, including +auto-increment primary keys, SQL::Abstract::Limit dialect, and name separator +for for connections using either SQL naming or System naming. + + +=head1 AUTHORS + +Marc Mims C<< >> + +Based on DBIx::Class::Storage::DBI::DB2 by Jess Robinson. + +=head1 LICENSE + +You may distribute this code under the same terms as Perl itself. + +=cut diff --git a/lib/DBIx/Class/Storage/DBI/Pg.pm b/lib/DBIx/Class/Storage/DBI/Pg.pm index 1352c25..526abac 100644 --- a/lib/DBIx/Class/Storage/DBI/Pg.pm +++ b/lib/DBIx/Class/Storage/DBI/Pg.pm @@ -21,11 +21,12 @@ sub get_autoinc_seq { my ($schema,$table) = $source->name =~ /^(.+)\.(.+)$/ ? ($1,$2) : (undef,$source->name); while (my $col = shift @pri) { - my $info = $dbh->column_info(undef,$schema,$table,$col)->fetchrow_arrayref; - if (defined $info->[12] and $info->[12] =~ + my $info = $dbh->column_info(undef,$schema,$table,$col)->fetchrow_hashref; + if (defined $info->{COLUMN_DEF} and $info->{COLUMN_DEF} =~ /^nextval\(+'([^']+)'::(?:text|regclass)\)/) { - return $1; # may need to strip quotes -- see if this works + my $seq = $1; + return $seq =~ /\./ ? $seq : $info->{TABLE_SCHEM} . "." . $seq; # may need to strip quotes -- see if this works } } } diff --git a/lib/DBIx/Class/Storage/Statistics.pm b/lib/DBIx/Class/Storage/Statistics.pm new file mode 100644 index 0000000..ec9edda --- /dev/null +++ b/lib/DBIx/Class/Storage/Statistics.pm @@ -0,0 +1,111 @@ +package DBIx::Class::Storage::Statistics; +use strict; + +use base qw/DBIx::Class::AccessorGroup Class::Data::Accessor/; +__PACKAGE__->mk_group_accessors(simple => qw/callback debugfh/); + +=head1 NAME + +DBIx::Class::Storage::Statistics - SQL Statistics + +=head1 SYNOPSIS + +=head1 DESCRIPTION + +This class is called by DBIx::Class::Storage::DBI as a means of collecting +statistics on it's actions. Using this class alone merely prints the SQL +executed, the fact that it completes and begin/end notification for +transactions. + +To really use this class you should subclass it and create your own method +for collecting the statistics as discussed in L. 
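A minimal subclass sketch (hypothetical names; C<query_start> receives the SQL string followed by its bound parameters, as documented below):

    package My::QueryCounter;
    use strict;
    use base 'DBIx::Class::Storage::Statistics';

    my %counts;

    # Tally each distinct statement instead of printing it
    sub query_start {
        my ($self, $sql, @params) = @_;
        $counts{$sql}++;
    }

    sub query_end { }

    1;

It would then be installed via C<< $schema->storage->debugobj(My::QueryCounter->new()) >> with C<debug(1)> enabled, as shown in the cookbook's Profiling recipe.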
+ +=head1 METHODS + +=cut + +=head2 new + +Returns a new L object. + +=cut +sub new { + my $self = bless({}, ref($_[0]) || $_[0]); + + return $self; +} + +=head2 debugfh + +Sets or retrieves the filehandle used for trace/debug output. This should +be an IO::Handle compatible object (only the C method is used). Initially +should be set to STDERR - although see information on the +L environment variable. + +=head2 txn_begin + +Called when a transaction begins. + +=cut +sub txn_begin { + my $self = shift(); +} + +=head2 txn_rollback + +Called when a transaction is rolled back. + +=cut +sub txn_rollback { + my $self = shift(); +} + +=head2 txn_commit + +Called when a transaction is committed. + +=cut +sub txn_commit { + my $self = shift(); +} + +=head2 query_start + +Called before a query is executed. The first argument is the SQL string being +executed and subsequent arguments are the parameters used for the query. + +=cut +sub query_start { + my $self = shift(); + my $string = shift(); + + if(defined($self->callback())) { + $string =~ m/^(\w+)/; + $self->callback()->($1, $string); + return; + } + + $self->debugfh->print("$string: " . join(', ', @_) . "\n"); +} + +=head2 query_end + +Called when a query finishes executing. Has the same arguments as query_start. + +=cut +sub query_end { + my $self = shift(); + my $string = shift(); +} + +1; + +=head1 AUTHORS + +Cory G. Watson + +=head1 LICENSE + +You may distribute this code under the same license as Perl itself. + +=cut diff --git a/maint/gen-schema.pl b/maint/gen-schema.pl index d8d2ca1..ffd2df7 100755 --- a/maint/gen-schema.pl +++ b/maint/gen-schema.pl @@ -4,9 +4,8 @@ use strict; use warnings; use lib qw(lib t/lib); -use DBICTest; -use DBICTest::Schema::HelperRels; +use DBICTest::Schema; -my $schema = DBICTest->initialise; +my $schema = DBICTest::Schema->connect; -print $schema->storage->deployment_statements($schema); +print $schema->storage->deployment_statements($schema, 'SQLite'); diff --git a/maint/gen-tests.pl b/maint/gen-tests.pl index 0fc6180..48e71a7 100755 --- a/maint/gen-tests.pl +++ b/maint/gen-tests.pl @@ -22,4 +22,4 @@ run_tests(DBICTest->schema); EOF close $fh; } -} \ No newline at end of file +} diff --git a/script/dbicadmin b/script/dbicadmin new file mode 100755 index 0000000..9eec9b7 --- /dev/null +++ b/script/dbicadmin @@ -0,0 +1,221 @@ +#!/usr/bin/perl +use strict; +use warnings; + +use Getopt::Long; +use Pod::Usage; +use JSON qw( jsonToObj ); + +$JSON::BareKey = 1; +$JSON::QuotApos = 1; + +GetOptions( + 'schema=s' => \my $schema_class, + 'class=s' => \my $resultset_class, + 'connect=s' => \my $connect, + 'op=s' => \my $op, + 'set=s' => \my $set, + 'where=s' => \my $where, + 'attrs=s' => \my $attrs, + 'format=s' => \my $format, + 'force' => \my $force, + 'trace' => \my $trace, + 'quiet' => \my $quiet, + 'help' => \my $help, + 'tlibs' => \my $t_libs, +); + +if ($t_libs) { + unshift( @INC, 't/lib', 'lib' ); +} + +pod2usage(1) if ($help); +$ENV{DBIX_CLASS_STORAGE_DBI_DEBUG} = 1 if ($trace); + +die('No op specified') if(!$op); +die('Invalid op') if ($op!~/^insert|update|delete|select$/s); +my $csv_class; +if ($op eq 'select') { + $format ||= 'tsv'; + die('Invalid format') if ($format!~/^tsv|csv$/s); + $csv_class = 'Text::CSV_XS'; + eval{ require Text::CSV_XS }; + if ($@) { + $csv_class = 'Text::CSV_PP'; + eval{ require Text::CSV_PP }; + die('The select op requires either the Text::CSV_XS or the Text::CSV_PP module') if ($@); + } +} + +die('No schema specified') if(!$schema_class); +eval("require $schema_class"); 
+die('Unable to load schema') if ($@); +$connect = jsonToObj( $connect ) if ($connect); +my $schema = $schema_class->connect( + ( $connect ? @$connect : () ) +); + +die('No class specified') if(!$resultset_class); +my $resultset = eval{ $schema->resultset($resultset_class) }; +die('Unable to load the class with the schema') if ($@); + +$set = jsonToObj( $set ) if ($set); +$where = jsonToObj( $where ) if ($where); +$attrs = jsonToObj( $attrs ) if ($attrs); + +if ($op eq 'insert') { + die('Do not use the where option with the insert op') if ($where); + die('Do not use the attrs option with the insert op') if ($attrs); + my $obj = $resultset->create( $set ); + print ''.ref($resultset).' ID: '.join(',',$obj->id())."\n"; +} +elsif ($op eq 'update') { + $resultset = $resultset->search( ($where||{}) ); + my $count = $resultset->count(); + print "This action will modify $count ".ref($resultset)." records.\n" if (!$quiet); + if ( $force || confirm() ) { + $resultset->update_all( $set ); + } +} +elsif ($op eq 'delete') { + die('Do not use the set option with the delete op') if ($set); + $resultset = $resultset->search( ($where||{}), ($attrs||()) ); + my $count = $resultset->count(); + print "This action will delete $count ".ref($resultset)." records.\n" if (!$quiet); + if ( $force || confirm() ) { + $resultset->delete_all(); + } +} +elsif ($op eq 'select') { + die('Do not use the set option with the select op') if ($set); + my $csv = $csv_class->new({ + sep_char => ( $format eq 'tsv' ? "\t" : ',' ), + }); + $resultset = $resultset->search( ($where||{}), ($attrs||()) ); + my @columns = $resultset->result_source->columns(); + $csv->combine( @columns ); + print $csv->string()."\n"; + while (my $row = $resultset->next()) { + my @fields; + foreach my $column (@columns) { + push( @fields, $row->get_column($column) ); + } + $csv->combine( @fields ); + print $csv->string()."\n"; + } +} + +sub confirm { + print "Are you sure you want to do this? (type YES to confirm) "; + my $response = ; + return 1 if ($response=~/^YES/); + return; +} + +__END__ + +=head1 NAME + +dbicadmin - Execute operations upon DBIx::Class objects. + +=head1 SYNOPSIS + + dbicadmin --op=insert --schema=My::Schema --class=Class --set=JSON + dbicadmin --op=update --schema=My::Schema --class=Class --set=JSON --where=JSON + dbicadmin --op=delete --schema=My::Schema --class=Class --where=JSON + dbicadmin --op=select --schema=My::Schema --class=Class --where=JSON --format=tsv + +=head1 DESCRIPTION + +This utility provides the ability to run INSERTs, UPDATEs, +DELETEs, and SELECTs on any DBIx::Class object. + +=head1 OPTIONS + +=head2 op + +The type of operation. Valid values are insert, update, delete, +and select. + +=head2 schema + +The name of your schema class. + +=head2 class + +The name of the class, within your schema, that you want to run +the operation on. + +=head2 connect + +A JSON array to be passed to your schema class upon connecting. +The array will need to be compatible with whatever the DBIC +->connect() method requires. + +=head2 set + +This option must be valid JSON data string and is passed in to +the DBIC update() method. Use this option with the update +and insert ops. + +=head2 where + +This option must be valid JSON data string and is passed in as +the first argument to the DBIC search() method. Use this +option with the update, delete, and select ops. + +=head2 attrs + +This option must be valid JSON data string and is passed in as +the second argument to the DBIC search() method. 
Use this +option with the update, delete, and select ops. + +=head2 help + +Display this help page. + +=head2 force + +Suppresses the confirmation dialogues that are usually displayed +when someone runs a DELETE or UPDATE action. + +=head2 quiet + +Do not display status messages. + +=head2 trace + +Turns on tracing on the DBI storage, thus printing SQL as it is +executed. + +=head2 tlibs + +This option is purely for testing during the DBIC installation. Do +not use it. + +=head1 JSON + +JSON is a lightweight data-interchange format. It allows you +to express complex data structures for use in the where and +set options. + +This module turns on L's BareKey and QuotApos options so +that your data can look a bit more readable. + + --where={"this":"that"} # generic JSON + --where={this:'that'} # with BareKey and QuoteApos + +Consider wrapping your JSON in outer quotes so that you don't +have to escape your inner quotes. + + --where={this:\"that\"} # no outer quote + --where='{this:"that"}' # outer quoted + +=head1 AUTHOR + +Aran Deltac + +=head1 LICENSE + +You may distribute this code under the same terms as Perl itself. + diff --git a/t/31stats.t b/t/31stats.t new file mode 100644 index 0000000..a478d28 --- /dev/null +++ b/t/31stats.t @@ -0,0 +1,109 @@ +#!/usr/bin/perl + +use strict; +use warnings; +use Test::More; + +BEGIN { + eval "use DBD::SQLite"; + plan $@ + ? ( skip_all => 'needs DBD::SQLite for testing' ) + : ( tests => 13 ); +} + +use lib qw(t/lib); + +use_ok('DBICTest'); +use_ok('DBICTest::HelperRels'); + +my $cbworks = 0; + +DBICTest->schema->storage->debugcb(sub { $cbworks = 1; }); +DBICTest->schema->storage->debug(0); +my $rs = DBICTest::CD->search({}); +$rs->count(); +ok(!$cbworks, 'Callback not called with debug disabled'); + +DBICTest->schema->storage->debug(1); + +$rs->count(); +ok($cbworks, 'Debug callback worked.'); + +my $prof = new DBIx::Test::Profiler(); +DBICTest->schema->storage->debugobj($prof); + +# Test non-transaction calls. 
+$rs->count();
+ok($prof->{'query_start'}, 'query_start called');
+ok($prof->{'query_end'}, 'query_end called');
+ok(!$prof->{'txn_begin'}, 'txn_begin not called');
+ok(!$prof->{'txn_commit'}, 'txn_commit not called');
+
+$prof->reset();
+
+# Test transaction calls
+DBICTest->schema->txn_begin();
+ok($prof->{'txn_begin'}, 'txn_begin called');
+
+$rs = DBICTest::CD->search({});
+$rs->count();
+ok($prof->{'query_start'}, 'query_start called');
+ok($prof->{'query_end'}, 'query_end called');
+
+DBICTest->schema->txn_commit();
+ok($prof->{'txn_commit'}, 'txn_commit called');
+
+$prof->reset();
+
+# Test a rollback
+DBICTest->schema->txn_begin();
+$rs = DBICTest::CD->search({});
+$rs->count();
+DBICTest->schema->txn_rollback();
+ok($prof->{'txn_rollback'}, 'txn_rollback called');
+
+DBICTest->schema->storage->debug(0);
+
+package DBIx::Test::Profiler;
+use strict;
+
+sub new {
+  my $class = shift;
+  return bless({}, $class);
+}
+
+sub query_start {
+  my $self = shift();
+  $self->{'query_start'} = 1;
+}
+
+sub query_end {
+  my $self = shift();
+  $self->{'query_end'} = 1;
+}
+
+sub txn_begin {
+  my $self = shift();
+  $self->{'txn_begin'} = 1;
+}
+
+sub txn_rollback {
+  my $self = shift();
+  $self->{'txn_rollback'} = 1;
+}
+
+sub txn_commit {
+  my $self = shift();
+  $self->{'txn_commit'} = 1;
+}
+
+sub reset {
+  my $self = shift();
+
+  $self->{'query_start'} = 0;
+  $self->{'query_end'} = 0;
+  $self->{'txn_begin'} = 0;
+  $self->{'txn_rollback'} = 0;
+  $self->{'txn_commit'} = 0;
+}
+
+1;
diff --git a/t/52cycle.t b/t/52cycle.t
new file mode 100644
index 0000000..0c1e330
--- /dev/null
+++ b/t/52cycle.t
@@ -0,0 +1,23 @@
+use strict;
+use warnings;
+use Test::More;
+
+use lib qw(t/lib);
+
+BEGIN {
+  eval { require Test::Memory::Cycle };
+  if ($@) {
+    plan skip_all => "leak test needs Test::Memory::Cycle";
+  } else {
+    plan tests => 1;
+  }
+}
+
+use DBICTest;
+use DBICTest::Schema;
+
+Test::Memory::Cycle->import;
+
+my $s = DBICTest::Schema->clone;
+
+memory_cycle_ok($s, 'No cycles in schema');
diff --git a/t/basicrels/146db2_400.t b/t/basicrels/146db2_400.t
new file mode 100644
index 0000000..2ac494c
--- /dev/null
+++ b/t/basicrels/146db2_400.t
@@ -0,0 +1,7 @@
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBICTest::BasicRels;
+
+require "t/run/146db2_400.tl";
+run_tests(DBICTest->schema);
diff --git a/t/basicrels/26might_have.t b/t/basicrels/26might_have.t
new file mode 100644
index 0000000..f2942e4
--- /dev/null
+++ b/t/basicrels/26might_have.t
@@ -0,0 +1,7 @@
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBICTest::BasicRels;
+
+require "t/run/26might_have.tl";
+run_tests(DBICTest->schema);
diff --git a/t/helperrels/146db2_400.t b/t/helperrels/146db2_400.t
new file mode 100644
index 0000000..655bc05
--- /dev/null
+++ b/t/helperrels/146db2_400.t
@@ -0,0 +1,7 @@
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBICTest::HelperRels;
+
+require "t/run/146db2_400.tl";
+run_tests(DBICTest->schema);
diff --git a/t/helperrels/26might_have.t b/t/helperrels/26might_have.t
new file mode 100644
index 0000000..d3ec615
--- /dev/null
+++ b/t/helperrels/26might_have.t
@@ -0,0 +1,7 @@
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBICTest::HelperRels;
+
+require "t/run/26might_have.tl";
+run_tests(DBICTest->schema);
diff --git a/t/helperrels/26sqlt.t b/t/helperrels/26sqlt.t
index 0c074cc..af87625 100644
--- a/t/helperrels/26sqlt.t
+++ b/t/helperrels/26sqlt.t
@@ -6,6 +6,8 @@ use DBICTest::HelperRels;
 eval "use SQL::Translator";
 plan skip_all => 'SQL::Translator required' if $@;
 
+# do not taunt happy dave ball
+
 my $schema = DBICTest::Schema;
 
 plan tests => 33;
@@ -77,6 +79,10 @@ my @fk_constraints =
   'selftable' => 'tags', 'foreigntable' => 'cd',
   'selfcols' => ['cd'], 'foreigncols' => ['cdid'],
   'needed' => 1, on_delete => 'CASCADE', on_update => 'CASCADE'},
+ {'display' => 'bookmark -> link',
+  'selftable' => 'bookmark', 'foreigntable' => 'link',
+  'selfcols' => ['link'], 'foreigncols' => ['id'],
+  'needed' => 1, on_delete => '', on_update => ''},
 );
 
 my @unique_constraints = (
@@ -86,9 +92,9 @@ my @unique_constraints = (
  {'display' => 'twokeytreelike name unique',
   'table' => 'twokeytreelike', 'cols' => ['name'],
   'needed' => 1},
-  {'display' => 'employee position and group_id unique',
-   'table' => 'employee', cols => ['position', 'group_id'],
-   'needed' => 1},
+#  {'display' => 'employee position and group_id unique',
+#   'table' => 'employee', cols => ['position', 'group_id'],
+#   'needed' => 1},
 );
 
 my $tschema = $translator->schema();
diff --git a/t/helperrels/29dbicadmin.t b/t/helperrels/29dbicadmin.t
new file mode 100644
index 0000000..ea5882e
--- /dev/null
+++ b/t/helperrels/29dbicadmin.t
@@ -0,0 +1,7 @@
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBICTest::HelperRels;
+
+require "t/run/29dbicadmin.tl";
+run_tests(DBICTest->schema);
diff --git a/t/lib/DBICTest.pm b/t/lib/DBICTest.pm
index 628696a..5ffdf90 100755
--- a/t/lib/DBICTest.pm
+++ b/t/lib/DBICTest.pm
@@ -13,9 +13,13 @@ sub initialise {
   unlink($db_file . "-journal") if -e $db_file . "-journal";
   mkdir("t/var") unless -d "t/var";
 
-  my $dsn = "dbi:SQLite:${db_file}";
+  my $dsn = $ENV{"DBICTEST_DSN"} || "dbi:SQLite:${db_file}";
+  my $dbuser = $ENV{"DBICTEST_DBUSER"} || '';
+  my $dbpass = $ENV{"DBICTEST_DBPASS"} || '';
+
+# my $dsn = "dbi:SQLite:${db_file}";
 
-  return DBICTest::Schema->compose_connection('DBICTest' => $dsn);
+  return DBICTest::Schema->compose_connection('DBICTest' => $dsn, $dbuser, $dbpass);
 }
 
 1;
diff --git a/t/lib/DBICTest/Schema.pm b/t/lib/DBICTest/Schema.pm
index f51a145..72e1da6 100644
--- a/t/lib/DBICTest/Schema.pm
+++ b/t/lib/DBICTest/Schema.pm
@@ -9,6 +9,8 @@ __PACKAGE__->load_classes(qw/
   Artist
   Employee
   CD
+  Link
+  Bookmark
   #dummy
   Track
   Tag
diff --git a/t/lib/DBICTest/Schema/Bookmark.pm b/t/lib/DBICTest/Schema/Bookmark.pm
new file mode 100644
index 0000000..4f9ec44
--- /dev/null
+++ b/t/lib/DBICTest/Schema/Bookmark.pm
@@ -0,0 +1,26 @@
+package # hide from PAUSE
+  DBICTest::Schema::Bookmark;
+
+use strict;
+use warnings;
+
+use base 'DBIx::Class::Core';
+
+__PACKAGE__->load_components(qw/PK::Auto Core/);
+__PACKAGE__->table('bookmark');
+__PACKAGE__->add_columns(
+  'id' => {
+    data_type => 'integer',
+    is_auto_increment => 1
+  },
+  'link' => {
+    data_type => 'integer',
+  },
+);
+
+__PACKAGE__->set_primary_key('id');
+__PACKAGE__->belongs_to(link => 'DBICTest::Schema::Link');
+
+1;
diff --git a/t/lib/DBICTest/Schema/Employee.pm b/t/lib/DBICTest/Schema/Employee.pm
index 4ebeffd..e91f872 100644
--- a/t/lib/DBICTest/Schema/Employee.pm
+++ b/t/lib/DBICTest/Schema/Employee.pm
@@ -29,7 +29,7 @@ __PACKAGE__->add_columns(
 __PACKAGE__->set_primary_key('employee_id');
 __PACKAGE__->position_column('position');
 
-__PACKAGE__->add_unique_constraint(position_group => [ qw/position group_id/ ]);
+#__PACKAGE__->add_unique_constraint(position_group => [ qw/position group_id/ ]);
 
 __PACKAGE__->mk_classdata('field_name_for', {
   employee_id => 'primary key',
diff --git a/t/lib/DBICTest/Schema/Link.pm b/t/lib/DBICTest/Schema/Link.pm
new file mode 100644
index 0000000..72574ea
--- /dev/null
+++ b/t/lib/DBICTest/Schema/Link.pm
@@ -0,0 +1,31 @@
+package # hide from PAUSE
+  DBICTest::Schema::Link;
+
+use strict;
+use warnings;
+
+use base 'DBIx::Class::Core';
+
+__PACKAGE__->load_components(qw/PK::Auto Core/);
+__PACKAGE__->table('link');
+__PACKAGE__->add_columns(
+  'id' => {
+    data_type => 'integer',
+    is_auto_increment => 1
+  },
+  'url' => {
+    data_type => 'varchar',
+    size => 100,
+    is_nullable => 1,
+  },
+  'title' => {
+    data_type => 'varchar',
+    size => 100,
+    is_nullable => 1,
+  },
+);
+__PACKAGE__->set_primary_key('id');
+
+use overload '""' => sub { shift->url }, fallback => 1;
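+# Note: url is nullable, so a Link can stringify to a false value;
+# t/run/01core.tl checks that this does not cause an error.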
+
+1;
diff --git a/t/lib/DBICTest/Setup.pm b/t/lib/DBICTest/Setup.pm
index 7f57408..833bebf 100755
--- a/t/lib/DBICTest/Setup.pm
+++ b/t/lib/DBICTest/Setup.pm
@@ -4,7 +4,7 @@ use DBICTest;
 
 my $schema = DBICTest->initialise;
 
-$schema->storage->on_connect_do([ "PRAGMA synchronous = OFF" ]);
+# $schema->storage->on_connect_do([ "PRAGMA synchronous = OFF" ]);
 
 my $dbh = $schema->storage->dbh;
 
@@ -142,4 +142,14 @@ $schema->populate('Event', [
   [ 1, '2006-04-25 22:24:33' ],
 ]);
 
+$schema->populate('Link', [
+  [ qw/id title/ ],
+  [ 1, 'aaa' ]
+]);
+
+$schema->populate('Bookmark', [
+  [ qw/id link/ ],
+  [ 1, 1 ]
+]);
+
 1;
diff --git a/t/lib/sqlite.sql b/t/lib/sqlite.sql
index c3270e3..9e1894c 100644
--- a/t/lib/sqlite.sql
+++ b/t/lib/sqlite.sql
@@ -1,6 +1,6 @@
 --
 -- Created by SQL::Translator::Producer::SQLite
--- Created on Wed Apr 26 03:18:22 2006
+-- Created on Sun May 14 18:25:49 2006
 --
 
 BEGIN TRANSACTION;
@@ -79,6 +79,14 @@ CREATE TABLE cd (
 );
 
 --
+-- Table: bookmark
+--
+CREATE TABLE bookmark (
+  id INTEGER PRIMARY KEY NOT NULL,
+  link integer NOT NULL
+);
+
+--
 -- Table: track
 --
 CREATE TABLE track (
@@ -97,6 +105,15 @@ CREATE TABLE self_ref (
 );
 
 --
+-- Table: tags
+--
+CREATE TABLE tags (
+  tagid INTEGER PRIMARY KEY NOT NULL,
+  cd integer NOT NULL,
+  tag varchar(100) NOT NULL
+);
+
+--
 -- Table: treelike
 --
 CREATE TABLE treelike (
@@ -106,12 +123,12 @@ CREATE TABLE treelike (
 );
 
 --
--- Table: tags
+-- Table: link
 --
-CREATE TABLE tags (
-  tagid INTEGER PRIMARY KEY NOT NULL,
-  cd integer NOT NULL,
-  tag varchar(100) NOT NULL
+CREATE TABLE link (
+  id INTEGER PRIMARY KEY NOT NULL,
+  url varchar(100),
+  title varchar(100)
 );
 
 --
@@ -168,7 +185,6 @@ CREATE TABLE producer (
   name varchar(100) NOT NULL
 );
 
-CREATE UNIQUE INDEX position_group_employee on employee (position, group_id);
 CREATE UNIQUE INDEX tktlnameunique_twokeytreelike on twokeytreelike (name);
 CREATE UNIQUE INDEX artist_title_cd on cd (artist, title);
 COMMIT;
diff --git a/t/run/01core.tl b/t/run/01core.tl
index c1a5b46..05e4dd3 100644
--- a/t/run/01core.tl
+++ b/t/run/01core.tl
@@ -1,7 +1,7 @@
 sub run_tests {
 my $schema = shift;
 
-plan tests => 55;
+plan tests => 58;
 
 # figure out if we've got a version of sqlite that is older than 3.2.6, in
 # which case COUNT(DISTINCT()) doesn't work
@@ -86,6 +86,13 @@ is($new_again->name, 'Man With A Spoon', 'Retrieved correctly');
 
 is($new_again->ID, 'DBICTest::Artist|artist|artistid=4', 'unique object id generated correctly');
 
+# Test backwards compatibility
+{
+  my $artist_by_hash = $schema->resultset('Artist')->find(artistid => 4);
+  is($artist_by_hash->name, 'Man With A Spoon', 'Retrieved correctly');
+  is($artist_by_hash->ID, 'DBICTest::Artist|artist|artistid=4', 'unique object id generated correctly');
+}
+
 is($schema->resultset("Artist")->count, 4, 'count ok');
 
 # test find_or_new
@@ -158,7 +165,7 @@ is($schema->class("Artist")->field_name_for->{name}, 'artist name', 'mk_classdat
 
 my $search = [ { 'tags.tag' => 'Cheesy' }, { 'tags.tag' => 'Blue' } ];
 
-my $or_rs = $schema->resultset("CD")->search($search, { join => 'tags',
+my( $or_rs ) = $schema->resultset("CD")->search_rs($search, { join => 'tags',
   order_by => 'cdid' });
 
 cmp_ok($or_rs->count, '==', 5, 'Search with OR ok');
@@ -230,6 +237,14 @@ ok($schema->storage(), 'Storage available');
   cmp_ok(@artsn, '==', 4, "Four artists returned");
 }
 
+my $newbook = $schema->resultset( 'Bookmark' )->find(1);
+
+$@ = '';
+eval {
+  my $newlink = $newbook->link;
+};
+ok(!$@, "stringify to false value doesn't cause error");
+
 # test cascade_delete through many_to_many relations
 {
   my $art_del = $schema->resultset("Artist")->find({ artistid => 1 });
diff --git a/t/run/06relationship.tl b/t/run/06relationship.tl
index b85fea1..a66211e 100644
--- a/t/run/06relationship.tl
+++ b/t/run/06relationship.tl
@@ -3,7 +3,7 @@ my $schema = shift;
 use strict;
 use warnings;
 
-plan tests => 30;
+plan tests => 32;
 
 # has_a test
 my $cd = $schema->resultset("CD")->find(4);
@@ -38,6 +38,12 @@ if ($INC{'DBICTest/HelperRels.pm'}) {
 
 is( ($artist->search_related('cds'))[3]->title, 'Big Flop', 'create_related ok' );
 
+my( $rs_from_list ) = $artist->search_related_rs('cds');
+is( ref($rs_from_list), 'DBIx::Class::ResultSet', 'search_related_rs in list context returns rs' );
+
+( $rs_from_list ) = $artist->cds_rs();
+is( ref($rs_from_list), 'DBIx::Class::ResultSet', 'relation_rs in list context returns rs' );
+
 # count_related
 is( $artist->count_related('cds'), 4, 'count_related ok' );
 
diff --git a/t/run/12pg.tl b/t/run/12pg.tl
index ee3e819..d71e39c 100644
--- a/t/run/12pg.tl
+++ b/t/run/12pg.tl
@@ -1,6 +1,5 @@
 sub run_tests {
 my $schema = shift;
 
-my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_PG_${_}" } qw/DSN USER PASS/};
 
 #warn "$dsn $user $pass";
@@ -13,8 +12,9 @@ plan tests => 4;
 
 DBICTest::Schema->compose_connection('PgTest' => $dsn, $user, $pass);
 
 my $dbh = PgTest->schema->storage->dbh;
-
-$dbh->do("CREATE TABLE artist (artistid serial PRIMARY KEY, name VARCHAR(255), charfield CHAR(10));");
+PgTest->schema->source("Artist")->name("testschema.artist");
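+# (with the source renamed, the SQL that DBIC generates for Artist now
+# references the schema-qualified testschema.artist table)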
+$dbh->do("CREATE SCHEMA testschema;");
+$dbh->do("CREATE TABLE testschema.artist (artistid serial PRIMARY KEY, name VARCHAR(255), charfield CHAR(10));");
 
 PgTest::Artist->load_components('PK::Auto');
 
@@ -47,15 +47,16 @@ my $test_type_info = {
 };
 
-my $type_info = PgTest->schema->storage->columns_info_for('artist');
+my $type_info = PgTest->schema->storage->columns_info_for('testschema.artist');
 my $artistid_defval = delete $type_info->{artistid}->{default_value};
 like($artistid_defval,
-     qr/^nextval\('public\.artist_artistid_seq'::(?:text|regclass)\)/,
+     qr/^nextval\('([^\.]*\.){0,1}artist_artistid_seq'::(?:text|regclass)\)/,
      'columns_info_for - sequence matches Pg get_autoinc_seq expectations');
 is_deeply($type_info, $test_type_info,
           'columns_info_for - column data types');
 
-$dbh->do("DROP TABLE artist;");
+$dbh->do("DROP TABLE testschema.artist;");
+$dbh->do("DROP SCHEMA testschema;");
 }
diff --git a/t/run/146db2_400.tl b/t/run/146db2_400.tl
new file mode 100644
index 0000000..ac6cd47
--- /dev/null
+++ b/t/run/146db2_400.tl
@@ -0,0 +1,74 @@
+sub run_tests {
+my $schema = shift;
+
+my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_DB2_400_${_}" } qw/DSN USER PASS/};
+
+#warn "$dsn $user $pass";
+
+# Probably best to pass the DBQ option in the DSN to specify a specific
+# library.  Something like:
+# DBICTEST_DB2_400_DSN='dbi:ODBC:dsn=MyAS400;DBQ=MYLIB'
+plan skip_all, 'Set $ENV{DBICTEST_DB2_400_DSN}, _USER and _PASS to run this test'
+  unless ($dsn && $user);
+
+plan tests => 6;
+
+DBICTest::Schema->compose_connection('DB2Test' => $dsn, $user, $pass);
+
+my $dbh = DB2Test->schema->storage->dbh;
+
+$dbh->do("DROP TABLE artist", { RaiseError => 0, PrintError => 0 });
+
+$dbh->do("CREATE TABLE artist (artistid INTEGER GENERATED BY DEFAULT AS IDENTITY (START WITH 1, INCREMENT BY 1), name VARCHAR(255), charfield CHAR(10))");
+
+DB2Test::Artist->load_components('PK::Auto');
+
+# test primary key handling
+my $new = DB2Test::Artist->create({ name => 'foo' });
+ok($new->artistid, "Auto-PK worked");
+
+# test LIMIT support
+for (1..6) {
+  DB2Test::Artist->create({ name => 'Artist ' . $_ });
+}
+my $it = DB2Test::Artist->search( {},
+  { rows => 3,
+    order_by => 'artistid'
+  }
+);
+is( $it->count, 3, "LIMIT count ok" );
+is( $it->next->name, "foo", "iterator->next ok" );
+$it->next;
+is( $it->next->name, "Artist 2", "iterator->next ok" );
+is( $it->next, undef, "next past end of resultset ok" );
+
+my $test_type_info = {
+  'artistid' => {
+    'data_type' => 'INTEGER',
+    'is_nullable' => 0,
+    'size' => 10
+  },
+  'name' => {
+    'data_type' => 'VARCHAR',
+    'is_nullable' => 1,
+    'size' => 255
+  },
+  'charfield' => {
+    'data_type' => 'CHAR',
+    'is_nullable' => 1,
+    'size' => 10
+  },
+};
+
+
+my $type_info = DB2Test->schema->storage->columns_info_for('artist');
+is_deeply($type_info, $test_type_info, 'columns_info_for - column data types');
+
+
+
+# clean up our mess
+$dbh->do("DROP TABLE artist");
+
+}
+
+1;
diff --git a/t/run/16joins.tl b/t/run/16joins.tl
index c83aa7c..15603aa 100644
--- a/t/run/16joins.tl
+++ b/t/run/16joins.tl
@@ -7,7 +7,7 @@ BEGIN {
   eval "use DBD::SQLite";
   plan $@
     ? ( skip_all => 'needs DBD::SQLite for testing' )
-    : ( tests => 42 );
+    : ( tests => 44 );
 }
 
 # figure out if we've got a version of sqlite that is older than 3.2.6, in
 # which case COUNT(DISTINCT()) doesn't work
@@ -101,6 +101,10 @@ $rs = $schema->resultset("CD")->search(
 );
 cmp_ok( scalar $rs->all, '==', scalar $rs->slice(0, $rs->count - 1), 'slice() with join has same count as all()' );
 
+eval { $rs->search(undef, { rows => 0, offset => 3 })->all; };
+
+ok($@, "rows => 0 errors: $@");
+
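+# (a rows value of 0 is invalid, so the search above is expected to die)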
 $rs = $schema->resultset("Artist")->search(
   { 'liner_notes.notes' => 'Kill Yourself!' },
   { join => { 'cds' => 'liner_notes' } });
 
@@ -273,6 +277,25 @@
 $schema->storage->debug(0);
 
 cmp_ok($queries, '==', 1, 'Only one query run');
 
+# has_many resulting in an additional select if no records available despite prefetch
+my $track = $schema->resultset("Artist")->create( {
+  artistid => 4,
+  name => 'Artist without CDs',
+} );
+
+$queries = 0;
+$schema->storage->debug(1);
+
+my $artist_without_cds = $schema->resultset("Artist")->find(4, {
+  join => [qw/ cds /],
+  prefetch => [qw/ cds /],
+});
+my @no_cds = $artist_without_cds->cds;
+
+is($queries, 1, 'prefetch ran only 1 sql statement');
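+# (the empty cds relationship was prefetched, so reading it must be
+# served from the prefetch cache instead of issuing a second SELECT)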
+
+$schema->storage->debug(0);
+
 } # end run_tests
 
 1;
diff --git a/t/run/23cache.tl b/t/run/23cache.tl
index 74a6ae9..a822601 100644
--- a/t/run/23cache.tl
+++ b/t/run/23cache.tl
@@ -6,7 +6,7 @@ $schema->storage->debugcb( sub{ $queries++ } );
 
 eval "use DBD::SQLite";
 plan skip_all => 'needs DBD::SQLite for testing' if $@;
-plan tests => 23;
+plan tests => 22;
 
 my $rs = $schema->resultset("Artist")->search(
   { artistid => 1 }
@@ -14,7 +14,7 @@ my $rs = $schema->resultset("Artist")->search(
 
 my $artist = $rs->first;
 
-is( scalar @{ $rs->get_cache }, 0, 'cache is not populated without cache attribute' );
+ok( !defined($rs->get_cache), 'cache is not populated without cache attribute' );
 
 $rs = $schema->resultset('Artist')->search( undef, { cache => 1 } );
 my $artists = [ $rs->all ];
@@ -23,7 +23,7 @@ is( scalar @{$rs->get_cache}, 3, 'all() populates cache for search with cache at
 
 $rs->clear_cache;
 
-is( scalar @{$rs->get_cache}, 0, 'clear_cache is functional' );
+ok( !defined($rs->get_cache), 'clear_cache is functional' );
 
 $rs->next;
 
@@ -38,12 +38,6 @@ $cd = $schema->resultset('CD')->find(1);
 
 $rs->clear_cache;
 
-eval {
-  $rs->set_cache( [ $cd ] );
-};
-
-is( scalar @{$rs->get_cache}, 0, 'set_cache() only accepts objects of correct type for the resultset' );
-
 $queries = 0;
 $schema->storage->debug(1);
 
diff --git a/t/run/26might_have.tl b/t/run/26might_have.tl
new file mode 100644
index 0000000..0b700e8
--- /dev/null
+++ b/t/run/26might_have.tl
@@ -0,0 +1,43 @@
+sub run_tests {
+my $schema = shift;
+
+my $queries;
+#$schema->storage->debugfh(IO::File->new('t/var/temp.trace', 'w'));
+$schema->storage->debugcb( sub{ $queries++ } );
+
+eval "use DBD::SQLite";
+plan skip_all => 'needs DBD::SQLite for testing' if $@;
+plan tests => 2;
+
+
+my $cd = $schema->resultset("CD")->find(1);
+$cd->title('test');
+
+# SELECT count
+$queries = 0;
+$schema->storage->debug(1);
+
+$cd->update;
+
+is($queries, 1, 'liner_notes (might_have) not prefetched - do not load
+liner_notes on update');
+
+$schema->storage->debug(0);
+
+
+my $cd2 = $schema->resultset("CD")->find(2, {prefetch => 'liner_notes'});
+$cd2->title('test2');
+
+# SELECT count
+$queries = 0;
+$schema->storage->debug(1);
+
+$cd2->update;
+
+is($queries, 1, 'liner_notes (might_have) prefetched - do not load
+liner_notes on update');
+
+$schema->storage->debug(0);
+}
+
+1;
diff --git a/t/run/27ordered.tl b/t/run/27ordered.tl
index 5ced6bf..3a53951 100644
--- a/t/run/27ordered.tl
+++ b/t/run/27ordered.tl
@@ -16,6 +16,8 @@ sub run_tests {
 
   hammer_rs( $employees );
 
+  #return;
+
   DBICTest::Employee->grouping_column('group_id');
   $employees->delete();
   foreach my $group_id (1..3) {
@@ -42,29 +44,29 @@ sub hammer_rs {
 
   foreach my $position (1..$count) {
 
-    $row = $rs->find({ $position_column=>$position });
+    ($row) = $rs->search({ $position_column=>$position })->all();
     $row->move_previous();
     ok( check_rs($rs), "move_previous( $position )" );
 
-    $row = $rs->find({ $position_column=>$position });
+    ($row) = $rs->search({ $position_column=>$position })->all();
     $row->move_next();
     ok( check_rs($rs), "move_next( $position )" );
 
-    $row = $rs->find({ $position_column=>$position });
+    ($row) = $rs->search({ $position_column=>$position })->all();
    $row->move_first();
     ok( check_rs($rs), "move_first( $position )" );
 
-    $row = $rs->find({ $position_column=>$position });
+    ($row) = $rs->search({ $position_column=>$position })->all();
     $row->move_last();
     ok( check_rs($rs), "move_last( $position )" );
 
    foreach my $to_position (1..$count) {
-      $row = $rs->find({ $position_column=>$position });
+      ($row) = $rs->search({ $position_column=>$position })->all();
       $row->move_to($to_position);
       ok( check_rs($rs), "move_to( $position => $to_position )" );
    }
 
-    $row = $rs->find({ position=>$position });
+    ($row) = $rs->search({ position=>$position })->all();
     if ($position==1) {
       ok( !$row->previous_sibling(), 'no previous sibling' );
       ok( !$row->first_sibling(), 'no first sibling' );
diff --git a/t/run/29dbicadmin.tl b/t/run/29dbicadmin.tl
new file mode 100644
index 0000000..93c42a1
--- /dev/null
+++ b/t/run/29dbicadmin.tl
@@ -0,0 +1,38 @@
+# vim: filetype=perl
+
+sub run_tests {
+
+  eval 'require JSON';
+  plan skip_all, 'Install JSON to run this test' if ($@);
+
+  eval 'require Text::CSV_XS';
+  if ($@) {
+    eval 'require Text::CSV_PP';
+    plan skip_all, 'Install Text::CSV_XS or Text::CSV_PP to run this test' if ($@);
+  }
+
+  plan tests => 5;
+  my $schema = shift;
+
+  my $employees = $schema->resultset('Employee');
+  my $cmd = qq|script/dbicadmin --schema=DBICTest::Schema --class=Employee --tlibs --connect='["dbi:SQLite:dbname=t/var/DBIxClass.db","",""]' --force|;
+
+  `$cmd --op=insert --set='{name:"Matt"}'`;
+  ok( ($employees->count()==1), 'insert count' );
+
+  my $employee = $employees->find(1);
+  ok( ($employee->name() eq 'Matt'), 'insert valid' );
+
+  `$cmd --op=update --set='{name:"Trout"}'`;
+  $employee = $employees->find(1);
+  ok( ($employee->name() eq 'Trout'), 'update' );
+
+  `$cmd --op=insert --set='{name:"Aran"}'`;
+  my $data = `$cmd --op=select --attrs='{order_by:"name"}'`;
+  ok( ($data=~/Aran.*Trout/s), 'select with attrs' );
+
+  `$cmd --op=delete --where='{name:"Trout"}'`;
+  ok( ($employees->count()==1), 'delete' );
+}
+
+1;