'Class::Data::Accessor' => 0.01,
'Carp::Clan' => 0,
'DBI' => 1.40,
+ 'Module::Find' => 0,
+ 'Class::Inspector' => 0,
},
build_requires => {
'DBD::SQLite' => 1.11,
},
- recommends => {
- 'Data::UUID' => 0,
- 'Module::Find' => 0,
- 'Class::Inspector' => 0,
- },
create_makefile_pl => 'passthrough',
create_readme => 1,
- test_files => [ glob('t/*.t'), glob('t/*/*.t') ]
+ test_files => [ glob('t/*.t'), glob('t/*/*.t') ],
+ script_files => [ glob('script/*') ],
);
Module::Build->new(%arguments)->create_build_script;
Revision history for DBIx::Class
+ - refactor debugging to allow for profiling using Storage::Statistics
+ - removed Data::UUID from deps, made other optionals required
- modified SQLT parser to skip dupe table names
- added remove_column(s) to ResultSource/ResultSourceProxy
- added add_column alias to ResultSourceProxy
- CDBICompat: override find_or_create to fix column casing when
ColumnCase is loaded
+0.06003
+ - make find_or_create_related check defined() instead of truth
+ - don't unnecessarily fetch rels for cascade_update
+ - don't set_columns explicitly in update_or_create; instead use
+ update($hashref) so InflateColumn works
+ - fix for has_many prefetch with 0 related rows
+ - make limit error if rows => 0
+ - added memory cycle tests and a long-needed weaken call
+
0.06002 2006-04-20 00:42:41
- fix set_from_related to accept undef
- fix to Dumper-induced hash iteration bug
SQLT modules so an app can do its own deploy without SQLT on the target
system
+
wdh: Will Hawes
+gphat: Cory G Watson <gphat@cpan.org>
+
=head1 LICENSE
You may distribute this code under the same terms as Perl itself.
notation should be used. name_sep needs to be set to allow the
SQL generator to put the quotes in the correct place.
+=head2 Overloading methods
+
+L<DBIx::Class> uses the L<Class::C3> package, which provides for redispatch of
+method calls. You have to use calls to C<next::method> to overload methods.
+More information on using L<Class::C3> with L<DBIx::Class> can be found in
+L<DBIx::Class::Manual::Component>.
+
+=head3 Changing one field whenever another changes
+
+For example, say that you have three columns, C<id>, C<number>, and
+C<squared>. You would like to make changes to C<number> and have
+C<squared> be automagically set to the value of C<number> squared.
+You can accomplish this by overriding C<store_column>:
+
+ sub store_column {
+ my ( $self, $name, $value ) = @_;
+ if ($name eq 'number') {
+ $self->squared($value * $value);
+ }
+ $self->next::method($name, $value);
+ }
+
+Note that the hard work is done by the call to C<next::method>, which
+redispatches your call to C<store_column> to the superclass(es).
+
+=head3 Automatically creating related objects
+
+You might have a class C<Artist> which has many C<CD>s. Further, you
+want to create a C<CD> object every time you insert an C<Artist> object.
+You can accomplish this by overriding C<insert>:
+
+ sub insert {
+ my ( $class, $args_ref ) = @_;
+ my $self = $class->next::method($args_ref);
+ $self->cds->new({})->fill_from_artist($self)->insert;
+ return $self;
+ }
+
+where C<fill_from_artist> is a method you specify in C<CD> which sets
+values in C<CD> based on the data in the C<Artist> object you pass in.
+
+=head2 Debugging DBIx::Class objects with Data::Dumper
+
+L<Data::Dumper> can be a very useful tool for debugging, but sometimes it can
+be hard to find the pertinent data in all the data it can generate.
+Specifically, if one naively tries to use it like so,
+
+ use Data::Dumper;
+
+ my $cd = $schema->resultset('CD')->find(1);
+ print Dumper($cd);
+
+several pages worth of data from the CD object's schema and result source will
+be dumped to the screen. Since usually one is only interested in a few column
+values of the object, this is not very helpful.
+
+Luckily, it is possible to modify the data before L<Data::Dumper> outputs
+it. Simply define a hook that L<Data::Dumper> will call on the object before
+dumping it. For example,
+
+ package My::DB::CD;
+
+ sub _dumper_hook {
+ $_[0] = bless {
+ %{ $_[0] },
+ result_source => undef,
+ }, ref($_[0]);
+ }
+
+ [...]
+
+ use Data::Dumper;
+
+ $Data::Dumper::Freezer = '_dumper_hook';
+
+ my $cd = $schema->resultset('CD')->find(1);
+ print Dumper($cd);
+ # dumps $cd without its ResultSource
+
+If the structure of your schema is such that there is a common base class for
+all your table classes, simply put a method similar to C<_dumper_hook> in the
+base class and set C<$Data::Dumper::Freezer> to its name and L<Data::Dumper>
+will automagically clean up your data before printing it. See
+L<Data::Dumper/EXAMPLES> for more information.
+
+=head2 Retrieving a row object's Schema
+
+It is possible to get a Schema object from a row object like so,
+
+ my $schema = $cd->result_source->schema;
+ my $artist_rs = $schema->resultset('Artist');
+ # for example
+
+This can be useful when you don't want to pass around a Schema object to every
+method.
+
+=head2 Profiling
+
+When you enable L<DBIx::Class::Storage::DBI>'s debugging it prints the SQL
+executed as well as notifications of query completion and transaction
+begin/commit. If you'd like to profile the SQL you can subclass the
+L<DBIx::Class::Storage::Statistics> class and write your own profiling
+mechanism:
+
+ package My::Profiler;
+ use strict;
+
+ use base 'DBIx::Class::Storage::Statistics';
+
+ use Time::HiRes qw(time);
+
+ my $start;
+
+ sub query_start {
+ my $self = shift();
+ my $sql = shift();
+    my @params = @_;
+
+ print "Executing $sql: ".join(', ', @params)."\n";
+ $start = time();
+ }
+
+ sub query_end {
+ my $self = shift();
+ my $sql = shift();
+ my @params = @_;
+
+ printf("Execution took %0.4f seconds.\n", time() - $start);
+ $start = undef;
+ }
+
+ 1;
+
+You can then install that class as the debugging object:
+
+ __PACKAGE__->storage()->debugobj(new My::Profiler());
+ __PACKAGE__->storage()->debug(1);
+
+A more complicated example might involve storing each execution of SQL in a
+hash keyed by statement (this assumes a package-level C<my %calls;> declared
+alongside C<$start>):
+
+ sub query_end {
+ my $self = shift();
+ my $sql = shift();
+ my @params = @_;
+
+ my $elapsed = time() - $start;
+ push(@{ $calls{$sql} }, {
+ params => \@params,
+ elapsed => $elapsed
+ });
+ }
+
+You could then create average, high and low execution times for an SQL
+statement and dig down to see if certain parameters cause aberrant behavior.
+
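+A minimal sketch of such a summary (assuming the C<%calls> hash above is in
+scope, and using the core L<List::Util> module) might look like:
+
+  use List::Util qw(min max sum);
+
+  foreach my $sql (keys %calls) {
+    my @times = map { $_->{elapsed} } @{ $calls{$sql} };
+    printf("%s\n  avg %0.4fs, min %0.4fs, max %0.4fs over %d calls\n",
+      $sql, sum(@times) / @times, min(@times), max(@times), scalar @times);
+  }
+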
=cut
my $from_position = $self->get_column( $position_column );
return 0 if ( $to_position < 1 );
return 0 if ( $from_position==$to_position );
+ my @between = (
+ ( $from_position < $to_position )
+ ? ( $from_position+1, $to_position )
+ : ( $to_position, $from_position-1 )
+ );
my $rs = $self->result_source->resultset->search({
- -and => [
- $position_column =>
- { -between => [ $from_position, $to_position ] },
- ],
+ $position_column => { -between => [ @between ] },
$self->_grouping_clause(),
});
my $op = ($from_position>$to_position) ? '+' : '-';
- my $case_stmt = "CASE $position_column \n".
- " WHEN $from_position THEN $to_position\n".
- " ELSE $position_column $op 1\n".
- "END";
- $rs->update({ $position_column => \$case_stmt });
- $self->store_column( $position_column => $to_position );
+ $rs->update({ $position_column => \"$position_column $op 1" });
+ $self->update({ $position_column => $to_position });
return 1;
}
=head1 BUGS
+=head2 Unique Constraints
+
+Unique indexes and constraints on the position column are not
+supported at this time. It would make sense to support them,
+but there are some unexpected database issues that make this
+hard to do. The main problem from the author's view is that
+SQLite (the DB engine that we use for testing) does not support
+ORDER BY on updates.
+
=head2 Race Condition on Insert
If a position is not specified for an insert then a position
the positions of the other objects will not reflect their new values
until you reload them from the database.
-The are times when you will want to move objects as groups, such
+There are times when you will want to move objects as groups, such
as changing the parent of several objects at once - this directly
conflicts with this problem. One solution is for us to write a
ResultSet class that supports a parent() method, for example. Another
{ prefetch => [qw/book/],
});
my @book_objs = $obj->books;
+ my $books_rs = $obj->books;
+ ( $books_rs ) = $obj->books_rs;
$obj->add_to_books(\%col_data);
columns. You should pass the name of the column in the foreign class as the
$cond argument, or specify a complete join condition.
-As well as the accessor method, a method named C<< add_to_<relname> >>
-will also be added to your Row items, this allows you to insert new
-related items, using the same mechanism as in L<DBIx::Class::Relationship::Base/"create_related">.
+Three methods are created when you create a has_many relationship. The first
+method is the expected accessor method. The second is almost exactly the same
+as the accessor method, but with "_rs" appended to the method name. This
+method works just like the normal accessor, except that it always returns a
+resultset, even in list context. The third method, named
+C<< add_to_<relname> >>, will also be added to your Row items; this allows
+you to insert new related items, using the same mechanism as in
+L<DBIx::Class::Relationship::Base/"create_related">.
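+
+For example, with a hypothetical C<Author> class declaring
+C<< __PACKAGE__->has_many(books => 'Book', 'author_id') >>:
+
+  my @books    = $author->books;      # accessor; objects in list context
+  my $books_rs = $author->books_rs;   # always a resultset
+  my $book     = $author->add_to_books({ title => 'Dahut' });
+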
If you delete an object in a class with a C<has_many> relationship, all
related objects will be deleted as well. However, any database-level
);
} elsif ($acc_type eq 'multi') {
$meth{$rel} = sub { shift->search_related($rel, @_) };
+ $meth{"${rel}_rs"} = sub { shift->search_related_rs($rel, @_) };
$meth{"add_to_${rel}"} = sub { shift->create_related($rel, @_); };
} else {
$class->throw_exception("No such relationship accessor type $acc_type");
__PACKAGE__->add_relationship('relname', 'Foreign::Class', $cond, $attrs);
-The condition needs to be an SQL::Abstract-style representation of the
-join between the tables. When resolving the condition for use in a JOIN,
-keys using the pseudo-table I<foreign> are resolved to mean "the Table on the
-other side of the relationship", and values using the pseudo-table I<self>
+The condition needs to be an L<SQL::Abstract>-style representation of the
+join between the tables. When resolving the condition for use in a C<JOIN>,
+keys using the pseudo-table C<foreign> are resolved to mean "the Table on the
+other side of the relationship", and values using the pseudo-table C<self>
are resolved to mean "the Table this class is representing". Other
restrictions, such as by value, sub-select and other tables, may also be
-used. Please check your database for JOIN parameter support.
+used. Please check your database for C<JOIN> parameter support.
-For example, if you're creating a rel from Author to Book, where the Book
-table has a column author_id containing the ID of the Author row:
+For example, if you're creating a relationship from C<Author> to C<Book>, where
+the C<Book> table has a column C<author_id> containing the ID of the C<Author>
+row:
{ 'foreign.author_id' => 'self.id' }
-will result in the JOIN clause
+will result in the C<JOIN> clause
- author me JOIN book book ON bar.author_id = me.id
+ author me JOIN book book ON book.author_id = me.id
-You can specify as many foreign => self mappings as necessary. Each key/value
-pair provided in a hashref will be used as ANDed conditions, to add an ORed
-condition, use an arrayref of hashrefs. See the L<SQL::Abstract> documentation
-for more details.
+For multi-column foreign keys, you will need to specify a C<foreign>-to-C<self>
+mapping for each column in the key. For example, if you're creating a
+relationship from C<Book> to C<Edition>, where the C<Edition> table refers to a
+publisher and a type (e.g. "paperback"):
+
+ {
+ 'foreign.publisher_id' => 'self.publisher_id',
+ 'foreign.type_id' => 'self.type_id',
+ }
+
+This will result in the C<JOIN> clause:
+
+ book me JOIN edition edition ON edition.publisher_id = me.publisher_id
+ AND edition.type_id = me.type_id
+
+Each key-value pair provided in a hashref will be used as C<AND>ed conditions.
+To add an C<OR>ed condition, use an arrayref of hashrefs. See the
+L<SQL::Abstract> documentation for more details.
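+
+For example, a hypothetical sketch of an C<OR>ed condition, matching rows
+related through either of two columns:
+
+  [
+    { 'foreign.artist_id'   => 'self.id' },
+    { 'foreign.producer_id' => 'self.id' },
+  ]
+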
Valid attributes are as follows:
=head2 search_related
- $rs->search_related('relname', $cond, $attrs);
+ @objects = $rs->search_related('relname', $cond, $attrs);
+ $objects_rs = $rs->search_related('relname', $cond, $attrs);
Run a search on a related resultset. The search will be restricted to the
item or items represented by the L<DBIx::Class::ResultSet> it was called
return shift->related_resultset(shift)->search(@_);
}
+=head2 search_related_rs
+
+ ( $objects_rs ) = $rs->search_related_rs('relname', $cond, $attrs);
+
+This method works exactly the same as search_related, except that
+it guarantees a resultset, even in list context.
+
+=cut
+
+sub search_related_rs {
+ return shift->related_resultset(shift)->search_rs(@_);
+}
+
=head2 count_related
$obj->count_related('relname', $cond, $attrs);
sub find_or_create_related {
my $self = shift;
- return $self->find_related(@_) || $self->create_related(@_);
+ my $obj = $self->find_related(@_);
+ return (defined($obj) ? $obj : $self->create_related(@_));
}
=head2 update_or_create_related
my %rels = map { $_ => $source->relationship_info($_) } $source->relationships;
my @cascade = grep { $rels{$_}{attrs}{cascade_update} } keys %rels;
foreach my $rel (@cascade) {
+ next if (
+ $rels{$rel}{attrs}{accessor} eq 'single'
+ && !exists($self->{_relationship_data}{$rel})
+ );
$_->update for grep defined, $self->$rel;
}
return $ret;
sub search {
my $self = shift;
-
+ my $rs = $self->search_rs( @_ );
+ return (wantarray ? $rs->all : $rs);
+}
+
+=head2 search_rs
+
+=over 4
+
+=item Arguments: $cond, \%attrs?
+
+=item Return Value: $resultset
+
+=back
+
+This method does exactly the same thing as search(), except that it will
+always return a resultset, even in list context.
+
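+For example (assuming a C<year> column on the resultset's source):
+
+  my @cds   = $rs->search({ year => 2005 });     # objects in list context
+  my ($rs2) = $rs->search_rs({ year => 2005 });  # still a resultset
+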
+=cut
+
+sub search_rs {
+ my $self = shift;
+
my $attrs = { %{$self->{attrs}} };
my $having = delete $attrs->{having};
$attrs = { %$attrs, %{ pop(@_) } } if @_ > 1 and ref $_[$#_] eq 'HASH';
unless (@_) { # no search, effectively just a clone
my $rows = $self->get_cache;
- if( @{$rows} ) {
+ if ($rows) {
$rs->set_cache($rows);
}
}
- return (wantarray ? $rs->all : $rs);
+ return $rs;
}
=head2 search_literal
$hash = {};
@{$hash}{@cols} = @_;
}
+ elsif (@_) {
+ # For backwards compatibility
+ $hash = {@_};
+ }
else {
$self->throw_exception(
"Arguments to find must be a hashref or match the number of columns in the "
- . exists $attrs->{key} ? "$attrs->{key} unique constraint" : "primary key"
+ . (exists $attrs->{key} ? "$attrs->{key} unique constraint" : "primary key")
);
}
Inflates the first result without creating a cursor if the resultset has
any records in it; if not, returns nothing. Used by L</find> as an optimisation.
+Can optionally take an additional condition B<only> - this is a fast-code-path
+method; if you need to add extra joins or similar, call ->search and then
+->single without a condition on the $rs returned from that.
+
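+For example (assuming a C<cdid> column):
+
+  my $cd = $schema->resultset('CD')->single({ cdid => 1 });
+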
=cut
sub single {
sub next {
my ($self) = @_;
- if (@{$self->{all_cache} || []}) {
+ if (my $cache = $self->get_cache) {
$self->{all_cache_position} ||= 0;
- return $self->{all_cache}->[$self->{all_cache_position}++];
+ return $cache->[$self->{all_cache_position}++];
}
if ($self->{attrs}{cache}) {
$self->{all_cache_position} = 1;
last unless (@raw = $self->cursor->next);
$row = $self->{stashed_row} = \@raw;
$tree = $self->_collapse_result($as, $row, $c_prefix);
- #warn Data::Dumper::Dumper($tree, $row);
}
- @$target = @final;
+ @$target = (@final ? @final : [ {}, {} ]);
+ # single empty result to indicate an empty prefetched has_many
}
return $info;
sub count {
my $self = shift;
return $self->search(@_)->count if @_ and defined $_[0];
- return scalar @{ $self->get_cache } if @{ $self->get_cache };
+ return scalar @{ $self->get_cache } if $self->get_cache;
my $count = $self->_count;
return 0 unless $count;
sub all {
my ($self) = @_;
- return @{ $self->get_cache } if @{ $self->get_cache };
+ return @{ $self->get_cache } if $self->get_cache;
my @obj;
my $row = $self->find($hash, $attrs);
if (defined $row) {
- $row->set_columns($hash);
- $row->update;
+ $row->update($hash);
return $row;
}
=cut
sub get_cache {
- shift->{all_cache} || [];
+ shift->{all_cache};
}
=head2 set_cache
sub set_cache {
my ( $self, $data ) = @_;
$self->throw_exception("set_cache requires an arrayref")
- if ref $data ne 'ARRAY';
- my $result_class = $self->result_class;
- foreach( @$data ) {
- $self->throw_exception(
- "cannot cache object of type '$_', expected '$result_class'"
- ) if ref $_ ne $result_class;
- }
+ if defined($data) && (ref $data ne 'ARRAY');
$self->{all_cache} = $data;
}
=cut
sub clear_cache {
- shift->set_cache([]);
+ shift->set_cache(undef);
}
=head2 related_resultset
C<has_one> (or if you're using C<add_relationship>, any relationship declared
with an accessor type of 'single' or 'filter').
+=head2 page
+
+=over 4
+
+=item Value: $page
+
+=back
+
+Makes the resultset paged and specifies the page to retrieve. Effectively
+identical to creating a non-paged resultset and then calling ->page($page)
+on it.
+
+=head2 rows
+
+=over 4
+
+=item Value: $rows
+
+=back
+
+Specifies the maximum number of rows for direct retrieval or the number of
+rows per page if the page attribute or method is used.
+
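+For example, an illustrative sketch fetching the third page of ten rows at a
+time:
+
+  my $rs = $schema->resultset('CD')->search(
+    undef,
+    { rows => 10, page => 3 },   # rows 21 through 30
+  );
+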
+=head2 group_by
+
+=over 4
+
+=item Value: \@columns
+
+=back
+
+An arrayref of columns to group by. Can include columns of joined tables.
+
+ group_by => [qw/ column1 column2 ... /]
+
+=head2 having
+
+=over 4
+
+=item Value: $condition
+
+=back
+
+HAVING is a select statement attribute that is applied between GROUP BY and
+ORDER BY. It is applied after the grouping calculations have been done.
+
+ having => { 'count(employee)' => { '>=', 100 } }
+
+=head2 distinct
+
+=over 4
+
+=item Value: (0 | 1)
+
+=back
+
+Set to 1 to group by all columns.
+
+=head2 cache
+
+Set to 1 to cache search results. This prevents extra SQL queries if you
+revisit rows in your ResultSet:
+
+ my $resultset = $schema->resultset('Artist')->search( undef, { cache => 1 } );
+
+ while( my $artist = $resultset->next ) {
+ ... do stuff ...
+ }
+
+ $rs->first; # without cache, this would issue a query
+
+By default, searches are not cached.
+
+For more examples of using these attributes, see
+L<DBIx::Class::Manual::Cookbook>.
+
=head2 from
=over 4
clauses.
NOTE: Use this at your own risk. This allows you to shoot yourself in the foot!
+
C<join> will usually do what you need and it is strongly recommended that you
avoid using C<from> unless you cannot achieve the desired result using C<join>.
+And we really do mean "cannot", not just tried and failed. Attempting to use
+this because you're having problems with C<join> is like trying to use x86
+ASM because you've got a syntax error in your C. Trust us on this.
+
+Now, if you're still really, really sure you need to use this (and if you're
+not 100% sure, ask the mailing list first), here's an explanation of how this
+works.
-In simple terms, C<from> works as follows:
+The syntax is as follows:
+ [
+ { <alias1> => <table1> },
[
- { <alias> => <table>, -join_type => 'inner|left|right' }
- [] # nested JOIN (optional)
- { <table.column> => <foreign_table.foreign_key> }
- ]
+ { <alias2> => <table2>, -join_type => 'inner|left|right' },
+ [], # nested JOIN (optional)
+ { <table1.column1> => <table2.column2>, ... (more conditions) },
+ ],
+ # More of the above [ ] may follow for additional joins
+ ]
- JOIN
- <alias> <table>
- [JOIN ...]
- ON <table.column> = <foreign_table.foreign_key>
+ <table1> <alias1>
+ JOIN
+ <table2> <alias2>
+ [JOIN ...]
+ ON <table1.column1> = <table2.column2>
+ <more joins may follow>
An easy way to follow the examples below is to remember the following:
# SELECT child.* FROM person child
# INNER JOIN person father ON child.father_id = father.id
-=head2 page
-
-=over 4
-
-=item Value: $page
-
-=back
-
-Makes the resultset paged and specifies the page to retrieve. Effectively
-identical to creating a non-pages resultset and then calling ->page($page)
-on it.
-
-=head2 rows
-
-=over 4
-
-=item Value: $rows
-
-=back
-
-Specifes the maximum number of rows for direct retrieval or the number of
-rows per page if the page attribute or method is used.
-
-=head2 group_by
-
-=over 4
-
-=item Value: \@columns
-
-=back
-
-A arrayref of columns to group by. Can include columns of joined tables.
-
- group_by => [qw/ column1 column2 ... /]
-
-=head2 having
-
-=over 4
-
-=item Value: $condition
-
-=back
-
-HAVING is a select statement attribute that is applied between GROUP BY and
-ORDER BY. It is applied to the after the grouping calculations have been
-done.
-
- having => { 'count(employee)' => { '>=', 100 } }
-
-=head2 distinct
-
-=over 4
-
-=item Value: (0 | 1)
-
-=back
-
-Set to 1 to group by all columns.
-
-=head2 cache
-
-Set to 1 to cache search results. This prevents extra SQL queries if you
-revisit rows in your ResultSet:
-
- my $resultset = $schema->resultset('Artist')->search( undef, { cache => 1 } );
-
- while( my $artist = $resultset->next ) {
- ... do stuff ...
- }
-
- $rs->first; # without cache, this would issue a query
-
-By default, searches are not cached.
-
-For more examples of using these attributes, see
-L<DBIx::Class::Manual::Cookbook>.
-
=cut
1;
retrieval from this source. In the case of a database, the required FROM
clause contents.
-=cut
+=head2 schema
+
+Returns the L<DBIx::Class::Schema> object that this result source
+belongs to.
=head2 storage
use warnings;
use Carp::Clan qw/^DBIx::Class/;
+use Scalar::Util qw/weaken/;
use base qw/DBIx::Class/;
$reg{$moniker} = $source;
$self->source_registrations(\%reg);
$source->schema($self);
+ weaken($source->{schema}) if ref($self);
if ($source->result_class) {
my %map = %{$self->class_mappings};
$map{$source->result_class} = $moniker;
$self->storage->deploy($self, undef, $sqltargs);
}
+=head2 create_ddl_dir (EXPERIMENTAL)
+
+=over 4
+
+=item Arguments: \@databases, $version, $directory, $sqlt_args
+
+=back
+
+Creates an SQL file based on the Schema, for each of the specified
+database types, in the given directory.
+
+Note that this feature is currently EXPERIMENTAL and may not work correctly
+across all databases, or fully handle complex relationships.
+
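+For example, a hypothetical invocation:
+
+  $schema->create_ddl_dir(['MySQL', 'SQLite', 'PostgreSQL'], '0.1', './sql/');
+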
+=cut
+
+sub create_ddl_dir
+{
+ my $self = shift;
+
+ $self->throw_exception("Can't create_ddl_dir without storage") unless $self->storage;
+ $self->storage->create_ddl_dir($self, @_);
+}
+
+sub ddl_filename
+{
+ my ($self, $type, $dir, $version) = @_;
+
+ my $filename = ref($self);
+ $filename =~ s/^.*:://;
+ $filename = "$dir$filename-$version-$type.sql";
+
+ return $filename;
+}
+
1;
=head1 AUTHORS
package DBIx::Class::Storage::DBI;
+# -*- mode: cperl; cperl-indent-level: 2 -*-
use base 'DBIx::Class::Storage';
use DBI;
use SQL::Abstract::Limit;
use DBIx::Class::Storage::DBI::Cursor;
+use DBIx::Class::Storage::Statistics;
use IO::File;
use Carp::Clan qw/DBIx::Class/;
-
BEGIN {
package DBIC::SQL::Abstract; # Would merge upstream, but nate doesn't reply :(
my ($self, $table, $fields, $where, $order, @rest) = @_;
$table = $self->_quote($table) unless ref($table);
@rest = (-1) unless defined $rest[0];
+ die "LIMIT 0 Does Not Compute" if $rest[0] == 0;
+ # and anyway, SQL::Abstract::Limit will cause a barf if we don't first
local $self->{having_bind} = [];
my ($sql, @ret) = $self->SUPER::select(
$table, $self->_recurse_fields($fields), $where, $order, @rest
return $self->{name_sep};
}
-
-
-
-package DBIx::Class::Storage::DBI::DebugCallback;
-
-sub print {
- my ($self, $string) = @_;
- $string =~ m/^(\w+)/;
- ${$self}->($1, $string);
-}
-
} # End of BEGIN block
use base qw/DBIx::Class/;
__PACKAGE__->load_components(qw/AccessorGroup/);
__PACKAGE__->mk_group_accessors('simple' =>
- qw/_connect_info _dbh _sql_maker _conn_pid _conn_tid debug debugfh
+ qw/_connect_info _dbh _sql_maker _conn_pid _conn_tid debug debugobj
cursor on_connect_do transaction_depth/);
sub new {
my $new = bless({}, ref $_[0] || $_[0]);
$new->cursor("DBIx::Class::Storage::DBI::Cursor");
$new->transaction_depth(0);
+
+ $new->debugobj(new DBIx::Class::Storage::Statistics());
+
+ my $fh;
if (defined($ENV{DBIX_CLASS_STORAGE_DBI_DEBUG}) &&
($ENV{DBIX_CLASS_STORAGE_DBI_DEBUG} =~ /=(.+)$/)) {
- $new->debugfh(IO::File->new($1, 'w'))
+ $fh = IO::File->new($1, 'w')
or $new->throw_exception("Cannot open trace file $1");
} else {
- $new->debugfh(IO::File->new('>&STDERR'));
+ $fh = IO::File->new('>&STDERR');
}
+ $new->debugobj->debugfh($fh);
$new->debug(1) if $ENV{DBIX_CLASS_STORAGE_DBI_DEBUG};
return $new;
}
=head2 debug
-Causes SQL trace information to be emitted on C<debugfh> filehandle
-(or C<STDERR> if C<debugfh> has not specifically been set).
+Causes SQL trace information to be emitted on the C<debugobj> object
+(or C<STDERR> if C<debugobj> has not specifically been set).
=head2 debugfh
-Sets or retrieves the filehandle used for trace/debug output. This
-should be an IO::Handle compatible object (only the C<print> method is
-used). Initially set to be STDERR - although see information on the
+Set or retrieve the filehandle used for trace/debug output. This should be
+an IO::Handle compatible object (only the C<print> method is used). Initially
+set to be STDERR - although see information on the
L<DBIX_CLASS_STORAGE_DBI_DEBUG> environment variable.
+=head2 debugobj
+
+Sets or retrieves the object used for metric collection. Defaults to an instance
+of L<DBIx::Class::Storage::Statistics> that is compatible with the original
+method of using a coderef as a callback. See the aforementioned Statistics
+class for more information.
+
=head2 debugcb
Sets a callback to be executed each time a statement is run; takes a sub
-reference. Overrides debugfh. Callback is executed as $sub->($op, $info)
-where $op is SELECT/INSERT/UPDATE/DELETE and $info is what would normally
-be printed.
+reference. Callback is executed as $sub->($op, $info) where $op is
+SELECT/INSERT/UPDATE/DELETE and $info is what would normally be printed.
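+
+For example, a minimal sketch:
+
+  $schema->storage->debugcb(sub {
+    my ($op, $info) = @_;
+    warn "$op: $info";
+  });
+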
-=cut
+See L<debugobj> for a better way.
+=cut
sub debugcb {
- my ($self, $cb) = @_;
- my $cb_obj = bless(\$cb, 'DBIx::Class::Storage::DBI::DebugCallback');
- $self->debugfh($cb_obj);
+ my $self = shift();
+
+ if($self->debugobj()->can('callback')) {
+ $self->debugobj()->callback(shift());
+ }
}
sub disconnect {
return $self->_dbh;
}
+sub _sql_maker_args {
+ my ($self) = @_;
+
+ return ( limit_dialect => $self->dbh );
+}
+
sub sql_maker {
my ($self) = @_;
unless ($self->_sql_maker) {
- $self->_sql_maker(new DBIC::SQL::Abstract( limit_dialect => $self->dbh ));
+ $self->_sql_maker(new DBIC::SQL::Abstract( $self->_sql_maker_args ));
}
return $self->_sql_maker;
}
eval "require DBIx::Class::Storage::DBI::${driver}";
unless ($@) {
bless $self, "DBIx::Class::Storage::DBI::${driver}";
+ $self->_rebless() if $self->can('_rebless');
}
# if on-connect sql statements are given execute them
foreach my $sql_statement (@{$self->on_connect_do || []}) {
+ $self->debugobj->query_start($sql_statement) if $self->debug();
$self->_dbh->do($sql_statement);
+ $self->debugobj->query_end($sql_statement) if $self->debug();
}
$self->_conn_pid($$);
if ($self->{transaction_depth}++ == 0) {
my $dbh = $self->dbh;
if ($dbh->{AutoCommit}) {
- $self->debugfh->print("BEGIN WORK\n")
+ $self->debugobj->txn_begin()
if ($self->debug);
$dbh->begin_work;
}
if ($self->{transaction_depth} == 0) {
my $dbh = $self->dbh;
unless ($dbh->{AutoCommit}) {
- $self->debugfh->print("COMMIT\n")
+ $self->debugobj->txn_commit()
if ($self->debug);
$dbh->commit;
}
}
else {
if (--$self->{transaction_depth} == 0) {
- $self->debugfh->print("COMMIT\n")
+ $self->debugobj->txn_commit()
if ($self->debug);
$self->dbh->commit;
}
if ($self->{transaction_depth} == 0) {
my $dbh = $self->dbh;
unless ($dbh->{AutoCommit}) {
- $self->debugfh->print("ROLLBACK\n")
+ $self->debugobj->txn_rollback()
if ($self->debug);
$dbh->rollback;
}
}
else {
if (--$self->{transaction_depth} == 0) {
- $self->debugfh->print("ROLLBACK\n")
+ $self->debugobj->txn_rollback()
if ($self->debug);
$self->dbh->rollback;
}
my ($sql, @bind) = $self->sql_maker->$op($ident, @args);
unshift(@bind, @$extra_bind) if $extra_bind;
if ($self->debug) {
- my @debug_bind = map { defined $_ ? qq{`$_'} : q{`NULL'} } @bind;
- $self->debugfh->print("$sql: " . join(', ', @debug_bind) . "\n");
+ my @debug_bind = map { defined $_ ? qq{'$_'} : q{'NULL'} } @bind;
+ $self->debugobj->query_start($sql, @debug_bind);
}
my $sth = eval { $self->sth($sql,$op) };
@bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args
my $rv;
if ($sth) {
+ my $time = time();
$rv = eval { $sth->execute(@bind) };
if ($@ || !$rv) {
} else {
$self->throw_exception("'$sql' did not generate a statement.");
}
+ if ($self->debug) {
+    my @debug_bind = map { defined $_ ? qq{'$_'} : q{'NULL'} } @bind;
+ $self->debugobj->query_end($sql, @debug_bind);
+ }
return (wantarray ? ($rv, $sth, @bind) : $rv);
}
$self->sql_maker->_default_limit_syntax eq "GenericSubQ") {
$attrs->{software_limit} = 1;
} else {
+ $self->throw_exception("rows attribute must be positive if present")
+ if (defined($attrs->{rows}) && !($attrs->{rows} > 0));
push @args, $attrs->{rows}, $attrs->{offset};
}
return $self->_execute(@args);
$dbh->{RaiseError} = 1;
$dbh->{PrintError} = 0;
eval {
- my $sth = $dbh->column_info( undef, undef, $table, '%' );
+ my ($schema,$tab) = $table =~ /^(.+?)\.(.+)$/ ? ($1,$2) : (undef,$table);
+ my $sth = $dbh->column_info( undef,$schema, $tab, '%' );
$sth->execute();
while ( my $info = $sth->fetchrow_hashref() ){
my %column_info;
sub sqlt_type { shift->dbh->{Driver}->{Name} }
+sub create_ddl_dir
+{
+ my ($self, $schema, $databases, $version, $dir, $sqltargs) = @_;
+
+ if(!$dir || !-d $dir)
+ {
+ warn "No directory given, using ./\n";
+ $dir = "./";
+ }
+ $databases ||= ['MySQL', 'SQLite', 'PostgreSQL'];
+ $databases = [ $databases ] if(ref($databases) ne 'ARRAY');
+ $version ||= $schema->VERSION || '1.x';
+
+ eval "use SQL::Translator";
+ $self->throw_exception("Can't deploy without SQL::Translator: $@") if $@;
+
+ my $sqlt = SQL::Translator->new({
+# debug => 1,
+ add_drop_table => 1,
+ });
+ foreach my $db (@$databases)
+ {
+ $sqlt->reset();
+ $sqlt->parser('SQL::Translator::Parser::DBIx::Class');
+# $sqlt->parser_args({'DBIx::Class' => $schema});
+ $sqlt->data($schema);
+ $sqlt->producer($db);
+
+ my $file;
+ my $filename = $schema->ddl_filename($db, $dir, $version);
+ if(-e $filename)
+ {
+ $self->throw_exception("$filename already exists, skipping $db");
+ next;
+ }
+ open($file, ">$filename")
+ or $self->throw_exception("Can't open $filename for writing ($!)");
+ my $output = $sqlt->translate;
+ if(!$output)
+ {
+ $self->throw_exception("Failed to translate to $db. (" . $sqlt->error . ")");
+ next;
+ }
+ print $file $output;
+ close($file);
+ }
+
+}
+
sub deployment_statements {
- my ($self, $schema, $type, $sqltargs) = @_;
+ my ($self, $schema, $type, $version, $dir, $sqltargs) = @_;
$type ||= $self->sqlt_type;
+ $version ||= $schema->VERSION || '1.x';
+ $dir ||= './';
eval "use SQL::Translator";
- $self->throw_exception("Can't deploy without SQL::Translator: $@") if $@;
- eval "use SQL::Translator::Parser::DBIx::Class;";
- $self->throw_exception($@) if $@;
- eval "use SQL::Translator::Producer::${type};";
- $self->throw_exception($@) if $@;
- my $tr = SQL::Translator->new(%$sqltargs);
- SQL::Translator::Parser::DBIx::Class::parse( $tr, $schema );
- return "SQL::Translator::Producer::${type}"->can('produce')->($tr);
+ if(!$@)
+ {
+ eval "use SQL::Translator::Parser::DBIx::Class;";
+ $self->throw_exception($@) if $@;
+ eval "use SQL::Translator::Producer::${type};";
+ $self->throw_exception($@) if $@;
+ my $tr = SQL::Translator->new(%$sqltargs);
+ SQL::Translator::Parser::DBIx::Class::parse( $tr, $schema );
+ return "SQL::Translator::Producer::${type}"->can('produce')->($tr);
+ }
+
+ my $filename = $schema->ddl_filename($type, $dir, $version);
+ if(!-f $filename)
+ {
+# $schema->create_ddl_dir([ $type ], $version, $dir, $sqltargs);
+ $self->throw_exception("No SQL::Translator, and no Schema file found, aborting deploy");
+ return;
+ }
+ my $file;
+ open($file, "<$filename")
+ or $self->throw_exception("Can't open $filename ($!)");
+ my @rows = <$file>;
+ close($file);
+
+ return join('', @rows);
+
}
sub deploy {
my ($self, $schema, $type, $sqltargs) = @_;
- foreach my $statement ( $self->deployment_statements($schema, $type, $sqltargs) ) {
+ foreach my $statement ( $self->deployment_statements($schema, $type, undef, undef, $sqltargs) ) {
for ( split(";\n", $statement)) {
- $self->debugfh->print("$_\n") if $self->debug;
+ next if($_ =~ /^--/);
+ next if(!$_);
+# next if($_ =~ /^DROP/m);
+ next if($_ =~ /^BEGIN TRANSACTION/m);
+ next if($_ =~ /^COMMIT/m);
+      $self->debugobj->query_start($_) if $self->debug;
$self->dbh->do($_) or warn "SQL was:\n $_";
+ $self->debugobj->query_end($_) if $self->debug;
}
}
}
If the value is of the form C<1=/path/name> then the trace output is
written to the file C</path/name>.
+This environment variable is checked when the storage object is first
+created (when you call connect on your schema). So, run-time changes
+to this environment variable will not take effect unless you also
+re-connect on your schema.
+
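+For example, from a shell (C<myapp.pl> being a hypothetical application
+script):
+
+  DBIX_CLASS_STORAGE_DBI_DEBUG="1=/tmp/dbic_trace.log" perl myapp.pl
+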
=head1 AUTHORS
Matt S. Trout <mst@shadowcatsystems.co.uk>
--- /dev/null
+package DBIx::Class::Storage::DBI::ODBC;
+use strict;
+use warnings;
+
+use base qw/DBIx::Class::Storage::DBI/;
+
+sub _rebless {
+ my ($self) = @_;
+
+ my $dbh = $self->_dbh;
+ my $dbtype = eval { $dbh->get_info(17) };
+ unless ( $@ ) {
+ # Translate the backend name into a perl identifier
+ $dbtype =~ s/\W/_/gi;
+ my $class = "DBIx::Class::Storage::DBI::ODBC::${dbtype}";
+ eval "require $class";
+ bless $self, $class unless $@;
+ }
+}
+
+
+1;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::ODBC - Base class for ODBC drivers
+
+=head1 SYNOPSIS
+
+ # In your table classes
+ __PACKAGE__->load_components(qw/Core/);
+
+
+=head1 DESCRIPTION
+
+This class simply provides a mechanism for discovering and loading a sub-class
+for a specific ODBC backend. It should be transparent to the user.
+
+
+=head1 AUTHORS
+
+Marc Mims C<< <marc@sssonline.com> >>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
--- /dev/null
+package DBIx::Class::Storage::DBI::ODBC::DB2_400_SQL;
+use strict;
+use warnings;
+
+use base qw/DBIx::Class::Storage::DBI::ODBC/;
+
+sub last_insert_id
+{
+ my ($self) = @_;
+
+ my $dbh = $self->_dbh;
+
+ # get the schema/table separator:
+ # '.' when SQL naming is active
+ # '/' when system naming is active
+ my $sep = $dbh->get_info(41);
+ my $sth = $dbh->prepare_cached(
+ "SELECT IDENTITY_VAL_LOCAL() FROM SYSIBM${sep}SYSDUMMY1", {}, 3);
+ $sth->execute();
+
+ my @res = $sth->fetchrow_array();
+
+ return @res ? $res[0] : undef;
+}
+
+sub _sql_maker_args {
+ my ($self) = @_;
+
+ return (
+ limit_dialect => 'FetchFirst',
+ name_sep => $self->_dbh->get_info(41)
+ );
+}
+
+1;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::ODBC::DB2_400_SQL - Support specific to DB2/400
+over ODBC
+
+=head1 SYNOPSIS
+
+ # In your table classes
+ __PACKAGE__->load_components(qw/PK::Auto Core/);
+ __PACKAGE__->set_primary_key('id');
+
+
+=head1 DESCRIPTION
+
+This class implements support specific to DB2/400 over ODBC, including
+auto-increment primary keys, SQL::Abstract::Limit dialect, and name separator
+for connections using either SQL naming or System naming.
+
+
+=head1 AUTHORS
+
+Marc Mims C<< <marc@sssonline.com> >>
+
+Based on DBIx::Class::Storage::DBI::DB2 by Jess Robinson.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
my ($schema,$table) = $source->name =~ /^(.+)\.(.+)$/ ? ($1,$2)
: (undef,$source->name);
while (my $col = shift @pri) {
- my $info = $dbh->column_info(undef,$schema,$table,$col)->fetchrow_arrayref;
- if (defined $info->[12] and $info->[12] =~
+ my $info = $dbh->column_info(undef,$schema,$table,$col)->fetchrow_hashref;
+ if (defined $info->{COLUMN_DEF} and $info->{COLUMN_DEF} =~
/^nextval\(+'([^']+)'::(?:text|regclass)\)/)
{
- return $1; # may need to strip quotes -- see if this works
+ my $seq = $1;
+ return $seq =~ /\./ ? $seq : $info->{TABLE_SCHEM} . "." . $seq; # may need to strip quotes -- see if this works
}
}
}
--- /dev/null
+package DBIx::Class::Storage::Statistics;
+use strict;
+
+use base qw/DBIx::Class::AccessorGroup Class::Data::Accessor/;
+__PACKAGE__->mk_group_accessors(simple => qw/callback debugfh/);
+
+=head1 NAME
+
+DBIx::Class::Storage::Statistics - SQL Statistics
+
+=head1 SYNOPSIS
+
+=head1 DESCRIPTION
+
+This class is called by DBIx::Class::Storage::DBI as a means of collecting
+statistics on its actions. Using this class alone merely prints the SQL
+executed, the fact that it completes, and begin/end notifications for
+transactions.
+
+To really use this class you should subclass it and create your own method
+for collecting the statistics as discussed in L<DBIx::Class::Manual::Cookbook>.
+
+=head1 METHODS
+
+=cut
+
+=head2 new
+
+Returns a new L<DBIx::Class::Storage::Statistics> object.
+
+=cut
+sub new {
+ my $self = bless({}, ref($_[0]) || $_[0]);
+
+ return $self;
+}
+
+=head2 debugfh
+
+Sets or retrieves the filehandle used for trace/debug output. This should
+be an IO::Handle compatible object (only the C<print> method is used). Initially
+set to STDERR - although see information on the
+L<DBIX_CLASS_STORAGE_DBI_DEBUG> environment variable.
+
+=head2 txn_begin
+
+Called when a transaction begins.
+
+=cut
+sub txn_begin {
+ my $self = shift();
+}
+
+=head2 txn_rollback
+
+Called when a transaction is rolled back.
+
+=cut
+sub txn_rollback {
+ my $self = shift();
+}
+
+=head2 txn_commit
+
+Called when a transaction is committed.
+
+=cut
+sub txn_commit {
+ my $self = shift();
+}
+
+=head2 query_start
+
+Called before a query is executed. The first argument is the SQL string being
+executed and subsequent arguments are the parameters used for the query.
+
+=cut
+sub query_start {
+ my $self = shift();
+ my $string = shift();
+
+ if(defined($self->callback())) {
+ $string =~ m/^(\w+)/;
+ $self->callback()->($1, $string);
+ return;
+ }
+
+ $self->debugfh->print("$string: " . join(', ', @_) . "\n");
+}
+
+=head2 query_end
+
+Called when a query finishes executing. Has the same arguments as query_start.
+
+=cut
+sub query_end {
+ my $self = shift();
+ my $string = shift();
+}
+
+1;
+
+=head1 AUTHORS
+
+Cory G. Watson <gphat@cpan.org>
+
+=head1 LICENSE
+
+You may distribute this code under the same license as Perl itself.
+
+=cut
use warnings;
use lib qw(lib t/lib);
-use DBICTest;
-use DBICTest::Schema::HelperRels;
+use DBICTest::Schema;
-my $schema = DBICTest->initialise;
+my $schema = DBICTest::Schema->connect;
-print $schema->storage->deployment_statements($schema);
+print $schema->storage->deployment_statements($schema, 'SQLite');
EOF
close $fh;
}
-}
\ No newline at end of file
+}
--- /dev/null
+#!/usr/bin/perl
+use strict;
+use warnings;
+
+use Getopt::Long;
+use Pod::Usage;
+use JSON qw( jsonToObj );
+
+$JSON::BareKey = 1;
+$JSON::QuotApos = 1;
+
+GetOptions(
+ 'schema=s' => \my $schema_class,
+ 'class=s' => \my $resultset_class,
+ 'connect=s' => \my $connect,
+ 'op=s' => \my $op,
+ 'set=s' => \my $set,
+ 'where=s' => \my $where,
+ 'attrs=s' => \my $attrs,
+ 'format=s' => \my $format,
+ 'force' => \my $force,
+ 'trace' => \my $trace,
+ 'quiet' => \my $quiet,
+ 'help' => \my $help,
+ 'tlibs' => \my $t_libs,
+);
+
+if ($t_libs) {
+ unshift( @INC, 't/lib', 'lib' );
+}
+
+pod2usage(1) if ($help);
+$ENV{DBIX_CLASS_STORAGE_DBI_DEBUG} = 1 if ($trace);
+
+die('No op specified') if(!$op);
+die('Invalid op') if ($op!~/^(?:insert|update|delete|select)$/s);
+my $csv_class;
+if ($op eq 'select') {
+ $format ||= 'tsv';
+  die('Invalid format') if ($format!~/^(?:tsv|csv)$/s);
+ $csv_class = 'Text::CSV_XS';
+ eval{ require Text::CSV_XS };
+ if ($@) {
+ $csv_class = 'Text::CSV_PP';
+ eval{ require Text::CSV_PP };
+ die('The select op requires either the Text::CSV_XS or the Text::CSV_PP module') if ($@);
+ }
+}
+
+die('No schema specified') if(!$schema_class);
+eval("require $schema_class");
+die('Unable to load schema') if ($@);
+$connect = jsonToObj( $connect ) if ($connect);
+my $schema = $schema_class->connect(
+ ( $connect ? @$connect : () )
+);
+
+die('No class specified') if(!$resultset_class);
+my $resultset = eval{ $schema->resultset($resultset_class) };
+die('Unable to load the class with the schema') if ($@);
+
+$set = jsonToObj( $set ) if ($set);
+$where = jsonToObj( $where ) if ($where);
+$attrs = jsonToObj( $attrs ) if ($attrs);
+
+if ($op eq 'insert') {
+ die('Do not use the where option with the insert op') if ($where);
+ die('Do not use the attrs option with the insert op') if ($attrs);
+ my $obj = $resultset->create( $set );
+ print ''.ref($resultset).' ID: '.join(',',$obj->id())."\n";
+}
+elsif ($op eq 'update') {
+ $resultset = $resultset->search( ($where||{}) );
+ my $count = $resultset->count();
+ print "This action will modify $count ".ref($resultset)." records.\n" if (!$quiet);
+ if ( $force || confirm() ) {
+ $resultset->update_all( $set );
+ }
+}
+elsif ($op eq 'delete') {
+ die('Do not use the set option with the delete op') if ($set);
+ $resultset = $resultset->search( ($where||{}), ($attrs||()) );
+ my $count = $resultset->count();
+ print "This action will delete $count ".ref($resultset)." records.\n" if (!$quiet);
+ if ( $force || confirm() ) {
+ $resultset->delete_all();
+ }
+}
+elsif ($op eq 'select') {
+ die('Do not use the set option with the select op') if ($set);
+ my $csv = $csv_class->new({
+ sep_char => ( $format eq 'tsv' ? "\t" : ',' ),
+ });
+ $resultset = $resultset->search( ($where||{}), ($attrs||()) );
+ my @columns = $resultset->result_source->columns();
+ $csv->combine( @columns );
+ print $csv->string()."\n";
+ while (my $row = $resultset->next()) {
+ my @fields;
+ foreach my $column (@columns) {
+ push( @fields, $row->get_column($column) );
+ }
+ $csv->combine( @fields );
+ print $csv->string()."\n";
+ }
+}
+
+sub confirm {
+ print "Are you sure you want to do this? (type YES to confirm) ";
+ my $response = <STDIN>;
+ return 1 if ($response=~/^YES/);
+ return;
+}
+
+__END__
+
+=head1 NAME
+
+dbicadmin - Execute operations upon DBIx::Class objects.
+
+=head1 SYNOPSIS
+
+ dbicadmin --op=insert --schema=My::Schema --class=Class --set=JSON
+ dbicadmin --op=update --schema=My::Schema --class=Class --set=JSON --where=JSON
+ dbicadmin --op=delete --schema=My::Schema --class=Class --where=JSON
+ dbicadmin --op=select --schema=My::Schema --class=Class --where=JSON --format=tsv
+
+=head1 DESCRIPTION
+
+This utility provides the ability to run INSERTs, UPDATEs,
+DELETEs, and SELECTs on any DBIx::Class object.
+
+=head1 OPTIONS
+
+=head2 op
+
+The type of operation. Valid values are insert, update, delete,
+and select.
+
+=head2 schema
+
+The name of your schema class.
+
+=head2 class
+
+The name of the class, within your schema, that you want to run
+the operation on.
+
+=head2 connect
+
+A JSON array to be passed to your schema class upon connecting.
+The array will need to be compatible with whatever the DBIC
+->connect() method requires.
+
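+For example:
+
+  --connect='["dbi:SQLite:dbname=my.db", "", ""]'
+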
+=head2 set
+
+This option must be a valid JSON data string and is passed in to
+the DBIC update() method. Use this option with the update
+and insert ops.
+
+=head2 where
+
+This option must be a valid JSON data string and is passed in as
+the first argument to the DBIC search() method. Use this
+option with the update, delete, and select ops.
+
+=head2 attrs
+
+This option must be a valid JSON data string and is passed in as
+the second argument to the DBIC search() method. Use this
+option with the update, delete, and select ops.
+
+=head2 help
+
+Display this help page.
+
+=head2 force
+
+Suppresses the confirmation dialogues that are usually displayed
+when someone runs a DELETE or UPDATE action.
+
+=head2 quiet
+
+Do not display status messages.
+
+=head2 trace
+
+Turns on tracing on the DBI storage, thus printing SQL as it is
+executed.
+
+=head2 tlibs
+
+This option is purely for testing during the DBIC installation. Do
+not use it.
+
+=head1 JSON
+
+JSON is a lightweight data-interchange format. It allows you
+to express complex data structures for use in the where and
+set options.
+
+This module turns on L<JSON>'s BareKey and QuotApos options so
+that your data can look a bit more readable.
+
+ --where={"this":"that"} # generic JSON
+  --where={this:'that'} # with BareKey and QuotApos
+
+Consider wrapping your JSON in outer quotes so that you don't
+have to escape your inner quotes.
+
+ --where={this:\"that\"} # no outer quote
+ --where='{this:"that"}' # outer quoted
+
+=head1 AUTHOR
+
+Aran Deltac <bluefeet@cpan.org>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
--- /dev/null
+#!/usr/bin/perl
+
+use strict;
+use warnings;
+use Test::More;
+
+BEGIN {
+ eval "use DBD::SQLite";
+ plan $@
+ ? ( skip_all => 'needs DBD::SQLite for testing' )
+ : ( tests => 13 );
+}
+
+use lib qw(t/lib);
+
+use_ok('DBICTest');
+use_ok('DBICTest::HelperRels');
+
+my $cbworks = 0;
+
+DBICTest->schema->storage->debugcb(sub { $cbworks = 1; });
+DBICTest->schema->storage->debug(0);
+my $rs = DBICTest::CD->search({});
+$rs->count();
+ok(!$cbworks, 'Callback not called with debug disabled');
+
+DBICTest->schema->storage->debug(1);
+
+$rs->count();
+ok($cbworks, 'Debug callback worked.');
+
+my $prof = new DBIx::Test::Profiler();
+DBICTest->schema->storage->debugobj($prof);
+
+# Test non-transaction calls.
+$rs->count();
+ok($prof->{'query_start'}, 'query_start called');
+ok($prof->{'query_end'}, 'query_end called');
+ok(!$prof->{'txn_begin'}, 'txn_begin not called');
+ok(!$prof->{'txn_commit'}, 'txn_commit not called');
+
+$prof->reset();
+
+# Test transaction calls
+DBICTest->schema->txn_begin();
+ok($prof->{'txn_begin'}, 'txn_begin called');
+
+$rs = DBICTest::CD->search({});
+$rs->count();
+ok($prof->{'query_start'}, 'query_start called');
+ok($prof->{'query_end'}, 'query_end called');
+
+DBICTest->schema->txn_commit();
+ok($prof->{'txn_commit'}, 'txn_commit called');
+
+$prof->reset();
+
+# Test a rollback
+DBICTest->schema->txn_begin();
+$rs = DBICTest::CD->search({});
+$rs->count();
+DBICTest->schema->txn_rollback();
+ok($prof->{'txn_rollback'}, 'txn_rollback called');
+
+DBICTest->schema->storage->debug(0);
+
+package DBIx::Test::Profiler;
+use strict;
+
+sub new {
+  my $self = bless({}, shift);
+}
+
+sub query_start {
+ my $self = shift();
+ $self->{'query_start'} = 1;
+}
+
+sub query_end {
+ my $self = shift();
+ $self->{'query_end'} = 1;
+}
+
+sub txn_begin {
+ my $self = shift();
+ $self->{'txn_begin'} = 1;
+}
+
+sub txn_rollback {
+ my $self = shift();
+ $self->{'txn_rollback'} = 1;
+}
+
+sub txn_commit {
+ my $self = shift();
+ $self->{'txn_commit'} = 1;
+}
+
+sub reset {
+ my $self = shift();
+
+ $self->{'query_start'} = 0;
+ $self->{'query_end'} = 0;
+ $self->{'txn_begin'} = 0;
+ $self->{'txn_rollback'} = 0;
+    $self->{'txn_commit'} = 0;
+}
+
+1;
--- /dev/null
+use strict;
+use warnings;
+use Test::More;
+
+use lib qw(t/lib);
+
+BEGIN {
+ eval { require Test::Memory::Cycle };
+ if ($@) {
+ plan skip_all => "leak test needs Test::Memory::Cycle";
+ } else {
+ plan tests => 1;
+ }
+}
+
+use DBICTest;
+use DBICTest::Schema;
+
+import Test::Memory::Cycle;
+
+my $s = DBICTest::Schema->clone;
+
+memory_cycle_ok($s, 'No cycles in schema');
--- /dev/null
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBICTest::BasicRels;
+
+require "t/run/146db2_400.tl";
+run_tests(DBICTest->schema);
--- /dev/null
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBICTest::BasicRels;
+
+require "t/run/26might_have.tl";
+run_tests(DBICTest->schema);
--- /dev/null
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBICTest::HelperRels;
+
+require "t/run/146db2_400.tl";
+run_tests(DBICTest->schema);
--- /dev/null
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBICTest::HelperRels;
+
+require "t/run/26might_have.tl";
+run_tests(DBICTest->schema);
eval "use SQL::Translator";
plan skip_all => 'SQL::Translator required' if $@;
+# do not taunt happy dave ball
+
my $schema = DBICTest::Schema;
plan tests => 33;
'selftable' => 'tags', 'foreigntable' => 'cd',
'selfcols' => ['cd'], 'foreigncols' => ['cdid'],
'needed' => 1, on_delete => 'CASCADE', on_update => 'CASCADE'},
+ {'display' => 'bookmark -> link',
+ 'selftable' => 'bookmark', 'foreigntable' => 'link',
+ 'selfcols' => ['link'], 'foreigncols' => ['id'],
+ 'needed' => 1, on_delete => '', on_update => ''},
);
my @unique_constraints = (
{'display' => 'twokeytreelike name unique',
'table' => 'twokeytreelike', 'cols' => ['name'],
'needed' => 1},
- {'display' => 'employee position and group_id unique',
- 'table' => 'employee', cols => ['position', 'group_id'],
- 'needed' => 1},
+# {'display' => 'employee position and group_id unique',
+# 'table' => 'employee', cols => ['position', 'group_id'],
+# 'needed' => 1},
);
my $tschema = $translator->schema();
--- /dev/null
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBICTest::HelperRels;
+
+require "t/run/29dbicadmin.tl";
+run_tests(DBICTest->schema);
unlink($db_file . "-journal") if -e $db_file . "-journal";
mkdir("t/var") unless -d "t/var";
- my $dsn = "dbi:SQLite:${db_file}";
+ my $dsn = $ENV{"DBICTEST_DSN"} || "dbi:SQLite:${db_file}";
+ my $dbuser = $ENV{"DBICTEST_DBUSER"} || '';
+ my $dbpass = $ENV{"DBICTEST_DBPASS"} || '';
+
+# my $dsn = "dbi:SQLite:${db_file}";
- return DBICTest::Schema->compose_connection('DBICTest' => $dsn);
+ return DBICTest::Schema->compose_connection('DBICTest' => $dsn, $dbuser, $dbpass);
}
1;
Artist
Employee
CD
+ Link
+ Bookmark
#dummy
Track
Tag
--- /dev/null
+package # hide from PAUSE
+ DBICTest::Schema::Bookmark;
+
+use strict;
+use warnings;
+
+use base 'DBIx::Class::Core';
+
+__PACKAGE__->load_components(qw/PK::Auto Core/);
+__PACKAGE__->table('bookmark');
+__PACKAGE__->add_columns(
+ 'id' => {
+ data_type => 'integer',
+ is_auto_increment => 1
+ },
+ 'link' => {
+ data_type => 'integer',
+ },
+);
+
+__PACKAGE__->set_primary_key('id');
+__PACKAGE__->belongs_to(link => 'DBICTest::Schema::Link' );
+
+1;
__PACKAGE__->set_primary_key('employee_id');
__PACKAGE__->position_column('position');
-__PACKAGE__->add_unique_constraint(position_group => [ qw/position group_id/ ]);
+#__PACKAGE__->add_unique_constraint(position_group => [ qw/position group_id/ ]);
__PACKAGE__->mk_classdata('field_name_for', {
employee_id => 'primary key',
--- /dev/null
+package # hide from PAUSE
+ DBICTest::Schema::Link;
+
+use base 'DBIx::Class::Core';
+
+use strict;
+use warnings;
+
+__PACKAGE__->load_components(qw/PK::Auto Core/);
+__PACKAGE__->table('link');
+__PACKAGE__->add_columns(
+ 'id' => {
+ data_type => 'integer',
+ is_auto_increment => 1
+ },
+ 'url' => {
+ data_type => 'varchar',
+ size => 100,
+ is_nullable => 1,
+ },
+ 'title' => {
+ data_type => 'varchar',
+ size => 100,
+ is_nullable => 1,
+ },
+);
+__PACKAGE__->set_primary_key('id');
+
+use overload '""' => sub { shift->url }, fallback=> 1;
+
+1;
my $schema = DBICTest->initialise;
-$schema->storage->on_connect_do([ "PRAGMA synchronous = OFF" ]);
+# $schema->storage->on_connect_do([ "PRAGMA synchronous = OFF" ]);
my $dbh = $schema->storage->dbh;
[ 1, '2006-04-25 22:24:33' ],
]);
+$schema->populate('Link', [
+ [ qw/id title/ ],
+ [ 1, 'aaa' ]
+]);
+
+$schema->populate('Bookmark', [
+ [ qw/id link/ ],
+ [ 1, 1 ]
+]);
+
1;
--
-- Created by SQL::Translator::Producer::SQLite
--- Created on Wed Apr 26 03:18:22 2006
+-- Created on Sun May 14 18:25:49 2006
--
BEGIN TRANSACTION;
);
--
+-- Table: bookmark
+--
+CREATE TABLE bookmark (
+ id INTEGER PRIMARY KEY NOT NULL,
+ link integer NOT NULL
+);
+
+--
-- Table: track
--
CREATE TABLE track (
);
--
+-- Table: tags
+--
+CREATE TABLE tags (
+ tagid INTEGER PRIMARY KEY NOT NULL,
+ cd integer NOT NULL,
+ tag varchar(100) NOT NULL
+);
+
+--
-- Table: treelike
--
CREATE TABLE treelike (
);
--
--- Table: tags
+-- Table: link
--
-CREATE TABLE tags (
- tagid INTEGER PRIMARY KEY NOT NULL,
- cd integer NOT NULL,
- tag varchar(100) NOT NULL
+CREATE TABLE link (
+ id INTEGER PRIMARY KEY NOT NULL,
+ url varchar(100),
+ title varchar(100)
);
--
name varchar(100) NOT NULL
);
-CREATE UNIQUE INDEX position_group_employee on employee (position, group_id);
CREATE UNIQUE INDEX tktlnameunique_twokeytreelike on twokeytreelike (name);
CREATE UNIQUE INDEX artist_title_cd on cd (artist, title);
COMMIT;
sub run_tests {
my $schema = shift;
-plan tests => 55;
+plan tests => 58;
# figure out if we've got a version of sqlite that is older than 3.2.6, in
# which case COUNT(DISTINCT()) doesn't work
is($new_again->ID, 'DBICTest::Artist|artist|artistid=4', 'unique object id generated correctly');
+# Test backwards compatibility
+{
+ my $artist_by_hash = $schema->resultset('Artist')->find(artistid => 4);
+ is($artist_by_hash->name, 'Man With A Spoon', 'Retrieved correctly');
+ is($artist_by_hash->ID, 'DBICTest::Artist|artist|artistid=4', 'unique object id generated correctly');
+}
+
is($schema->resultset("Artist")->count, 4, 'count ok');
# test find_or_new
my $search = [ { 'tags.tag' => 'Cheesy' }, { 'tags.tag' => 'Blue' } ];
-my $or_rs = $schema->resultset("CD")->search($search, { join => 'tags',
+my( $or_rs ) = $schema->resultset("CD")->search_rs($search, { join => 'tags',
order_by => 'cdid' });
cmp_ok($or_rs->count, '==', 5, 'Search with OR ok');
cmp_ok(@artsn, '==', 4, "Four artists returned");
}
+my $newbook = $schema->resultset( 'Bookmark' )->find(1);
+
+$@ = '';
+eval {
+my $newlink = $newbook->link;
+};
+ok(!$@, "stringify to false value doesn't cause error");
+
# test cascade_delete through many_to_many relations
{
my $art_del = $schema->resultset("Artist")->find({ artistid => 1 });
use strict;
use warnings;
-plan tests => 30;
+plan tests => 32;
# has_a test
my $cd = $schema->resultset("CD")->find(4);
is( ($artist->search_related('cds'))[3]->title, 'Big Flop', 'create_related ok' );
+my( $rs_from_list ) = $artist->search_related_rs('cds');
+is( ref($rs_from_list), 'DBIx::Class::ResultSet', 'search_related_rs in list context returns rs' );
+
+( $rs_from_list ) = $artist->cds_rs();
+is( ref($rs_from_list), 'DBIx::Class::ResultSet', 'relation_rs in list context returns rs' );
+
# count_related
is( $artist->count_related('cds'), 4, 'count_related ok' );
sub run_tests {
my $schema = shift;
-
my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_PG_${_}" } qw/DSN USER PASS/};
#warn "$dsn $user $pass";
DBICTest::Schema->compose_connection('PgTest' => $dsn, $user, $pass);
my $dbh = PgTest->schema->storage->dbh;
-
-$dbh->do("CREATE TABLE artist (artistid serial PRIMARY KEY, name VARCHAR(255), charfield CHAR(10));");
+PgTest->schema->source("Artist")->name("testschema.artist");
+$dbh->do("CREATE SCHEMA testschema;");
+$dbh->do("CREATE TABLE testschema.artist (artistid serial PRIMARY KEY, name VARCHAR(255), charfield CHAR(10));");
PgTest::Artist->load_components('PK::Auto');
};
-my $type_info = PgTest->schema->storage->columns_info_for('artist');
+my $type_info = PgTest->schema->storage->columns_info_for('testschema.artist');
my $artistid_defval = delete $type_info->{artistid}->{default_value};
like($artistid_defval,
- qr/^nextval\('public\.artist_artistid_seq'::(?:text|regclass)\)/,
+ qr/^nextval\('([^\.]*\.){0,1}artist_artistid_seq'::(?:text|regclass)\)/,
'columns_info_for - sequence matches Pg get_autoinc_seq expectations');
is_deeply($type_info, $test_type_info,
'columns_info_for - column data types');
-$dbh->do("DROP TABLE artist;");
+$dbh->do("DROP TABLE testschema.artist;");
+$dbh->do("DROP SCHEMA testschema;");
}
--- /dev/null
+sub run_tests {
+my $schema = shift;
+
+my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_DB2_400_${_}" } qw/DSN USER PASS/};
+
+#warn "$dsn $user $pass";
+
+# Probably best to pass the DBQ option in the DSN to specify a specific
+# library. Something like:
+# DBICTEST_DB2_400_DSN='dbi:ODBC:dsn=MyAS400;DBQ=MYLIB'
+plan skip_all, 'Set $ENV{DBICTEST_DB2_400_DSN}, _USER and _PASS to run this test'
+ unless ($dsn && $user);
+
+plan tests => 6;
+
+DBICTest::Schema->compose_connection('DB2Test' => $dsn, $user, $pass);
+
+my $dbh = DB2Test->schema->storage->dbh;
+
+$dbh->do("DROP TABLE artist", { RaiseError => 0, PrintError => 0 });
+
+$dbh->do("CREATE TABLE artist (artistid INTEGER GENERATED BY DEFAULT AS IDENTITY (START WITH 1, INCREMENT BY 1), name VARCHAR(255), charfield CHAR(10))");
+
+DB2Test::Artist->load_components('PK::Auto');
+
+# test primary key handling
+my $new = DB2Test::Artist->create({ name => 'foo' });
+ok($new->artistid, "Auto-PK worked");
+
+# test LIMIT support
+for (1..6) {
+ DB2Test::Artist->create({ name => 'Artist ' . $_ });
+}
+my $it = DB2Test::Artist->search( {},
+ { rows => 3,
+ order_by => 'artistid'
+ }
+);
+is( $it->count, 3, "LIMIT count ok" );
+is( $it->next->name, "foo", "iterator->next ok" );
+$it->next;
+is( $it->next->name, "Artist 2", "iterator->next ok" );
+is( $it->next, undef, "next past end of resultset ok" );
+
+my $test_type_info = {
+ 'artistid' => {
+ 'data_type' => 'INTEGER',
+ 'is_nullable' => 0,
+ 'size' => 10
+ },
+ 'name' => {
+ 'data_type' => 'VARCHAR',
+ 'is_nullable' => 1,
+ 'size' => 255
+ },
+ 'charfield' => {
+ 'data_type' => 'CHAR',
+ 'is_nullable' => 1,
+ 'size' => 10
+ },
+};
+
+
+my $type_info = DB2Test->schema->storage->columns_info_for('artist');
+is_deeply($type_info, $test_type_info, 'columns_info_for - column data types');
+
+
+
+# clean up our mess
+$dbh->do("DROP TABLE artist");
+
+}
+
+1;
eval "use DBD::SQLite";
plan $@
? ( skip_all => 'needs DBD::SQLite for testing' )
- : ( tests => 42 );
+ : ( tests => 44 );
}
# figure out if we've got a version of sqlite that is older than 3.2.6, in
);
cmp_ok( scalar $rs->all, '==', scalar $rs->slice(0, $rs->count - 1), 'slice() with join has same count as all()' );
+eval { $rs->search(undef, { rows => 0, offset => 3 })->all; };
+
+ok($@, "rows => 0 errors: $@");
+
$rs = $schema->resultset("Artist")->search(
{ 'liner_notes.notes' => 'Kill Yourself!' },
{ join => { 'cds' => 'liner_notes' } });
cmp_ok($queries, '==', 1, 'Only one query run');
+# has_many resulting in an additional select if no records available despite prefetch
+my $track = $schema->resultset("Artist")->create( {
+ artistid => 4,
+ name => 'Artist without CDs',
+} );
+
+$queries = 0;
+$schema->storage->debug(1);
+
+my $artist_without_cds = $schema->resultset("Artist")->find(4, {
+ join => [qw/ cds /],
+ prefetch => [qw/ cds /],
+});
+my @no_cds = $artist_without_cds->cds;
+
+is($queries, 1, 'prefetch ran only 1 sql statement');
+
+$schema->storage->debug(0);
+
} # end run_tests
1;
eval "use DBD::SQLite";
plan skip_all => 'needs DBD::SQLite for testing' if $@;
-plan tests => 23;
+plan tests => 22;
my $rs = $schema->resultset("Artist")->search(
{ artistid => 1 }
my $artist = $rs->first;
-is( scalar @{ $rs->get_cache }, 0, 'cache is not populated without cache attribute' );
+ok( !defined($rs->get_cache), 'cache is not populated without cache attribute' );
$rs = $schema->resultset('Artist')->search( undef, { cache => 1 } );
my $artists = [ $rs->all ];
$rs->clear_cache;
-is( scalar @{$rs->get_cache}, 0, 'clear_cache is functional' );
+ok( !defined($rs->get_cache), 'clear_cache is functional' );
$rs->next;
$rs->clear_cache;
-eval {
- $rs->set_cache( [ $cd ] );
-};
-
-is( scalar @{$rs->get_cache}, 0, 'set_cache() only accepts objects of correct type for the resultset' );
-
$queries = 0;
$schema->storage->debug(1);
--- /dev/null
+sub run_tests {
+my $schema = shift;
+
+my $queries;
+#$schema->storage->debugfh(IO::File->new('t/var/temp.trace', 'w'));
+$schema->storage->debugcb( sub{ $queries++ } );
+
+eval "use DBD::SQLite";
+plan skip_all => 'needs DBD::SQLite for testing' if $@;
+plan tests => 2;
+
+
+my $cd = $schema->resultset("CD")->find(1);
+$cd->title('test');
+
+# SELECT count
+$queries = 0;
+$schema->storage->debug(1);
+
+$cd->update;
+
+is($queries, 1, 'liner_notes (might_have) not prefetched - do not load
+liner_notes on update');
+
+$schema->storage->debug(0);
+
+
+my $cd2 = $schema->resultset("CD")->find(2, {prefetch => 'liner_notes'});
+$cd2->title('test2');
+
+# SELECT count
+$queries = 0;
+$schema->storage->debug(1);
+
+$cd2->update;
+
+is($queries, 1, 'liner_notes (might_have) prefetched - do not load
+liner_notes on update');
+
+$schema->storage->debug(0);
+}
+
+1;
hammer_rs( $employees );
+ #return;
+
DBICTest::Employee->grouping_column('group_id');
$employees->delete();
foreach my $group_id (1..3) {
foreach my $position (1..$count) {
- $row = $rs->find({ $position_column=>$position });
+ ($row) = $rs->search({ $position_column=>$position })->all();
$row->move_previous();
ok( check_rs($rs), "move_previous( $position )" );
- $row = $rs->find({ $position_column=>$position });
+ ($row) = $rs->search({ $position_column=>$position })->all();
$row->move_next();
ok( check_rs($rs), "move_next( $position )" );
- $row = $rs->find({ $position_column=>$position });
+ ($row) = $rs->search({ $position_column=>$position })->all();
$row->move_first();
ok( check_rs($rs), "move_first( $position )" );
- $row = $rs->find({ $position_column=>$position });
+ ($row) = $rs->search({ $position_column=>$position })->all();
$row->move_last();
ok( check_rs($rs), "move_last( $position )" );
foreach my $to_position (1..$count) {
- $row = $rs->find({ $position_column=>$position });
+ ($row) = $rs->search({ $position_column=>$position })->all();
$row->move_to($to_position);
ok( check_rs($rs), "move_to( $position => $to_position )" );
}
- $row = $rs->find({ position=>$position });
+ ($row) = $rs->search({ position=>$position })->all();
if ($position==1) {
ok( !$row->previous_sibling(), 'no previous sibling' );
ok( !$row->first_sibling(), 'no first sibling' );
--- /dev/null
+# vim: filetype=perl
+
+sub run_tests {
+
+ eval 'require JSON';
+ plan skip_all, 'Install JSON to run this test' if ($@);
+
+ eval 'require Text::CSV_XS';
+ if ($@) {
+ eval 'require Text::CSV_PP';
+ plan skip_all, 'Install Text::CSV_XS or Text::CSV_PP to run this test' if ($@);
+ }
+
+ plan tests => 5;
+ my $schema = shift;
+
+ my $employees = $schema->resultset('Employee');
+  my $cmd = qq|script/dbicadmin --schema=DBICTest::Schema --class=Employee --connect='["dbi:SQLite:dbname=t/var/DBIxClass.db","",""]' --force --tlibs|;
+
+ `$cmd --op=insert --set='{name:"Matt"}'`;
+ ok( ($employees->count()==1), 'insert count' );
+
+ my $employee = $employees->find(1);
+ ok( ($employee->name() eq 'Matt'), 'insert valid' );
+
+ `$cmd --op=update --set='{name:"Trout"}'`;
+ $employee = $employees->find(1);
+ ok( ($employee->name() eq 'Trout'), 'update' );
+
+ `$cmd --op=insert --set='{name:"Aran"}'`;
+ my $data = `$cmd --op=select --attrs='{order_by:"name"}'`;
+ ok( ($data=~/Aran.*Trout/s), 'select with attrs' );
+
+ `$cmd --op=delete --where='{name:"Trout"}'`;
+ ok( ($employees->count()==1), 'delete' );
+}
+
+1;