prefetch
- Massive optimization of the DBI storage layer - reduce the
amount of connected() calls
- - New ::Storage::DBI method last_dbh() - it will still return a
- newly connected $dbh if we start unconnected, but will not ping
- the server on every invocation unlike dbh()
- Some fixes of multi-create corner cases
- Multiple POD improvements
+ - Added exception when resultset is called without an argument
0.08108 2009-07-05 23:15:00 (UTC)
- Fixed the has_many prefetch with limit/group deficiency -
=head2 Connecting
-To connect to your Schema, you need to provide the connection details. The
-arguments are the same as for L<DBI/connect>:
+To connect to your Schema, you need to provide the connection details or a
+database handle.
+
+=head3 Via connection details
+
+The arguments are the same as for L<DBI/connect>:
my $schema = My::Schema->connect('dbi:SQLite:/home/me/myapp/my.db');
See L<DBIx::Class::Schema::Storage::DBI/connect_info> for more information about
this and other special C<connect>-time options.
+=head3 Via a database handle
+
+The supplied coderef is expected to return a single connected database handle
+(e.g. a L<DBI> C<$dbh>).
+
+ my $schema = My::Schema->connect (
+ sub { Some::DBH::Factory->connect },
+ \%extra_attrs,
+ );
+
=head2 Basic usage
Once you've defined the basic classes, either manually or using
# these steps are necessary to keep the external appearance of
# ->update($upd) so that other things overloading update() will
# work properly
- my %original_values = $self->get_inflated_columns;
+ my %original_values = $self->get_columns;
my %existing_changes = $self->get_dirty_columns;
# See if any of the *supplied* changes would affect the ordering
# extra selectors do not go in the subquery and there is no point of ordering it
delete $sub_attrs->{$_} for qw/collapse select _prefetch_select as order_by/;
- # if we prefetch, we group_by primary keys only as this is what we would get out of the rs via ->next/->all
- # clobber old group_by regardless
+ # if we prefetch, we group_by primary keys only as this is what we would get out
+ # of the rs via ->next/->all. We DO WANT to clobber old group_by regardless
if ( keys %{$attrs->{collapse}} ) {
$sub_attrs->{group_by} = [ map { "$attrs->{alias}.$_" } ($rsrc->primary_columns) ]
}
if (my $g = $attrs->{group_by}) {
my @current_group_by = map
{ $_ =~ /\./ ? $_ : "$attrs->{alias}.$_" }
- (ref $g eq 'ARRAY' ? @$g : $g );
+ @$g
+ ;
if (
join ("\x00", sort @current_group_by)
);
}
- if ($attrs->{group_by} and ! ref $attrs->{group_by}) {
+ if ($attrs->{group_by} and ref $attrs->{group_by} ne 'ARRAY') {
$attrs->{group_by} = [ $attrs->{group_by} ];
}
$self->{related_resultsets} = {};
foreach my $relname (keys %related_stuff) {
- my $rel_obj = $related_stuff{$relname};
- my @cands;
- if (Scalar::Util::blessed($rel_obj)
- && $rel_obj->isa('DBIx::Class::Row'))
- {
- @cands = ($rel_obj);
- }
- elsif (ref $rel_obj eq 'ARRAY') {
- @cands = @$rel_obj;
- }
+ next unless $source->has_relationship ($relname);
+
+ my @cands = ref $related_stuff{$relname} eq 'ARRAY'
+ ? @{$related_stuff{$relname}}
+ : $related_stuff{$relname}
+ ;
- if (@cands) {
+ if (@cands
+ && Scalar::Util::blessed($cands[0])
+ && $cands[0]->isa('DBIx::Class::Row')
+ ) {
my $reverse = $source->reverse_relationship_info($relname);
foreach my $obj (@cands) {
$obj->set_from_related($_, $self) for keys %$reverse;
sub resultset {
my ($self, $moniker) = @_;
+ $self->throw_exception('resultset() expects a source name')
+ unless defined $moniker;
return $self->source($moniker)->resultset;
}
}
sub _default_dbi_connect_attributes {
- return { AutoCommit => 1 };
+ return {
+ AutoCommit => 1,
+ RaiseError => 1,
+ PrintError => 0,
+ };
}
=head2 on_connect_do
}
};
+ # ->connected might unset $@ - copy
my $exception = $@;
if(!$exception) { return $want_array ? @result : $result[0] }
# We were not connected - reconnect and retry, but let any
# exception fall right through this time
+ carp "Retrying $code after catching disconnected exception: $exception"
+ if $ENV{DBIC_DBIRETRY_DEBUG};
$self->_populate_dbh;
$self->$code($self->_dbh, @_);
}
$self->txn_commit;
};
+ # ->connected might unset $@ - copy
my $exception = $@;
if(!$exception) { return $want_array ? @result : $result[0] }
# We were not connected, and was first try - reconnect and retry
# via the while loop
+ carp "Retrying $coderef after catching disconnected exception: $exception"
+ if $ENV{DBIC_DBIRETRY_DEBUG};
$self->_populate_dbh;
}
}
=cut
sub connected {
- my ($self) = @_;
+ my $self = shift;
+ return 0 unless $self->_seems_connected;
- if(my $dbh = $self->_dbh) {
- if(defined $self->_conn_tid && $self->_conn_tid != threads->tid) {
- $self->_dbh(undef);
- $self->{_dbh_gen}++;
- return;
- }
- else {
- $self->_verify_pid;
- return 0 if !$self->_dbh;
- }
- return ($dbh->FETCH('Active') && $self->_ping);
+ #be on the safe side
+ local $self->_dbh->{RaiseError} = 1;
+
+ return $self->_ping;
+}
+
+sub _seems_connected {
+ my $self = shift;
+
+ my $dbh = $self->_dbh
+ or return 0;
+
+ if(defined $self->_conn_tid && $self->_conn_tid != threads->tid) {
+ $self->_dbh(undef);
+ $self->{_dbh_gen}++;
+ return 0;
+ }
+ else {
+ $self->_verify_pid;
+ return 0 if !$self->_dbh;
}
- return 0;
+ return $dbh->FETCH('Active');
}
sub _ping {
Returns a C<$dbh> - a data base handle of class L<DBI>. The returned handle
is guaranteed to be healthy by implicitly calling L</connected>, and if
-necessary performing a reconnection before returning.
+necessary performing a reconnection before returning. Keep in mind that this
+is very B<expensive> on some database engines. Consider using L<dbh_do>
+instead.
=cut
return $self->_dbh;
}
-=head2 last_dbh
-
-This returns the B<last> available C<$dbh> if any, or attempts to
-connect and returns the resulting handle. This method differs from
-L</dbh> by not validating if a preexisting handle is still healthy
-via L</connected>. Make sure you take appropriate precautions
-when using this method, as the C<$dbh> may be useless at this point.
-
-=cut
-
-sub last_dbh {
+# this is the internal "get dbh or connect (don't check)" method
+sub _get_dbh {
my $self = shift;
$self->_populate_dbh unless $self->_dbh;
return $self->_dbh;
return (
bindtype=>'columns',
array_datatypes => 1,
- limit_dialect => $self->last_dbh,
+ limit_dialect => $self->_get_dbh,
%{$self->_sql_maker_opts}
);
}
my ($self) = @_;
my @info = @{$self->_dbi_connect_info || []};
+ $self->_dbh(undef); # in case ->connected failed we might get sent here
$self->_dbh($self->_connect(@info));
$self->_conn_pid($$);
$updated_cols->{$col} = $to_insert->{$col} = $self->_sequence_fetch(
'nextval',
$col_info->{sequence} ||
- $self->_dbh_get_autoinc_seq($self->last_dbh, $source)
+ $self->_dbh_get_autoinc_seq($self->_get_dbh, $source)
);
}
}
=cut
-sub sqlt_type { shift->last_dbh->{Driver}->{Name} }
+sub sqlt_type { shift->_get_dbh->{Driver}->{Name} }
=head2 bind_attribute_by_data_type
sub deployment_statements {
my ($self, $schema, $type, $version, $dir, $sqltargs) = @_;
- # Need to be connected to get the correct sqlt_type
- $self->last_dbh() unless $type;
$type ||= $self->sqlt_type;
$version ||= $schema->schema_version || '1.x';
$dir ||= './';
return if $line =~ /^\s+$/; # skip whitespace only
$self->_query_start($line);
eval {
- # a previous error may invalidate $dbh - thus we need to use dbh()
- # to guarantee a healthy $dbh (this is temporary until we get
- # proper error handling on deploy() )
- $self->dbh->do($line);
+ # do a dbh_do cycle here, as we need some error checking in
+ # place (even though we will ignore errors)
+ $self->dbh_do (sub { $_[1]->do($line) });
};
if ($@) {
carp qq{$@ (running "${line}")};
sub datetime_parser {
my $self = shift;
return $self->{datetime_parser} ||= do {
- $self->last_dbh;
+ $self->_populate_dbh unless $self->_dbh;
$self->build_datetime_parser(@_);
};
}
sub DESTROY {
my $self = shift;
- return if !$self->_dbh;
- $self->_verify_pid;
+ $self->_verify_pid if $self->_dbh;
+
+ # some databases need this to stop spewing warnings
+ if (my $dbh = $self->_dbh) {
+ eval { $dbh->disconnect };
+ }
+
$self->_dbh(undef);
}
if ($identity_insert) {
my $table = $source->from;
- $self->last_dbh->do("SET IDENTITY_INSERT $table ON");
+ $self->_get_dbh->do("SET IDENTITY_INSERT $table ON");
}
$self->next::method(@_);
if ($identity_insert) {
my $table = $source->from;
- $self->last_dbh->do("SET IDENTITY_INSERT $table OFF");
+ $self->_get_dbh->do("SET IDENTITY_INSERT $table OFF");
}
}
grep { not exists $to_insert->{$_} } (@pk_guids, @auto_guids);
for my $guid_col (@get_guids_for) {
- my ($new_guid) = $self->last_dbh->selectrow_array('SELECT NEWID()');
+ my ($new_guid) = $self->_get_dbh->selectrow_array('SELECT NEWID()');
$updated_cols->{$guid_col} = $to_insert->{$guid_col} = $new_guid;
}
sub _svp_begin {
my ($self, $name) = @_;
- $self->last_dbh->do("SAVE TRANSACTION $name");
+ $self->_get_dbh->do("SAVE TRANSACTION $name");
}
# A new SAVE TRANSACTION with the same name releases the previous one.
sub _svp_rollback {
my ($self, $name) = @_;
- $self->last_dbh->do("ROLLBACK TRANSACTION $name");
+ $self->_get_dbh->do("ROLLBACK TRANSACTION $name");
}
sub build_datetime_parser {
sub _rebless {
my ($self) = @_;
- my $dbtype = eval { $self->last_dbh->get_info(17) };
+ my $dbtype = eval { $self->_get_dbh->get_info(17) };
unless ( $@ ) {
# Translate the backend name into a perl identifier
my $self = shift;
my $sql_rowset_size = shift || 2;
- $self->_dbh->{odbc_SQL_ROWSET_SIZE} = $sql_rowset_size;
+ $self->_get_dbh->{odbc_SQL_ROWSET_SIZE} = $sql_rowset_size;
}
=head2 connect_call_use_MARS
if ($dsn !~ /MARS_Connection=/) {
$self->_dbi_connect_info->[0] = "$dsn;MARS_Connection=Yes";
- my $connected = defined $self->_dbh;
+ my $was_connected = defined $self->_dbh;
$self->disconnect;
- $self->ensure_connected if $connected;
+ $self->ensure_connected if $was_connected;
}
}
sub _rebless {
my ($self) = @_;
- my $version = eval { $self->last_dbh->get_info(18); };
+ my $version = eval { $self->_get_dbh->get_info(18); };
if ( !$@ ) {
my ($major, $minor, $patchlevel) = split(/\./, $version);
sub _sequence_fetch {
my ( $self, $type, $seq ) = @_;
- my ($id) = $self->last_dbh->selectrow_array("SELECT ${seq}.${type} FROM DUAL");
+ my ($id) = $self->_get_dbh->selectrow_array("SELECT ${seq}.${type} FROM DUAL");
return $id;
}
"alter session set nls_timestamp_tz_format='$timestamp_tz_format'");
}
+sub _svp_begin {
+ my ($self, $name) = @_;
+
+ $self->_get_dbh->do("SAVEPOINT $name");
+}
+
=head2 source_bind_attributes
Handle LOB types in Oracle. Under a certain size (4k?), you can get away
sub _svp_rollback {
my ($self, $name) = @_;
- $self->last_dbh->do("ROLLBACK TO SAVEPOINT $name")
+ $self->_get_dbh->do("ROLLBACK TO SAVEPOINT $name")
}
=head1 AUTHOR
sub with_deferred_fk_checks {
my ($self, $sub) = @_;
- $self->last_dbh->do('SET CONSTRAINTS ALL DEFERRED');
+ $self->_get_dbh->do('SET CONSTRAINTS ALL DEFERRED');
$sub->();
}
sub _sequence_fetch {
my ( $self, $type, $seq ) = @_;
- my ($id) = $self->last_dbh->selectrow_array("SELECT nextval('${seq}')");
+ my ($id) = $self->_get_dbh->selectrow_array("SELECT nextval('${seq}')");
return $id;
}
sub _svp_begin {
my ($self, $name) = @_;
- $self->last_dbh->pg_savepoint($name);
+ $self->_get_dbh->pg_savepoint($name);
}
sub _svp_release {
my ($self, $name) = @_;
- $self->last_dbh->pg_release($name);
+ $self->_get_dbh->pg_release($name);
}
sub _svp_rollback {
my ($self, $name) = @_;
- $self->last_dbh->pg_rollback_to($name);
+ $self->_get_dbh->pg_rollback_to($name);
}
1;
if (ref($self) eq 'DBIx::Class::Storage::DBI::Sybase') {
my $dbtype = eval {
- @{$self->last_dbh->selectrow_arrayref(qq{sp_server_info \@attribute_id=1})}[2]
+ @{$self->_get_dbh->selectrow_arrayref(qq{sp_server_info \@attribute_id=1})}[2]
} || '';
my $exception = $@;
sub placeholders_supported {
my $self = shift;
- my $dbh = $self->last_dbh;
+ my $dbh = $self->_get_dbh;
return eval {
# There's also $dbh->{syb_dynamic_supported} but it can be inaccurate for this
sub _rebless {
my $self = shift;
- my $dbh = $self->last_dbh;
+ my $dbh = $self->_get_dbh;
if (not $self->placeholders_with_type_conversion_supported) {
bless $self,
sub _svp_begin {
my ($self, $name) = @_;
- $self->last_dbh->do("SAVEPOINT $name");
+ $self->_get_dbh->do("SAVEPOINT $name");
}
sub _svp_release {
my ($self, $name) = @_;
- $self->last_dbh->do("RELEASE SAVEPOINT $name");
+ $self->_get_dbh->do("RELEASE SAVEPOINT $name");
}
sub _svp_rollback {
my ($self, $name) = @_;
- $self->last_dbh->do("ROLLBACK TO SAVEPOINT $name")
+ $self->_get_dbh->do("ROLLBACK TO SAVEPOINT $name")
}
sub is_replicating {
- my $status = shift->last_dbh->selectrow_hashref('show slave status');
+ my $status = shift->_get_dbh->selectrow_hashref('show slave status');
return ($status->{Slave_IO_Running} eq 'Yes') && ($status->{Slave_SQL_Running} eq 'Yes');
}
sub lag_behind_master {
- return shift->last_dbh->selectrow_hashref('show slave status')->{Seconds_Behind_Master};
+ return shift->_get_dbh->selectrow_hashref('show slave status')->{Seconds_Behind_Master};
}
# MySql can not do subquery update/deletes, only way is slow per-row operations.
}
}
+throws_ok { $schema->resultset} qr/resultset\(\) expects a source name/, 'resultset with no argument throws exception';
+
done_testing;
# clean up our mess
END {
+ my $dbh = eval { $schema->storage->_dbh };
$dbh->do("DROP TABLE artist") if $dbh;
}
# clean up our mess
END {
+ my $dbh = eval { $schema->storage->_dbh };
$dbh->do("DROP TABLE artist") if $dbh;
}
-
'bar',
undef,
{
+ %{$storage->_default_dbi_connect_attributes || {} },
PrintError => 0,
AutoCommit => 1,
},
args => [
{
on_connect_do => [qw/a b c/],
- PrintError => 0,
- AutoCommit => 1,
+ PrintError => 1,
+ AutoCommit => 0,
on_disconnect_do => [qw/d e f/],
user => 'bar',
dsn => 'foo',
'bar',
undef,
{
- PrintError => 0,
- AutoCommit => 1,
+ %{$storage->_default_dbi_connect_attributes || {} },
+ PrintError => 1,
+ AutoCommit => 0,
},
],
},
}
-# We do not count pings during deploy() because of the flux
-# around sqlt. Eventually there should be no pings at all
+# measure pings around deploy() separately
my $schema = DBICTest->init_schema( sqlite_use_file => 1, no_populate => 1 );
-TODO: {
- local $TODO = 'Unable to fix before proper deploy() error handling';
- is ($ping_count, 0, 'no _ping() calls during deploy');
- $ping_count = 0;
-}
+is ($ping_count, 0, 'no _ping() calls during deploy');
+$ping_count = 0;
+
+
DBICTest->populate_schema ($schema);
# clean up our mess
END {
+ my $dbh = eval { $schema->storage->_dbh };
$dbh->do("DROP TABLE artist") if $dbh;
}
+++ /dev/null
-use warnings;
-use strict;
-
-use Test::More;
-use Test::Exception;
-
-use lib qw(t/lib);
-use DBICTest;
-
-my $schema = DBICTest->init_schema();
-
-TODO: {
- local $TODO = 'call accessors when calling create() or update()';
-
- my $row =
- $schema->resultset('Track')->new_result( { title => 'foo', cd => 1 } );
- $row->increment(1);
- $row->insert;
- is( $row->increment, 2 );
-
- $row =
- $schema->resultset('Track')
- ->create( { title => 'bar', cd => 1, increment => 1 } );
- is( $row->increment, 2 );
-
- # $row isa DBICTest::Schema::Track
- $row->get_from_storage;
- is( $row->increment, 2 );
-
- $row->update( { increment => 3 } );
- $row->get_from_storage;
- is( $row->increment, 4 );
-
- $row->increment(3);
- $row->get_from_storage;
- is( $row->increment, 4 );
-
- throws_ok (sub {
- $row =
- $schema->resultset('Track')
- ->create( { title => 'bar', cd => 2, set_increment => 1 } );
- }, qr/no such column/i);
-}
-
-done_testing;
use strict;
-use warnings;
+use warnings;
use Test::More;
+use Test::Exception;
use lib qw(t/lib);
use DBICTest;
eval { require DateTime };
plan skip_all => "Need DateTime for inflation tests" if $@;
-plan tests => 22;
-
$schema->class('CD') ->inflate_column( 'year',
{ inflate => sub { DateTime->new( year => shift ) },
deflate => sub { shift->year } }
ok(!$@, 'set_inflated_column with DateTime object');
$cd->update;
-$cd = $schema->resultset("CD")->find(3);
+$cd = $schema->resultset("CD")->find(3);
is( $cd->year->year, $now->year, 'deflate ok' );
-$cd = $schema->resultset("CD")->find(3);
+$cd = $schema->resultset("CD")->find(3);
my $before_year = $cd->year->year;
eval { $cd->set_inflated_column('year', \'year + 1') };
ok(!$@, 'set_inflated_column to "year + 1"');
TODO: {
local $TODO = 'this was left in without a TODO - should it work?';
- eval {
+ lives_ok (sub {
$cd->store_inflated_column('year', \'year + 1');
is_deeply( $cd->year, \'year + 1', 'deflate ok' );
- };
- ok(!$@, 'store_inflated_column to "year + 1"');
+ }, 'store_inflated_column to "year + 1"');
}
-$cd = $schema->resultset("CD")->find(3);
+$cd = $schema->resultset("CD")->find(3);
is( $cd->year->year, $before_year+1, 'deflate ok' );
# store_inflated_column test
-$cd = $schema->resultset("CD")->find(3);
+$cd = $schema->resultset("CD")->find(3);
eval { $cd->store_inflated_column('year', $now) };
ok(!$@, 'store_inflated_column with DateTime object');
$cd->update;
is( $cd->year->year, $now->year, 'deflate ok' );
# update tests
-$cd = $schema->resultset("CD")->find(3);
+$cd = $schema->resultset("CD")->find(3);
eval { $cd->update({'year' => $now}) };
ok(!$@, 'update using DateTime object ok');
is($cd->year->year, $now->year, 'deflate ok');
-$cd = $schema->resultset("CD")->find(3);
+$cd = $schema->resultset("CD")->find(3);
$before_year = $cd->year->year;
eval { $cd->update({'year' => \'year + 1'}) };
ok(!$@, 'update using scalarref ok');
-$cd = $schema->resultset("CD")->find(3);
+$cd = $schema->resultset("CD")->find(3);
is($cd->year->year, $before_year + 1, 'deflate ok');
# discard_changes test
-$cd = $schema->resultset("CD")->find(3);
+$cd = $schema->resultset("CD")->find(3);
# inflate the year
$before_year = $cd->year->year;
$cd->update({ year => \'year + 1'});
my $copy = $cd->copy({ year => $now, title => "zemoose" });
isnt( $copy->year->year, $before_year, "copy" );
-
+
+done_testing;
use Data::Dumper;
my @serializers = (
- { module => 'YAML.pm',
- inflater => sub { YAML::Load (shift) },
- deflater => sub { die "Expecting a reference" unless (ref $_[0]); YAML::Dump (shift) },
+ { module => 'YAML.pm',
+ inflater => sub { YAML::Load (shift) },
+ deflater => sub { die "Expecting a reference" unless (ref $_[0]); YAML::Dump (shift) },
},
- { module => 'Storable.pm',
- inflater => sub { Storable::thaw (shift) },
- deflater => sub { die "Expecting a reference" unless (ref $_[0]); Storable::nfreeze (shift) },
+ { module => 'Storable.pm',
+ inflater => sub { Storable::thaw (shift) },
+ deflater => sub { die "Expecting a reference" unless (ref $_[0]); Storable::nfreeze (shift) },
},
);
foreach my $serializer (@serializers) {
eval { require $serializer->{module} };
unless ($@) {
- $selected = $serializer;
- last;
+ $selected = $serializer;
+ last;
}
}
plan (skip_all => "No suitable serializer found") unless $selected;
-plan (tests => 11);
DBICTest::Schema::Serialized->inflate_column( 'serialized',
{ inflate => $selected->{inflater},
deflate => $selected->{deflater},
my $struct_hash = {
a => 1,
- b => [
+ b => [
{ c => 2 },
],
d => 3,
};
my $struct_array = [
- 'a',
- {
- b => 1,
- c => 2
+ 'a',
+ {
+ b => 1,
+ c => 2,
},
'd',
];
#======= testing hashref serialization
my $object = $rs->create( {
- id => 1,
serialized => '',
} );
ok($object->update( { serialized => $struct_hash } ), 'hashref deflation');
is_deeply($inflated, $struct_hash, 'inflated hash matches original');
$object = $rs->create( {
- id => 2,
serialized => '',
} );
-eval { $object->set_inflated_column('serialized', $struct_hash) };
-ok(!$@, 'set_inflated_column to a hashref');
+$object->set_inflated_column('serialized', $struct_hash);
is_deeply($object->serialized, $struct_hash, 'inflated hash matches original');
+$object = $rs->new({});
+$object->serialized ($struct_hash);
+$object->insert;
+is_deeply (
+ $rs->find ({id => $object->id})->serialized,
+ $struct_hash,
+ 'new/insert works',
+);
#====== testing arrayref serialization
ok($inflated = $object->serialized, 'arrayref inflation');
is_deeply($inflated, $struct_array, 'inflated array matches original');
+$object = $rs->new({});
+$object->serialized ($struct_array);
+$object->insert;
+is_deeply (
+ $rs->find ({id => $object->id})->serialized,
+ $struct_array,
+ 'new/insert works',
+);
-#===== make sure make_column_dirty ineracts reasonably with inflation
+#===== make sure make_column_dirty interacts reasonably with inflation
$object = $rs->first;
$object->update ({serialized => { x => 'y'}});
$object->update;
is_deeply ($rs->first->serialized, { x => 'z' }, 'changes made it to the db' );
+
+done_testing;
close IN;
for my $chunk ( split (/;\s*\n+/, $sql) ) {
if ( $chunk =~ / ^ (?! --\s* ) \S /xm ) { # there is some real sql in the chunk - a non-space at the start of the string which is not a comment
- $schema->storage->dbh->do($chunk) or print "Error on SQL: $chunk\n";
+ $schema->storage->dbh_do(sub { $_[1]->do($chunk) }) or print "Error on SQL: $chunk\n";
}
}
}
__PACKAGE__->table('serialized');
__PACKAGE__->add_columns(
- 'id' => { data_type => 'integer' },
+ 'id' => { data_type => 'integer', is_auto_increment => 1 },
'serialized' => { data_type => 'text' },
);
__PACKAGE__->set_primary_key('id');
data_type => 'smalldatetime',
is_nullable => 1
},
- increment => {
- data_type => 'integer',
- is_nullable => 1,
- accessor => '_increment',
- }
);
__PACKAGE__->set_primary_key('trackid');
__PACKAGE__->might_have( cd_single => 'DBICTest::Schema::CD', 'single_track' );
__PACKAGE__->might_have( lyrics => 'DBICTest::Schema::Lyrics', 'track_id' );
-sub increment {
- my $self = shift;
- if(@_) {
- return $self->_increment($_[0] + 1);
- }
- return $self->_increment();
-}
-
-sub set_increment {
- my $self = shift;
- if(@_) {
- return $self->_increment($_[0]);
- }
- return $self->_increment();
-}
-
1;
--- Created on Thu Jul 30 09:37:43 2009
+-- Created on Wed Aug 12 16:10:43 2009
--
title varchar(100) NOT NULL,
last_updated_on datetime,
last_updated_at datetime,
- small_dt smalldatetime,
- increment integer
+ small_dt smalldatetime
);
CREATE INDEX track_idx_cd ON track (cd);
'(
SELECT
cds.cdid, cds.artist, cds.title, cds.year, cds.genreid, cds.single_track,
- single_track.trackid, single_track.cd, single_track.position, single_track.title, single_track.last_updated_on, single_track.last_updated_at, single_track.small_dt, single_track.increment,
- single_track_2.trackid, single_track_2.cd, single_track_2.position, single_track_2.title, single_track_2.last_updated_on, single_track_2.last_updated_at, single_track_2.small_dt, single_track_2.increment,
+ single_track.trackid, single_track.cd, single_track.position, single_track.title, single_track.last_updated_on, single_track.last_updated_at, single_track.small_dt,
+ single_track_2.trackid, single_track_2.cd, single_track_2.position, single_track_2.title, single_track_2.last_updated_on, single_track_2.last_updated_at, single_track_2.small_dt,
cd.cdid, cd.artist, cd.title, cd.year, cd.genreid, cd.single_track
FROM artist me
LEFT JOIN cd cds ON cds.artist = me.artistid
$most_tracks_rs->as_query,
'(
SELECT me.cdid, me.track_count,
- tracks.trackid, tracks.cd, tracks.position, tracks.title, tracks.last_updated_on, tracks.last_updated_at, tracks.small_dt, tracks.increment,
+ tracks.trackid, tracks.cd, tracks.position, tracks.title, tracks.last_updated_on, tracks.last_updated_at, tracks.small_dt,
liner_notes.liner_id, liner_notes.notes
FROM (
SELECT me.cdid, COUNT( tracks.trackid ) AS track_count
is ($rs->count, 5, 'Correct count of CDs');
}
+# RT 47779, test group_by as a scalar ref
+{
+ my $track_rs = $schema->resultset ('Track')->search (
+ { 'me.cd' => { -in => [ $cd_rs->get_column ('cdid')->all ] } },
+ {
+ select => [
+ 'me.cd',
+ { count => 'me.trackid' },
+ ],
+ as => [qw/
+ cd
+ track_count
+ /],
+ group_by => \'SUBSTR(me.cd, 1, 1)',
+ prefetch => 'cd',
+ },
+ );
+
+ is_same_sql_bind (
+ $track_rs->count_rs->as_query,
+ '(
+ SELECT COUNT( * )
+ FROM (
+ SELECT SUBSTR(me.cd, 1, 1)
+ FROM track me
+ JOIN cd cd ON cd.cdid = me.cd
+ WHERE ( me.cd IN ( ?, ?, ?, ?, ? ) )
+ GROUP BY SUBSTR(me.cd, 1, 1)
+ )
+ count_subq
+ )',
+ [ map { [ 'me.cd' => $_] } ($cd_rs->get_column ('cdid')->all) ],
+ 'count() query generated expected SQL',
+ );
+}
+
done_testing;