Revision history for DBIx::Class
- - Replication updates: Improved the replication tests so that they are
- more reliable and accurate, and hopefully solve some cross platform
- issues. Bugfixes related to naming particular replicants in a
- 'force_pool' attribute. Lots of documentation updates, including a
- new Introduction.pod file. Fixed the way we detect transaction to
- make this more reliable and forward looking. Fixed some trouble with
- the way Moose Types are used.
+ - Replication updates:
+ - Improved the replication tests so that they are more reliable
+ and accurate, and hopefully solve some cross platform issues.
+ - Bugfixes related to naming particular replicants in a
+ 'force_pool' attribute.
+ - Lots of documentation updates, including a new Introduction.pod
+ file.
+ - Fixed the way we detect transactions to make this more reliable
+ and forward looking.
+ - Fixed some trouble with the way Moose Types are used.
+ - Made discard_changes/get_from_storage replication aware (they
+ now read from the master storage by default)
+ - Refactor of MSSQL storage drivers, with some new features:
+ - Support for placeholders for MSSQL via DBD::Sybase with proper
+ autodetection
+ - 'uniqueidentifier' support with auto newid()
+ - Dynamic cursor support and other MARS options for ODBC
+ - savepoints with auto_savepoint => 1
+ - Support for MSSQL 'money' type
+ - Support for 'smalldatetime' type used in MSSQL and Sybase for
+ InflateColumn::DateTime
+ - Support for Postgres 'timestamp without time zone' type in
+ InflateColumn::DateTime (RT#48389)
- Added new MySQL specific on_connect_call macro 'set_strict_mode'
(also known as make_mysql_not_suck_as_much)
- - Added call to Pod::Inherit in Makefile.PL -
- currently at author-time only, so we need to add the produced
- .pod files to the MANIFEST
-
+ - Multiple prefetch-related fixes:
+ - Adjust overly aggressive subquery join-chain pruning
+ - Always preserve the outer join-chain - fixes numerous
+ problems with search_related chaining
+ - Deal with the distinct => 1 attribute properly when using
+ prefetch
+ - Massive optimization of the DBI storage layer - reduce the
+ amount of connected() calls
+ - New ::Storage::DBI method last_dbh() - it will still return a
+ newly connected $dbh if we start unconnected, but unlike dbh()
+ it will not ping the server on every invocation
+ - Multiple POD improvements
0.08108 2009-07-05 23:15:00 (UTC)
- Fixed the has_many prefetch with limit/group deficiency -
test_requires 'Test::More' => 0.92;
test_requires 'Test::Warn' => 0.11;
+test_requires 'File::Temp' => 0.22;
+
+
# Core
requires 'List::Util' => 0;
requires 'Scalar::Util' => 0;
recommends 'SQL::Translator' => 0.09004;
-install_script (qw|
- script/dbicadmin
-|);
-
-tests_recursive (qw|
- t
-|);
-
-resources 'IRC' => 'irc://irc.perl.org/#dbix-class';
-resources 'license' => 'http://dev.perl.org/licenses/';
-resources 'repository' => 'http://dev.catalyst.perl.org/svnweb/bast/browse/DBIx-Class/';
-resources 'MailingList' => 'http://lists.scsys.co.uk/cgi-bin/mailman/listinfo/dbix-class';
-
-
-# re-build README and require extra modules for testing if we're in a checkout
+my %replication_requires = (
+ 'Moose' => 0.87,
+ 'MooseX::AttributeHelpers' => 0.21,
+ 'MooseX::Types' => 0.16,
+ 'namespace::clean' => 0.11,
+ 'Hash::Merge' => 0.11,
+);
my %force_requires_if_author = (
+ %replication_requires,
+
# 'Module::Install::Pod::Inherit' => 0.01,
'Test::Pod::Coverage' => 1.04,
'SQL::Translator' => 0.09007,
# t/96_is_deteministic_value.t
'DateTime::Format::Strptime'=> 0,
- # t/93storage_replication.t
- 'Moose', => 0.87,
- 'MooseX::AttributeHelpers' => 0.21,
- 'MooseX::Types', => 0.16,
- 'namespace::clean' => 0.11,
- 'Hash::Merge', => 0.11,
-
# database-dependent reqs
#
$ENV{DBICTEST_PG_DSN}
,
);
+
+install_script (qw|
+ script/dbicadmin
+|);
+
+tests_recursive (qw|
+ t
+|);
+
+resources 'IRC' => 'irc://irc.perl.org/#dbix-class';
+resources 'license' => 'http://dev.perl.org/licenses/';
+resources 'repository' => 'http://dev.catalyst.perl.org/svnweb/bast/browse/DBIx-Class/';
+resources 'MailingList' => 'http://lists.scsys.co.uk/cgi-bin/mailman/listinfo/dbix-class';
+
+
+# re-build README and require extra modules for testing if we're in a checkout
+
if ($Module::Install::AUTHOR) {
warn <<'EOW';
******************************************************************************
unlink 'MANIFEST';
}
-# eval { require Module::Install::Pod::Inherit };
-# PodInherit() if !$@;
+# require Module::Install::Pod::Inherit;
+# PodInherit();
}
auto_install();
return $rs->all(); # all records for page 1
+ return $rs->page(2); # records for page 2
+
You can get a L<Data::Page> object for the resultset (suitable for use
in e.g. a template) using the C<pager> method:
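
For instance, a minimal sketch using a few L<Data::Page> accessors:

  my $pager = $rs->page(2)->pager;
  printf "page %d of %d (%d records total)\n",
    $pager->current_page, $pager->last_page, $pager->total_entries;
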
=head2 Retrieve one and only one row from a resultset
-Sometimes you need only the first "top" row of a resultset. While this can be
-easily done with L<< $rs->first|DBIx::Class::ResultSet/first >>, it is suboptimal,
-as a full blown cursor for the resultset will be created and then immediately
-destroyed after fetching the first row object.
-L<< $rs->single|DBIx::Class::ResultSet/single >> is
-designed specifically for this case - it will grab the first returned result
+Sometimes you need only the first "top" row of a resultset. While this
+can be easily done with L<< $rs->first|DBIx::Class::ResultSet/first
+>>, it is suboptimal, as a full blown cursor for the resultset will be
+created and then immediately destroyed after fetching the first row
+object. L<< $rs->single|DBIx::Class::ResultSet/single >> is designed
+specifically for this case - it will grab the first returned result
without even instantiating a cursor.
Before replacing all your calls to C<first()> with C<single()> please observe the
=over
=item *
+
While single() takes a search condition just like search() does, it does
_not_ accept search attributes. However one can always chain a single() to
a search():
- my $top_cd = $cd_rs -> search({}, { order_by => 'rating' }) -> single;
+ my $top_cd = $cd_rs->search({}, { order_by => 'rating' })->single;
=item *
+
Since single() is the engine behind find(), it is designed to fetch a
single row per database query. Thus a warning will be issued when the
underlying SELECT returns more than one row. Sometimes however this usage
at the top of the charts at any given time. If you know what you are doing,
you can silence the warning by explicitly limiting the resultset size:
- my $top_cd = $cd_rs -> search ({}, { order_by => 'rating', rows => 1 }) -> single;
+ my $top_cd = $cd_rs->search ({}, { order_by => 'rating', rows => 1 })->single;
=back
(e.g. it contains Unions, Sub-Selects, Stored Procedures, etc.) or has to
be optimized for your database in a special way, but you still want to
get the results as a L<DBIx::Class::ResultSet>.
-The recommended way to accomplish this is by defining a separate ResultSource
-for your query. You can then inject complete SQL statements using a scalar
-reference (this is a feature of L<SQL::Abstract>).
-Say you want to run a complex custom query on your user data, here's what
-you have to add to your User class:
-
- package My::Schema::Result::User;
+This is accomplished by defining a
+L<ResultSource::View|DBIx::Class::ResultSource::View> for your query,
+almost like you would define a regular ResultSource.
+ package My::Schema::Result::UserFriendsComplex;
+ use strict;
+ use warnings;
use base qw/DBIx::Class/;
- # ->load_components, ->table, ->add_columns, etc.
-
- # Make a new ResultSource based on the User class
- my $source = __PACKAGE__->result_source_instance();
- my $new_source = $source->new( $source );
- $new_source->source_name( 'UserFriendsComplex' );
-
- # Hand in your query as a scalar reference
- # It will be added as a sub-select after FROM,
- # so pay attention to the surrounding brackets!
- $new_source->name( \<<SQL );
- ( SELECT u.* FROM user u
- INNER JOIN user_friends f ON u.id = f.user_id
- WHERE f.friend_user_id = ?
- UNION
- SELECT u.* FROM user u
- INNER JOIN user_friends f ON u.id = f.friend_user_id
- WHERE f.user_id = ? )
- SQL
-
- # Finally, register your new ResultSource with your Schema
- My::Schema->register_extra_source( 'UserFriendsComplex' => $new_source );
+ __PACKAGE__->load_components('Core');
+ __PACKAGE__->table_class('DBIx::Class::ResultSource::View');
+
+ # ->table, ->add_columns, etc.
+
+ # do not attempt to deploy() this view
+ __PACKAGE__->result_source_instance->is_virtual(1);
+
+ __PACKAGE__->result_source_instance->view_definition(q[
+ SELECT u.* FROM user u
+ INNER JOIN user_friends f ON u.id = f.user_id
+ WHERE f.friend_user_id = ?
+ UNION
+ SELECT u.* FROM user u
+ INNER JOIN user_friends f ON u.id = f.friend_user_id
+ WHERE f.user_id = ?
+ ]);
Next, you can execute your complex query using bind parameters like this:
- my $friends = [ $schema->resultset( 'UserFriendsComplex' )->search( {},
+ my $friends = $schema->resultset( 'UserFriendsComplex' )->search( {},
{
bind => [ 12345, 12345 ]
}
- ) ];
+ );
... and you'll get back a perfect L<DBIx::Class::ResultSet> (except, of course,
that you cannot modify the rows it contains, ie. cannot call L</update>,
L</delete>, ... on it).
-If you prefer to have the definitions of these custom ResultSources in separate
-files (instead of stuffing all of them into the same ResultSource class), you
-can achieve the same with subclassing the ResultSource class and defining the
-new ResultSource there:
-
- package My::Schema::Result::UserFriendsComplex;
+Note that you cannot have bind parameters unless is_virtual is set to true.
- use base qw/My::Schema::Result::User/;
+=over
- __PACKAGE__->table('dummy'); # currently must be called before anything else
+=item * NOTE
- # Hand in your query as a scalar reference
- # It will be added as a sub-select after FROM,
- # so pay attention to the surrounding brackets!
- __PACKAGE__->result_source_instance->name( \<<SQL );
- ( SELECT u.* FROM user u
- INNER JOIN user_friends f ON u.id = f.user_id
- WHERE f.friend_user_id = ?
- UNION
- SELECT u.* FROM user u
- INNER JOIN user_friends f ON u.id = f.friend_user_id
- WHERE f.user_id = ? )
- SQL
+If you're using the old deprecated C<< $rsrc_instance->name(\'( SELECT ...') >>
+method for custom SQL execution, you are highly encouraged to update your code
+to use a virtual view as above. If you do not want to change your code, and just
+want to suppress the deprecation warning when you call
+L<DBIx::Class::Schema/deploy>, add this line to your source definition, so that
+C<deploy> will exclude this "table":
- 1;
+ sub sqlt_deploy_hook { $_[1]->schema->drop_table ($_[1]) }
-TIMTOWDI.
+=back
=head2 Using specific columns
my( $self, $to_position ) = @_;
return 0 if ( $to_position < 1 );
- my $from_position = $self->_position;
- return 0 if ( $from_position == $to_position );
-
my $position_column = $self->position_column;
- {
- my $guard = $self->result_source->schema->txn_scope_guard;
+ my $guard;
- my ($direction, @between);
- if ( $from_position < $to_position ) {
- $direction = -1;
- @between = map { $self->_position_value ($_) } ( $from_position + 1, $to_position );
- }
- else {
- $direction = 1;
- @between = map { $self->_position_value ($_) } ( $to_position, $from_position - 1 );
- }
+ if ($self->is_column_changed ($position_column) ) {
+ # something changed our position, we have no idea where we
+ # used to be - requery without using discard_changes
+ # (we need only a specific column back)
- my $new_pos_val = $self->_position_value ($to_position); # record this before the shift
+ $guard = $self->result_source->schema->txn_scope_guard;
- # we need to null-position the moved row if the position column is part of a constraint
- if (grep { $_ eq $position_column } ( map { @$_ } (values %{{ $self->result_source->unique_constraints }} ) ) ) {
- $self->_ordered_internal_update({ $position_column => $self->null_position_value });
- }
+ my $cursor = $self->result_source->resultset->search(
+ $self->ident_condition,
+ { select => $position_column },
+ )->cursor;
- $self->_shift_siblings ($direction, @between);
- $self->_ordered_internal_update({ $position_column => $new_pos_val });
+ my ($pos) = $cursor->next;
+ $self->$position_column ($pos);
+ delete $self->{_dirty_columns}{$position_column};
+ }
- $guard->commit;
+ my $from_position = $self->_position;
- return 1;
+ if ( $from_position == $to_position ) { # FIXME this will not work for non-numeric order
+ $guard->commit if $guard;
+ return 0;
}
+
+ $guard ||= $self->result_source->schema->txn_scope_guard;
+
+ my ($direction, @between);
+ if ( $from_position < $to_position ) {
+ $direction = -1;
+ @between = map { $self->_position_value ($_) } ( $from_position + 1, $to_position );
+ }
+ else {
+ $direction = 1;
+ @between = map { $self->_position_value ($_) } ( $to_position, $from_position - 1 );
+ }
+
+ my $new_pos_val = $self->_position_value ($to_position); # record this before the shift
+
+ # we need to null-position the moved row if the position column is part of a constraint
+ if (grep { $_ eq $position_column } ( map { @$_ } (values %{{ $self->result_source->unique_constraints }} ) ) ) {
+ $self->_ordered_internal_update({ $position_column => $self->null_position_value });
+ }
+
+ $self->_shift_siblings ($direction, @between);
+ $self->_ordered_internal_update({ $position_column => $new_pos_val });
+
+ $guard->commit;
+ return 1;
}
=head2 move_to_group
my $position_column = $self->position_column;
return 0 if ( defined($to_position) and $to_position < 1 );
- if ($self->_is_in_group ($to_group) ) {
- return 0 if not defined $to_position;
- return $self->move_to ($to_position);
+
+ # check if someone changed the _grouping_columns - this will
+ # prevent _is_in_group from working, so we need to requery the db
+ # for the original values
+ my (@dirty_cols, %values, $guard);
+ for ($self->_grouping_columns) {
+ $values{$_} = $self->get_column ($_);
+ push @dirty_cols, $_ if $self->is_column_changed ($_);
}
- {
- my $guard = $self->result_source->schema->txn_scope_guard;
+ # re-query only the dirty columns, and restore them on the
+ # object (subsequent code will update them to the correct
+ # after-move values)
+ if (@dirty_cols) {
+ $guard = $self->result_source->schema->txn_scope_guard;
- # Move to end of current group to adjust siblings
- $self->move_last;
+ my $cursor = $self->result_source->resultset->search(
+ $self->ident_condition,
+ { select => \@dirty_cols },
+ )->cursor;
- $self->set_inflated_columns({ %$to_group, $position_column => undef });
- my $new_group_last_posval = $self->_last_sibling_posval;
- my $new_group_last_position = $self->_position_from_value (
- $new_group_last_posval
- );
+ my @original_values = $cursor->next;
+ $self->set_inflated_columns ({ %values, map { $_ => shift @original_values } (@dirty_cols) });
+ }
- if ( not defined($to_position) or $to_position > $new_group_last_position) {
- $self->set_column(
- $position_column => $new_group_last_position
- ? $self->_next_position_value ( $new_group_last_posval )
- : $self->_initial_position_value
- );
- }
- else {
- my $bumped_pos_val = $self->_position_value ($to_position);
- my @between = ($to_position, $new_group_last_position);
- $self->_shift_siblings (1, @between); #shift right
- $self->set_column( $position_column => $bumped_pos_val );
- }
+ if ($self->_is_in_group ($to_group) ) {
+ my $ret;
+ if (defined $to_position) {
+ $ret = $self->move_to ($to_position);
+ }
+
+ $guard->commit if $guard;
+ return $ret||0;
+ }
- $self->_ordered_internal_update;
+ $guard ||= $self->result_source->schema->txn_scope_guard;
- $guard->commit;
+ # Move to end of current group to adjust siblings
+ $self->move_last;
- return 1;
+ $self->set_inflated_columns({ %$to_group, $position_column => undef });
+ my $new_group_last_posval = $self->_last_sibling_posval;
+ my $new_group_last_position = $self->_position_from_value (
+ $new_group_last_posval
+ );
+
+ if ( not defined($to_position) or $to_position > $new_group_last_position) {
+ $self->set_column(
+ $position_column => $new_group_last_position
+ ? $self->_next_position_value ( $new_group_last_posval )
+ : $self->_initial_position_value
+ );
+ }
+ else {
+ my $bumped_pos_val = $self->_position_value ($to_position);
+ my @between = ($to_position, $new_group_last_position);
+ $self->_shift_siblings (1, @between); #shift right
+ $self->set_column( $position_column => $bumped_pos_val );
}
+
+ $self->_ordered_internal_update;
+
+ $guard->commit;
+
+ return 1;
}
=head2 insert
# this is set by _ordered_internal_update()
return $self->next::method(@_) if $self->{_ORDERED_INTERNAL_UPDATE};
- my $upd = shift;
- $self->set_inflated_columns($upd) if $upd;
- my %changes = $self->get_dirty_columns;
- $self->discard_changes;
-
my $position_column = $self->position_column;
+ my @ordering_columns = ($self->_grouping_columns, $position_column);
+
+
+ # these steps are necessary to keep the external appearance of
+ # ->update($upd) so that other things overloading update() will
+ # work properly
+ my %original_values = $self->get_inflated_columns;
+ my %existing_changes = $self->get_dirty_columns;
+
+ # See if any of the *supplied* changes would affect the ordering
+ # The reason this is so contrived is that we want to leverage
+ # the datatype-aware value comparison, while at the same time
+ # keeping the original value intact (it will be updated later by the
+ # corresponding routine)
+
+ my %upd = %{shift || {}};
+ my %changes = %existing_changes;
+
+ for (@ordering_columns) {
+ next unless exists $upd{$_};
+
+ # we do not want to keep propagating this to next::method
+ # as it will be a done deal by the time we get there
+ my $value = delete $upd{$_};
+ $self->set_inflated_columns ({ $_ => $value });
+
+ # see if an update resulted in a dirty column
+ # it is important to preserve the old value, as it
+ # will be needed to carry on a successful move()
+ # operation without re-querying the database
+ if ($self->is_column_changed ($_) && not exists $existing_changes{$_}) {
+ $changes{$_} = $value;
+ $self->set_inflated_columns ({ $_ => $original_values{$_} });
+ delete $self->{_dirty_columns}{$_};
+ }
+ }
# if nothing group/position related changed - short circuit
- if (not grep { exists $changes{$_} } ($self->_grouping_columns, $position_column) ) {
- return $self->next::method( \%changes, @_ );
+ if (not grep { exists $changes{$_} } ( @ordering_columns ) ) {
+ return $self->next::method( \%upd, @_ );
}
{
# create new_group by taking the current group and inserting changes
my $new_group = {$self->_grouping_clause};
foreach my $col (keys %$new_group) {
- if (exists $changes{$col}) {
- $new_group->{$col} = delete $changes{$col}; # don't want to pass this on to next::method
- }
+ $new_group->{$col} = $changes{$col} if exists $changes{$col};
}
$self->move_to_group(
$new_group,
(exists $changes{$position_column}
- # The FIXME bit contradicts the documentation: when changing groups without supplying explicit
- # positions in move_to_group(), we push the item to the end of the group.
- # However when I was rewriting this, the position from the old group was clearly passed to the new one
+ # The FIXME bit contradicts the documentation: POD states that
+ # when changing groups without supplying explicit positions in
+ # move_to_group(), we push the item to the end of the group.
+ # However when I was rewriting this, the position from the old
+ # group was clearly passed to the new one
# Probably needs to go away (by ribasushi)
- ? delete $changes{$position_column} # means there was a position change supplied with the update too
- : $self->_position # FIXME!
+ ? $changes{$position_column} # means there was a position change supplied with the update too
+ : $self->_position # FIXME! (replace with undef)
),
);
}
elsif (exists $changes{$position_column}) {
- $self->move_to(delete $changes{$position_column});
+ $self->move_to($changes{$position_column});
}
my @res;
my $want = wantarray();
if (not defined $want) {
- $self->next::method( \%changes, @_ );
+ $self->next::method( \%upd, @_ );
}
elsif ($want) {
- @res = $self->next::method( \%changes, @_ );
+ @res = $self->next::method( \%upd, @_ );
}
else {
- $res[0] = $self->next::method( \%changes, @_ );
+ $res[0] = $self->next::method( \%upd, @_ );
}
$guard->commit;
=head2 _grouping_clause
This method returns one or more name=>value pairs for limiting a search
-by the grouping column(s). If the grouping column is not
-defined then this will return an empty list.
+by the grouping column(s). If the grouping column is not defined then
+this will return an empty list.
=cut
sub _grouping_clause {
=cut
-sub _ident_values {
- my ($self) = @_;
- return (map { $self->{_column_data}{$_} } $self->primary_columns);
-}
-
-=head2 discard_changes ($attrs)
-
-Re-selects the row from the database, losing any changes that had
-been made.
-
-This method can also be used to refresh from storage, retrieving any
-changes made since the row was last read from storage.
-
-$attrs is expected to be a hashref of attributes suitable for passing as the
-second argument to $resultset->search($cond, $attrs);
-
-=cut
-
-sub discard_changes {
- my ($self, $attrs) = @_;
- delete $self->{_dirty_columns};
- return unless $self->in_storage; # Don't reload if we aren't real!
-
- if( my $current_storage = $self->get_from_storage($attrs)) {
-
- # Set $self to the current.
- %$self = %$current_storage;
-
- # Avoid a possible infinite loop with
- # sub DESTROY { $_[0]->discard_changes }
- bless $current_storage, 'Do::Not::Exist';
-
- return $self;
- } else {
- $self->in_storage(0);
- return $self;
- }
-}
-
=head2 id
Returns the primary key(s) for a row. Can't be called as
return (wantarray ? @pk : $pk[0]);
}
+sub _ident_values {
+ my ($self) = @_;
+ return (map { $self->{_column_data}{$_} } $self->primary_columns);
+}
+
=head2 ID
Returns a unique id string identifying a row object by primary key.
Used by L<DBIx::Class::CDBICompat::LiveObjectIndex> and
L<DBIx::Class::ObjectCache>.
+=over
+
+=item WARNING
+
+The default C<_create_ID> method used by this function orders the returned
+values by the alphabetical order of the primary column names, B<unlike>
+the L</id> method, which follows the same order in which columns were fed
+to L<DBIx::Class::ResultSource/set_primary_key>.
+
+=back
+
=cut
sub ID {
my $sub_attrs = { %$attrs };
# extra selectors do not go in the subquery and there is no point in ordering it
- delete $sub_attrs->{$_} for qw/collapse prefetch_select select as order_by/;
+ delete $sub_attrs->{$_} for qw/collapse select _prefetch_select as order_by/;
# if we prefetch, we group_by primary keys only as this is what we would get out of the rs via ->next/->all
# clobber old group_by regardless
$attrs->{group_by} = [ $attrs->{group_by} ];
}
+ # generate the distinct induced group_by early, as prefetch will be carried via a
+ # subquery (since a group_by is present)
+ if (delete $attrs->{distinct}) {
+ $attrs->{group_by} ||= [ grep { !ref($_) || (ref($_) ne 'HASH') } @{$attrs->{select}} ];
+ }
+
$attrs->{collapse} ||= {};
if ( my $prefetch = delete $attrs->{prefetch} ) {
$prefetch = $self->_merge_attr( {}, $prefetch );
my @prefetch =
$source->_resolve_prefetch( $prefetch, $alias, $join_map, $prefetch_ordering, $attrs->{collapse} );
- $attrs->{prefetch_select} = [ map { $_->[0] } @prefetch ];
- push @{ $attrs->{select} }, @{$attrs->{prefetch_select}};
+ # we need to somehow mark which columns came from prefetch
+ $attrs->{_prefetch_select} = [ map { $_->[0] } @prefetch ];
+
+ push @{ $attrs->{select} }, @{$attrs->{_prefetch_select}};
push @{ $attrs->{as} }, (map { $_->[1] } @prefetch);
push( @{$attrs->{order_by}}, @$prefetch_ordering );
$attrs->{_collapse_order_by} = \@$prefetch_ordering;
}
-
- if (delete $attrs->{distinct}) {
- $attrs->{group_by} ||= [ grep { !ref($_) || (ref($_) ne 'HASH') } @{$attrs->{select}} ];
- }
-
# if both page and offset are specified, produce a combined offset
# even though it doesn't make much sense, this is what pre 081xx has
# been doing
# Create a query (view) based result source, in a result class
package MyDB::Schema::Result::Year2000CDs;
- use DBIx::Class::ResultSource::View;
-
__PACKAGE__->load_components('Core');
__PACKAGE__->table_class('DBIx::Class::ResultSource::View');
=item auto_nextval
-Set this to a true value for a column whose value is retrieved
-automatically from an oracle sequence. If you do not use an Oracle
-trigger to get the nextval, you have to set sequence as well.
+Set this to a true value for a column whose value is retrieved automatically
+from a sequence or function (if supported by your Storage driver). For a
+sequence, if you do not use a trigger to get the nextval, you have to set the
+L</sequence> value as well.
+
+Also set this for MSSQL columns with the 'uniqueidentifier'
+L<DBIx::Class::ResultSource/data_type> whose values you want to automatically
+generate using C<NEWID()>, unless they are a primary key in which case this will
+be done anyway.
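+
+For example, a minimal sketch (the column and sequence names are
+hypothetical):
+
+  __PACKAGE__->add_columns(
+    id => {
+      data_type    => 'integer',
+      auto_nextval => 1,
+      # only needed when no trigger supplies the nextval
+      sequence     => 'mytable_id_seq',
+    },
+  );
+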
=item extra
sub name_unique_constraint {
my ($self, $cols) = @_;
- return join '_', $self->name, @$cols;
+ my $name = $self->name;
+ $name = $$name if (ref $name eq 'SCALAR');
+
+ return join '_', $name, @$cols;
}
=head2 unique_constraints
package MyDB::Schema::Result::Year2000CDs;
- use DBIx::Class::ResultSource::View;
+ use base qw/DBIx::Class/;
__PACKAGE__->load_components('Core');
__PACKAGE__->table_class('DBIx::Class::ResultSource::View');
__PACKAGE__->result_source_instance->is_virtual(1);
__PACKAGE__->result_source_instance->view_definition(
"SELECT cdid, artist, title FROM cd WHERE year ='2000'"
- );
+ );
+ __PACKAGE__->add_columns(
+ 'cdid' => {
+ data_type => 'integer',
+ is_auto_increment => 1,
+ },
+ 'artist' => {
+ data_type => 'integer',
+ },
+ 'title' => {
+ data_type => 'varchar',
+ size => 100,
+ },
+ );
=head1 DESCRIPTION
View object that inherits from L<DBIx::Class::ResultSource>
-This class extends ResultSource to add basic view support.
+This class extends ResultSource to add basic view support.
-A view has a L</view_definition>, which contains an SQL query. The
-query cannot have parameters. It may contain JOINs, sub selects and
-any other SQL your database supports.
+A view has a L</view_definition>, which contains a SQL query. The query can
+only have parameters if L</is_virtual> is set to true. It may contain JOINs,
+sub selects and any other SQL your database supports.
View definition SQL is deployed to your database on
L<DBIx::Class::Schema/deploy> unless you set L</is_virtual> to true.
exist in your database as a real view. The L</view_definition> in this
case replaces the view name in a FROM clause in a subselect.
+=head1 EXAMPLES
+
+Having created the MyDB::Schema::Result::Year2000CDs class as shown in the
+SYNOPSIS above, you can then:
+
+  my $cds_2000 = $schema->resultset('Year2000CDs')
+                   ->search()
+                   ->all();
+  my $count    = $schema->resultset('Year2000CDs')
+                   ->search()
+                   ->count();
+
+If you modified the schema to include a placeholder
+
+ __PACKAGE__->result_source_instance->view_definition(
+ "SELECT cdid, artist, title FROM cd WHERE year ='?'"
+ );
+
+and ensured that is_virtual is set to true:
+
+ __PACKAGE__->result_source_instance->is_virtual(1);
+
+You could now say:
+
+  my $cds_2001 = $schema->resultset('Year2000CDs')
+                   ->search({}, { bind => [2001] })
+                   ->all();
+  my $count    = $schema->resultset('Year2000CDs')
+                   ->search({}, { bind => [2001] })
+                   ->count();
+
=head1 SQL EXAMPLES
=over
use base qw/DBIx::Class::ResultSourceProxy/;
use DBIx::Class::ResultSource::Table;
+use Scalar::Util ();
__PACKAGE__->mk_classdata(table_class => 'DBIx::Class::ResultSource::Table');
my $class_has_table_instance = ($table and $table->result_class eq $class);
return $table if $class_has_table_instance;
+ my $table_class = $class->table_class;
+ $class->ensure_class_loaded($table_class);
+
if( $table ) {
- $table = $class->table_class->new({
+ $table = $table_class->new({
%$table,
result_class => $class,
source_name => undef,
});
}
else {
- $table = $class->table_class->new({
+ $table = $table_class->new({
name => undef,
result_class => $class,
source_name => undef,
sub table {
my ($class, $table) = @_;
return $class->result_source_instance->name unless $table;
- unless (ref $table) {
- $table = $class->table_class->new({
+
+ unless (Scalar::Util::blessed($table) && $table->isa($class->table_class)) {
+
+ my $table_class = $class->table_class;
+ $class->ensure_class_loaded($table_class);
+
+ $table = $table_class->new({
$class->can('result_source_instance') ?
%{$class->result_source_instance||{}} : (),
name => $table,
my $resultset = $self->result_source->resultset;
if(defined $attrs) {
- $resultset = $resultset->search(undef, $attrs);
+ $resultset = $resultset->search(undef, $attrs);
}
return $resultset->find($self->{_orig_ident} || $self->ident_condition);
}
+=head2 discard_changes ($attrs)
+
+Re-selects the row from the database, losing any changes that had
+been made.
+
+This method can also be used to refresh from storage, retrieving any
+changes made since the row was last read from storage.
+
+$attrs is expected to be a hashref of attributes suitable for passing as the
+second argument to $resultset->search($cond, $attrs);
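+
+For example, a minimal sketch (under replicated storage C<force_pool>
+selects which pool services the read):
+
+  # explicitly re-read this row from the master
+  $row->discard_changes({ force_pool => 'master' });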
+
+=cut
+
+sub discard_changes {
+ my ($self, $attrs) = @_;
+ delete $self->{_dirty_columns};
+ return unless $self->in_storage; # Don't reload if we aren't real!
+
+ # add a replication default to read from the master only
+ $attrs = { force_pool => 'master', %{$attrs||{}} };
+
+ if( my $current_storage = $self->get_from_storage($attrs)) {
+
+ # Set $self to the current.
+ %$self = %$current_storage;
+
+ # Avoid a possible infinite loop with
+ # sub DESTROY { $_[0]->discard_changes }
+ bless $current_storage, 'Do::Not::Exist';
+
+ return $self;
+ }
+ else {
+ $self->in_storage(0);
+ return $self;
+ }
+}
+
+
=head2 throw_exception
See L<DBIx::Class::Schema/throw_exception>.
$quoted_alias = $self->_quote ('column_' . (@inner_select + 1) );
}
# column name seen more than once - alias it
- elsif ($orig_colname && ($seen_names{$orig_colname} > 1) ) {
+ elsif ($orig_colname &&
+ ($seen_names{$orig_colname} && $seen_names{$orig_colname} > 1) ) {
$quoted_alias = $self->_quote ("${table}__${orig_colname}");
}
$self->{"${_}_bind"} = [] for (qw/having from order/);
- if (ref $table eq 'SCALAR') {
- $table = $$table;
- }
- elsif (not ref $table) {
+ if (not ref($table) or ref($table) eq 'SCALAR') {
$table = $self->_quote($table);
}
+
local $self->{rownum_hack_count} = 1
if (defined $rest[0] && $self->{limit_dialect} eq 'RowNum');
@rest = (-1) unless defined $rest[0];
sub insert {
my $self = shift;
my $table = shift;
- $table = $self->_quote($table) unless ref($table);
+ $table = $self->_quote($table);
# SQLA will emit INSERT INTO $table ( ) VALUES ( )
# which is sadly understood only by MySQL. Change default behavior here,
sub update {
my $self = shift;
my $table = shift;
- $table = $self->_quote($table) unless ref($table);
+ $table = $self->_quote($table);
$self->SUPER::update($table, @_);
}
sub delete {
my $self = shift;
my $table = shift;
- $table = $self->_quote($table) unless ref($table);
+ $table = $self->_quote($table);
$self->SUPER::delete($table, @_);
}
sub _quote {
my ($self, $label) = @_;
return '' unless defined $label;
+ return $$label if ref($label) eq 'SCALAR';
return "*" if $label eq '*';
return $label unless $self->{quote_char};
if(ref $self->{quote_char} eq "ARRAY"){
my $self = shift;
my $table = $_[0];
- $table = $self->_quote($table) unless ref($table);
+ $table = $self->_quote($table);
if (! $_[1] or (ref $_[1] eq 'HASH' and !keys %{$_[1]} ) ) {
return "INSERT INTO ${table} () VALUES ()"
$dsn,
$user,
$password,
- { AutoCommit => 0 },
+ { AutoCommit => 1 },
);
my $schema2 = Library::Schema->connect($coderef_returning_dbh);
use List::Util();
__PACKAGE__->mk_group_accessors('simple' =>
- qw/_connect_info _dbi_connect_info _dbh _sql_maker _sql_maker_opts
- _conn_pid _conn_tid transaction_depth _dbh_autocommit savepoints/
+ qw/_connect_info _dbi_connect_info _dbh _sql_maker _sql_maker_opts _conn_pid
+ _conn_tid transaction_depth _dbh_autocommit _driver_determined savepoints/
);
# the values for these accessors are picked out (and deleted) from
}
}
- %attrs = () if (ref $args[0] eq 'CODE'); # _connect() never looks past $args[0] in this case
+ if (ref $args[0] eq 'CODE') {
+ # _connect() never looks past $args[0] in this case
+ %attrs = ()
+ } else {
+ %attrs = (
+ %{ $self->_default_dbi_connect_attributes || {} },
+ %attrs,
+ );
+ }
$self->_dbi_connect_info([@args, keys %attrs ? \%attrs : ()]);
$self->_connect_info;
}
+sub _default_dbi_connect_attributes {
+ return { AutoCommit => 1 };
+}
+
=head2 on_connect_do
This method is deprecated in favour of setting via L</connect_info>.
my $exception = $@;
if(!$exception) { return $want_array ? @result : $result[0] }
- if($tried++ > 0 || $self->connected) {
+ if($tried++ || $self->connected) {
eval { $self->txn_rollback };
my $rollback_exception = $@;
if($rollback_exception) {
sub disconnect {
my ($self) = @_;
- if( $self->connected ) {
+ if( $self->_dbh ) {
my @actions;
push @actions, ( $self->on_disconnect_call || () );
$sub->();
}
+=head2 connected
+
+=over
+
+=item Arguments: none
+
+=item Return Value: 1|0
+
+=back
+
+Verifies that the current database handle is active and ready to execute
+an SQL statement (i.e. the connection did not get stale, the server is
+still answering, etc.). This method is used internally by L</dbh>.
+
+=cut
+
sub connected {
my ($self) = @_;
=head2 dbh
-Returns the dbh - a data base handle of class L<DBI>.
+Returns a C<$dbh> - a database handle of class L<DBI>. The returned handle
+is guaranteed to be healthy by implicitly calling L</connected>, and if
+necessary performing a reconnection before returning.
=cut
sub dbh {
my ($self) = @_;
- $self->ensure_connected;
+ if (not $self->_dbh) {
+ $self->_populate_dbh;
+ } else {
+ $self->ensure_connected;
+ }
+ return $self->_dbh;
+}
+
+=head2 last_dbh
+
+This returns the B<last> available C<$dbh> if any, or attempts to
+connect and returns the resulting handle. This method differs from
+L</dbh> by not validating if a preexisting handle is still healthy
+via L</connected>. Make sure you take appropriate precautions
+when using this method, as the C<$dbh> may be useless at this point.
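+
+A minimal sketch contrasting the two accessors (assuming C<$storage> is a
+::Storage::DBI instance):
+
+  my $driver = $storage->last_dbh->{Driver}{Name};  # no connectivity check
+  $storage->dbh->do('SELECT 1');  # handle verified, reconnected if necessary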
+
+=cut
+
+sub last_dbh {
+ my $self = shift;
+ $self->_populate_dbh unless $self->_dbh;
return $self->_dbh;
}
sub _sql_maker_args {
my ($self) = @_;
- return ( bindtype=>'columns', array_datatypes => 1, limit_dialect => $self->dbh, %{$self->_sql_maker_opts} );
+ return (
+ bindtype=>'columns',
+ array_datatypes => 1,
+ limit_dialect => $self->last_dbh,
+ %{$self->_sql_maker_opts}
+ );
}
sub sql_maker {
sub _populate_dbh {
my ($self) = @_;
+
my @info = @{$self->_dbi_connect_info || []};
$self->_dbh($self->_connect(@info));
# there is no transaction in progress by definition
$self->{transaction_depth} = $self->_dbh_autocommit ? 0 : 1;
+ $self->_run_connection_actions unless $self->{_in_determine_driver};
+}
+
+sub _run_connection_actions {
+ my $self = shift;
my @actions;
push @actions, ( $self->on_connect_call || () );
sub _determine_driver {
my ($self) = @_;
- if (ref $self eq 'DBIx::Class::Storage::DBI') {
- my $driver;
+ if (not $self->_driver_determined) {
+ my $started_unconnected = 0;
+ local $self->{_in_determine_driver} = 1;
+
+ if (ref($self) eq __PACKAGE__) {
+ my $driver;
+ if ($self->_dbh) { # we are connected
+ $driver = $self->_dbh->{Driver}{Name};
+ } else {
+ # try to use dsn to not require being connected, the driver may still
+ # force a connection in _rebless to determine version
+ ($driver) = $self->_dbi_connect_info->[0] =~ /dbi:([^:]+):/i;
+ $started_unconnected = 1;
+ }
- if ($self->_dbh) { # we are connected
- $driver = $self->_dbh->{Driver}{Name};
- } else {
- # try to use dsn to not require being connected, the driver may still
- # force a connection in _rebless to determine version
- ($driver) = $self->_dbi_connect_info->[0] =~ /dbi:([^:]+):/i;
+ my $storage_class = "DBIx::Class::Storage::DBI::${driver}";
+ if ($self->load_optional_class($storage_class)) {
+ mro::set_mro($storage_class, 'c3');
+ bless $self, $storage_class;
+ $self->_rebless();
+ }
}
- my $storage_class = "DBIx::Class::Storage::DBI::${driver}";
- if ($self->load_optional_class($storage_class)) {
- mro::set_mro($storage_class, 'c3');
- bless $self, $storage_class;
- $self->_rebless();
- }
+ $self->_driver_determined(1);
+
+ $self->_run_connection_actions
+ if $started_unconnected && defined $self->_dbh;
}
}
sub txn_begin {
my $self = shift;
- $self->ensure_connected();
if($self->{transaction_depth} == 0) {
$self->debugobj->txn_begin()
if $self->debug;
- # this isn't ->_dbh-> because
- # we should reconnect on begin_work
- # for AutoCommit users
- $self->dbh->begin_work;
+
+ # being here implies we have AutoCommit => 1
+ # if the user is utilizing txn_do - good for
+ # him, otherwise we need to ensure that the
+ # $dbh is healthy on BEGIN
+ my $dbh_method = $self->{_in_dbh_do} ? '_dbh' : 'dbh';
+ $self->$dbh_method->begin_work;
+
} elsif ($self->auto_savepoint) {
$self->svp_begin;
}
sub insert {
my ($self, $source, $to_insert) = @_;
+# redispatch to insert method of storage we reblessed into, if necessary
+ if (not $self->_driver_determined) {
+ $self->_determine_driver;
+ goto $self->can('insert');
+ }
+
my $ident = $source->from;
my $bind_attributes = $self->source_bind_attributes($source);
my $updated_cols = {};
- $self->ensure_connected;
foreach my $col ( $source->columns ) {
if ( !defined $to_insert->{$col} ) {
my $col_info = $source->column_info($col);
if ( $col_info->{auto_nextval} ) {
- $updated_cols->{$col} = $to_insert->{$col} = $self->_sequence_fetch( 'nextval', $col_info->{sequence} || $self->_dbh_get_autoinc_seq($self->dbh, $source) );
+ $updated_cols->{$col} = $to_insert->{$col} = $self->_sequence_fetch(
+ 'nextval',
+ $col_info->{sequence} ||
+ $self->_dbh_get_autoinc_seq($self->last_dbh, $source)
+ );
}
}
}
@colvalues{@$cols} = (0..$#$cols);
my ($sql, @bind) = $self->sql_maker->insert($table, \%colvalues);
+ $self->_determine_driver;
+
$self->_query_start( $sql, @bind );
my $sth = $self->sth($sql);
sub update {
my $self = shift @_;
my $source = shift @_;
+ $self->_determine_driver;
my $bind_attributes = $self->source_bind_attributes($source);
return $self->_execute('update' => [], $source, $bind_attributes, @_);
sub delete {
my $self = shift @_;
my $source = shift @_;
-
+ $self->_determine_driver;
my $bind_attrs = $self->source_bind_attributes($source);
return $self->_execute('delete' => [], $source, $bind_attrs, @_);
( $attrs->{rows} && keys %{$attrs->{collapse}} )
||
( $attrs->{group_by} && @{$attrs->{group_by}} &&
- $attrs->{prefetch_select} && @{$attrs->{prefetch_select}} )
+ $attrs->{_prefetch_select} && @{$attrs->{_prefetch_select}} )
) {
($ident, $select, $where, $attrs)
= $self->_adjust_select_args_for_complex_prefetch ($ident, $select, $where, $attrs);
# separate attributes
my $sub_attrs = { %$attrs };
delete $attrs->{$_} for qw/where bind rows offset group_by having/;
- delete $sub_attrs->{$_} for qw/for collapse prefetch_select _collapse_order_by select as/;
+ delete $sub_attrs->{$_} for qw/for collapse _prefetch_select _collapse_order_by select as/;
- my $alias = $attrs->{alias};
+ my $select_root_alias = $attrs->{alias};
my $sql_maker = $self->sql_maker;
# create subquery select list - consider only stuff *not* brought in by the prefetch
my $sub_select = [];
- for my $i (0 .. @{$attrs->{select}} - @{$attrs->{prefetch_select}} - 1) {
+ my $sub_group_by;
+ for my $i (0 .. @{$attrs->{select}} - @{$attrs->{_prefetch_select}} - 1) {
my $sel = $attrs->{select}[$i];
# alias any functions to the dbic-side 'as' label
];
}
- # mangle {from}
+ # mangle {from}, keep in mind that $from is "headless" from here on
my $join_root = shift @$from;
- my @outer_from = @$from;
my %inner_joins;
my %join_info = map { $_->[0]{-alias} => $_->[0] } (@$from);
- # in complex search_related chains $alias may *not* be 'me'
- # so always include it in the inner join, and also shift away
- # from the outer stack, so that the two datasets actually do
- # meet
- if ($join_root->{-alias} ne $alias) {
- $inner_joins{$alias} = 1;
-
- while (@outer_from && $outer_from[0][0]{-alias} ne $alias) {
- shift @outer_from;
- }
- if (! @outer_from) {
- $self->throw_exception ("Unable to find '$alias' in the {from} stack, something is wrong");
- }
-
- shift @outer_from; # the new subquery will represent this alias, so get rid of it
- }
+ # in complex search_related chains $select_root_alias may *not* be
+ # 'me' so always include it in the inner join
+ $inner_joins{$select_root_alias} = 1 if ($join_root->{-alias} ne $select_root_alias);
# decide which parts of the join will remain on the inside
# if a multi-type join was needed in the subquery ("multi" is indicated by
# presence in {collapse}) - add a group_by to simulate the collapse in the subq
- for my $alias (keys %inner_joins) {
-
- # the dot comes from some weirdness in collapse
- # remove after the rewrite
- if ($attrs->{collapse}{".$alias"}) {
- $sub_attrs->{group_by} ||= $sub_select;
- last;
+ unless ($sub_attrs->{group_by}) {
+ for my $alias (keys %inner_joins) {
+
+ # the dot comes from some weirdness in collapse
+ # remove after the rewrite
+ if ($attrs->{collapse}{".$alias"}) {
+ $sub_attrs->{group_by} ||= $sub_select;
+ last;
+ }
}
}
$where,
$sub_attrs
);
-
- # put it in the new {from}
- unshift @outer_from, {
- -alias => $alias,
+ my $subq_joinspec = {
+ -alias => $select_root_alias,
-source_handle => $join_root->{-source_handle},
- $alias => $subq,
+ $select_root_alias => $subq,
};
+ # Generate a new from (really just replace the join slot with the subquery)
+ # Before we would start the outer chain from the subquery itself (i.e.
+ # SELECT ... FROM (SELECT ... ) alias JOIN ..., but this turned out to be
+ # a bad idea for search_related, as the root of the chain was effectively
+ # lost (i.e. $artist_rs->search_related ('cds'... ) would result in alias
+ # of 'cds', which would prevent us from doing things like order_by artist.*)
+ # See t/prefetch/via_search_related.t for a better idea
+ my @outer_from;
+ if ($join_root->{-alias} eq $select_root_alias) { # just swap the root part and we're done
+ @outer_from = (
+ $subq_joinspec,
+ @$from,
+ )
+ }
+ else { # this is trickier
+ @outer_from = ($join_root);
+
+ for my $j (@$from) {
+ if ($j->[0]{-alias} eq $select_root_alias) {
+ push @outer_from, [
+ $subq_joinspec,
+ @{$j}[1 .. $#$j],
+ ];
+ }
+ else {
+ push @outer_from, $j;
+ }
+ }
+ }
+
# This is totally horrific - the $where ends up in both the inner and outer query
# Unfortunately not much can be done until SQLA2 introspection arrives, and even
# then if where conditions apply to the *right* side of the prefetch, you may have
# also note: this adds -result_source => $rsrc to the column info
#
# usage:
-# my $col_sources = $self->_resolve_column_info($ident, [map $_->[0], @{$bind}]);
+# my $col_sources = $self->_resolve_column_info($ident, @column_names);
sub _resolve_column_info {
my ($self, $ident, $colnames) = @_;
my ($alias2src, $root_alias) = $self->_resolve_ident_sources($ident);
my $sep = $self->_sql_maker_opts->{name_sep} || '.';
$sep = "\Q$sep\E";
- my (%return, %converted);
+ my (%return, %seen_cols);
+
+ # compile a global list of column names, to be able to properly
+ # disambiguate unqualified column names (if at all possible)
+ for my $alias (keys %$alias2src) {
+ my $rsrc = $alias2src->{$alias};
+ for my $colname ($rsrc->columns) {
+ push @{$seen_cols{$colname}}, $alias;
+ }
+ }
+
+ COLUMN:
foreach my $col (@$colnames) {
my ($alias, $colname) = $col =~ m/^ (?: ([^$sep]+) $sep)? (.+) $/x;
- # deal with unqualified cols - we assume the main alias for all
- # unqualified ones, ugly but can't think of anything better right now
- $alias ||= $root_alias;
+ unless ($alias) {
+ # see if the column was seen exactly once (so we know which rsrc it came from)
+ if ($seen_cols{$colname} and @{$seen_cols{$colname}} == 1) {
+ $alias = $seen_cols{$colname}[0];
+ }
+ else {
+ next COLUMN;
+ }
+ }
my $rsrc = $alias2src->{$alias};
- $return{$col} = $rsrc && { %{$rsrc->column_info($colname)}, -result_source => $rsrc };
+ $return{$col} = $rsrc && {
+ %{$rsrc->column_info($colname)},
+ -result_source => $rsrc,
+ -source_alias => $alias,
+ };
}
+
return \%return;
}
=cut
-sub sqlt_type { shift->dbh->{Driver}->{Name} }
+sub sqlt_type { shift->last_dbh->{Driver}->{Name} }
=head2 bind_attribute_by_data_type
sub deployment_statements {
my ($self, $schema, $type, $version, $dir, $sqltargs) = @_;
# Need to be connected to get the correct sqlt_type
- $self->ensure_connected() unless $type;
+ $self->last_dbh() unless $type;
$type ||= $self->sqlt_type;
$version ||= $schema->schema_version || '1.x';
$dir ||= './';
return if $line =~ /^\s+$/; # skip whitespace only
$self->_query_start($line);
eval {
- $self->dbh->do($line); # shouldn't be using ->dbh ?
+ # a previous error may invalidate $dbh - thus we need to use dbh()
+ # to guarantee a healthy $dbh (this is temporary until we get
+ # proper error handling on deploy() )
+ $self->dbh->do($line);
};
if ($@) {
carp qq{$@ (running "${line}")};
sub datetime_parser {
my $self = shift;
return $self->{datetime_parser} ||= do {
- $self->ensure_connected;
+ $self->last_dbh;
$self->build_datetime_parser(@_);
};
}
DBIx::Class can do some wonderful magic with handling exceptions,
disconnections, and transactions when you use C<< AutoCommit => 1 >>
-combined with C<txn_do> for transaction support.
+(the default) combined with C<txn_do> for transaction support.
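+
+For example, a minimal sketch (the resultset and columns are hypothetical):
+
+  $schema->txn_do(sub {
+    # both creates commit or roll back together
+    $schema->resultset('CD')->create({ title => 'foo', artist => 1 });
+    $schema->resultset('CD')->create({ title => 'bar', artist => 1 });
+  });
+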
If you set C<< AutoCommit => 0 >> in your connect info, then you are always
in an assumed transaction between commits, and you're telling us you'd
be with raw DBI.
-
=head1 AUTHORS
Matt S. Trout <mst@shadowcatsystems.co.uk>
use base qw/DBIx::Class::Storage::DBI::AmbiguousGlob DBIx::Class::Storage::DBI/;
use mro 'c3';
+use List::Util();
+
+__PACKAGE__->mk_group_accessors(simple => qw/
+ _identity _identity_method
+/);
+
__PACKAGE__->sql_maker_class('DBIx::Class::SQLAHacks::MSSQL');
-sub _dbh_last_insert_id {
- my ($self, $dbh, $source, $col) = @_;
- my ($id) = $dbh->selectrow_array('SELECT SCOPE_IDENTITY()');
- return $id;
+sub insert_bulk {
+ my $self = shift;
+ my ($source, $cols, $data) = @_;
+
+ my $identity_insert = 0;
+
+ COLUMNS:
+ foreach my $col (@{$cols}) {
+ if ($source->column_info($col)->{is_auto_increment}) {
+ $identity_insert = 1;
+ last COLUMNS;
+ }
+ }
+
+ if ($identity_insert) {
+ my $table = $source->from;
+ $self->last_dbh->do("SET IDENTITY_INSERT $table ON");
+ }
+
+ $self->next::method(@_);
+
+ if ($identity_insert) {
+ my $table = $source->from;
+ $self->last_dbh->do("SET IDENTITY_INSERT $table OFF");
+ }
+}
+
+# support MSSQL GUID column types
+
+sub insert {
+ my $self = shift;
+ my ($source, $to_insert) = @_;
+
+ my $updated_cols = {};
+
+ my %guid_cols;
+ my @pk_cols = $source->primary_columns;
+ my %pk_cols;
+ @pk_cols{@pk_cols} = ();
+
+ my @pk_guids = grep {
+ $source->column_info($_)->{data_type} =~ /^uniqueidentifier/i
+ } @pk_cols;
+
+ my @auto_guids = grep {
+ $source->column_info($_)->{data_type} =~ /^uniqueidentifier/i
+ &&
+ $source->column_info($_)->{auto_nextval}
+ } grep { not exists $pk_cols{$_} } $source->columns;
+
+ my @get_guids_for =
+ grep { not exists $to_insert->{$_} } (@pk_guids, @auto_guids);
+
+ for my $guid_col (@get_guids_for) {
+ my ($new_guid) = $self->last_dbh->selectrow_array('SELECT NEWID()');
+ $updated_cols->{$guid_col} = $to_insert->{$guid_col} = $new_guid;
+ }
+
+ $updated_cols = { %$updated_cols, %{ $self->next::method(@_) } };
+
+ return $updated_cols;
+}
+
+sub _prep_for_execute {
+ my $self = shift;
+ my ($op, $extra_bind, $ident, $args) = @_;
+
+# cast MONEY values properly
+ if ($op eq 'insert' || $op eq 'update') {
+ my $fields = $args->[0];
+
+ for my $col (keys %$fields) {
+ # $ident is a result source object with INSERT/UPDATE ops
+ if ($ident->column_info ($col)->{data_type} =~ /^money\z/i) {
+ my $val = $fields->{$col};
+ $fields->{$col} = \['CAST(? AS MONEY)', [ $col => $val ]];
+ }
+ }
+ }
+
+ my ($sql, $bind) = $self->next::method (@_);
+
+ if ($op eq 'insert') {
+ $sql .= ';SELECT SCOPE_IDENTITY()';
+
+ my $col_info = $self->_resolve_column_info($ident, [map $_->[0], @{$bind}]);
+ if (List::Util::first { $_->{is_auto_increment} } (values %$col_info) ) {
+
+ my $table = $ident->from;
+ my $identity_insert_on = "SET IDENTITY_INSERT $table ON";
+ my $identity_insert_off = "SET IDENTITY_INSERT $table OFF";
+ $sql = "$identity_insert_on; $sql; $identity_insert_off";
+ }
+ }
+
+ return ($sql, $bind);
+}
+
+sub _execute {
+ my $self = shift;
+ my ($op) = @_;
+
+ my ($rv, $sth, @bind) = $self->dbh_do($self->can('_dbh_execute'), @_);
+
+ if ($op eq 'insert') {
+
+ # this should bring back the result of SELECT SCOPE_IDENTITY() we tacked
+ # on in _prep_for_execute above
+ my ($identity) = $sth->fetchrow_array;
+
+ # SCOPE_IDENTITY failed, but we can do something else
+ if ( (! $identity) && $self->_identity_method) {
+ ($identity) = $self->_dbh->selectrow_array(
+ 'select ' . $self->_identity_method
+ );
+ }
+
+ $self->_identity($identity);
+ $sth->finish;
+ }
+
+ return wantarray ? ($rv, $sth, @bind) : $rv;
+}
+
+sub last_insert_id { shift->_identity }
+
+# savepoint syntax is the same as in Sybase ASE
+
+sub _svp_begin {
+ my ($self, $name) = @_;
+
+ $self->last_dbh->do("SAVE TRANSACTION $name");
+}
+
+# A new SAVE TRANSACTION with the same name releases the previous one.
+sub _svp_release { 1 }
+
+sub _svp_rollback {
+ my ($self, $name) = @_;
+
+ $self->last_dbh->do("ROLLBACK TRANSACTION $name");
}
sub build_datetime_parser {
sub sqlt_type { 'SQLServer' }
sub _sql_maker_opts {
- my ( $self, $opts ) = @_;
+ my ( $self, $opts ) = @_;
- if ( $opts ) {
- $self->{_sql_maker_opts} = { %$opts };
- }
+ if ( $opts ) {
+ $self->{_sql_maker_opts} = { %$opts };
+ }
- return { limit_dialect => 'Top', %{$self->{_sql_maker_opts}||{}} };
+ return { limit_dialect => 'Top', %{$self->{_sql_maker_opts}||{}} };
}
1;
=head1 NAME
-DBIx::Class::Storage::DBI::MSSQL - Storage::DBI subclass for MSSQL
+DBIx::Class::Storage::DBI::MSSQL - Base Class for Microsoft SQL Server support
+in DBIx::Class
=head1 SYNOPSIS
-This subclass supports MSSQL, and can in theory be used directly
-via the C<storage_type> mechanism:
+This is the base class for Microsoft SQL Server support, used by
+L<DBIx::Class::Storage::DBI::ODBC::Microsoft_SQL_Server> and
+L<DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server>.
- $schema->storage_type('::DBI::MSSQL');
- $schema->connect_info('dbi:....', ...);
+=head1 IMPLEMENTATION NOTES
-However, as there is no L<DBD::MSSQL>, you will probably want to use
-one of the other DBD-specific MSSQL classes, such as
-L<DBIx::Class::Storage::DBI::Sybase::MSSQL>. These classes will
-merge this class with a DBD-specific class to obtain fully
-correct behavior for your scenario.
+Microsoft SQL Server supports three methods of retrieving the IDENTITY
+value for an inserted row: IDENT_CURRENT, @@IDENTITY, and SCOPE_IDENTITY().
+SCOPE_IDENTITY is used here because it is the safest. However, it must
+be called in the same execute statement, not just the same connection.
-=head1 METHODS
+So, this implementation appends a SELECT SCOPE_IDENTITY() statement
+onto each INSERT to accommodate that requirement.
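+
+For example, the SQL sent for an insert ends up shaped like this (a sketch;
+table and column are hypothetical):
+
+  INSERT INTO artist (name) VALUES (?); SELECT SCOPE_IDENTITY()
+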
-=head2 last_insert_id
+C<SELECT @@IDENTITY> can also be used by issuing:
-=head2 sqlt_type
+ $self->_identity_method('@@identity');
-=head2 build_datetime_parser
+it will only be used if SCOPE_IDENTITY() fails.
-The resulting parser handles the MSSQL C<DATETIME> type, but is almost
-certainly not sufficient for the other MSSQL 2008 date/time types.
+This is more dangerous, as inserting into a table with an on insert trigger
+that inserts into another table with an IDENTITY column will give erroneous
+results on recent versions of SQL Server.
-=head1 AUTHORS
+=head1 AUTHOR
-Brian Cassidy <bricas@cpan.org>
+See L<DBIx::Class/CONTRIBUTORS>.
=head1 LICENSE
sub _rebless {
my ($self) = @_;
- my $dbtype = eval { $self->dbh->get_info(17) };
+ my $dbtype = eval { $self->last_dbh->get_info(17) };
+
unless ( $@ ) {
# Translate the backend name into a perl identifier
$dbtype =~ s/\W/_/gi;
use base qw/DBIx::Class::Storage::DBI::MSSQL/;
use mro 'c3';
+use Carp::Clan qw/^DBIx::Class/;
use List::Util();
+use Scalar::Util ();
-sub insert_bulk {
- my $self = shift;
- my ($source, $cols, $data) = @_;
+__PACKAGE__->mk_group_accessors(simple => qw/
+ _using_dynamic_cursors
+/);
- my $identity_insert = 0;
+=head1 NAME
- COLUMNS:
- foreach my $col (@{$cols}) {
- if ($source->column_info($col)->{is_auto_increment}) {
- $identity_insert = 1;
- last COLUMNS;
- }
- }
+DBIx::Class::Storage::DBI::ODBC::Microsoft_SQL_Server - Support specific
+to Microsoft SQL Server over ODBC
+
+=head1 DESCRIPTION
+
+This class implements support specific to Microsoft SQL Server over ODBC. It is
+loaded automatically by DBIx::Class::Storage::DBI::ODBC when it detects a
+MSSQL back-end.
+
+Most of the functionality is provided from the superclass
+L<DBIx::Class::Storage::DBI::MSSQL>.
+
+=head1 MULTIPLE ACTIVE STATEMENTS
+
+The following options are alternative ways to enable concurrent executing
+statement support. Each has its own advantages and drawbacks.
+
+=head2 connect_call_use_dynamic_cursors
+
+Use as:
+
+ on_connect_call => 'use_dynamic_cursors'
+
+in your L<DBIx::Class::Storage::DBI/connect_info> as one way to enable multiple
+concurrent statements.
+
+Will add C<< odbc_cursortype => 2 >> to your DBI connection attributes. See
+L<DBD::ODBC/odbc_cursortype> for more information.
+
+Alternatively, you can add it yourself and dynamic cursor support will be
+automatically enabled.
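+
+For example, a minimal sketch (the DSN and credentials are placeholders):
+
+  My::Schema->connect(
+    'dbi:ODBC:server=...;database=...', $user, $pass,
+    { odbc_cursortype => 2 },
+  );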
+
+If you're using FreeTDS, C<tds_version> must be set to at least C<8.0>.
+
+This will not work with CODE ref connect_info's.
+
+B<WARNING:> this will break C<SCOPE_IDENTITY()>, and C<SELECT @@IDENTITY> will
+be used instead, which on SQL Server 2005 and later will return erroneous
+results on tables which have an on insert trigger that inserts into another
+table with an C<IDENTITY> column.
+
+=cut
+
+sub connect_call_use_dynamic_cursors {
+ my $self = shift;
- if ($identity_insert) {
- my $table = $source->from;
- $self->dbh->do("SET IDENTITY_INSERT $table ON");
+ if (ref($self->_dbi_connect_info->[0]) eq 'CODE') {
+ croak 'cannot set DBI attributes on a CODE ref connect_info';
}
- $self->next::method(@_);
+ my $dbi_attrs = $self->_dbi_connect_info->[-1];
- if ($identity_insert) {
- my $table = $source->from;
- $self->dbh->do("SET IDENTITY_INSERT $table OFF");
+ unless (ref($dbi_attrs) && Scalar::Util::reftype($dbi_attrs) eq 'HASH') {
+ $dbi_attrs = {};
+ push @{ $self->_dbi_connect_info }, $dbi_attrs;
+ }
+
+ if (not exists $dbi_attrs->{odbc_cursortype}) {
+ # turn on support for multiple concurrent statements, unless overridden
+ $dbi_attrs->{odbc_cursortype} = 2;
+ my $connected = defined $self->_dbh;
+ $self->disconnect;
+ $self->ensure_connected if $connected;
+ $self->_set_dynamic_cursors;
}
}
-sub _prep_for_execute {
+sub _set_dynamic_cursors {
my $self = shift;
- my ($op, $extra_bind, $ident, $args) = @_;
-
-# cast MONEY values properly
- if ($op eq 'insert' || $op eq 'update') {
- my $fields = $args->[0];
- my $col_info = $self->_resolve_column_info($ident, [keys %$fields]);
-
- for my $col (keys %$fields) {
- if ($col_info->{$col}{data_type} =~ /^money\z/i) {
- my $val = $fields->{$col};
- $fields->{$col} = \['CAST(? AS MONEY)', [ $col => $val ]];
- }
- }
+ my $dbh = $self->_dbh;
+
+ eval {
+ local $dbh->{RaiseError} = 1;
+ local $dbh->{PrintError} = 0;
+ $dbh->do('SELECT @@IDENTITY');
+ };
+ if ($@) {
+ croak <<'EOF';
+
+Your drivers do not seem to support dynamic cursors (odbc_cursortype => 2).
+If you're using FreeTDS, make sure to set tds_version to 8.0 or greater.
+EOF
}
- my ($sql, $bind) = $self->next::method (@_);
-
- if ($op eq 'insert') {
- $sql .= ';SELECT SCOPE_IDENTITY()';
+ $self->_using_dynamic_cursors(1);
+ $self->_identity_method('@@identity');
+}
- my $col_info = $self->_resolve_column_info($ident, [map $_->[0], @{$bind}]);
- if (List::Util::first { $_->{is_auto_increment} } (values %$col_info) ) {
+sub _rebless {
+ no warnings 'uninitialized';
+ my $self = shift;
- my $table = $ident->from;
- my $identity_insert_on = "SET IDENTITY_INSERT $table ON";
- my $identity_insert_off = "SET IDENTITY_INSERT $table OFF";
- $sql = "$identity_insert_on; $sql; $identity_insert_off";
- }
+ if (ref($self->_dbi_connect_info->[0]) ne 'CODE' &&
+ eval { $self->_dbi_connect_info->[-1]{odbc_cursortype} } == 2) {
+ $self->_set_dynamic_cursors;
+ return;
}
- return ($sql, $bind);
+ $self->_using_dynamic_cursors(0);
}
-sub _execute {
- my $self = shift;
- my ($op) = @_;
+=head2 connect_call_use_server_cursors
+
+Use as:
+
+ on_connect_call => 'use_server_cursors'
+
+May allow multiple active select statements. See
+L<DBD::ODBC/odbc_SQL_ROWSET_SIZE> for more information.
+
+Takes an optional parameter for the value to set the attribute to, default is
+C<2>.
+
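+One way to pass the optional value (C<3> below is purely illustrative) is the
+arrayref form of C<on_connect_call>:
+
+  on_connect_call => [ [ 'use_server_cursors', 3 ] ]
+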
+B<WARNING>: this does not work on all versions of SQL Server, and may lock up
+your database!
+
+=cut
- my ($rv, $sth, @bind) = $self->dbh_do($self->can('_dbh_execute'), @_);
- if ($op eq 'insert') {
- $self->{_scope_identity} = $sth->fetchrow_array;
- $sth->finish;
- }
+sub connect_call_use_server_cursors {
+ my $self = shift;
+ my $sql_rowset_size = shift || 2;
- return wantarray ? ($rv, $sth, @bind) : $rv;
+ $self->_dbh->{odbc_SQL_ROWSET_SIZE} = $sql_rowset_size;
}
-sub last_insert_id { shift->{_scope_identity} }
+=head2 connect_call_use_MARS
-1;
+Use as:
-__END__
+ on_connect_call => 'use_MARS'
-=head1 NAME
+Use to enable a feature of SQL Server 2005 and later, "Multiple Active Result
+Sets". See L<DBD::ODBC::FAQ/Does DBD::ODBC support Multiple Active Statements?>
+for more information.
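+
+The same effect can be achieved by putting C<MARS_Connection=Yes> directly in
+your DSN (hypothetical DSN shown):
+
+  dbi:ODBC:server=myhost;database=mydb;MARS_Connection=Yes
+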
-DBIx::Class::Storage::DBI::ODBC::Microsoft_SQL_Server - Support specific
-to Microsoft SQL Server over ODBC
+B<WARNING>: This has implications for the way transactions are handled.
-=head1 DESCRIPTION
+=cut
-This class implements support specific to Microsoft SQL Server over ODBC,
-including auto-increment primary keys and SQL::Abstract::Limit dialect. It
-is loaded automatically by by DBIx::Class::Storage::DBI::ODBC when it
-detects a MSSQL back-end.
+sub connect_call_use_MARS {
+ my $self = shift;
+
+ my $dsn = $self->_dbi_connect_info->[0];
-=head1 IMPLEMENTATION NOTES
+ if (ref($dsn) eq 'CODE') {
+ croak 'cannot change the DBI DSN on a CODE ref connect_info';
+ }
-Microsoft SQL Server supports three methods of retrieving the IDENTITY
-value for inserted row: IDENT_CURRENT, @@IDENTITY, and SCOPE_IDENTITY().
-SCOPE_IDENTITY is used here because it is the safest. However, it must
-be called is the same execute statement, not just the same connection.
+ if ($dsn !~ /MARS_Connection=/) {
+ $self->_dbi_connect_info->[0] = "$dsn;MARS_Connection=Yes";
+ my $connected = defined $self->_dbh;
+ $self->disconnect;
+ $self->ensure_connected if $connected;
+ }
+}
-So, this implementation appends a SELECT SCOPE_IDENTITY() statement
-onto each INSERT to accommodate that requirement.
+1;
-=head1 AUTHORS
+=head1 AUTHOR
-Marc Mims C<< <marc@questright.com> >>
+See L<DBIx::Class/CONTRIBUTORS>.
=head1 LICENSE
sub _rebless {
my ($self) = @_;
- my $version = eval { $self->dbh->get_info(18); };
+  my $version = eval { $self->last_dbh->get_info(18); }; # 18 == SQL_DBMS_VER
if ( !$@ ) {
my ($major, $minor, $patchlevel) = split(/\./, $version);
sub _sequence_fetch {
my ( $self, $type, $seq ) = @_;
- my ($id) = $self->dbh->selectrow_array("SELECT ${seq}.${type} FROM DUAL");
+ my ($id) = $self->last_dbh->selectrow_array("SELECT ${seq}.${type} FROM DUAL");
return $id;
}
sub _svp_begin {
my ($self, $name) = @_;
- $self->dbh->do("SAVEPOINT $name");
+ $self->last_dbh->do("SAVEPOINT $name");
}
=head2 source_bind_attributes
sub _svp_rollback {
my ($self, $name) = @_;
- $self->dbh->do("ROLLBACK TO SAVEPOINT $name")
+ $self->last_dbh->do("ROLLBACK TO SAVEPOINT $name")
}
=head1 AUTHOR
sub with_deferred_fk_checks {
my ($self, $sub) = @_;
- $self->dbh->do('SET CONSTRAINTS ALL DEFERRED');
+ $self->last_dbh->do('SET CONSTRAINTS ALL DEFERRED');
$sub->();
}
my ($self,$source,$col) = @_;
my @pri = $source->primary_columns;
- my ($schema,$table) = $source->name =~ /^(.+)\.(.+)$/ ? ($1,$2)
- : (undef,$source->name);
+
+ my $schema;
+ my $table = $source->name;
+
+ if (ref $table eq 'SCALAR') {
+ $table = $$table;
+ }
+ elsif ($table =~ /^(.+)\.(.+)$/) {
+ ($schema, $table) = ($1, $2);
+ }
$self->dbh_do('_dbh_get_autoinc_seq', $schema, $table, @pri);
}
sub _sequence_fetch {
my ( $self, $type, $seq ) = @_;
- my ($id) = $self->dbh->selectrow_array("SELECT nextval('${seq}')");
+ my ($id) = $self->last_dbh->selectrow_array("SELECT nextval('${seq}')");
return $id;
}
sub _svp_begin {
my ($self, $name) = @_;
- $self->dbh->pg_savepoint($name);
+ $self->last_dbh->pg_savepoint($name);
}
sub _svp_release {
my ($self, $name) = @_;
- $self->dbh->pg_release($name);
+ $self->last_dbh->pg_release($name);
}
sub _svp_rollback {
my ($self, $name) = @_;
- $self->dbh->pg_rollback_to($name);
+ $self->last_dbh->pg_rollback_to($name);
}
1;
sub _rebless {
my $self = shift;
- my $dbtype = eval { @{$self->dbh->selectrow_arrayref(qq{sp_server_info \@attribute_id=1})}[2] };
+ my $dbtype = eval {
+ @{$self->last_dbh
+ ->selectrow_arrayref(qq{sp_server_info \@attribute_id=1})
+ }[2]
+ };
unless ( $@ ) {
$dbtype =~ s/\W/_/gi;
my $subclass = "DBIx::Class::Storage::DBI::Sybase::${dbtype}";
return $@ ? 0 : 1;
}
+sub _placeholders_supported {
+ my $self = shift;
+ my $dbh = $self->last_dbh;
+
+ return eval {
+# There's also $dbh->{syb_dynamic_supported} but it can be inaccurate for this
+# purpose.
+ local $dbh->{PrintError} = 0;
+ local $dbh->{RaiseError} = 1;
+# this specifically tests a bind that is NOT a string
+ $dbh->selectrow_array('select 1 where 1 = ?', {}, 1);
+ };
+}
+
1;
=head1 AUTHORS
use base qw/
DBIx::Class::Storage::DBI::Sybase::Base
- DBIx::Class::Storage::DBI::ODBC::Microsoft_SQL_Server
+ DBIx::Class::Storage::DBI::MSSQL
/;
use mro 'c3';
+sub _rebless {
+ my $self = shift;
+ my $dbh = $self->last_dbh;
+
+ if (not $self->_placeholders_supported) {
+ bless $self,
+ 'DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server::NoBindVars';
+ $self->_rebless;
+ }
+
+# LongReadLen doesn't work with MSSQL through DBD::Sybase, and the default is
+# huge on some versions of SQL Server and can cause memory problems, so we
+# fix it up here.
+ my $text_size = eval { $self->_dbi_connect_info->[-1]->{LongReadLen} } ||
+ 32768; # the DBD::Sybase default
+
+ $dbh->do("set textsize $text_size");
+}
+
1;
=head1 NAME
-DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server - Storage::DBI subclass for MSSQL via
-DBD::Sybase
+DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server - Support for Microsoft
+SQL Server via DBD::Sybase
=head1 SYNOPSIS
This subclass supports MSSQL server connections via L<DBD::Sybase>.
-=head1 CAVEATS
-
-This storage driver uses L<DBIx::Class::Storage::DBI::NoBindVars> as a base.
-This means that bind variables will be interpolated (properly quoted of course)
-into the SQL query itself, without using bind placeholders.
+=head1 DESCRIPTION
-More importantly this means that caching of prepared statements is explicitly
-disabled, as the interpolation renders it useless.
+This driver tries to determine whether your version of L<DBD::Sybase> and
+supporting libraries (usually FreeTDS) support using placeholders; if not, the
+storage will be reblessed to
+L<DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server::NoBindVars>.
-=head1 AUTHORS
+The MSSQL specific functionality is provided by
+L<DBIx::Class::Storage::DBI::MSSQL>.
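+
+If you need to bypass the autodetection, you can (as the test suite does)
+force the non-placeholder variant explicitly before connecting:
+
+  $schema->storage_type(
+    '::DBI::Sybase::Microsoft_SQL_Server::NoBindVars'
+  );
+  $schema->connection($dsn, $user, $pass);
+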
-Brandon L Black <blblack@gmail.com>
+=head1 AUTHOR
-Justin Hunter <justin.d.hunter@gmail.com>
+See L<DBIx::Class/CONTRIBUTORS>.
=head1 LICENSE
--- /dev/null
+package DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server::NoBindVars;
+
+use strict;
+use warnings;
+
+use base qw/
+ DBIx::Class::Storage::DBI::NoBindVars
+ DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server
+/;
+use mro 'c3';
+
+sub _rebless {
+ my $self = shift;
+
+ $self->disable_sth_caching(1);
+}
+
+1;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server::NoBindVars - Support for Microsoft
+SQL Server via DBD::Sybase without placeholders
+
+=head1 SYNOPSIS
+
+This subclass supports MSSQL server connections via DBD::Sybase when ? style
+placeholders are not available.
+
+=head1 DESCRIPTION
+
+If you are using this driver then your combination of L<DBD::Sybase> and
+libraries (most likely FreeTDS) does not support ? style placeholders.
+
+This storage driver uses L<DBIx::Class::Storage::DBI::NoBindVars> as a base.
+This means that bind variables will be interpolated (properly quoted of course)
+into the SQL query itself, without using bind placeholders.
+
+More importantly this means that caching of prepared statements is explicitly
+disabled, as the interpolation renders it useless.
+
+In all other respects, it is a subclass of
+L<DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server>.
+
+=head1 AUTHOR
+
+See L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
sub _svp_begin {
my ($self, $name) = @_;
- $self->dbh->do("SAVEPOINT $name");
+ $self->last_dbh->do("SAVEPOINT $name");
}
sub _svp_release {
my ($self, $name) = @_;
- $self->dbh->do("RELEASE SAVEPOINT $name");
+ $self->last_dbh->do("RELEASE SAVEPOINT $name");
}
sub _svp_rollback {
my ($self, $name) = @_;
- $self->dbh->do("ROLLBACK TO SAVEPOINT $name")
+ $self->last_dbh->do("ROLLBACK TO SAVEPOINT $name")
}
sub is_replicating {
- my $status = shift->dbh->selectrow_hashref('show slave status');
+ my $status = shift->last_dbh->selectrow_hashref('show slave status');
return ($status->{Slave_IO_Running} eq 'Yes') && ($status->{Slave_SQL_Running} eq 'Yes');
}
sub lag_behind_master {
- return shift->dbh->selectrow_hashref('show slave status')->{Seconds_Behind_Master};
+ return shift->last_dbh->selectrow_hashref('show slave status')->{Seconds_Behind_Master};
}
# MySql can not do subquery update/deletes, only way is slow per-row operations.
Storage::DBI autodetects the underlying MySQL database, and re-blesses the
C<$storage> object into this class.
- my $schema = MyDb::Schema->connect( $dsn, $user, $pass, { set_strict_mode => 1 } );
+ my $schema = MyDb::Schema->connect( $dsn, $user, $pass, { on_connect_call => 'set_strict_mode' } );
=head1 DESCRIPTION
my $source = $dbicschema->source($moniker);
my $table_name = $source->name;
- # Skip custom query sources
- next if ref $table_name;
+ # FIXME - this isn't the right way to do it, but sqlt does not
+  # support quoting properly, so there is no way to signal it about this
+ $table_name = $$table_name if ref $table_name eq 'SCALAR';
# Its possible to have multiple DBIC sources using the same table
next if $tables{$table_name};
my $othertable = $source->related_source($rel);
my $rel_table = $othertable->name;
+ # FIXME - this isn't the right way to do it, but sqlt does not
+  # support quoting properly, so there is no way to signal it about this
+ $rel_table = $$rel_table if ref $rel_table eq 'SCALAR';
+
my $reverse_rels = $source->reverse_relationship_info($rel);
my ($otherrelname, $otherrelationship) = each %{$reverse_rels};
) {
$schema->add_table ($tables{$table}{object});
$tables{$table}{source} -> _invoke_sqlt_deploy_hook( $tables{$table}{object} );
- }
+ # the hook might have already removed the table
+ if ($schema->get_table($table) && $table =~ /^ \s* \( \s* SELECT \s+/ix) {
+ warn <<'EOW';
+
+Custom SQL through ->name(\'( SELECT ...') is DEPRECATED, for more details see
+"Arbitrary SQL through a custom ResultSource" in DBIx::Class::Manual::Cookbook
+or http://search.cpan.org/dist/DBIx-Class/lib/DBIx/Class/Manual/Cookbook.pod
+
+EOW
+
+      # remove the table, as there is no way anyone would want to
+ # actually deploy this
+ $schema->drop_table ($table);
+ }
+ }
my %views;
foreach my $moniker (sort @view_monikers)
my $source = $dbicschema->source($moniker);
my $view_name = $source->name;
+ # FIXME - this isn't the right way to do it, but sqlt does not
+  # support quoting properly, so there is no way to signal it about this
+ $view_name = $$view_name if ref $view_name eq 'SCALAR';
+
# Skip custom query sources
next if ref $view_name;
eval { $rs->count };
is_same_sql_bind(
$sql, \@bind,
- "SELECT COUNT( * ) FROM `cd` `me` JOIN `artist` `artist` ON ( `artist`.`artistid` = `me`.`artist` ) WHERE ( `artist`.`name` = ? AND `me`.`year` = ? )", ["'Caterwauler McCrae'", "'2001'"],
+ "SELECT COUNT( * ) FROM cd `me` JOIN `artist` `artist` ON ( `artist`.`artistid` = `me`.`artist` ) WHERE ( `artist`.`name` = ? AND `me`.`year` = ? )", ["'Caterwauler McCrae'", "'2001'"],
'got correct SQL for count query with quoting'
);
eval { $rs->count };
is_same_sql_bind(
$sql, \@bind,
- "SELECT COUNT( * ) FROM [cd] [me] JOIN [artist] [artist] ON ( [artist].[artistid] = [me].[artist] ) WHERE ( [artist].[name] = ? AND [me].[year] = ? )", ["'Caterwauler McCrae'", "'2001'"],
+ "SELECT COUNT( * ) FROM cd [me] JOIN [artist] [artist] ON ( [artist].[artistid] = [me].[artist] ) WHERE ( [artist].[name] = ? AND [me].[year] = ? )", ["'Caterwauler McCrae'", "'2001'"],
'got correct SQL for count query with bracket quoting'
);
eval { $rs->count };
is_same_sql_bind(
$sql, \@bind,
- "SELECT COUNT( * ) FROM `cd` `me` JOIN `artist` `artist` ON ( `artist`.`artistid` = `me`.`artist` ) WHERE ( `artist`.`name` = ? AND `me`.`year` = ? )", ["'Caterwauler McCrae'", "'2001'"],
+ "SELECT COUNT( * ) FROM cd `me` JOIN `artist` `artist` ON ( `artist`.`artistid` = `me`.`artist` ) WHERE ( `artist`.`name` = ? AND `me`.`year` = ? )", ["'Caterwauler McCrae'", "'2001'"],
'got correct SQL for count query with quoting'
);
eval { $rs->count };
is_same_sql_bind(
$sql, \@bind,
- "SELECT COUNT( * ) FROM [cd] [me] JOIN [artist] [artist] ON ( [artist].[artistid] = [me].[artist] ) WHERE ( [artist].[name] = ? AND [me].[year] = ? )", ["'Caterwauler McCrae'", "'2001'"],
+ "SELECT COUNT( * ) FROM cd [me] JOIN [artist] [artist] ON ( [artist].[artistid] = [me].[artist] ) WHERE ( [artist].[name] = ? AND [me].[year] = ? )", ["'Caterwauler McCrae'", "'2001'"],
'got correct SQL for count query with bracket quoting'
);
my $schema = DBICTest->init_schema();
-plan tests => 103;
-
eval { require DateTime::Format::SQLite };
my $NO_DTFM = $@ ? 1 : 0;
ok (! DBIx::Class::ResultSource->can ($_), "$_ no longer provided by DBIx::Class::ResultSource");
}
}
+
+#------------------------------
+# READ THIS BEFORE "FIXING"
+#------------------------------
+#
+# make sure we got rid of the discard_changes mess - it is a source
+# of great confusion. Here I simply die if the methods are available, which
+# is wrong on its own (we *have* to provide some sort of back-compat, even
+# if with warnings). Here is how I envision things should actually be. Also
+# note that a lot of the deprecation can be started today (i.e. the switch
+# from get_from_storage to copy_from_storage). So:
+#
+# $row->discard_changes =>
+# warning, and delegation to reload_from_storage
+#
+# $row->reload_from_storage =>
+# does what discard changes did in 0.08 - issues a query to the db
+# and repopulates all column slots, regardless of dirty states etc.
+#
+# $row->revert_changes =>
+# does what discard_changes should have done initially (before it became
+# a dual-purpose call). In order to make this work we will have to
+# augment $row to carry its own initial-state, much like svn has a
+# copy of the current checkout in contrast to cvs.
+#
+# my $db_row = $row->get_from_storage =>
+# warns and delegates to an improved name copy_from_storage, with the
+# same semantics
+#
+# my $db_row = $row->copy_from_storage =>
+# a much better/descriptive name than get_from_storage
+#
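+# A minimal sketch of the envisioned shim (the names are the proposals
+# above - none of this exists yet):
+#
+#   sub discard_changes {
+#     my $self = shift;
+#     carp 'discard_changes is deprecated - use reload_from_storage';
+#     return $self->reload_from_storage(@_);
+#   }
+#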
+#------------------------------
+# READ THIS BEFORE "FIXING"
+#------------------------------
+#
+SKIP: {
+ skip "Something needs to be done before 0.09", 2 if $DBIx::Class::VERSION < 0.09;
+
+ my $row = $schema->resultset ('Artist')->next;
+
+ for (qw/discard_changes get_from_storage/) {
+ ok (! $row->can ($_), "$_ needs *some* sort of facelift before 0.09 ships - current state of affairs is unacceptable");
+ }
+}
+
+done_testing;
$dbh->do("CREATE TABLE artist (artistid NUMBER(12), name VARCHAR(255), rank NUMBER(38), charfield VARCHAR2(10))");
$dbh->do("CREATE TABLE sequence_test (pkid1 NUMBER(12), pkid2 NUMBER(12), nonpkid NUMBER(12), name VARCHAR(255))");
$dbh->do("CREATE TABLE cd (cdid NUMBER(12), artist NUMBER(12), title VARCHAR(255), year VARCHAR(4))");
-$dbh->do("CREATE TABLE track (trackid NUMBER(12), cd NUMBER(12), position NUMBER(12), title VARCHAR(255), last_updated_on DATE, last_updated_at DATE)");
+$dbh->do("CREATE TABLE track (trackid NUMBER(12), cd NUMBER(12), position NUMBER(12), title VARCHAR(255), last_updated_on DATE, last_updated_at DATE, small_dt DATE)");
$dbh->do("ALTER TABLE artist ADD (CONSTRAINT artist_pk PRIMARY KEY (artistid))");
$dbh->do("ALTER TABLE sequence_test ADD (CONSTRAINT sequence_test_constraint PRIMARY KEY (pkid1, pkid2))");
plan skip_all => 'Set $ENV{DBICTEST_MSSQL_ODBC_DSN}, _USER and _PASS to run this test'
unless ($dsn && $user);
-plan tests => 33;
+plan tests => 39;
+DBICTest::Schema->load_classes('ArtistGUID');
my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
{
my ($storage, $dbh) = @_;
eval { $dbh->do("DROP TABLE artist") };
$dbh->do(<<'SQL');
-
CREATE TABLE artist (
artistid INT IDENTITY NOT NULL,
name VARCHAR(100),
charfield CHAR(10) NULL,
primary key(artistid)
)
-
SQL
-
});
my %seen_id;
-# fresh $schema so we start unconnected
-$schema = DBICTest::Schema->connect($dsn, $user, $pass);
+my @opts = (
+ { on_connect_call => 'use_dynamic_cursors' },
+ {},
+);
+my $new;
+
+# test Auto-PK with different options
+for my $opts (@opts) {
+ SKIP: {
+ $schema = DBICTest::Schema->connect($dsn, $user, $pass, $opts);
+
+ eval {
+ $schema->storage->ensure_connected
+ };
+ if ($@ =~ /dynamic cursors/) {
+    skip 'Dynamic Cursors not functional, tds_version 8.0 or greater '
+       . 'required if using FreeTDS', 1;
+ }
-# test primary key handling
-my $new = $schema->resultset('Artist')->create({ name => 'foo' });
-ok($new->artistid > 0, "Auto-PK worked");
+ $schema->resultset('Artist')->search({ name => 'foo' })->delete;
+
+ $new = $schema->resultset('Artist')->create({ name => 'foo' });
+
+ ok($new->artistid > 0, "Auto-PK worked");
+ }
+}
$seen_id{$new->artistid}++;
is( $it->next->name, "Artist 2", "iterator->next ok" );
is( $it->next, undef, "next past end of resultset ok" );
+# test GUID columns
+
+$schema->storage->dbh_do (sub {
+ my ($storage, $dbh) = @_;
+ eval { $dbh->do("DROP TABLE artist") };
+ $dbh->do(<<'SQL');
+CREATE TABLE artist (
+ artistid UNIQUEIDENTIFIER NOT NULL,
+ name VARCHAR(100),
+ rank INT NOT NULL DEFAULT '13',
+ charfield CHAR(10) NULL,
+ a_guid UNIQUEIDENTIFIER,
+ primary key(artistid)
+)
+SQL
+});
+
+# start disconnected to make sure insert works on an un-reblessed storage
+$schema = DBICTest::Schema->connect($dsn, $user, $pass);
+
+my $row;
+lives_ok {
+ $row = $schema->resultset('ArtistGUID')->create({ name => 'mtfnpy' })
+} 'created a row with a GUID';
+
+ok(
+ eval { $row->artistid },
+ 'row has GUID PK col populated',
+);
+diag $@ if $@;
+
+ok(
+ eval { $row->a_guid },
+ 'row has a GUID col with auto_nextval populated',
+);
+diag $@ if $@;
+
+my $row_from_db = $schema->resultset('ArtistGUID')
+ ->search({ name => 'mtfnpy' })->first;
+
+is $row_from_db->artistid, $row->artistid,
+ 'PK GUID round trip';
+
+is $row_from_db->a_guid, $row->a_guid,
+ 'NON-PK GUID round trip';
+
# test MONEY type
$schema->storage->dbh_do (sub {
my ($storage, $dbh) = @_;
my $rs = $schema->resultset('Money');
-my $row;
lives_ok {
$row = $rs->create({ amount => 100 });
} 'inserted a money value';
-is $rs->find($row->id)->amount, '100.00', 'money value round-trip';
+cmp_ok $rs->find($row->id)->amount, '==', 100, 'money value round-trip';
lives_ok {
$row->update({ amount => 200 });
} 'updated a money value';
-is $rs->find($row->id)->amount, '200.00', 'updated money value round-trip';
+cmp_ok $rs->find($row->id)->amount, '==', 200,
+ 'updated money value round-trip';
lives_ok {
$row->update({ amount => undef });
eval { $dbh->do("DROP TABLE Owners") };
eval { $dbh->do("DROP TABLE Books") };
$dbh->do(<<'SQL');
-
-
CREATE TABLE Books (
id INT IDENTITY (1, 1) NOT NULL,
source VARCHAR(100),
id INT IDENTITY (1, 1) NOT NULL,
name VARCHAR(100),
)
-
SQL
});
is ($owners->page(3)->count, 2, 'has-many prefetch returns correct count');
is ($owners->page(3)->count_rs->next, 2, 'has-many prefetch returns correct count_rs');
- # make sure count does not become overly complex FIXME
+ # make sure count does not become overly complex
is_same_sql_bind (
$owners->page(3)->count_rs->as_query,
'(
is ($books->page(2)->count, 1, 'Prefetched grouped search returns correct count');
is ($books->page(2)->count_rs->next, 1, 'Prefetched grouped search returns correct count_rs');
- # make sure count does not become overly complex FIXME
+ # make sure count does not become overly complex (FIXME - the distinct-induced group_by is incorrect)
is_same_sql_bind (
$books->page(2)->count_rs->as_query,
'(
FROM [books] [me]
JOIN [owners] [owner] ON [owner].[id] = [me].[owner]
WHERE ( ( ( [owner].[name] = ? OR [owner].[name] = ? ) AND [source] = ? ) )
- GROUP BY [me].[id], [me].[source], [me].[owner], [me].[title], [me].[price], [owner].[id], [owner].[name]
+ GROUP BY [me].[id], [me].[source], [me].[owner], [me].[title], [me].[price]
ORDER BY [me].[id] DESC
) [count_subq]
)',
# clean up our mess
END {
- if (my $dbh = eval { $schema->storage->_dbh }) {
- $dbh->do('DROP TABLE artist');
- $dbh->do('DROP TABLE money_test');
- $dbh->do('DROP TABLE Books');
- $dbh->do('DROP TABLE Owners');
- }
+ if (my $dbh = eval { $schema->storage->_dbh }) {
+ eval { $dbh->do("DROP TABLE $_") }
+ for qw/artist money_test Books Owners/;
+ }
}
# vim:sw=2 sts=2
plan skip_all => 'Set $ENV{DBICTEST_MSSQL_DSN}, _USER and _PASS to run this test'
unless ($dsn);
-plan tests => 13;
+my $TESTS = 13;
-my $schema = DBICTest::Schema->clone;
-$schema->connection($dsn, $user, $pass);
+plan tests => $TESTS * 2;
-# start disconnected to test reconnection
-$schema->storage->ensure_connected;
-$schema->storage->_dbh->disconnect;
+my @storage_types = (
+ 'DBI::Sybase::Microsoft_SQL_Server',
+ 'DBI::Sybase::Microsoft_SQL_Server::NoBindVars',
+);
+my $storage_idx = -1;
+my $schema;
+
+for my $storage_type (@storage_types) {
+ $storage_idx++;
+
+ $schema = DBICTest::Schema->clone;
+
+ if ($storage_idx != 0) { # autodetect
+ $schema->storage_type("::$storage_type");
+ }
+
+ $schema->connection($dsn, $user, $pass);
-isa_ok($schema->storage, 'DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server');
+ $schema->storage->ensure_connected;
-my $dbh;
-lives_ok (sub {
- $dbh = $schema->storage->dbh;
-}, 'reconnect works');
+ if ($storage_idx == 0 && ref($schema->storage) =~ /NoBindVars\z/) {
+ my $tb = Test::More->builder;
+ $tb->skip('no placeholders') for 1..$TESTS;
+ next;
+ }
-$dbh->do("IF OBJECT_ID('artist', 'U') IS NOT NULL
- DROP TABLE artist");
-$dbh->do("IF OBJECT_ID('cd', 'U') IS NOT NULL
- DROP TABLE cd");
+ isa_ok($schema->storage, "DBIx::Class::Storage::$storage_type");
+
+# start disconnected to test reconnection
+ $schema->storage->_dbh->disconnect;
-$dbh->do("CREATE TABLE artist (artistid INT IDENTITY PRIMARY KEY, name VARCHAR(100), rank INT DEFAULT '13', charfield CHAR(10) NULL);");
-$dbh->do("CREATE TABLE cd (cdid INT IDENTITY PRIMARY KEY, artist INT, title VARCHAR(100), year VARCHAR(100), genreid INT NULL, single_track INT NULL);");
+ my $dbh;
+ lives_ok (sub {
+ $dbh = $schema->storage->dbh;
+ }, 'reconnect works');
+
+ $dbh->do("IF OBJECT_ID('artist', 'U') IS NOT NULL
+ DROP TABLE artist");
+ $dbh->do("IF OBJECT_ID('cd', 'U') IS NOT NULL
+ DROP TABLE cd");
+
+ $dbh->do("CREATE TABLE artist (artistid INT IDENTITY PRIMARY KEY, name VARCHAR(100), rank INT DEFAULT '13', charfield CHAR(10) NULL);");
+ $dbh->do("CREATE TABLE cd (cdid INT IDENTITY PRIMARY KEY, artist INT, title VARCHAR(100), year VARCHAR(100), genreid INT NULL, single_track INT NULL);");
# Just to test compat shim, Auto is in Core
-$schema->class('Artist')->load_components('PK::Auto::MSSQL');
+ $schema->class('Artist')->load_components('PK::Auto::MSSQL');
# Test PK
-my $new = $schema->resultset('Artist')->create( { name => 'foo' } );
-ok($new->artistid, "Auto-PK worked");
+ my $new = $schema->resultset('Artist')->create( { name => 'foo' } );
+ ok($new->artistid, "Auto-PK worked");
# Test LIMIT
-for (1..6) {
- $schema->resultset('Artist')->create( { name => 'Artist ' . $_, rank => $_ } );
-}
+ for (1..6) {
+ $schema->resultset('Artist')->create( { name => 'Artist ' . $_, rank => $_ } );
+ }
-my $it = $schema->resultset('Artist')->search( { },
- { rows => 3,
- offset => 2,
- order_by => 'artistid'
- }
-);
+ my $it = $schema->resultset('Artist')->search( { },
+ { rows => 3,
+ offset => 2,
+ order_by => 'artistid'
+ }
+ );
# Test ? in data don't get treated as placeholders
-my $cd = $schema->resultset('CD')->create( {
- artist => 1,
- title => 'Does this break things?',
- year => 2007,
-} );
-ok($cd->id, 'Not treating ? in data as placeholders');
-
-is( $it->count, 3, "LIMIT count ok" );
-ok( $it->next->name, "iterator->next ok" );
-$it->next;
-$it->next;
-is( $it->next, undef, "next past end of resultset ok" );
+ my $cd = $schema->resultset('CD')->create( {
+ artist => 1,
+ title => 'Does this break things?',
+ year => 2007,
+ } );
+ ok($cd->id, 'Not treating ? in data as placeholders');
+
+ is( $it->count, 3, "LIMIT count ok" );
+ ok( $it->next->name, "iterator->next ok" );
+ $it->next;
+ $it->next;
+ is( $it->next, undef, "next past end of resultset ok" );
# test MONEY column support
-$schema->storage->dbh_do (sub {
- my ($storage, $dbh) = @_;
- eval { $dbh->do("DROP TABLE money_test") };
- $dbh->do(<<'SQL');
-
-CREATE TABLE money_test (
- id INT IDENTITY PRIMARY KEY,
- amount MONEY NULL
-)
-
+ $schema->storage->dbh_do (sub {
+ my ($storage, $dbh) = @_;
+ eval { $dbh->do("DROP TABLE money_test") };
+ $dbh->do(<<'SQL');
+ CREATE TABLE money_test (
+ id INT IDENTITY PRIMARY KEY,
+ amount MONEY NULL
+ )
SQL
-});
+ });
-my $rs = $schema->resultset('Money');
+ my $rs = $schema->resultset('Money');
-my $row;
-lives_ok {
- $row = $rs->create({ amount => 100 });
-} 'inserted a money value';
+ my $row;
+ lives_ok {
+ $row = $rs->create({ amount => 100 });
+ } 'inserted a money value';
-is $rs->find($row->id)->amount, 100, 'money value round-trip';
+ cmp_ok $rs->find($row->id)->amount, '==', 100, 'money value round-trip';
-lives_ok {
- $row->update({ amount => 200 });
-} 'updated a money value';
+ lives_ok {
+ $row->update({ amount => 200 });
+ } 'updated a money value';
-is $rs->find($row->id)->amount, 200, 'updated money value round-trip';
+ cmp_ok $rs->find($row->id)->amount, '==', 200,
+ 'updated money value round-trip';
-lives_ok {
- $row->update({ amount => undef });
-} 'updated a money value to NULL';
+ lives_ok {
+ $row->update({ amount => undef });
+ } 'updated a money value to NULL';
-is $rs->find($row->id)->amount, undef,'updated money value to NULL round-trip';
+ is $rs->find($row->id)->amount,
+ undef, 'updated money value to NULL round-trip';
+}
# clean up our mess
END {
- $dbh->do("IF OBJECT_ID('artist', 'U') IS NOT NULL DROP TABLE artist")
- if $dbh;
- $dbh->do("IF OBJECT_ID('cd', 'U') IS NOT NULL DROP TABLE cd")
- if $dbh;
- $dbh->do("IF OBJECT_ID('money_test', 'U') IS NOT NULL DROP TABLE money_test")
- if $dbh;
+ if (my $dbh = eval { $schema->storage->dbh }) {
+ $dbh->do("IF OBJECT_ID('artist', 'U') IS NOT NULL DROP TABLE artist");
+ $dbh->do("IF OBJECT_ID('cd', 'U') IS NOT NULL DROP TABLE cd");
+ $dbh->do("IF OBJECT_ID('money_test', 'U') IS NOT NULL DROP TABLE money_test");
+ }
}
cmp_ok ($cds->count, '>', 2, 'Initially populated with more than 2 CDs');
my $table = $cds->result_source->name;
+$table = $$table if ref $table eq 'SCALAR';
my $subsel = $cds->search ({}, {
columns => [qw/cdid title/],
from => \ "(SELECT cdid, title FROM $table LIMIT 2) me",
my $schema = DBICTest->init_schema();
-plan tests => 1269;
-
my $employees = $schema->resultset('Employee');
$employees->delete();
my $group_3 = $employees->search({group_id=>3});
my $to_group = 1;
my $to_pos = undef;
-# now that we have transactions we need to work around stupid sqlite
{
my @empl = $group_3->all;
while (my $employee = shift @empl) {
- $employee->discard_changes; # since we are effective shift()ing the $rs while doing this
$employee->move_to_group($to_group, $to_pos);
$to_pos++;
$to_group = $to_group==1 ? 2 : 1;
}
foreach my $group_id (1..4) {
my $group_employees = $employees->search({group_id=>$group_id});
- $group_employees->all();
ok( check_rs($group_employees), "group positions after move_to_group" );
}
my $to_group_2 = 1;
$to_pos = undef;
-# now that we have transactions we need to work around stupid sqlite
{
my @empl = $group_3->all;
while (my $employee = shift @empl) {
foreach my $group_id_2 (1..4) {
foreach my $group_id_3 (1..4) {
my $group_employees = $employees->search({group_id_2=>$group_id_2,group_id_3=>$group_id_3});
- $group_employees->all();
ok( check_rs($group_employees), "group positions after move_to_group" );
}
}
return 1;
}
+done_testing;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use Data::Dumper;
+use DBIC::SqlMakerTest;
+
+my $ping_count = 0;
+
+{
+ local $SIG{__WARN__} = sub {};
+ require DBIx::Class::Storage::DBI;
+
+ my $ping = \&DBIx::Class::Storage::DBI::_ping;
+
+ *DBIx::Class::Storage::DBI::_ping = sub {
+ $ping_count++;
+ goto &$ping;
+ };
+}
+
+
+# We do not count pings during deploy() because of the flux
+# around sqlt. Eventually there should be no pings at all
+my $schema = DBICTest->init_schema( sqlite_use_file => 1, no_populate => 1 );
+
+TODO: {
+ local $TODO = 'Unable to fix before proper deploy() error handling';
+ is ($ping_count, 0, 'no _ping() calls during deploy');
+ $ping_count = 0;
+}
+
+DBICTest->populate_schema ($schema);
+
+# perform some operations and make sure they don't ping
+
+$schema->resultset('CD')->create({
+ cdid => 6, artist => 3, title => 'mtfnpy', year => 2009
+});
+
+$schema->resultset('CD')->create({
+ cdid => 7, artist => 3, title => 'mtfnpy2', year => 2009
+});
+
+$schema->storage->_dbh->disconnect;
+
+$schema->resultset('CD')->create({
+ cdid => 8, artist => 3, title => 'mtfnpy3', year => 2009
+});
+
+$schema->storage->_dbh->disconnect;
+
+$schema->txn_do(sub {
+ $schema->resultset('CD')->create({
+ cdid => 9, artist => 3, title => 'mtfnpy4', year => 2009
+ });
+});
+
+is $ping_count, 0, 'no _ping() calls';
+
+done_testing;
BEGIN {
eval "use DBIx::Class::Storage::DBI::Replicated; use Test::Moose";
- plan $@
- ? ( skip_all => "Deps not installed: $@" )
- : ( tests => 126 );
+ plan skip_all => "Deps not installed: $@" if $@;
}
use_ok 'DBIx::Class::Storage::DBI::Replicated::Pool';
will try to test those. If you do that, it will assume the setup is properly
replicating. Your results may vary, but I have demonstrated this to work with
mysql native replication.
-
+
=cut
## --------------------------------------------------------------------- ##
## Create an object to contain your replicated stuff.
## --------------------------------------------------------------------- ##
-
+
package DBIx::Class::DBI::Replicated::TestReplication;
-
+
use DBICTest;
use base qw/Class::Accessor::Fast/;
-
+
__PACKAGE__->mk_accessors( qw/schema/ );
## Initialize the object
-
- sub new {
- my ($class, $schema_method) = (shift, shift);
- my $self = $class->SUPER::new(@_);
-
- $self->schema( $self->init_schema($schema_method) );
- return $self;
- }
-
+
+ sub new {
+ my ($class, $schema_method) = (shift, shift);
+ my $self = $class->SUPER::new(@_);
+
+ $self->schema( $self->init_schema($schema_method) );
+ return $self;
+ }
+
## Get the Schema and set the replication storage type
-
+
sub init_schema {
# current SQLT SQLite producer does not handle DROP TABLE IF EXISTS, trap warnings here
local $SIG{__WARN__} = sub { warn @_ unless $_[0] =~ /no such table.+DROP TABLE/ };
balancer_type=>'::Random',
balancer_args=>{
auto_validate_every=>100,
- master_read_weight => 1
+ master_read_weight => 1
},
}
},
balancer_type=>'::Random',
balancer_args=> {
auto_validate_every=>100,
- master_read_weight => 1
+ master_read_weight => 1
},
deploy_args=>{
add_drop_table => 1,
no Moose;
}
-
+
## --------------------------------------------------------------------- ##
## Subclass for when you are using SQLite for testing, this provides a fake
## replication support.
## --------------------------------------------------------------------- ##
-
+
package DBIx::Class::DBI::Replicated::TestReplication::SQLite;
use DBICTest;
- use File::Copy;
+ use File::Copy;
use base 'DBIx::Class::DBI::Replicated::TestReplication';
-
+
__PACKAGE__->mk_accessors(qw/master_path slave_paths/);
-
+
## Set the master path from DBICTest
-
- sub new {
- my $class = shift @_;
- my $self = $class->SUPER::new(@_);
-
- $self->master_path( DBICTest->_sqlite_dbfilename );
- $self->slave_paths([
- File::Spec->catfile(qw/t var DBIxClass_slave1.db/),
- File::Spec->catfile(qw/t var DBIxClass_slave2.db/),
- ]);
-
- return $self;
- }
-
+
+ sub new {
+ my $class = shift @_;
+ my $self = $class->SUPER::new(@_);
+
+ $self->master_path( DBICTest->_sqlite_dbfilename );
+ $self->slave_paths([
+ File::Spec->catfile(qw/t var DBIxClass_slave1.db/),
+ File::Spec->catfile(qw/t var DBIxClass_slave2.db/),
+ ]);
+
+ return $self;
+ }
+
## Return an Array of ArrayRefs where each ArrayRef is suitable to use for
## $storage->connect_info to be used for connecting replicants.
-
+
sub generate_replicant_connect_info {
my $self = shift @_;
my @dsn = map {
"dbi:SQLite:${_}";
} @{$self->slave_paths};
-
+
my @connect_infos = map { [$_,'','',{AutoCommit=>1}] } @dsn;
- ## Make sure nothing is left over from a failed test
- $self->cleanup;
+ ## Make sure nothing is left over from a failed test
+ $self->cleanup;
- ## try a hashref too
+ ## try a hashref too
my $c = $connect_infos[0];
$connect_infos[0] = {
dsn => $c->[0],
## Do a 'good enough' replication by copying the master dbfile over each of
## the slave dbfiles. If the master is SQLite we do this, otherwise we
## just do a one second pause to let the slaves catch up.
-
+
sub replicate {
my $self = shift @_;
foreach my $slave (@{$self->slave_paths}) {
copy($self->master_path, $slave);
}
}
-
+
    ## Clean up after ourselves. Unlink all the slave paths.
-
+
sub cleanup {
my $self = shift @_;
foreach my $slave (@{$self->slave_paths}) {
- if(-e $slave) {
- unlink $slave;
- }
- }
+ if(-e $slave) {
+ unlink $slave;
+ }
+ }
}
-
+
## --------------------------------------------------------------------- ##
## Subclass for when you are setting the databases via custom export vars
## This is for when you have a replicating database setup that you are
## two slave databases to test against, as well as a replication system
## that will replicate in less than 1 second.
## --------------------------------------------------------------------- ##
-
- package DBIx::Class::DBI::Replicated::TestReplication::Custom;
+
+ package DBIx::Class::DBI::Replicated::TestReplication::Custom;
use base 'DBIx::Class::DBI::Replicated::TestReplication';
-
+
## Return an Array of ArrayRefs where each ArrayRef is suitable to use for
## $storage->connect_info to be used for connecting replicants.
-
- sub generate_replicant_connect_info {
+
+ sub generate_replicant_connect_info {
return (
[$ENV{"DBICTEST_SLAVE0_DSN"}, $ENV{"DBICTEST_SLAVE0_DBUSER"}, $ENV{"DBICTEST_SLAVE0_DBPASS"}, {AutoCommit => 1}],
- [$ENV{"DBICTEST_SLAVE1_DSN"}, $ENV{"DBICTEST_SLAVE1_DBUSER"}, $ENV{"DBICTEST_SLAVE1_DBPASS"}, {AutoCommit => 1}],
+ [$ENV{"DBICTEST_SLAVE1_DSN"}, $ENV{"DBICTEST_SLAVE1_DBUSER"}, $ENV{"DBICTEST_SLAVE1_DBPASS"}, {AutoCommit => 1}],
);
}
-
- ## pause a bit to let the replication catch up
-
+
+ ## pause a bit to let the replication catch up
+
sub replicate {
- sleep 1;
- }
+ sleep 1;
+ }
}
## ----------------------------------------------------------------------------
my $replicated;
for my $method (qw/by_connect_info by_storage_type/) {
+ undef $replicated;
ok $replicated = $replicated_class->new($method)
=> "Created a replication object $method";
-
+
isa_ok $replicated->schema
=> 'DBIx::Class::Schema';
-
+
isa_ok $replicated->schema->storage
=> 'DBIx::Class::Storage::DBI::Replicated';
ok $replicated->schema->storage->meta
=> 'has a meta object';
-
+
isa_ok $replicated->schema->storage->master
=> 'DBIx::Class::Storage::DBI';
-
+
isa_ok $replicated->schema->storage->pool
=> 'DBIx::Class::Storage::DBI::Replicated::Pool';
-
+
does_ok $replicated->schema->storage->balancer
- => 'DBIx::Class::Storage::DBI::Replicated::Balancer';
+ => 'DBIx::Class::Storage::DBI::Replicated::Balancer';
ok my @replicant_connects = $replicated->generate_replicant_connect_info
=> 'got replication connect information';
our %debug;
$replicated->schema->storage->debug(1);
$replicated->schema->storage->debugcb(sub {
- my ($op, $info) = @_;
- ##warn "\n$op, $info\n";
- %debug = (
- op => $op,
- info => $info,
- dsn => ($info=~m/\[(.+)\]/)[0],
- storage_type => $info=~m/REPLICANT/ ? 'REPLICANT' : 'MASTER',
- );
+ my ($op, $info) = @_;
+ ##warn "\n$op, $info\n";
+ %debug = (
+ op => $op,
+ info => $info,
+ dsn => ($info=~m/\[(.+)\]/)[0],
+ storage_type => $info=~m/REPLICANT/ ? 'REPLICANT' : 'MASTER',
+ );
});
ok my @all_storages = $replicated->schema->storage->all_storages
is ((grep $_->{master_option}, @all_storage_opts),
3
=> 'connect_info was merged from master to replicants');
-
+
my @replicant_names = keys %{ $replicated->schema->storage->replicants };
ok @replicant_names, "found replicant names @replicant_names";
## sqlite dbs.
$replicated->schema->storage->debugobj->silence(1)
if first { m{^t/} } @replicant_names;
-
+
isa_ok $replicated->schema->storage->balancer->current_replicant
- => 'DBIx::Class::Storage::DBI';
+ => 'DBIx::Class::Storage::DBI';
$replicated->schema->storage->debugobj->silence(0);
ok $replicated->schema->storage->pool->has_replicants
- => 'does have replicants';
+ => 'does have replicants';
is $replicated->schema->storage->pool->num_replicants => 2
=> 'has two replicants';
-
+
does_ok $replicated_storages[0]
=> 'DBIx::Class::Storage::DBI::Replicated::Replicant';
does_ok $replicated_storages[1]
=> 'DBIx::Class::Storage::DBI::Replicated::Replicant';
-
+
does_ok $replicated->schema->storage->replicants->{$replicant_names[0]}
=> 'DBIx::Class::Storage::DBI::Replicated::Replicant';
does_ok $replicated->schema->storage->replicants->{$replicant_names[1]}
- => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
+ => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
## Add some info to the database
[ 4, "Ozric Tentacles"],
]);
- is $debug{storage_type}, 'MASTER',
- "got last query from a master: $debug{dsn}";
-
- like $debug{info}, qr/INSERT/, 'Last was an insert';
-
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
+ like $debug{info}, qr/INSERT/, 'Last was an insert';
+
## Make sure all the slaves have the table definitions
$replicated->replicate;
## sqlite dbs.
$replicated->schema->storage->debugobj->silence(1)
if first { m{^t/} } @replicant_names;
-
+
$replicated->schema->storage->pool->validate_replicants;
$replicated->schema->storage->debugobj->silence(0);
## We removed testing here since master read weight is on, so we can't tell in
## advance what storage to expect. We turn master read weight off a bit lower
-## is $debug{storage_type}, 'REPLICANT'
-## => "got last query from a replicant: $debug{dsn}, $debug{info}";
+## is $debug{storage_type}, 'REPLICANT'
+## => "got last query from a replicant: $debug{dsn}, $debug{info}";
isa_ok $artist1
=> 'DBICTest::Artist';
-
+
is $artist1->name, 'Ozric Tentacles'
=> 'Found expected name for first result';
local
*DBIx::Class::Storage::DBI::Replicated::Balancer::Random::_random_number =
- sub { 999 };
+ sub { 999 };
$replicated->schema->storage->balancer->increment_storage;
[ 7, "Watergate"],
]);
- is $debug{storage_type}, 'MASTER',
- "got last query from a master: $debug{dsn}";
-
- like $debug{info}, qr/INSERT/, 'Last was an insert';
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
+ like $debug{info}, qr/INSERT/, 'Last was an insert';
## Make sure all the slaves have the table definitions
$replicated->replicate;
ok my $artist2 = $replicated->schema->resultset('Artist')->find(5)
=> 'Sync succeed';
-is $debug{storage_type}, 'REPLICANT'
- => "got last query from a replicant: $debug{dsn}";
-
+is $debug{storage_type}, 'REPLICANT'
+ => "got last query from a replicant: $debug{dsn}";
+
isa_ok $artist2
=> 'DBICTest::Artist';
-
+
is $artist2->name, "Doom's Children"
=> 'Found expected name for first result';
is $replicated->schema->storage->pool->connected_replicants => 2
=> "both replicants are connected";
-
+
$replicated->schema->storage->replicants->{$replicant_names[0]}->disconnect;
$replicated->schema->storage->replicants->{$replicant_names[1]}->disconnect;
ok my $artist3 = $replicated->schema->resultset('Artist')->find(6)
=> 'Still finding stuff.';
-is $debug{storage_type}, 'REPLICANT'
- => "got last query from a replicant: $debug{dsn}";
-
+is $debug{storage_type}, 'REPLICANT'
+ => "got last query from a replicant: $debug{dsn}";
+
isa_ok $artist3
=> 'DBICTest::Artist';
-
+
is $artist3->name, "Dead On Arrival"
=> 'Found expected name for first result';
is $replicated->schema->storage->pool->connected_replicants => 1
=> "At Least One replicant reconnected to handle the job";
-
+
## What happens when we try to select something that doesn't exist?
ok ! $replicated->schema->resultset('Artist')->find(666)
=> 'Correctly failed to find something.';
-is $debug{storage_type}, 'REPLICANT'
- => "got last query from a replicant: $debug{dsn}";
-
+is $debug{storage_type}, 'REPLICANT'
+ => "got last query from a replicant: $debug{dsn}";
+
## test the reliable option
TESTRELIABLE: {
-
- $replicated->schema->storage->set_reliable_storage;
-
- ok $replicated->schema->resultset('Artist')->find(2)
- => 'Read from master 1';
-
- is $debug{storage_type}, 'MASTER',
- "got last query from a master: $debug{dsn}";
-
- ok $replicated->schema->resultset('Artist')->find(5)
- => 'Read from master 2';
-
- is $debug{storage_type}, 'MASTER',
- "got last query from a master: $debug{dsn}";
-
- $replicated->schema->storage->set_balanced_storage;
-
- ok $replicated->schema->resultset('Artist')->find(3)
+
+ $replicated->schema->storage->set_reliable_storage;
+
+ ok $replicated->schema->resultset('Artist')->find(2)
+ => 'Read from master 1';
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
+ ok $replicated->schema->resultset('Artist')->find(5)
+ => 'Read from master 2';
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
+ $replicated->schema->storage->set_balanced_storage;
+
+ ok $replicated->schema->resultset('Artist')->find(3)
=> 'Read from replicant';
- is $debug{storage_type}, 'REPLICANT',
- "got last query from a replicant: $debug{dsn}";
+ is $debug{storage_type}, 'REPLICANT',
+ "got last query from a replicant: $debug{dsn}";
}
## Make sure when reliable goes out of scope, we are using replicants again
ok $replicated->schema->resultset('Artist')->find(1)
=> 'back to replicant 1.';
- is $debug{storage_type}, 'REPLICANT',
- "got last query from a replicant: $debug{dsn}";
-
+ is $debug{storage_type}, 'REPLICANT',
+ "got last query from a replicant: $debug{dsn}";
+
ok $replicated->schema->resultset('Artist')->find(2)
=> 'back to replicant 2.';
- is $debug{storage_type}, 'REPLICANT',
- "got last query from a replicant: $debug{dsn}";
+ is $debug{storage_type}, 'REPLICANT',
+ "got last query from a replicant: $debug{dsn}";
## set all the replicants to inactive, and make sure the balancer falls back to
## the master.
$replicated->schema->storage->debugfh($debugfh);
ok $replicated->schema->resultset('Artist')->find(2)
- => 'Fallback to master';
+ => 'Fallback to master';
- is $debug{storage_type}, 'MASTER',
- "got last query from a master: $debug{dsn}";
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
like $fallback_warning, qr/falling back to master/
- => 'emits falling back to master warning';
+ => 'emits falling back to master warning';
$replicated->schema->storage->debugfh($oldfh);
}
## sqlite dbs.
$replicated->schema->storage->debugobj->silence(1)
if first { m{^t/} } @replicant_names;
-
+
$replicated->schema->storage->pool->validate_replicants;
$replicated->schema->storage->debugobj->silence(0);
ok $replicated->schema->resultset('Artist')->find(2)
=> 'Returned to replicates';
-is $debug{storage_type}, 'REPLICANT',
- "got last query from a replicant: $debug{dsn}";
-
+is $debug{storage_type}, 'REPLICANT',
+ "got last query from a replicant: $debug{dsn}";
+
## Getting slave status tests
SKIP: {
    ## We skip these tests unless you have custom replicants, since the default
## sqlite based replication tests don't support these functions.
-
- skip 'Cannot Test Replicant Status on Non Replicating Database', 10
+
+ skip 'Cannot Test Replicant Status on Non Replicating Database', 10
unless DBICTest->has_custom_dsn && $ENV{"DBICTEST_SLAVE0_DSN"};
$replicated->replicate; ## Give the slaves a chance to catchup.
- ok $replicated->schema->storage->replicants->{$replicant_names[0]}->is_replicating
- => 'Replicants are replicating';
-
- is $replicated->schema->storage->replicants->{$replicant_names[0]}->lag_behind_master, 0
- => 'Replicant is zero seconds behind master';
-
- ## Test the validate replicants
-
- $replicated->schema->storage->pool->validate_replicants;
-
- is $replicated->schema->storage->pool->active_replicants, 2
- => 'Still have 2 replicants after validation';
-
- ## Force the replicants to fail the validate test by required their lag to
- ## be negative (ie ahead of the master!)
-
+ ok $replicated->schema->storage->replicants->{$replicant_names[0]}->is_replicating
+ => 'Replicants are replicating';
+
+ is $replicated->schema->storage->replicants->{$replicant_names[0]}->lag_behind_master, 0
+ => 'Replicant is zero seconds behind master';
+
+ ## Test the validate replicants
+
+ $replicated->schema->storage->pool->validate_replicants;
+
+ is $replicated->schema->storage->pool->active_replicants, 2
+ => 'Still have 2 replicants after validation';
+
+  ## Force the replicants to fail the validate test by requiring their lag to
+ ## be negative (ie ahead of the master!)
+
$replicated->schema->storage->pool->maximum_lag(-10);
$replicated->schema->storage->pool->validate_replicants;
-
+
is $replicated->schema->storage->pool->active_replicants, 0
=> 'No way a replicant be be ahead of the master';
-
+
## Let's be fair to the replicants again. Let them lag up to 5
-
+
$replicated->schema->storage->pool->maximum_lag(5);
$replicated->schema->storage->pool->validate_replicants;
-
+
is $replicated->schema->storage->pool->active_replicants, 2
- => 'Both replicants in good standing again';
-
- ## Check auto validate
-
- is $replicated->schema->storage->balancer->auto_validate_every, 100
- => "Got the expected value for auto validate";
-
- ## This will make sure we auto validatge everytime
- $replicated->schema->storage->balancer->auto_validate_every(0);
-
- ## set all the replicants to inactive, and make sure the balancer falls back to
- ## the master.
-
- $replicated->schema->storage->replicants->{$replicant_names[0]}->active(0);
- $replicated->schema->storage->replicants->{$replicant_names[1]}->active(0);
-
- ## Ok, now when we go to run a query, autovalidate SHOULD reconnect
-
- is $replicated->schema->storage->pool->active_replicants => 0
- => "both replicants turned off";
-
- ok $replicated->schema->resultset('Artist')->find(5)
- => 'replicant reactivated';
-
- is $debug{storage_type}, 'REPLICANT',
- "got last query from a replicant: $debug{dsn}";
-
- is $replicated->schema->storage->pool->active_replicants => 2
- => "both replicants reactivated";
+ => 'Both replicants in good standing again';
+
+ ## Check auto validate
+
+ is $replicated->schema->storage->balancer->auto_validate_every, 100
+ => "Got the expected value for auto validate";
+
+  ## This will make sure we auto validate every time
+ $replicated->schema->storage->balancer->auto_validate_every(0);
+
+ ## set all the replicants to inactive, and make sure the balancer falls back to
+ ## the master.
+
+ $replicated->schema->storage->replicants->{$replicant_names[0]}->active(0);
+ $replicated->schema->storage->replicants->{$replicant_names[1]}->active(0);
+
+ ## Ok, now when we go to run a query, autovalidate SHOULD reconnect
+
+ is $replicated->schema->storage->pool->active_replicants => 0
+ => "both replicants turned off";
+
+ ok $replicated->schema->resultset('Artist')->find(5)
+ => 'replicant reactivated';
+
+ is $debug{storage_type}, 'REPLICANT',
+ "got last query from a replicant: $debug{dsn}";
+
+ is $replicated->schema->storage->pool->active_replicants => 2
+ => "both replicants reactivated";
}
## Test the reliably callback
ok my $reliably = sub {
-
+
ok $replicated->schema->resultset('Artist')->find(5)
=> 'replicant reactivated';
- is $debug{storage_type}, 'MASTER',
- "got last query from a master: $debug{dsn}";
-
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
} => 'created coderef properly';
$replicated->schema->storage->execute_reliably($reliably);
## Try something with an error
ok my $unreliably = sub {
-
+
ok $replicated->schema->resultset('ArtistXX')->find(5)
- => 'replicant reactivated';
-
+ => 'replicant reactivated';
+
} => 'created coderef properly';
-throws_ok {$replicated->schema->storage->execute_reliably($unreliably)}
+throws_ok {$replicated->schema->storage->execute_reliably($unreliably)}
qr/Can't find source for ArtistXX/
=> 'Bad coderef throws proper error';
-
+
## Make sure replication came back
ok $replicated->schema->resultset('Artist')->find(3)
=> 'replicant reactivated';
is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
-
+
## make sure transactions are set to execute_reliably
ok my $transaction = sub {
-
- my $id = shift @_;
-
- $replicated
- ->schema
- ->populate('Artist', [
- [ qw/artistid name/ ],
- [ $id, "Children of the Grave"],
- ]);
-
+
+ my $id = shift @_;
+
+ $replicated
+ ->schema
+ ->populate('Artist', [
+ [ qw/artistid name/ ],
+ [ $id, "Children of the Grave"],
+ ]);
+
ok my $result = $replicated->schema->resultset('Artist')->find($id)
=> "Found expected artist for $id";
- is $debug{storage_type}, 'MASTER',
- "got last query from a master: $debug{dsn}";
-
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
ok my $more = $replicated->schema->resultset('Artist')->find(1)
=> 'Found expected artist again for 1';
- is $debug{storage_type}, 'MASTER',
- "got last query from a master: $debug{dsn}";
-
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
return ($result, $more);
-
+
} => 'Created a coderef properly';
## Test the transaction with multi return
{
- ok my @return = $replicated->schema->txn_do($transaction, 666)
- => 'did transaction';
-
- is $return[0]->id, 666
- => 'first returned value is correct';
+ ok my @return = $replicated->schema->txn_do($transaction, 666)
+ => 'did transaction';
+
+ is $return[0]->id, 666
+ => 'first returned value is correct';
- is $debug{storage_type}, 'MASTER',
- "got last query from a master: $debug{dsn}";
-
- is $return[1]->id, 1
- => 'second returned value is correct';
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
- is $debug{storage_type}, 'MASTER',
- "got last query from a master: $debug{dsn}";
+ is $return[1]->id, 1
+ => 'second returned value is correct';
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
}
## Test that asking for single return works
{
- ok my @return = $replicated->schema->txn_do($transaction, 777)
- => 'did transaction';
-
- is $return[0]->id, 777
- => 'first returned value is correct';
-
- is $return[1]->id, 1
- => 'second returned value is correct';
+ ok my @return = $replicated->schema->txn_do($transaction, 777)
+ => 'did transaction';
+
+ is $return[0]->id, 777
+ => 'first returned value is correct';
+
+ is $return[1]->id, 1
+ => 'second returned value is correct';
}
## Test transaction returning a single value
{
- ok my $result = $replicated->schema->txn_do(sub {
- ok my $more = $replicated->schema->resultset('Artist')->find(1)
- => 'found inside a transaction';
- is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
- return $more;
- }) => 'successfully processed transaction';
-
- is $result->id, 1
- => 'Got expected single result from transaction';
+ ok my $result = $replicated->schema->txn_do(sub {
+ ok my $more = $replicated->schema->resultset('Artist')->find(1)
+ => 'found inside a transaction';
+ is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
+ return $more;
+ }) => 'successfully processed transaction';
+
+ is $result->id, 1
+ => 'Got expected single result from transaction';
}
## Make sure replication came back
=> 'replicant reactivated';
is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
-
+
## Test Discard changes
{
- ok my $artist = $replicated->schema->resultset('Artist')->find(2)
- => 'got an artist to test discard changes';
+ ok my $artist = $replicated->schema->resultset('Artist')->find(2)
+ => 'got an artist to test discard changes';
+
+ is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
+
+ ok $artist->get_from_storage({force_pool=>'master'})
+ => 'properly discard changes';
- is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
+ is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
- ok $artist->get_from_storage({force_pool=>'master'})
- => 'properly discard changes';
+ ok $artist->discard_changes({force_pool=>'master'})
+ => 'properly called discard_changes against master (manual attrs)';
- is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
+ is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
+ ok $artist->discard_changes()
+ => 'properly called discard_changes against master (default attrs)';
+
+ is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
+
+ ok $artist->discard_changes({force_pool=>$replicant_names[0]})
+ => 'properly able to override the default attributes';
+
+ is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}"
}
## Test some edge cases, like trying to do a transaction inside a transaction, etc
{
ok my $result = $replicated->schema->txn_do(sub {
- return $replicated->schema->txn_do(sub {
- ok my $more = $replicated->schema->resultset('Artist')->find(1)
- => 'found inside a transaction inside a transaction';
- is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
- return $more;
- });
+ return $replicated->schema->txn_do(sub {
+ ok my $more = $replicated->schema->resultset('Artist')->find(1)
+ => 'found inside a transaction inside a transaction';
+ is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
+ return $more;
+ });
}) => 'successfully processed transaction';
-
+
is $result->id, 1
- => 'Got expected single result from transaction';
+ => 'Got expected single result from transaction';
}
{
ok my $result = $replicated->schema->txn_do(sub {
- return $replicated->schema->storage->execute_reliably(sub {
- return $replicated->schema->txn_do(sub {
- return $replicated->schema->storage->execute_reliably(sub {
- ok my $more = $replicated->schema->resultset('Artist')->find(1)
- => 'found inside crazy deep transactions and execute_reliably';
- is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
- return $more;
- });
- });
- });
+ return $replicated->schema->storage->execute_reliably(sub {
+ return $replicated->schema->txn_do(sub {
+ return $replicated->schema->storage->execute_reliably(sub {
+ ok my $more = $replicated->schema->resultset('Artist')->find(1)
+ => 'found inside crazy deep transactions and execute_reliably';
+ is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
+ return $more;
+ });
+ });
+ });
}) => 'successfully processed transaction';
-
+
is $result->id, 1
- => 'Got expected single result from transaction';
-}
+ => 'Got expected single result from transaction';
+}
## Test the force_pool resultset attribute.
{
- ok my $artist_rs = $replicated->schema->resultset('Artist')
+ ok my $artist_rs = $replicated->schema->resultset('Artist')
=> 'got artist resultset';
-
- ## Turn on Forced Pool Storage
- ok my $reliable_artist_rs = $artist_rs->search(undef, {force_pool=>'master'})
+
+ ## Turn on Forced Pool Storage
+ ok my $reliable_artist_rs = $artist_rs->search(undef, {force_pool=>'master'})
=> 'Created a resultset using force_pool storage';
-
- ok my $artist = $reliable_artist_rs->find(2)
+
+ ok my $artist = $reliable_artist_rs->find(2)
=> 'got an artist result via force_pool storage';
- is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
+ is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
}
## Test the force_pool resultset attribute part two.
{
- ok my $artist_rs = $replicated->schema->resultset('Artist')
+ ok my $artist_rs = $replicated->schema->resultset('Artist')
=> 'got artist resultset';
-
- ## Turn on Forced Pool Storage
- ok my $reliable_artist_rs = $artist_rs->search(undef, {force_pool=>$replicant_names[0]})
+
+ ## Turn on Forced Pool Storage
+ ok my $reliable_artist_rs = $artist_rs->search(undef, {force_pool=>$replicant_names[0]})
=> 'Created a resultset using force_pool storage';
-
- ok my $artist = $reliable_artist_rs->find(2)
+
+ ok my $artist = $reliable_artist_rs->find(2)
=> 'got an artist result via force_pool storage';
- is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
+ is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
}
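+## Illustrative sketch, commented out: a force_pool value is either 'master'
+## or the name of a connected replicant, which defaults to its dsn:
+##
+##   my @names = keys %{ $replicated->schema->storage->replicants };
+##   my $rs = $replicated->schema->resultset('Artist')
+##     ->search(undef, { force_pool => $names[0] });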
## Delete the old database files
$replicated->cleanup;
+done_testing;
+
# vim: sw=4 sts=4 :
my $sqlt_schema = create_schema({ schema => $schema, args => { parser_args => { } } });
foreach my $source (@sources) {
- my $table = $sqlt_schema->get_table($schema->source($source)->from);
+ my $table = get_table($sqlt_schema, $schema, $source);
my $fk_count = scalar(grep { $_->type eq 'FOREIGN KEY' } $table->get_constraints);
my @indices = $table->get_indices;
my $sqlt_schema = create_schema({ schema => $schema, args => { parser_args => { add_fk_index => 1 } } });
foreach my $source (@sources) {
- my $table = $sqlt_schema->get_table($schema->source($source)->from);
+ my $table = get_table($sqlt_schema, $schema, $source);
my $fk_count = scalar(grep { $_->type eq 'FOREIGN KEY' } $table->get_constraints);
my @indices = $table->get_indices;
my $sqlt_schema = create_schema({ schema => $schema, args => { parser_args => { add_fk_index => 0 } } });
foreach my $source (@sources) {
- my $table = $sqlt_schema->get_table($schema->source($source)->from);
+ my $table = get_table($sqlt_schema, $schema, $source);
my @indices = $table->get_indices;
my $index_count = scalar(@indices);
$sqlt->parser('SQL::Translator::Parser::DBIx::Class');
my $translated = $sqlt->translate({ data => $schema }) or die $sqlt->error;
return $translated;
}
+
+sub get_table {
+ my ($sqlt_schema, $schema, $source) = @_;
+
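+  # the source may declare its table as a scalar ref (see the \'cd'
+  # declaration in DBICTest::Schema::CD), so deref before the SQLT lookup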
+ my $table_name = $schema->source($source)->from;
+ $table_name = $$table_name if ref $table_name;
+
+ return $sqlt_schema->get_table($table_name);
+}
use Test::More;
use DBICTest;
use DBIC::SqlMakerTest;
-use DBIC::DebugObj;
-
-plan tests => 6;
my $schema = DBICTest->init_schema();
{ prefetch => [qw/tracks artist/] },
);
is ($rs->all, 5, 'Correct number of objects');
+ is ($rs->count, 5, 'Correct count');
+ is_same_sql_bind (
+ $rs->count_rs->as_query,
+ '(
+ SELECT COUNT( * )
+ FROM (
+ SELECT cds.cdid
+ FROM artist me
+ JOIN cd cds ON cds.artist = me.artistid
+ LEFT JOIN track tracks ON tracks.cd = cds.cdid
+ JOIN artist artist ON artist.artistid = cds.artist
+ WHERE tracks.position = ? OR tracks.position = ?
+ GROUP BY cds.cdid
+ ) count_subq
+ )',
+ [ map { [ 'tracks.position' => $_ ] } (1, 2) ],
+ );
+}
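+## Illustrative sketch, commented out: count_rs->as_query returns a ref to
+## an arrayref holding the SQL string plus its bind values, which is the
+## shape is_same_sql_bind consumes above:
+##
+##   my $query = $rs->count_rs->as_query;   # \[ $sql, @bind ]
+##   my ($sql, @bind) = @$$query;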
- my ($sql, @bind);
- $schema->storage->debugobj(DBIC::DebugObj->new(\$sql, \@bind));
- $schema->storage->debug(1);
-
-
- is ($rs->count, 5, 'Correct count');
+# collapsing prefetch with distinct
+{
+ my $first_cd = $schema->resultset('Artist')->first->cds->first;
+ $first_cd->update ({
+ genreid => $first_cd->create_related (
+ genre => ({ name => 'vague genre' })
+ )->id
+ });
+
+ my $rs = $schema->resultset("Artist")->search(undef, {distinct => 1})
+ ->search_related('cds')->search_related('genre',
+ { 'genre.name' => { '!=', 'foo' } },
+ { prefetch => q(cds) },
+ );
+ is ($rs->all, 1, 'Correct number of objects');
+ is ($rs->count, 1, 'Correct count');
is_same_sql_bind (
- $sql,
- \@bind,
- 'SELECT COUNT( * ) FROM (SELECT cds.cdid FROM artist me JOIN cd cds ON cds.artist = me.artistid LEFT JOIN track tracks ON tracks.cd = cds.cdid JOIN artist artist ON artist.artistid = cds.artist WHERE tracks.position = ? OR tracks.position = ? GROUP BY cds.cdid) count_subq',
- [ qw/'1' '2'/ ],
+ $rs->count_rs->as_query,
+ '(
+ SELECT COUNT( * )
+ FROM (
+ SELECT genre.genreid
+ FROM artist me
+ JOIN cd cds ON cds.artist = me.artistid
+ JOIN genre genre ON genre.genreid = cds.genreid
+ LEFT JOIN cd cds_2 ON cds_2.genreid = genre.genreid
+ WHERE ( genre.name != ? )
+ GROUP BY genre.genreid
+ ) count_subq
+ )',
+ [ [ 'genre.name' => 'foo' ] ],
);
}
is ($rs->all, 10, 'Correct number of objects');
- my ($sql, @bind);
- $schema->storage->debugobj(DBIC::DebugObj->new(\$sql, \@bind));
- $schema->storage->debug(1);
-
-
is ($rs->count, 10, 'Correct count');
is_same_sql_bind (
- $sql,
- \@bind,
- 'SELECT COUNT( * ) FROM cd me JOIN track tracks ON tracks.cd = me.cdid JOIN cd disc ON disc.cdid = tracks.cd LEFT JOIN lyrics lyrics ON lyrics.track_id = tracks.trackid WHERE ( ( position = ? OR position = ? ) )',
- [ qw/'1' '2'/ ],
+ $rs->count_rs->as_query,
+ '(
+ SELECT COUNT( * )
+ FROM cd me
+ JOIN track tracks ON tracks.cd = me.cdid
+ JOIN cd disc ON disc.cdid = tracks.cd
+ LEFT JOIN lyrics lyrics ON lyrics.track_id = tracks.trackid
+ WHERE position = ? OR position = ?
+ )',
+ [ map { [ position => $_ ] } (1, 2) ],
);
}
+
+done_testing;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+use lib qw(t/lib);
+use DBICTest;
+
+my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_MSSQL_ODBC_${_}" } qw/DSN USER PASS/};
+
+if (not ($dsn && $user)) {
+ plan skip_all =>
+ 'Set $ENV{DBICTEST_MSSQL_ODBC_DSN}, _USER and _PASS to run this test' .
+ "\nWarning: This test drops and creates a table called 'track'";
+} else {
+ eval "use DateTime; use DateTime::Format::Strptime;";
+ if ($@) {
+ plan skip_all => 'needs DateTime and DateTime::Format::Strptime for testing';
+ }
+ else {
+ plan tests => 4 * 2; # (tests * dt_types)
+ }
+}
+
+my $schema = DBICTest::Schema->clone;
+
+$schema->connection($dsn, $user, $pass);
+$schema->storage->ensure_connected;
+
+# each entry: [ column type, column name, DateTime constructor arguments ]
+my @dt_types = (
+ ['DATETIME',
+ 'last_updated_at',
+ {
+ year => 2004,
+ month => 8,
+ day => 21,
+ hour => 14,
+ minute => 36,
+ second => 48,
+ nanosecond => 500000000,
+ }],
+ ['SMALLDATETIME', # minute precision
+ 'small_dt',
+ {
+ year => 2004,
+ month => 8,
+ day => 21,
+ hour => 14,
+ minute => 36,
+ }],
+);
+
+for my $dt_type (@dt_types) {
+ my ($type, $col, $sample_dt) = @$dt_type;
+
+ eval { $schema->storage->dbh->do("DROP TABLE track") };
+ $schema->storage->dbh->do(<<"SQL");
+CREATE TABLE track (
+ trackid INT IDENTITY PRIMARY KEY,
+ cd INT,
+ position INT,
+ $col $type,
+)
+SQL
+  ok(my $dt = DateTime->new($sample_dt), "built sample $type DateTime");
+
+ my $row;
+ ok( $row = $schema->resultset('Track')->create({
+ $col => $dt,
+ cd => 1,
+ }));
+ ok( $row = $schema->resultset('Track')
+ ->search({ trackid => $row->trackid }, { select => [$col] })
+ ->first
+ );
+ is( $row->$col, $dt, 'DateTime roundtrip' );
+}
+
+# clean up our mess
+END {
+ if (my $dbh = eval { $schema->storage->_dbh }) {
+ $dbh->do('DROP TABLE track');
+ }
+}
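+## Illustrative sketch, commented out: the result-class side of the new
+## smalldatetime support is just a column declaration - see the matching
+## Track.pm change further below:
+##
+##   __PACKAGE__->load_components('InflateColumn::DateTime');
+##   __PACKAGE__->add_columns(
+##     small_dt => { data_type => 'smalldatetime', is_nullable => 1 },
+##   );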
eval {
$dbh->do("DROP TABLE track");
};
-$dbh->do("CREATE TABLE track (trackid NUMBER(12), cd NUMBER(12), position NUMBER(12), title VARCHAR(255), last_updated_on DATE, last_updated_at TIMESTAMP)");
+$dbh->do("CREATE TABLE track (trackid NUMBER(12), cd NUMBER(12), position NUMBER(12), title VARCHAR(255), last_updated_on DATE, last_updated_at TIMESTAMP, small_dt DATE)");
# insert a row to play with
my $new = $schema->resultset('Track')->create({ trackid => 1, cd => 1, position => 1, title => 'Track1', last_updated_on => '06-MAY-07', last_updated_at => '2009-05-03 21:17:18.5' });
croak "Unexpected argument(s) supplied to is_same_sql_bind: " . join ('; ', @_)
if @_;
- SQL::Abstract::Test::is_same_sql_bind (@args);
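+    # hand off via goto so Test::Builder attributes any failure to the real
+    # call site instead of to this wrapper's stack frame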
+ @_ = @args;
+ goto &SQL::Abstract::Test::is_same_sql_bind;
}
*is_same_sql = \&SQL::Abstract::Test::is_same_sql;
--- /dev/null
+package # hide from PAUSE
+ DBICTest::Schema::ArtistGUID;
+
+use strict;
+use warnings;
+
+use base qw/DBICTest::BaseResult/;
+
+# test MSSQL uniqueidentifier type
+
+__PACKAGE__->table('artist');
+__PACKAGE__->add_columns(
+ 'artistid' => {
+ data_type => 'uniqueidentifier' # auto_nextval not necessary for PK
+ },
+ 'name' => {
+ data_type => 'varchar',
+ size => 100,
+ is_nullable => 1,
+ },
+ rank => {
+ data_type => 'integer',
+ default_value => 13,
+ },
+ charfield => {
+ data_type => 'char',
+ size => 10,
+ is_nullable => 1,
+ },
+ a_guid => {
+ data_type => 'uniqueidentifier',
+ auto_nextval => 1, # necessary here, because not a PK
+ is_nullable => 1,
+ }
+);
+__PACKAGE__->set_primary_key('artistid');
+
+1;
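+## Illustrative usage sketch, commented out (assumes a $schema connected to
+## MSSQL): both uniqueidentifier columns come back populated via the
+## server's newid():
+##
+##   my $row = $schema->resultset('ArtistGUID')->create({ name => 'foo' });
+##   # $row->artistid and $row->a_guid now hold server-generated GUIDs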
use base qw/DBICTest::BaseResult/;
-__PACKAGE__->table('cd');
+# this tests passing the table name as a scalar ref
+# DO NOT REMOVE THE \
+__PACKAGE__->table(\'cd');
+
__PACKAGE__->add_columns(
'cdid' => {
data_type => 'integer',
WHERE cd.year = ?)
SQL
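+# this source is backed by the subselect above rather than a real table, so
+# at deploy time drop the placeholder table SQL::Translator would emit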
+sub sqlt_deploy_hook { $_[1]->schema->drop_table($_[1]) }
+
1;
data_type => 'datetime',
is_nullable => 1
},
+ small_dt => { # for mssql and sybase DT tests
+ data_type => 'smalldatetime',
+ is_nullable => 1
+ },
);
__PACKAGE__->set_primary_key('trackid');
## Used in 104view.t
use base qw/DBICTest::BaseResult/;
-use DBIx::Class::ResultSource::View;
__PACKAGE__->table_class('DBIx::Class::ResultSource::View');
## Used in 104view.t
use base qw/DBICTest::BaseResult/;
-use DBIx::Class::ResultSource::View;
__PACKAGE__->table_class('DBIx::Class::ResultSource::View');
--
-- Created by SQL::Translator::Producer::SQLite
--- Created on Thu Jul 30 08:44:22 2009
+-- Created on Thu Jul 30 09:37:43 2009
--
position integer NOT NULL,
title varchar(100) NOT NULL,
last_updated_on datetime,
- last_updated_at datetime
+ last_updated_at datetime,
+ small_dt smalldatetime
);
CREATE INDEX track_idx_cd ON track (cd);
'(
SELECT
cds.cdid, cds.artist, cds.title, cds.year, cds.genreid, cds.single_track,
- single_track.trackid, single_track.cd, single_track.position, single_track.title, single_track.last_updated_on, single_track.last_updated_at,
- single_track_2.trackid, single_track_2.cd, single_track_2.position, single_track_2.title, single_track_2.last_updated_on, single_track_2.last_updated_at,
+ single_track.trackid, single_track.cd, single_track.position, single_track.title, single_track.last_updated_on, single_track.last_updated_at, single_track.small_dt,
+ single_track_2.trackid, single_track_2.cd, single_track_2.position, single_track_2.title, single_track_2.last_updated_on, single_track_2.last_updated_at, single_track_2.small_dt,
cd.cdid, cd.artist, cd.title, cd.year, cd.genreid, cd.single_track
FROM artist me
LEFT JOIN cd cds ON cds.artist = me.artistid
use strict;
use warnings;
+
use Test::More;
+use Test::Exception;
use lib qw(t/lib);
use DBICTest;
use DBIC::SqlMakerTest;
-#plan tests => 6;
-plan 'no_plan';
-
my $schema = DBICTest->init_schema();
my $sdebug = $schema->storage->debug;
is_same_sql_bind (
$most_tracks_rs->as_query,
'(
- SELECT me.cdid, me.track_count, tracks.trackid, tracks.cd, tracks.position, tracks.title, tracks.last_updated_on, tracks.last_updated_at, liner_notes.liner_id, liner_notes.notes
+ SELECT me.cdid, me.track_count,
+ tracks.trackid, tracks.cd, tracks.position, tracks.title, tracks.last_updated_on, tracks.last_updated_at, tracks.small_dt,
+ liner_notes.liner_id, liner_notes.notes
FROM (
SELECT me.cdid, COUNT( tracks.trackid ) AS track_count
FROM cd me
$schema->storage->debugcb (undef);
$schema->storage->debug ($sdebug);
}
+
+# make sure that distinct still works
+{
+ my $rs = $schema->resultset("CD")->search({}, {
+ prefetch => 'tags',
+ order_by => 'cdid',
+ distinct => 1,
+ });
+
+ is_same_sql_bind (
+ $rs->as_query,
+ '(
+ SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track,
+ tags.tagid, tags.cd, tags.tag
+ FROM (
+ SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
+ FROM cd me
+ GROUP BY me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
+ ORDER BY cdid
+ ) me
+ LEFT JOIN tags tags ON tags.cd = me.cdid
+ ORDER BY cdid, tags.cd, tags.tag
+ )',
+ [],
+ 'Prefetch + distinct resulted in correct group_by',
+ );
+
+ is ($rs->all, 5, 'Correct number of CD objects');
+ is ($rs->count, 5, 'Correct count of CDs');
+}
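+## Illustrative note, commented out: distinct => 1 is shorthand for grouping
+## over the full selected column set, which is how the inner query above
+## acquires its GROUP BY:
+##
+##   $schema->resultset('CD')->search({}, { distinct => 1 });
+##   # groups the same way as
+##   $schema->resultset('CD')->search({}, {
+##     group_by => [ $schema->source('CD')->columns ],
+##   });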
+
+done_testing;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+lives_ok ( sub {
+ my $no_prefetch = $schema->resultset('Track')->search_related(cd =>
+ {
+ 'cd.year' => "2000",
+ },
+ {
+ join => 'tags',
+ order_by => 'me.trackid',
+ rows => 1,
+ }
+ );
+
+ my $use_prefetch = $no_prefetch->search(
+ {},
+ {
+ prefetch => 'tags',
+ }
+ );
+
+ is($use_prefetch->count, $no_prefetch->count, 'counts with and without prefetch match');
+ is(
+ scalar ($use_prefetch->all),
+ scalar ($no_prefetch->all),
+ "Amount of returned rows is right"
+ );
+
+}, 'search_related prefetch with order_by works');
+
+lives_ok (sub {
+ my $rs = $schema->resultset("Artwork")->search(undef, {distinct => 1})
+ ->search_related('artwork_to_artist')->search_related('artist',
+ undef,
+ { prefetch => 'cds' },
+ );
+ is($rs->all, 0, 'prefetch without WHERE (objects)');
+ is($rs->count, 0, 'prefetch without WHERE (count)');
+
+ $rs = $schema->resultset("Artwork")->search(undef, {distinct => 1})
+ ->search_related('artwork_to_artist')->search_related('artist',
+ { 'cds.title' => 'foo' },
+ { prefetch => 'cds' },
+ );
+ is($rs->all, 0, 'prefetch with WHERE (objects)');
+ is($rs->count, 0, 'prefetch with WHERE (count)');
+
+# test WHERE conditions at the root of the search_related chain
+ my $artist_rs = $schema->resultset("Artist")->search({artistid => 11});
+
+ $rs = $artist_rs->search_related('cds')->search_related('genre',
+ { 'genre.name' => 'foo' },
+ { prefetch => 'cds' },
+ );
+ is($rs->all, 0, 'prefetch without distinct (objects)');
+ is($rs->count, 0, 'prefetch without distinct (count)');
+
+ $rs = $artist_rs->search(undef, {distinct => 1})
+ ->search_related('cds')->search_related('genre',
+ { 'genre.name' => 'foo' },
+ );
+ is($rs->all, 0, 'distinct without prefetch (objects)');
+ is($rs->count, 0, 'distinct without prefetch (count)');
+
+ $rs = $artist_rs->search({}, {distinct => 1})
+ ->search_related('cds')->search_related('genre',
+ { 'genre.name' => 'foo' },
+ { prefetch => 'cds' },
+ );
+ is($rs->all, 0, 'distinct with prefetch (objects)');
+ is($rs->count, 0, 'distinct with prefetch (count)');
+}, 'distinct generally works with prefetch on deep search_related chains');
+
+done_testing;