From: Peter Rabbitson
Date: Fri, 8 May 2009 17:19:25 +0000 (+0000)
Subject: Merge 'trunk' into 'oracle-tweaks'
X-Git-Tag: v0.08103~100^2~4
X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=commitdiff_plain;h=9f472bdb904849b2e9884e8b54ef01bc9f6f7298;hp=b1e4a1df5cd0a3cdf744e94254096196e371a5d7;p=dbsrgits%2FDBIx-Class.git

Merge 'trunk' into 'oracle-tweaks'

 r6114@Thesaurus (orig r6113):  ribasushi | 2009-05-03 10:23:28 +0200
 Bump SQLA dependencies so parenthesis_significant is guaranteed to be there
 r6127@Thesaurus (orig r6126):  arcanez | 2009-05-05 09:27:56 +0200
  r6037@mullet (orig r6036):  arcanez | 2009-04-30 01:24:41 -0700
  branch to work on Sybase/MSSQL subtleties
  r6038@mullet (orig r6037):  arcanez | 2009-04-30 01:27:11 -0700
  jump to ::DBI::Sybase::MSSQL if we are using MSSQL through Sybase
  r6062@mullet (orig r6061):  arcanez | 2009-04-30 14:05:24 -0700
  fixes for MSSQL via Sybase
  r6126@mullet (orig r6125):  arcanez | 2009-05-05 00:27:13 -0700
  add a line to Changes and add a CAVEAT
 r6164@Thesaurus (orig r6163):  ribasushi | 2009-05-07 19:09:01 +0200
  r6115@Thesaurus (orig r6114):  plu | 2009-05-03 10:39:16 +0200
  new branch to fix $rs->update and $rs->delete using the new as_query method
  r6116@Thesaurus (orig r6115):  plu | 2009-05-03 10:52:07 +0200
  The update/delete methods on ResultSet now use the new as_query method to
  update/delete properly on joined/prefetched resultsets via a subquery.
  Some tests have been added and others adjusted accordingly, and the
  warnings around $rs->update/delete have been removed. Cheers!
  r6117@Thesaurus (orig r6116):  plu | 2009-05-03 11:13:48 +0200
  Using "is" instead of "cmp_ok"
  r6160@Thesaurus (orig r6159):  ribasushi | 2009-05-07 11:58:14 +0200
  Back out skip_parens support in as_query
  r6161@Thesaurus (orig r6160):  ribasushi | 2009-05-07 19:00:48 +0200
  This test is completely borked, needs a rewrite
  r6162@Thesaurus (orig r6161):  ribasushi | 2009-05-07 19:07:19 +0200
  Temporary fix for the IN ( ( ) ) problem until we get proper SQLA AST
  (needs SQLA released with commit 6158 to work)
 r6165@Thesaurus (orig r6164):  ribasushi | 2009-05-07 19:11:46 +0200
 Changes, remove merged branch
 r6169@Thesaurus (orig r6168):  ribasushi | 2009-05-07 19:24:54 +0200
 Bump SQLA dependency so -in/-between workarounds overload properly
 r6172@Thesaurus (orig r6171):  ribasushi | 2009-05-07 20:49:26 +0200
 Cookbook cleanup
 r6174@Thesaurus (orig r6173):  ribasushi | 2009-05-08 10:13:30 +0200
 Throw away some debugging code
 r6175@Thesaurus (orig r6174):  ribasushi | 2009-05-08 10:21:53 +0200
 Documentation patch by nniuq
 r6176@Thesaurus (orig r6175):  plu | 2009-05-08 10:30:20 +0200
 Set NLS_LANG so we have a predictable date format when using MON
 r6177@Thesaurus (orig r6176):  ribasushi | 2009-05-08 12:15:15 +0200
 Fix POD
 r6179@Thesaurus (orig r6178):  jgoulah | 2009-05-08 16:27:49 +0200
 renaming rh performance test so it will show up at the end of test output
---

diff --git a/Changes b/Changes
index 13bb6d8..0704243 100644
--- a/Changes
+++ b/Changes
@@ -1,5 +1,11 @@
 Revision history for DBIx::Class
 
+        - Refactor DBIx::Class::Storage::DBI::Sybase to automatically
+          load a subclass, namely Microsoft_SQL_Server.pm
+          (similar to DBIx::Class::Storage::DBI::ODBC)
+        - Proper support for update/delete of joined resultsets
+          (using IN => $sub_rs->as_query)
+
 0.08102 2009-04-30 08:29:00 (UTC)
         - Fixed two subtle bugs when using columns or select/as
           paired with a join (limited prefetch)
diff --git a/Makefile.PL b/Makefile.PL
index dd0c078..2d8e90b 100644
--- a/Makefile.PL
+++ b/Makefile.PL
@@ -16,7 +16,7 @@ configure_requires 'DBD::SQLite';
 requires 'DBD::SQLite' => 1.23;
 requires 'Data::Page' => 2.00;
 requires 'Scalar::Util' => 0;
-requires 'SQL::Abstract' => 1.51;
+requires 'SQL::Abstract' => 1.54;
 requires 'SQL::Abstract::Limit' => 0.13;
 requires 'Class::C3::Componentised' => 1.0005;
 requires 'Storable' => 0;
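As a rough illustration of the "update/delete of joined resultsets" entry in the Changes hunk above: with this merge the restricting condition is rewritten into a primary-key IN-subquery built from as_query, so the join no longer leaks into the UPDATE/DELETE statement itself. The sketch below is illustrative only -- the CD/artist classes mirror the DBICTest schema exercised in t/76joins.t, and the exact SQL depends on the storage in use.

    # A resultset restricted through a join ...
    my $rs = $schema->resultset('CD')->search(
      { 'artist.name' => 'Caterwauler McCrae' },
      { join => 'artist' },
    );

    # ... can now be updated/deleted directly; the condition becomes
    # roughly: WHERE cdid IN ( SELECT me.cdid FROM cd me JOIN artist ... )
    $rs->update({ year => 2009 });   # year value is a made-up example
    $rs->delete;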
diff --git a/lib/DBIx/Class/CDBICompat/LiveObjectIndex.pm b/lib/DBIx/Class/CDBICompat/LiveObjectIndex.pm
index 445282c..a461a13 100644
--- a/lib/DBIx/Class/CDBICompat/LiveObjectIndex.pm
+++ b/lib/DBIx/Class/CDBICompat/LiveObjectIndex.pm
@@ -51,15 +51,15 @@ sub clear_object_index {
 sub insert {
   my ($self, @rest) = @_;
   $self->next::method(@rest);
-
+
   return $self if $self->nocache;
 
-  # Because the insert will die() if it can't insert into the db (or should)
-  # we can be sure the object *was* inserted if we got this far. In which
-  # case, given primary keys are unique and ID only returns a
-  # value if the object has all its primary keys, we can be sure there
-  # isn't a real one in the object index already because such a record
-  # cannot have existed without the insert failing.
+  # Because the insert will die() if it can't insert into the db (or should)
+  # we can be sure the object *was* inserted if we got this far. In which
+  # case, given primary keys are unique and ID only returns a
+  # value if the object has all its primary keys, we can be sure there
+  # isn't a real one in the object index already because such a record
+  # cannot have existed without the insert failing.
   if (my $key = $self->ID) {
     my $live = $self->live_object_index;
     weaken($live->{$key} = $self);
@@ -67,7 +67,7 @@ sub insert {
       if ++$self->live_object_init_count->{count}
           % $self->purge_object_index_every == 0;
   }
 
-  #use Data::Dumper; warn Dumper($self);
+
   return $self;
 }
diff --git a/lib/DBIx/Class/Manual/Cookbook.pod b/lib/DBIx/Class/Manual/Cookbook.pod
index 7d0de14..d265693 100644
--- a/lib/DBIx/Class/Manual/Cookbook.pod
+++ b/lib/DBIx/Class/Manual/Cookbook.pod
@@ -497,9 +497,6 @@ L has now prefetched all matching data from the C table,
 so no additional SQL statements are executed. You now have a much more
 efficient query.
 
-Note that as of L 0.05999_01, C I be used with
-C relationships.
-
 Also note that C should only be used when you know you will
 definitely use data from a related table. Pre-fetching related tables when you
 only need columns from the main table will make performance worse!
@@ -617,7 +614,7 @@ CD and Concert, and join CD to LinerNotes:
 
 =head2 Multi-step prefetch
 
-From 0.04999_05 onwards, C can be nested more than one relationship
+C can be nested more than one relationship
 deep using the same syntax as a multi-step join:
 
   my $rs = $schema->resultset('Tag')->search(
@@ -657,8 +654,7 @@ method.
 
 AKA getting last_insert_id
 
-If you are using PK::Auto (which is a core component as of 0.07), this is
-straightforward:
+Thanks to the core component PK::Auto, this is straightforward:
 
   my $foo = $rs->create(\%blah);
   # do more stuff
diff --git a/lib/DBIx/Class/ResultSet.pm b/lib/DBIx/Class/ResultSet.pm
index 7f53ec3..00fc70c 100644
--- a/lib/DBIx/Class/ResultSet.pm
+++ b/lib/DBIx/Class/ResultSet.pm
@@ -796,19 +796,16 @@ sub _collapse_query {
   if (ref $query eq 'ARRAY') {
     foreach my $subquery (@$query) {
       next unless ref $subquery;  # -or
-#      warn "ARRAY: " . Dumper $subquery;
       $collapsed = $self->_collapse_query($subquery, $collapsed);
     }
   }
   elsif (ref $query eq 'HASH') {
     if (keys %$query and (keys %$query)[0] eq '-and') {
       foreach my $subquery (@{$query->{-and}}) {
-#        warn "HASH: " . Dumper $subquery;
         $collapsed = $self->_collapse_query($subquery, $collapsed);
       }
     }
     else {
-#      warn "LEAF: " . Dumper $query;
       foreach my $col (keys %$query) {
         my $value = $query->{$col};
         $collapsed->{$col}{$value}++;
@@ -1332,49 +1329,8 @@ sub _cond_for_update_delete {
   # No-op. No condition, we're updating/deleting everything
   return $cond unless ref $full_cond;
 
-  if (ref $full_cond eq 'ARRAY') {
-    $cond = [
-      map {
-        my %hash;
-        foreach my $key (keys %{$_}) {
-          $key =~ /([^.]+)$/;
-          $hash{$1} = $_->{$key};
-        }
-        \%hash;
-      } @{$full_cond}
-    ];
-  }
-  elsif (ref $full_cond eq 'HASH') {
-    if ((keys %{$full_cond})[0] eq '-and') {
-      $cond->{-and} = [];
-
-      my @cond = @{$full_cond->{-and}};
-      for (my $i = 0; $i < @cond; $i++) {
-        my $entry = $cond[$i];
-
-        my $hash;
-        if (ref $entry eq 'HASH') {
-          $hash = $self->_cond_for_update_delete($entry);
-        }
-        else {
-          $entry =~ /([^.]+)$/;
-          $hash->{$1} = $cond[++$i];
-        }
-
-        push @{$cond->{-and}}, $hash;
-      }
-    }
-    else {
-      foreach my $key (keys %{$full_cond}) {
-        $key =~ /([^.]+)$/;
-        $cond->{$1} = $full_cond->{$key};
-      }
-    }
-  }
-  else {
-    $self->throw_exception(
-      "Can't update/delete on resultset with condition unless hash or array"
-    );
+  foreach my $pk ($self->result_source->primary_columns) {
+    $cond->{$pk} = { -in => $self->get_column($pk)->as_query };
   }
 
   return $cond;
@@ -1402,13 +1358,8 @@ sub update {
   $self->throw_exception("Values for update must be a hash")
     unless ref $values eq 'HASH';
 
-  carp( 'WARNING! Currently $rs->update() does not generate proper SQL'
-    .
' on joined resultsets, and may affect rows well outside of the' - . ' contents of $rs. Use at your own risk' ) - if ( $self->{attrs}{seen_join} ); - my $cond = $self->_cond_for_update_delete; - + return $self->result_source->storage->update( $self->result_source, $values, $cond ); @@ -1456,10 +1407,6 @@ to run. See also L. delete may not generate correct SQL for a query with joins or a resultset chained from a related resultset. In this case it will generate a warning:- - WARNING! Currently $rs->delete() does not generate proper SQL on - joined resultsets, and may delete rows well outside of the contents - of $rs. Use at your own risk - In these cases you may find that delete_all is more appropriate, or you need to respecify your query in a way that can be expressed without a join. @@ -1469,10 +1416,7 @@ sub delete { my ($self) = @_; $self->throw_exception("Delete should not be passed any arguments") if $_[1]; - carp( 'WARNING! Currently $rs->delete() does not generate proper SQL' - . ' on joined resultsets, and may delete rows well outside of the' - . ' contents of $rs. Use at your own risk' ) - if ( $self->{attrs}{seen_join} ); + my $cond = $self->_cond_for_update_delete; $self->result_source->storage->delete($self->result_source, $cond); @@ -1813,19 +1757,16 @@ sub _collapse_cond { if (ref $cond eq 'ARRAY') { foreach my $subcond (@$cond) { next unless ref $subcond; # -or -# warn "ARRAY: " . Dumper $subcond; $collapsed = $self->_collapse_cond($subcond, $collapsed); } } elsif (ref $cond eq 'HASH') { if (keys %$cond and (keys %$cond)[0] eq '-and') { foreach my $subcond (@{$cond->{-and}}) { -# warn "HASH: " . Dumper $subcond; $collapsed = $self->_collapse_cond($subcond, $collapsed); } } else { -# warn "LEAF: " . Dumper $cond; foreach my $col (keys %$cond) { my $value = $cond->{$col}; $collapsed->{$col} = $value; diff --git a/lib/DBIx/Class/ResultSetColumn.pm b/lib/DBIx/Class/ResultSetColumn.pm index 596df7c..2679803 100644 --- a/lib/DBIx/Class/ResultSetColumn.pm +++ b/lib/DBIx/Class/ResultSetColumn.pm @@ -72,7 +72,7 @@ B: This feature is still experimental. =cut -sub as_query { return shift->_resultset->as_query } +sub as_query { return shift->_resultset->as_query(@_) } =head2 next diff --git a/lib/DBIx/Class/ResultSource.pm b/lib/DBIx/Class/ResultSource.pm index 0094fa4..9e67e66 100644 --- a/lib/DBIx/Class/ResultSource.pm +++ b/lib/DBIx/Class/ResultSource.pm @@ -1110,7 +1110,6 @@ sub resolve_join { $self->throw_exception("No idea how to resolve join reftype ".ref $join); } else { my $count = ++$seen->{$join}; - #use Data::Dumper; warn Dumper($seen); my $as = ($count > 1 ? "${join}_${count}" : $join); my $rel_info = $self->relationship_info($join); $self->throw_exception("No such relationship ${join}") unless $rel_info; @@ -1287,8 +1286,6 @@ in the supplied relationships. 
Examples: sub resolve_prefetch { my ($self, $pre, $alias, $seen, $order, $collapse) = @_; $seen ||= {}; - #$alias ||= $self->name; - #warn $alias, Dumper $pre; if( ref $pre eq 'ARRAY' ) { return map { $self->resolve_prefetch( $_, $alias, $seen, $order, $collapse ) } @@ -1301,7 +1298,6 @@ sub resolve_prefetch { $self->related_source($_)->resolve_prefetch( $pre->{$_}, "${alias}.$_", $seen, $order, $collapse) } keys %$pre; - #die Dumper \@ret; return @ret; } elsif( ref $pre ) { @@ -1354,8 +1350,6 @@ sub resolve_prefetch { return map { [ "${as}.$_", "${as_prefix}${pre}.$_", ] } $rel_source->columns; - #warn $alias, Dumper (\@ret); - #return @ret; } } diff --git a/lib/DBIx/Class/ResultSourceHandle.pm b/lib/DBIx/Class/ResultSourceHandle.pm index 6d6353f..7531954 100644 --- a/lib/DBIx/Class/ResultSourceHandle.pm +++ b/lib/DBIx/Class/ResultSourceHandle.pm @@ -87,7 +87,7 @@ sub STORABLE_freeze { Thaws frozen handle. Resets the internal schema reference to the package variable C<$thaw_schema>. The recomened way of setting this is to use -C<$schema->thaw($ice)> which handles this for you. +C<< $schema->thaw($ice) >> which handles this for you. =cut diff --git a/lib/DBIx/Class/Row.pm b/lib/DBIx/Class/Row.pm index c6fd923..da7ab36 100644 --- a/lib/DBIx/Class/Row.pm +++ b/lib/DBIx/Class/Row.pm @@ -347,7 +347,6 @@ sub insert { $self->throw_exception( "Can't get last insert id" ) unless (@ids == @auto_pri); $self->store_column($auto_pri[$_] => $ids[$_]) for 0 .. $#ids; -#use Data::Dumper; warn Dumper($self); } @@ -881,10 +880,10 @@ Inserts a new row into the database, as a copy of the original object. If a hashref of replacement data is supplied, these will take precedence over data in the original. -If the row has related objects in a -L then those objects may be copied -too depending on the L -relationship attribute. +Relationships will be followed by the copy procedure B if the +relationship specifes a true value for its +L attribute. C +is set by default on C relationships and unset on all others. =cut diff --git a/lib/DBIx/Class/Schema/Versioned.pm b/lib/DBIx/Class/Schema/Versioned.pm index 2c29e0e..39ea774 100644 --- a/lib/DBIx/Class/Schema/Versioned.pm +++ b/lib/DBIx/Class/Schema/Versioned.pm @@ -182,7 +182,6 @@ use strict; use warnings; use base 'DBIx::Class'; use POSIX 'strftime'; -use Data::Dumper; __PACKAGE__->mk_classdata('_filedata'); __PACKAGE__->mk_classdata('upgrade_directory'); diff --git a/lib/DBIx/Class/Storage/DBI.pm b/lib/DBIx/Class/Storage/DBI.pm index c276f38..e4f5c49 100644 --- a/lib/DBIx/Class/Storage/DBI.pm +++ b/lib/DBIx/Class/Storage/DBI.pm @@ -38,10 +38,10 @@ package # Hide from PAUSE use base qw/SQL::Abstract::Limit/; -# This prevents the caching of $dbh in S::A::L, I believe sub new { my $self = shift->SUPER::new(@_); + # This prevents the caching of $dbh in S::A::L, I believe # If limit_dialect is a ref (like a $dbh), go ahead and replace # it with what it resolves to: $self->{limit_dialect} = $self->_find_syntax($self->{limit_dialect}) @@ -50,6 +50,58 @@ sub new { $self; } + + +# Some databases (sqlite) do not handle multiple parenthesis +# around in/between arguments. A tentative x IN ( ( 1, 2 ,3) ) +# is interpreted as x IN 1 or something similar. 
+# +# Since we currently do not have access to the SQLA AST, resort +# to barbaric mutilation of any SQL supplied in literal form + +sub _strip_outer_paren { + my ($self, $arg) = @_; + + return $self->_SWITCH_refkind ($arg, { + ARRAYREFREF => sub { + $$arg->[0] = __strip_outer_paren ($$arg->[0]); + return $arg; + }, + SCALARREF => sub { + return \__strip_outer_paren( $$arg ); + }, + FALLBACK => sub { + return $arg + }, + }); +} + +sub __strip_outer_paren { + my $sql = shift; + + if ($sql and not ref $sql) { + while ($sql =~ /^ \s* \( (.*) \) \s* $/x ) { + $sql = $1; + } + } + + return $sql; +} + +sub _where_field_IN { + my ($self, $lhs, $op, $rhs) = @_; + $rhs = $self->_strip_outer_paren ($rhs); + return $self->SUPER::_where_field_IN ($lhs, $op, $rhs); +} + +sub _where_field_BETWEEN { + my ($self, $lhs, $op, $rhs) = @_; + $rhs = $self->_strip_outer_paren ($rhs); + return $self->SUPER::_where_field_BETWEEN ($lhs, $op, $rhs); +} + + + # DB2 is the only remaining DB using this. Even though we are not sure if # RowNumberOver is still needed here (should be part of SQLA) leave the # code in place @@ -1321,13 +1373,7 @@ sub insert_bulk { # @bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args ## This must be an arrayref, else nothing works! - my $tuple_status = []; - - ##use Data::Dumper; - ##print STDERR Dumper( $data, $sql, [@bind] ); - - my $time = time(); ## Get the bind_attributes, if any exist my $bind_attributes = $self->source_bind_attributes($source); diff --git a/lib/DBIx/Class/Storage/DBI/Cursor.pm b/lib/DBIx/Class/Storage/DBI/Cursor.pm index 60df379..0fe570f 100644 --- a/lib/DBIx/Class/Storage/DBI/Cursor.pm +++ b/lib/DBIx/Class/Storage/DBI/Cursor.pm @@ -36,8 +36,8 @@ Returns a new L object. sub new { my ($class, $storage, $args, $attrs) = @_; - #use Data::Dumper; warn Dumper(@_); $class = ref $class if ref $class; + my $new = { storage => $storage, args => $args, diff --git a/lib/DBIx/Class/Storage/DBI/Sybase.pm b/lib/DBIx/Class/Storage/DBI/Sybase.pm index 87acdde..f1a8dc8 100644 --- a/lib/DBIx/Class/Storage/DBI/Sybase.pm +++ b/lib/DBIx/Class/Storage/DBI/Sybase.pm @@ -5,6 +5,20 @@ use warnings; use base qw/DBIx::Class::Storage::DBI::NoBindVars/; +sub _rebless { + my $self = shift; + + my $dbtype = eval { @{$self->_dbh->selectrow_arrayref(qq{sp_server_info \@attribute_id=1})}[2] }; + unless ( $@ ) { + $dbtype =~ s/\W/_/gi; + my $subclass = "DBIx::Class::Storage::DBI::Sybase::${dbtype}"; + if ($self->load_optional_class($subclass) && !$self->isa($subclass)) { + bless $self, $subclass; + $self->_rebless; + } + } +} + 1; =head1 NAME @@ -17,10 +31,21 @@ This subclass supports L for real Sybase databases. If you are using an MSSQL database via L, see L. +=head1 CAVEATS + +This storage driver uses L as a base. +This means that bind variables will be interpolated (properly quoted of course) +into the SQL query itself, without using bind placeholders. + +More importantly this means that caching of prepared statements is explicitly +disabled, as the interpolation renders it useless. + =head1 AUTHORS Brandon L Black +Justin Hunter + =head1 LICENSE You may distribute this code under the same terms as Perl itself. 
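The _rebless hook added to DBIx::Class::Storage::DBI::Sybase above means no storage_type needs to be set by hand any longer: when DBD::Sybase is actually talking to a Microsoft SQL Server, sp_server_info is consulted on connection and the storage is re-blessed into the new subclass. A minimal sketch, mirroring the isa_ok check in t/74mssql.t; the schema class and DSN below are placeholders:

    my $schema = My::Schema->connect(
      'dbi:Sybase:server=mssql_box;database=test',   # placeholder DSN
      $user, $pass,
    );

    # force a connection so the driver re-detection runs
    $schema->storage->ensure_connected;

    print ref $schema->storage, "\n";
    # DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server (on a real MSSQL box)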
diff --git a/lib/DBIx/Class/Storage/DBI/Sybase/MSSQL.pm b/lib/DBIx/Class/Storage/DBI/Sybase/MSSQL.pm index 81222e9..f4eee43 100644 --- a/lib/DBIx/Class/Storage/DBI/Sybase/MSSQL.pm +++ b/lib/DBIx/Class/Storage/DBI/Sybase/MSSQL.pm @@ -3,8 +3,7 @@ package DBIx::Class::Storage::DBI::Sybase::MSSQL; use strict; use warnings; -use Class::C3; -use base qw/DBIx::Class::Storage::DBI::MSSQL DBIx::Class::Storage::DBI::Sybase/; +use base qw/DBIx::Class::Storage::DBI::ODBC::Microsoft_SQL_Server DBIx::Class::Storage::DBI::Sybase/; 1; @@ -29,6 +28,8 @@ after connecting. Brandon L Black +Justin Hunter + =head1 LICENSE You may distribute this code under the same terms as Perl itself. diff --git a/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server.pm b/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server.pm new file mode 100644 index 0000000..d4485a3 --- /dev/null +++ b/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server.pm @@ -0,0 +1,41 @@ +package DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server; + +use strict; +use warnings; + +use base qw/DBIx::Class::Storage::DBI::ODBC::Microsoft_SQL_Server DBIx::Class::Storage::DBI::Sybase/; + +1; + +=head1 NAME + +DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server - Storage::DBI subclass for MSSQL via +DBD::Sybase + +=head1 SYNOPSIS + +This subclass supports MSSQL connected via L. + + $schema->storage_type('::DBI::Sybase::Microsoft_SQL_Server'); + $schema->connect_info('dbi:Sybase:....', ...); + +=head1 CAVEATS + +This storage driver uses L as a base. +This means that bind variables will be interpolated (properly quoted of course) +into the SQL query itself, without using bind placeholders. + +More importantly this means that caching of prepared statements is explicitly +disabled, as the interpolation renders it useless. + +=head1 AUTHORS + +Brandon L Black + +Justin Hunter + +=head1 LICENSE + +You may distribute this code under the same terms as Perl itself. + +=cut diff --git a/lib/SQL/Translator/Parser/DBIx/Class.pm b/lib/SQL/Translator/Parser/DBIx/Class.pm index 57dad33..58db779 100644 --- a/lib/SQL/Translator/Parser/DBIx/Class.pm +++ b/lib/SQL/Translator/Parser/DBIx/Class.pm @@ -13,7 +13,6 @@ $VERSION = '1.10'; $DEBUG = 0 unless defined $DEBUG; use Exporter; -use Data::Dumper; use SQL::Translator::Utils qw(debug normalize_name); use base qw(Exporter); diff --git a/lib/SQL/Translator/Producer/DBIx/Class/File.pm b/lib/SQL/Translator/Producer/DBIx/Class/File.pm index 300ce50..4132c73 100644 --- a/lib/SQL/Translator/Producer/DBIx/Class/File.pm +++ b/lib/SQL/Translator/Producer/DBIx/Class/File.pm @@ -25,6 +25,7 @@ $DEBUG = 0 unless defined $DEBUG; use SQL::Translator::Schema::Constants; use SQL::Translator::Utils qw(header_comment); +use Data::Dumper (); ## Skip all column type translation, as we want to use whatever the parser got. diff --git a/t/53delete_chained.t b/t/53delete_chained.t deleted file mode 100644 index 4619548..0000000 --- a/t/53delete_chained.t +++ /dev/null @@ -1,45 +0,0 @@ -use Test::More; -use strict; -use warnings; -use lib qw(t/lib); -use DBICTest; - -plan tests => 9; - -# This set of tests attempts to do a delete on a chained resultset, which -# would lead to SQL DELETE with a JOIN, which is not supported by the -# SQL generator right now. -# So it currently checks that these operations fail with a warning. -# When the SQL generator is fixed this test will need fixing up appropriately. 
- -my $schema = DBICTest->init_schema(); -my $total_tracks = $schema->resultset('Track')->count; -cmp_ok($total_tracks, '>', 0, 'need track records'); - -# test that delete_related w/o conditions deletes all related records only -{ - my $w; - local $SIG{__WARN__} = sub { $w = shift }; - - my $artist = $schema->resultset("Artist")->find(3); - my $artist_tracks = $artist->cds->search_related('tracks')->count; - cmp_ok($artist_tracks, '<', $total_tracks, 'need more tracks than just related tracks'); - - ok(!eval{$artist->cds->search_related('tracks')->delete}); - cmp_ok($schema->resultset('Track')->count, '==', $total_tracks, 'No tracks should be deleted'); - like ($w, qr/Currently \$rs->delete\(\) does not generate proper SQL/, 'Delete join warning'); -} - -# test that delete_related w/conditions deletes just the matched related records only -{ - my $w; - local $SIG{__WARN__} = sub { $w = shift }; - - my $artist2 = $schema->resultset("Artist")->find(2); - my $artist2_tracks = $artist2->search_related('cds')->search_related('tracks')->count; - cmp_ok($artist2_tracks, '<', $total_tracks, 'need more tracks than related tracks'); - - ok(!eval{$artist2->search_related('cds')->search_related('tracks')->delete}); - cmp_ok($schema->resultset('Track')->count, '==', $total_tracks, 'No tracks should be deleted'); - like ($w, qr/Currently \$rs->delete\(\) does not generate proper SQL/, 'Delete join warning'); -} diff --git a/t/73oracle_inflate.t b/t/73oracle_inflate.t index ded8bb4..0f2fc23 100644 --- a/t/73oracle_inflate.t +++ b/t/73oracle_inflate.t @@ -24,6 +24,7 @@ else { # DateTime::Format::Oracle needs this set $ENV{NLS_DATE_FORMAT} = 'DD-MON-YY'; $ENV{NLS_TIMESTAMP_FORMAT} = 'YYYY-MM-DD HH24:MI:SSXFF'; +$ENV{NLS_LANG} = 'AMERICAN_AMERICA.WE8ISO8859P1'; my $schema = DBICTest::Schema->connect($dsn, $user, $pass); diff --git a/t/74mssql.t b/t/74mssql.t index 92b3103..49f7967 100644 --- a/t/74mssql.t +++ b/t/74mssql.t @@ -7,23 +7,18 @@ use DBICTest; my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_MSSQL_${_}" } qw/DSN USER PASS/}; -#warn "$dsn $user $pass"; - plan skip_all => 'Set $ENV{DBICTEST_MSSQL_DSN}, _USER and _PASS to run this test' unless ($dsn); -plan tests => 5; - -my $storage_type = '::DBI::MSSQL'; -$storage_type = '::DBI::Sybase::MSSQL' if $dsn =~ /^dbi:Sybase:/; -# Add more for others in the future when they exist (ODBC? ADO? JDBC?) +plan tests => 6; my $schema = DBICTest::Schema->clone; -$schema->storage_type($storage_type); $schema->connection($dsn, $user, $pass); my $dbh = $schema->storage->dbh; +isa_ok($schema->storage, 'DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server'); + $dbh->do("IF OBJECT_ID('artist', 'U') IS NOT NULL DROP TABLE artist"); $dbh->do("IF OBJECT_ID('cd', 'U') IS NOT NULL diff --git a/t/76joins.t b/t/76joins.t index 84d8ba5..39a51d3 100644 --- a/t/76joins.t +++ b/t/76joins.t @@ -17,7 +17,7 @@ BEGIN { eval "use DBD::SQLite"; plan $@ ? 
( skip_all => 'needs DBD::SQLite for testing' ) - : ( tests => 18 ); + : ( tests => 33 ); } # figure out if we've got a version of sqlite that is older than 3.2.6, in @@ -140,7 +140,7 @@ my $rs = $schema->resultset("CD")->search( ] ] } ); -cmp_ok( $rs + 0, '==', 1, "Single record in resultset"); +is( $rs + 0, 1, "Single record in resultset"); is($rs->first->title, 'Forkful of bees', 'Correct record returned'); @@ -148,7 +148,7 @@ $rs = $schema->resultset("CD")->search( { 'year' => 2001, 'artist.name' => 'Caterwauler McCrae' }, { join => 'artist' }); -cmp_ok( $rs + 0, '==', 1, "Single record in resultset"); +is( $rs + 0, 1, "Single record in resultset"); is($rs->first->title, 'Forkful of bees', 'Correct record returned'); @@ -157,7 +157,7 @@ $rs = $schema->resultset("CD")->search( 'liner_notes.notes' => 'Kill Yourself!' }, { join => [ qw/artist liner_notes/ ] }); -cmp_ok( $rs + 0, '==', 1, "Single record in resultset"); +is( $rs + 0, 1, "Single record in resultset"); is($rs->first->title, 'Come Be Depressed With Us', 'Correct record returned'); @@ -166,7 +166,7 @@ $rs = $schema->resultset("CD")->search( { 'artist' => 1 }, { join => [qw/artist/], order_by => 'artist.name' } ); -cmp_ok( scalar $rs->all, '==', scalar $rs->slice(0, $rs->count - 1), 'slice() with join has same count as all()' ); +is( scalar $rs->all, scalar $rs->slice(0, $rs->count - 1), 'slice() with join has same count as all()' ); ok(!$rs->slice($rs->count+1000, $rs->count+1002)->count, 'Slicing beyond end of rs returns a zero count'); @@ -175,32 +175,83 @@ $rs = $schema->resultset("Artist")->search( { 'liner_notes.notes' => 'Kill Yourself!' }, { join => { 'cds' => 'liner_notes' } }); -cmp_ok( $rs->count, '==', 1, "Single record in resultset"); +is( $rs->count, 1, "Single record in resultset"); is($rs->first->name, 'We Are Goth', 'Correct record returned'); -# test for warnings on delete of joined resultset -$rs = $schema->resultset("CD")->search( - { 'artist.name' => 'Caterwauler McCrae' }, - { join => [qw/artist/]} -); -my $tst_delete_warning; -eval { - local $SIG{__WARN__} = sub { $tst_delete_warning = shift }; - $rs->delete(); -}; -ok( ($@ || $tst_delete_warning), 'fail/warning on attempt to delete a join-ed resultset'); - -# test for warnings on update of joined resultset -$rs = $schema->resultset("CD")->search( - { 'artist.name' => 'Random Boy Band' }, - { join => [qw/artist/]} -); -my $tst_update_warning; -eval { - local $SIG{__WARN__} = sub { $tst_update_warning = shift }; - $rs->update({ 'artist' => 1 }); -}; - -ok( ($@ || $tst_update_warning), 'fail/warning on attempt to update a join-ed resultset'); +{ + $schema->populate('Artist', [ + [ qw/artistid name/ ], + [ 4, 'Another Boy Band' ], + ]); + $schema->populate('CD', [ + [ qw/cdid artist title year/ ], + [ 6, 2, "Greatest Hits", 2001 ], + [ 7, 4, "Greatest Hits", 2005 ], + [ 8, 4, "BoyBandBlues", 2008 ], + ]); + $schema->populate('TwoKeys', [ + [ qw/artist cd/ ], + [ 2, 4 ], + [ 2, 6 ], + [ 4, 7 ], + [ 4, 8 ], + ]); + + sub cd_count { + return $schema->resultset("CD")->count; + } + sub tk_count { + return $schema->resultset("TwoKeys")->count; + } + + is(cd_count(), 8, '8 rows in table cd'); + is(tk_count(), 7, '7 rows in table twokeys'); + + sub artist1 { + return $schema->resultset("CD")->search( + { 'artist.name' => 'Caterwauler McCrae' }, + { join => [qw/artist/]} + ); + } + sub artist2 { + return $schema->resultset("CD")->search( + { 'artist.name' => 'Random Boy Band' }, + { join => [qw/artist/]} + ); + } + + is( artist1()->count, 3, '3 Caterwauler McCrae CDs' 
); + ok( artist1()->delete, 'Successfully deleted 3 CDs' ); + is( artist1()->count, 0, '0 Caterwauler McCrae CDs' ); + is( artist2()->count, 2, '3 Random Boy Band CDs' ); + ok( artist2()->update( { 'artist' => 1 } ) ); + is( artist2()->count, 0, '0 Random Boy Band CDs' ); + is( artist1()->count, 2, '2 Caterwauler McCrae CDs' ); + + # test update on multi-column-pk + sub tk1 { + return $schema->resultset("TwoKeys")->search( + { + 'artist.name' => { like => '%Boy Band' }, + 'cd.title' => 'Greatest Hits', + }, + { join => [qw/artist cd/] } + ); + } + sub tk2 { + return $schema->resultset("TwoKeys")->search( + { 'artist.name' => 'Caterwauler McCrae' }, + { join => [qw/artist/]} + ); + } + is( tk2()->count, 2, 'TwoKeys count == 2' ); + is( tk1()->count, 2, 'TwoKeys count == 2' ); + ok( tk1()->update( { artist => 1 } ) ); + is( tk1()->count, 0, 'TwoKeys count == 0' ); + is( tk2()->count, 4, '2 Caterwauler McCrae CDs' ); + ok( tk2()->delete, 'Successfully deleted 4 CDs' ); + is(cd_count(), 5, '5 rows in table cd'); + is(tk_count(), 3, '3 rows in table twokeys'); +} diff --git a/t/count/in_subquery.t b/t/count/in_subquery.t new file mode 100644 index 0000000..1275c1e --- /dev/null +++ b/t/count/in_subquery.t @@ -0,0 +1,26 @@ +#!/usr/bin/perl + +use strict; +use warnings; + +use Data::Dumper; + +use Test::More; + +plan ( tests => 1 ); + +use lib qw(t/lib); +use DBICTest; +use DBIC::SqlMakerTest; + +my $schema = DBICTest->init_schema(); + +{ + my $rs = $schema->resultset("CD")->search( + { 'artist.name' => 'Caterwauler McCrae' }, + { join => [qw/artist/]} + ); + my $squery = $rs->get_column('cdid')->as_query; + my $subsel_rs = $schema->resultset("CD")->search( { cdid => { IN => $squery } } ); + is($subsel_rs->count, $rs->count, 'Subselect on PK got the same row count'); +} diff --git a/t/resultset/as_query.t b/t/resultset/as_query.t index 7f4c738..c496085 100644 --- a/t/resultset/as_query.t +++ b/t/resultset/as_query.t @@ -65,5 +65,3 @@ my $rscol = $art_rs->get_column( 'charfield' ); [ [ rank => 2 ], [ name => 'Billy Joel' ] ], ); } - -__END__ diff --git a/t/99rh_perl_perf_bug.t b/t/zzzzzzz_perl_perf_bug.t similarity index 100% rename from t/99rh_perl_perf_bug.t rename to t/zzzzzzz_perl_perf_bug.t
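For reference, the NLS_LANG addition to t/73oracle_inflate.t above exists because DateTime::Format::Oracle parses 'DD-MON-YY' values according to the session NLS settings, so a non-English client locale would break the round-trip. A sketch of the environment a test (or application) would set up before connecting; the values are taken from the test above, while the DSN is a placeholder:

    # Pin the Oracle session language so MON abbreviations are predictable
    $ENV{NLS_LANG}             = 'AMERICAN_AMERICA.WE8ISO8859P1';
    $ENV{NLS_DATE_FORMAT}      = 'DD-MON-YY';
    $ENV{NLS_TIMESTAMP_FORMAT} = 'YYYY-MM-DD HH24:MI:SSXFF';

    my $schema = DBICTest::Schema->connect($dsn, $user, $pass);  # e.g. dbi:Oracle:...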