From: Peter Rabbitson
Date: Fri, 13 Nov 2009 12:17:57 +0000 (+0000)
Subject: Merge 'trunk' into 'view_rels'
X-Git-Tag: v0.08116~140^2~5
X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=commitdiff_plain;h=38b4225f88f46198b9f07f1be23ffad139f2d7d1;hp=18129e8117befc4681180910901210aeef4fdf42;p=dbsrgits%2FDBIx-Class.git

Merge 'trunk' into 'view_rels'

r7872@Thesaurus (orig r7860):  ribasushi | 2009-11-12 01:35:36 +0100
Fix find on resultset with custom result_class
r7873@Thesaurus (orig r7861):  ribasushi | 2009-11-12 01:40:14 +0100
Fix return value of in_storage
r7874@Thesaurus (orig r7862):  ribasushi | 2009-11-12 01:43:48 +0100
Extra FAQ entry
r7875@Thesaurus (orig r7863):  ribasushi | 2009-11-12 02:11:25 +0100
Sanify _determine_driver handling in ::Storage::DBI
r7876@Thesaurus (orig r7864):  ribasushi | 2009-11-12 02:14:37 +0100
Add mysql determine_driver test by Pedro Melo
r7881@Thesaurus (orig r7869):  ribasushi | 2009-11-12 11:10:04 +0100
_cond_for_update_delete is hopelessly broken attempting to introspect SQLA1. Replace with a horrific but effective hack
r7882@Thesaurus (orig r7870):  ribasushi | 2009-11-12 11:15:12 +0100
Clarifying comment
r7884@Thesaurus (orig r7872):  ribasushi | 2009-11-13 00:13:40 +0100
The real fix for the non-introspectable condition bug, mst++
r7885@Thesaurus (orig r7873):  ribasushi | 2009-11-13 00:24:56 +0100
Some cleanup
r7887@Thesaurus (orig r7875):  frew | 2009-11-13 10:01:37 +0100
fix subtle bug with Sybase database type determination
---

diff --git a/Changes b/Changes
index 25707cb..43d9dea 100644
--- a/Changes
+++ b/Changes
@@ -30,6 +30,12 @@ Revision history for DBIx::Class
         - Remove some IN workarounds, and require a recent version
           of SQLA instead
         - Improvements to populate's handling of mixed scalarref values
+        - Fixed regression losing result_class after $rs->find (introduced
+          in 0.08108)
+        - Fix in_storage() to return 1|0 as per existing documentation
+        - Centralize handling of _determine_driver calls prior to certain
+          ::Storage::DBI methods
+        - Fix update/delete arbitrary condition handling (RT#51409)
         - POD improvements
 
 0.08112 2009-09-21 10:57:00 (UTC)
diff --git a/lib/DBIx/Class/Manual/FAQ.pod b/lib/DBIx/Class/Manual/FAQ.pod
index 4a5d7ba..6d35ae6 100644
--- a/lib/DBIx/Class/Manual/FAQ.pod
+++ b/lib/DBIx/Class/Manual/FAQ.pod
@@ -371,6 +371,9 @@ C supplied with C.
 
 =item .. insert many rows of data efficiently?
 
+The C<populate> method in L<DBIx::Class::ResultSet> provides
+efficient bulk inserts.
+
 =item .. update a collection of rows at the same time?
 
 Create a resultset using a search, to filter the rows of data you
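
The new FAQ entry above points at resultset-level bulk loading. As a usage sketch (the schema handle and column data here are illustrative, not part of the commit), calling populate in void context hands the whole batch to the storage layer in one pass instead of creating a row object per record:

    # void context engages the bulk-insert codepath (no row objects are built)
    $schema->resultset('Artist')->populate([
      { name => 'Caterwauler McCrae' },
      { name => 'Random Boy Band'    },
      { name => 'We Are Goth'        },
    ]);
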
diff --git a/lib/DBIx/Class/ResultSet.pm b/lib/DBIx/Class/ResultSet.pm
index 594c04a..1279ea5 100644
--- a/lib/DBIx/Class/ResultSet.pm
+++ b/lib/DBIx/Class/ResultSet.pm
@@ -357,9 +357,9 @@ sub search_rs {
   }
 
   my $rs = (ref $self)->new($self->result_source, $new_attrs);
-  if ($rows) {
-    $rs->set_cache($rows);
-  }
+
+  $rs->set_cache($rows) if ($rows);
+
   return $rs;
 }
 
@@ -530,7 +530,7 @@ sub find {
   }
 
   # Run the query
-  my $rs = $self->search ($query, $attrs);
+  my $rs = $self->search ($query, {result_class => $self->result_class, %$attrs});
   if (keys %{$rs->_resolved_attrs->{collapse}}) {
     my $row = $rs->next;
     carp "Query returned more than one row" if $rs->next;
@@ -1495,8 +1495,12 @@ sub _rs_update_delete {
 
   my $rsrc = $self->result_source;
 
+  # if a condition exists we need to strip all table qualifiers
+  # if this is not possible we'll force a subquery below
+  my $cond = $rsrc->schema->storage->_strip_cond_qualifiers ($self->{cond});
+
   my $needs_group_by_subq = $self->_has_resolved_attr (qw/collapse group_by -join/);
-  my $needs_subq = $self->_has_resolved_attr (qw/row offset/);
+  my $needs_subq = (not defined $cond) || $self->_has_resolved_attr(qw/row offset/);
 
   if ($needs_group_by_subq or $needs_subq) {
 
@@ -1544,70 +1548,11 @@ sub _rs_update_delete {
     return $rsrc->storage->$op(
       $rsrc,
       $op eq 'update' ? $values : (),
-      $self->_cond_for_update_delete,
+      $cond,
     );
   }
 }
 
-
-# _cond_for_update_delete
-#
-# update/delete require the condition to be modified to handle
-# the differing SQL syntax available. This transforms the $self->{cond}
-# appropriately, returning the new condition.
-
-sub _cond_for_update_delete {
-  my ($self, $full_cond) = @_;
-  my $cond = {};
-
-  $full_cond ||= $self->{cond};
-  # No-op. No condition, we're updating/deleting everything
-  return $cond unless ref $full_cond;
-
-  if (ref $full_cond eq 'ARRAY') {
-    $cond = [
-      map {
-        my %hash;
-        foreach my $key (keys %{$_}) {
-          $key =~ /([^.]+)$/;
-          $hash{$1} = $_->{$key};
-        }
-        \%hash;
-      } @{$full_cond}
-    ];
-  }
-  elsif (ref $full_cond eq 'HASH') {
-    if ((keys %{$full_cond})[0] eq '-and') {
-      $cond->{-and} = [];
-      my @cond = @{$full_cond->{-and}};
-      for (my $i = 0; $i < @cond; $i++) {
-        my $entry = $cond[$i];
-        my $hash;
-        if (ref $entry eq 'HASH') {
-          $hash = $self->_cond_for_update_delete($entry);
-        }
-        else {
-          $entry =~ /([^.]+)$/;
-          $hash->{$1} = $cond[++$i];
-        }
-        push @{$cond->{-and}}, $hash;
-      }
-    }
-    else {
-      foreach my $key (keys %{$full_cond}) {
-        $key =~ /([^.]+)$/;
-        $cond->{$1} = $full_cond->{$key};
-      }
-    }
-  }
-  else {
-    $self->throw_exception("Can't update/delete on resultset with condition unless hash or array");
-  }
-
-  return $cond;
-}
-
-
 =head2 update
 
 =over 4
diff --git a/lib/DBIx/Class/Row.pm b/lib/DBIx/Class/Row.pm
index f708d21..75b64db 100644
--- a/lib/DBIx/Class/Row.pm
+++ b/lib/DBIx/Class/Row.pm
@@ -424,7 +424,7 @@ L on one, sets it to false.
 sub in_storage {
   my ($self, $val) = @_;
   $self->{_in_storage} = $val if @_ > 1;
-  return $self->{_in_storage};
+  return $self->{_in_storage} ? 1 : 0;
 }
 
 =head2 update
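
Two of the fixes above are easiest to see from the caller's side. A short sketch, assuming a DBICTest-style schema (resultset and column names are illustrative): in_storage() now always returns a plain 1 or 0, and find() keeps whatever result_class the resultset was given instead of silently reverting to the source's default:

    my $rs = $schema->resultset('Artist');

    my $artist = $rs->new_result({ name => 'Not Inserted Yet' });
    print $artist->in_storage;   # 0 (previously whatever false value was stored)
    $artist->insert;
    print $artist->in_storage;   # 1

    # a custom result_class now survives find()
    my $hri_rs = $rs->search({}, {
      result_class => 'DBIx::Class::ResultClass::HashRefInflator',
    });
    my $row = $hri_rs->find(1);  # plain hashref, not a row object
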
diff --git a/lib/DBIx/Class/Storage/DBI.pm b/lib/DBIx/Class/Storage/DBI.pm
index 2009df3..0de26f6 100644
--- a/lib/DBIx/Class/Storage/DBI.pm
+++ b/lib/DBIx/Class/Storage/DBI.pm
@@ -41,6 +41,38 @@ __PACKAGE__->mk_group_accessors('inherited' => qw/sql_maker_class/);
 
 __PACKAGE__->sql_maker_class('DBIx::Class::SQLAHacks');
 
+# Each of these methods need _determine_driver called before itself
+# in order to function reliably. This is a purely DRY optimization
+my @rdbms_specific_methods = qw/
+  sqlt_type
+  build_datetime_parser
+  datetime_parser_type
+
+  insert
+  insert_bulk
+  update
+  delete
+  select
+  select_single
+/;
+
+for my $meth (@rdbms_specific_methods) {
+
+  my $orig = __PACKAGE__->can ($meth)
+    or next;
+
+  no strict qw/refs/;
+  no warnings qw/redefine/;
+  *{__PACKAGE__ ."::$meth"} = sub {
+    if (not $_[0]->_driver_determined) {
+      $_[0]->_determine_driver;
+      goto $_[0]->can($meth);
+    }
+    $orig->(@_);
+  };
+}
+
+
 =head1 NAME
 
 DBIx::Class::Storage::DBI - DBI storage handler
@@ -713,7 +745,6 @@ in MySQL's case disabled entirely.
 # Storage subclasses should override this
 sub with_deferred_fk_checks {
   my ($self, $sub) = @_;
-
   $sub->();
 }
 
@@ -1304,12 +1335,6 @@ sub _execute {
 sub insert {
   my ($self, $source, $to_insert) = @_;
 
-# redispatch to insert method of storage we reblessed into, if necessary
-  if (not $self->_driver_determined) {
-    $self->_determine_driver;
-    goto $self->can('insert');
-  }
-
   my $ident = $source->from;
   my $bind_attributes = $self->source_bind_attributes($source);
 
@@ -1341,12 +1366,6 @@ sub insert {
 sub insert_bulk {
   my ($self, $source, $cols, $data) = @_;
 
-# redispatch to insert_bulk method of storage we reblessed into, if necessary
-  if (not $self->_driver_determined) {
-    $self->_determine_driver;
-    goto $self->can('insert_bulk');
-  }
-
   my %colvalues;
   @colvalues{@$cols} = (0..$#$cols);
 
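
The @rdbms_specific_methods loop added at the top of ::Storage::DBI replaces the per-method redispatch boilerplate removed in the hunks above and below. The idiom is worth spelling out on its own; the following stripped-down sketch uses an invented class (My::LazyDriver is not part of DBIx::Class) but follows the same wrap-and-redispatch pattern, where goto re-resolves the method after the one-time setup so a possibly reblessed object gets its own implementation with the original @_ intact:

    package My::LazyDriver;    # hypothetical, for illustration only

    sub new         { bless { driver_determined => 0 }, shift }
    sub _determined { $_[0]{driver_determined} }
    sub _determine  { $_[0]{driver_determined} = 1 }  # the real code also re-blesses $_[0]

    sub sqlt_type   { 'GenericDB' }

    for my $meth (qw/sqlt_type/) {
      my $orig = __PACKAGE__->can($meth) or next;

      no strict 'refs';
      no warnings 'redefine';
      *{__PACKAGE__ . "::$meth"} = sub {
        if (not $_[0]->_determined) {
          $_[0]->_determine;
          # look the method up again (the object may have been reblessed)
          # and tail-jump to it, preserving @_ and the caller's frame
          goto $_[0]->can($meth);
        }
        $orig->(@_);
      };
    }
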
@@ -1532,30 +1551,79 @@ sub _dbh_execute_inserts_with_no_binds {
 sub update {
   my ($self, $source, @args) = @_;
 
-# redispatch to update method of storage we reblessed into, if necessary
-  if (not $self->_driver_determined) {
-    $self->_determine_driver;
-    goto $self->can('update');
-  }
-
-  my $bind_attributes = $self->source_bind_attributes($source);
+  my $bind_attrs = $self->source_bind_attributes($source);
 
-  return $self->_execute('update' => [], $source, $bind_attributes, @args);
+  return $self->_execute('update' => [], $source, $bind_attrs, @args);
 }
 
 sub delete {
-  my $self = shift @_;
-  my $source = shift @_;
-  $self->_determine_driver;
+  my ($self, $source, @args) = @_;
+
   my $bind_attrs = $self->source_bind_attributes($source);
 
-  return $self->_execute('delete' => [], $source, $bind_attrs, @_);
+  return $self->_execute('delete' => [], $source, $bind_attrs, @args);
+}
+
+# Most databases do not allow aliasing of tables in UPDATE/DELETE. Thus
+# a condition containing 'me' or other table prefixes will not work
+# at all. What this code tries to do (badly) is introspect the condition
+# and remove all column qualifiers. If it bails out early (returns undef)
+# the calling code should try another approach (e.g. a subquery)
+sub _strip_cond_qualifiers {
+  my ($self, $where) = @_;
+
+  my $cond = {};
+
+  # No-op. No condition, we're updating/deleting everything
+  return $cond unless $where;
+
+  if (ref $where eq 'ARRAY') {
+    $cond = [
+      map {
+        my %hash;
+        foreach my $key (keys %{$_}) {
+          $key =~ /([^.]+)$/;
+          $hash{$1} = $_->{$key};
+        }
+        \%hash;
+      } @$where
+    ];
+  }
+  elsif (ref $where eq 'HASH') {
+    if ( (keys %$where) == 1 && ( (keys %{$where})[0] eq '-and' )) {
+      $cond->{-and} = [];
+      my @cond = @{$where->{-and}};
+      for (my $i = 0; $i < @cond; $i++) {
+        my $entry = $cond[$i];
+        my $hash;
+        if (ref $entry eq 'HASH') {
+          $hash = $self->_strip_cond_qualifiers($entry);
+        }
+        else {
+          $entry =~ /([^.]+)$/;
+          $hash->{$1} = $cond[++$i];
+        }
+        push @{$cond->{-and}}, $hash;
+      }
+    }
+    else {
+      foreach my $key (keys %$where) {
+        $key =~ /([^.]+)$/;
+        $cond->{$1} = $where->{$key};
      }
+    }
+  }
+  else {
+    return undef;
+  }
+
+  return $cond;
 }
 
 # We were sent here because the $rs contains a complex search
 # which will require a subquery to select the correct rows
-# (i.e. joined or limited resultsets)
+# (i.e. joined or limited resultsets, or non-introspectable conditions)
 #
 # Generating a single PK column subquery is trivial and supported
 # by all RDBMS. However if we have a multicolumn PK, things get ugly.
@@ -1566,16 +1634,7 @@ sub _subq_update_delete {
 
   my $rsrc = $rs->result_source;
 
-  # we already check this, but double check naively just in case. Should be removed soon
-  my $sel = $rs->_resolved_attrs->{select};
-  $sel = [ $sel ] unless ref $sel eq 'ARRAY';
   my @pcols = $rsrc->primary_columns;
-  if (@$sel != @pcols) {
-    $self->throw_exception (
-      'Subquery update/delete can not be called on resultsets selecting a'
-      .' number of columns different than the number of primary keys'
-    );
-  }
 
   if (@pcols == 1) {
     return $self->$op (
@@ -2350,14 +2409,7 @@ Returns the database driver name.
 
 =cut
 
 sub sqlt_type {
-  my ($self) = @_;
-
-  if (not $self->_driver_determined) {
-    $self->_determine_driver;
-    goto $self->can ('sqlt_type');
-  }
-
-  $self->_get_dbh->{Driver}->{Name};
+  shift->_get_dbh->{Driver}->{Name};
 }
 
 =head2 bind_attribute_by_data_type
@@ -2701,11 +2753,6 @@ See L
 
 =cut
 
 sub build_datetime_parser {
-  if (not $_[0]->_driver_determined) {
-    $_[0]->_determine_driver;
-    goto $_[0]->can('build_datetime_parser');
-  }
-
   my $self = shift;
   my $type = $self->datetime_parser_type(@_);
   $self->ensure_class_loaded ($type);
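
The behaviour of the new _strip_cond_qualifiers helper (an internal method, not public API) is easier to see on concrete data. Roughly, and only as a sketch of the expected in/out shapes rather than code from the commit: qualified column names lose their table prefix, while any condition the helper cannot pick apart makes it return undef, which in turn flips $needs_subq in _rs_update_delete and forces the subquery path:

    my $storage = $schema->storage;

    # { 'me.pilot_sequence' => 11, 'me.autopilot' => 'a' }
    #    becomes
    # { pilot_sequence => 11, autopilot => 'a' }
    my $flat = $storage->_strip_cond_qualifiers({
      'me.pilot_sequence' => 11,
      'me.autopilot'      => 'a',
    });

    # a literal-SQL condition is neither a HASH nor an ARRAY ref, so the
    # helper gives up (returns undef) and the caller falls back to a
    # primary-key subquery instead of emitting broken SQL
    my $unhandled = $storage->_strip_cond_qualifiers(
      \ 'pilot_sequence BETWEEN 11 AND 21'
    );
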
diff --git a/lib/DBIx/Class/Storage/DBI/Sybase.pm b/lib/DBIx/Class/Storage/DBI/Sybase.pm
index eeb4f01..8cb5f5f 100644
--- a/lib/DBIx/Class/Storage/DBI/Sybase.pm
+++ b/lib/DBIx/Class/Storage/DBI/Sybase.pm
@@ -63,12 +63,13 @@ sub _rebless {
     my $dbtype = eval {
       @{$self->_get_dbh->selectrow_arrayref(qq{sp_server_info \@attribute_id=1})}[2]
     } || '';
+    $self->throw_exception("Unable to establish connection to determine database type: $@")
+      if $@;
 
-    my $exception = $@;
     $dbtype =~ s/\W/_/gi;
     my $subclass = "DBIx::Class::Storage::DBI::Sybase::${dbtype}";
-    if (!$exception && $dbtype && $self->load_optional_class($subclass)) {
+    if ($dbtype && $self->load_optional_class($subclass)) {
       bless $self, $subclass;
       $self->_rebless;
     } else { # real Sybase
@@ -189,7 +190,7 @@ sub _populate_dbh {
   my $self = shift;
 
   $self->next::method(@_);
-  
+
   if ($self->_is_bulk_storage) {
     # this should be cleared on every reconnect
     $self->_began_bulk_work(0);
@@ -381,7 +382,7 @@ sub insert {
   # we are already in a transaction, or there are no blobs
   # and we don't need the PK - just (try to) do it
   if ($self->{transaction_depth}
-        || (!$blob_cols && !$dumb_last_insert_id) 
+        || (!$blob_cols && !$dumb_last_insert_id)
   ) {
     return $self->_insert (
       $next, $source, $to_insert, $blob_cols, $identity_col
@@ -511,7 +512,7 @@ EOF
 
   # _execute_array uses a txn anyway, but it ends too early in case we need to
   # select max(col) to get the identity for inserting blobs.
-  ($self, my $guard) = $self->{transaction_depth} == 0 ? 
+  ($self, my $guard) = $self->{transaction_depth} == 0 ?
     ($self->_writer_storage, $self->_writer_storage->txn_scope_guard)
     :
     ($self, undef);
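
The Sybase change above tightens the detect-and-rebless step: if the sp_server_info probe dies, the storage now throws at once instead of quietly carrying on as generic Sybase and failing later in less obvious ways. Reduced to a sketch (the helper and vendor class names below are invented; only the shape matches the real code):

    sub _rebless_by_server_flavour {     # hypothetical helper
      my $self = shift;

      my $flavour = eval { $self->_probe_server_flavour } || '';
      # check $@ immediately: a failed probe is now a hard error
      $self->throw_exception("Unable to determine database type: $@")
        if $@;

      $flavour =~ s/\W/_/g;
      my $subclass = "My::Storage::Vendor::$flavour";

      if ($flavour && $self->load_optional_class($subclass)) {
        bless $self, $subclass;
        $self->_rebless;                 # let the subclass refine further
      }
    }
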
diff --git a/t/60core.t b/t/60core.t
index b62b82d..8ab6129 100644
--- a/t/60core.t
+++ b/t/60core.t
@@ -65,7 +65,7 @@ lives_ok (sub { $art->delete }, 'Cascading delete on Ordered has_many works' );
 
 is(@art, 2, 'And then there were two');
 
-ok(!$art->in_storage, "It knows it's dead");
+is($art->in_storage, 0, "It knows it's dead");
 
 dies_ok ( sub { $art->delete }, "Can't delete twice");
 
@@ -144,7 +144,7 @@ is($schema->resultset("Artist")->count, 4, 'count ok');
   });
 
   is($new_obj->name, 'find_or_new', 'find_or_new: instantiated a new artist');
-  ok(! $new_obj->in_storage, 'new artist is not in storage');
+  is($new_obj->in_storage, 0, 'new artist is not in storage');
 }
 
 my $cd = $schema->resultset("CD")->find(1);
diff --git a/t/71mysql.t b/t/71mysql.t
index 0c099f8..0d49a0e 100644
--- a/t/71mysql.t
+++ b/t/71mysql.t
@@ -227,4 +227,14 @@ NULLINSEARCH: {
     => 'Nothing Found!';
 }
 
+
+## If find() is the first query after connect()
+## DBI::Storage::sql_maker() will be called before
+## _determine_driver() and so the ::SQLHacks class for MySQL
+## will not be used
+
+my $schema2 = DBICTest::Schema->connect($dsn, $user, $pass);
+$schema2->resultset("Artist")->find(4);
+isa_ok($schema2->storage->sql_maker, 'DBIx::Class::SQLAHacks::MySQL');
+
 done_testing;
diff --git a/t/79aliasing.t b/t/79aliasing.t
index 94ae02b..4f9b3a3 100644
--- a/t/79aliasing.t
+++ b/t/79aliasing.t
@@ -52,7 +52,7 @@ plan tests => 11;
   my $cd_rs = $schema->resultset('CD')->search({ 'artist.name' => 'Caterwauler McCrae' }, { join => 'artist' });
   my $cd = $cd_rs->find_or_new({ title => 'Huh?', year => 2006 });
 
-  ok(! $cd->in_storage, 'new CD not in storage yet');
+  is($cd->in_storage, 0, 'new CD not in storage yet');
   is($cd->title, 'Huh?', 'new CD title is correct');
   is($cd->year, 2006, 'new CD year is correct');
 }
diff --git a/t/80unique.t b/t/80unique.t
index 2245511..0e4108b 100644
--- a/t/80unique.t
+++ b/t/80unique.t
@@ -195,7 +195,7 @@ is($row->baz, 3, 'baz is correct');
     { key => 'cd_artist_title' }
   );
 
-  ok(!$cd1->in_storage, 'CD is not in storage yet after update_or_new');
+  is($cd1->in_storage, 0, 'CD is not in storage yet after update_or_new');
   $cd1->insert;
   ok($cd1->in_storage, 'CD got added to strage after update_or_new && insert');
 
diff --git a/t/inflate/hri.t b/t/inflate/hri.t
index 1d32dd6..292c943 100644
--- a/t/inflate/hri.t
+++ b/t/inflate/hri.t
@@ -1,7 +1,7 @@
 use strict;
-use warnings;  
+use warnings;
 
-use Test::More qw(no_plan);
+use Test::More;
 use lib qw(t/lib);
 use DBICTest;
 my $schema = DBICTest->init_schema();
@@ -9,7 +9,7 @@ my $schema = DBICTest->init_schema();
 # Under some versions of SQLite if the $rs is left hanging around it will lock
 # So we create a scope here cos I'm lazy
 {
-  my $rs = $schema->resultset('CD');
+  my $rs = $schema->resultset('CD')->search ({}, { order_by => 'cdid' });
 
   # get the defined columns
   my @dbic_cols = sort $rs->result_source->columns;
@@ -23,8 +23,10 @@ my $schema = DBICTest->init_schema();
   my @hashref_cols = sort keys %$datahashref1;
 
   is_deeply( \@dbic_cols, \@hashref_cols, 'returned columns' );
-}
 
+  my $cd1 = $rs->find ({cdid => 1});
+  is_deeply ( $cd1, $datahashref1, 'first/find return the same thing');
+}
 
 sub check_cols_of {
   my ($dbic_obj, $datahashref) = @_;
@@ -135,3 +137,5 @@ is_deeply(
   [{ $artist->get_columns, cds => [] }],
   'nested has_many prefetch without entries'
 );
+
+done_testing;
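
The hri.t change gives the resultset a deterministic order so first() and find() can be compared row for row. The behaviour it guards, sketched at the user level (assuming the bundled DBICTest schema), is that a HashRefInflator resultset keeps returning plain hashrefs even when the row comes back through find():

    my $rs = $schema->resultset('CD')->search({}, {
      order_by     => 'cdid',
      result_class => 'DBIx::Class::ResultClass::HashRefInflator',
    });

    my $via_first = $rs->first;                               # plain hashref
    my $via_find  = $rs->find({ cdid => $via_first->{cdid} });

    # both code paths now return the same unblessed structure
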
diff --git a/t/relationship/core.t b/t/relationship/core.t
index 6a09c57..90e49a3 100644
--- a/t/relationship/core.t
+++ b/t/relationship/core.t
@@ -133,7 +133,7 @@ $cd = $artist->find_or_new_related( 'cds', {
   year => 2007,
 } );
 is( $cd->title, 'Greatest Hits 2: Louder Than Ever', 'find_or_new_related new record ok' );
-ok( ! $cd->in_storage, 'find_or_new_related on a new record: not in_storage' );
+is( $cd->in_storage, 0, 'find_or_new_related on a new record: not in_storage' );
 
 $cd->artist(undef);
 my $newartist = $cd->find_or_new_related( 'artist', {
diff --git a/t/resultset/update_delete.t b/t/resultset/update_delete.t
index fc535e6..05d245b 100644
--- a/t/resultset/update_delete.t
+++ b/t/resultset/update_delete.t
@@ -79,8 +79,12 @@ throws_ok (
 );
 
 # grouping on PKs only should pass
-$sub_rs->search ({}, { group_by => [ reverse $sub_rs->result_source->primary_columns ] })   # reverse to make sure the comaprison works
-  ->update ({ pilot_sequence => \ 'pilot_sequence + 1' });
+$sub_rs->search (
+  {},
+  {
+    group_by => [ reverse $sub_rs->result_source->primary_columns ],   # reverse to make sure the PK-list comparison works
+  },
+)->update ({ pilot_sequence => \ 'pilot_sequence + 1' });
 
 is_deeply (
   [ $tkfks->search ({ autopilot => [qw/a b x y/]}, { order_by => 'autopilot' })
@@ -90,6 +94,19 @@ is_deeply (
   'Only two rows incremented',
 );
 
+# also make sure weird scalarref usage works (RT#51409)
+$tkfks->search (
+  \ 'pilot_sequence BETWEEN 11 AND 21',
+)->update ({ pilot_sequence => \ 'pilot_sequence + 1' });
+
+is_deeply (
+  [ $tkfks->search ({ autopilot => [qw/a b x y/]}, { order_by => 'autopilot' })
+      ->get_column ('pilot_sequence')->all
+  ],
+  [qw/12 22 30 40/],
+  'Only two rows incremented (where => scalarref works)',
+);
+
 $sub_rs->delete;
 
 is ($tkfks->count, $tkfk_cnt -= 2, 'Only two rows deleted');
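
Taken together, the update_delete.t additions cover the two execution paths that remain after this change: a condition the storage can flatten runs as a single UPDATE/DELETE with the table qualifiers stripped, while anything else (literal SQL, row/offset limits, grouped or joined resultsets) is routed through the subquery-based fallback. From the user's point of view both just work; the snippet below is illustrative only, reusing the $tkfks resultset from the test:

    # flattened: executed as one UPDATE ... WHERE pilot_sequence > 10
    $tkfks->search({ 'me.pilot_sequence' => { '>' => 10 } })
          ->update({ pilot_sequence => \ 'pilot_sequence + 1' });

    # not introspectable (literal SQL), so it goes through the
    # _subq_update_delete fallback instead of dying as it did before
    $tkfks->search( \ 'pilot_sequence BETWEEN 11 AND 21' )->delete;
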