X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=blobdiff_plain;f=lib%2FDBIx%2FClass%2FResultSet.pm;h=defbe9aea8c0ab40efac21dda48894ca90f7c673;hb=47dba3e30a261fec0bdbeac022c161183c876f49;hp=2a08b657c5f10fa4d8a8c2995ad99ab172ace5dd;hpb=7ed4b48f691b78a3d832266d3a253a4d5c6a4837;p=dbsrgits%2FDBIx-Class.git diff --git a/lib/DBIx/Class/ResultSet.pm b/lib/DBIx/Class/ResultSet.pm index 2a08b65..defbe9a 100644 --- a/lib/DBIx/Class/ResultSet.pm +++ b/lib/DBIx/Class/ResultSet.pm @@ -5,12 +5,12 @@ use warnings; use base qw/DBIx::Class/; use DBIx::Class::Carp; use DBIx::Class::ResultSetColumn; +use DBIx::Class::ResultClass::HashRefInflator; use Scalar::Util qw/blessed weaken reftype/; use DBIx::Class::_Util qw( fail_on_internal_wantarray fail_on_internal_call UNRESOLVABLE_CONDITION ); use Try::Tiny; -use Data::Compare (); # no imports!!! guard against insane architecture # not importing first() as it will clash with our own method use List::Util (); @@ -59,7 +59,7 @@ just stores all the conditions needed to create the query. A basic ResultSet representing the data of an entire table is returned by calling C on a L and passing in a -L name. +L name. my $users_rs = $schema->resultset('User'); @@ -656,26 +656,17 @@ sub _stack_cond { (ref $_ eq 'HASH' and ! keys %$_) ) and $_ = undef for ($left, $right); - # either on of the two undef or both undef - if ( ( (defined $left) xor (defined $right) ) or ! defined $left ) { + # either one of the two undef + if ( (defined $left) xor (defined $right) ) { return defined $left ? $left : $right; } - - my $cond = $self->result_source->schema->storage->_collapse_cond({ -and => [$left, $right] }); - - for my $c (grep { ref $cond->{$_} eq 'ARRAY' and ($cond->{$_}[0]||'') eq '-and' } keys %$cond) { - - my @vals = sort @{$cond->{$c}}[ 1..$#{$cond->{$c}} ]; - my @fin = shift @vals; - - for my $v (@vals) { - push @fin, $v unless Data::Compare::Compare( $fin[-1], $v ); - } - - $cond->{$c} = (@fin == 1) ? $fin[0] : [-and => @fin ]; + # both undef + elsif ( ! defined $left ) { + return undef + } + else { + return $self->result_source->schema->storage->_collapse_cond({ -and => [$left, $right] }); } - - $cond; } =head2 search_literal @@ -1157,7 +1148,7 @@ You most likely want to use L with specific operators. For more information, see L. -This method is deprecated and will be removed in 0.09. Use L +This method is deprecated and will be removed in 0.09. Use L instead. An example conversion is: ->search_like({ foo => 'bar' }); @@ -1392,11 +1383,7 @@ sub _construct_results { $self->{_result_inflator}{is_hri} = ( ( ! $self->{_result_inflator}{is_core_row} and - $inflator_cref == ( - require DBIx::Class::ResultClass::HashRefInflator - && - DBIx::Class::ResultClass::HashRefInflator->can('inflate_result') - ) + $inflator_cref == \&DBIx::Class::ResultClass::HashRefInflator::inflate_result ) ? 1 : 0 ) unless defined $self->{_result_inflator}{is_hri}; @@ -1473,6 +1460,9 @@ sub _construct_results { if @violating_idx; $unrolled_non_null_cols_to_check = join (',', @$check_non_null_cols); + + utf8::upgrade($unrolled_non_null_cols_to_check) + if DBIx::Class::_ENV_::STRESSTEST_UTF8_UPGRADE_GENERATED_COLLAPSER_SOURCE; } my $next_cref = @@ -1552,8 +1542,8 @@ L<"table"|DBIx::Class::Manual::Glossary/"ResultSource"> class. Note that changing the result_class will also remove any components that were originally loaded in the source class via -L. Any overloaded methods -in the original source class will not run. +L. +Any overloaded methods in the original source class will not run. 
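For illustration only (a minimal sketch, not part of the change itself; a connected $schema and a C<CD> source are assumed), the stock hashref inflator added to the imports above can be switched in per-resultset, which triggers exactly the behaviour described in this note:

    my $rs = $schema->resultset('CD');
    $rs->result_class('DBIx::Class::ResultClass::HashRefInflator');

    # rows now inflate to plain hashrefs - column accessors and any
    # components loaded into the original result class are no longer
    # available on the returned values
    my $row = $rs->first;
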
=cut @@ -2228,36 +2218,42 @@ case there are obviously no benefits to using this method over L. sub populate { my $self = shift; - my ($data, $guard); - # this is naive and just a quick check # the types will need to be checked more thoroughly when the # multi-source populate gets added - if (ref $_[0] eq 'ARRAY') { - return unless @{$_[0]}; - - $data = $_[0] if (ref $_[0][0] eq 'HASH' or ref $_[0][0] eq 'ARRAY'); - } - - $self->throw_exception('Populate expects an arrayref of hashrefs or arrayref of arrayrefs') - unless $data; + my $data = ( + ref $_[0] eq 'ARRAY' + and + ( @{$_[0]} or return ) + and + ( ref $_[0][0] eq 'HASH' or ref $_[0][0] eq 'ARRAY' ) + and + $_[0] + ) or $self->throw_exception('Populate expects an arrayref of hashrefs or arrayref of arrayrefs'); # FIXME - no cref handling # At this point assume either hashes or arrays if(defined wantarray) { - my @results; - - $guard = $self->result_source->schema->storage->txn_scope_guard - if ( @$data > 2 or ( @$data == 2 and ref $data->[0] eq 'ARRAY' ) ); + my (@results, $guard); if (ref $data->[0] eq 'ARRAY') { + # column names only, nothing to do + return if @$data == 1; + + $guard = $self->result_source->schema->storage->txn_scope_guard + if @$data > 2; + @results = map { my $vals = $_; $self->new_result({ map { $data->[0][$_] => $vals->[$_] } 0..$#{$data->[0]} })->insert } @{$data}[1 .. $#$data] ; } else { + + $guard = $self->result_source->schema->storage->txn_scope_guard + if @$data > 1; + @results = map { $self->new_result($_)->insert } @$data; } @@ -2286,6 +2282,8 @@ sub populate { # positional(!) explicit column list if ($i == 0) { + # column names only, nothing to do + return if @$data == 1; $colinfo->{$data->[0][$_]} = { pos => $_, name => $data->[0][$_] } and push @$colnames, $data->[0][$_] for 0 .. $#{$data->[0]}; @@ -2423,13 +2421,14 @@ sub populate { } ### start work + my $guard; $guard = $rsrc->schema->storage->txn_scope_guard if $slices_with_rels; ### main source data # FIXME - need to switch entirely to a coderef-based thing, # so that large sets aren't copied several times... I think - $rsrc->storage->insert_bulk( + $rsrc->storage->_insert_bulk( $rsrc, [ @$colnames, sort keys %$rs_data ], [ map { @@ -3321,6 +3320,9 @@ source alias of the current result set: }); } +The alias of L can be altered by the +L. + =cut sub current_source_alias { @@ -3615,62 +3617,35 @@ sub _resolved_attrs { ]; } - if ( defined $attrs->{order_by} ) { - $attrs->{order_by} = ( - ref( $attrs->{order_by} ) eq 'ARRAY' - ? [ @{ $attrs->{order_by} } ] - : [ $attrs->{order_by} || () ] - ); - } - - if ($attrs->{group_by} and ref $attrs->{group_by} ne 'ARRAY') { - $attrs->{group_by} = [ $attrs->{group_by} ]; - } + for my $attr (qw(order_by group_by)) { - # generate selections based on the prefetch helper - my ($prefetch, @prefetch_select, @prefetch_as); - $prefetch = $self->_merge_joinpref_attr( {}, delete $attrs->{prefetch} ) - if defined $attrs->{prefetch}; + if ( defined $attrs->{$attr} ) { + $attrs->{$attr} = ( + ref( $attrs->{$attr} ) eq 'ARRAY' + ? 
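            # descriptive note (not part of the patch): an existing arrayref
            # is shallow-copied here so later adjustments do not modify the
            # caller-supplied value; any other defined value is wrapped into
            # a fresh one-element arrayref by the branch below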
[ @{ $attrs->{$attr} } ] + : [ $attrs->{$attr} || () ] + ); - if ($prefetch) { + delete $attrs->{$attr} unless @{$attrs->{$attr}}; + } + } - $self->throw_exception("Unable to prefetch, resultset contains an unnamed selector $attrs->{_dark_selector}{string}") - if $attrs->{_dark_selector}; + # set collapse default based on presence of prefetch + my $prefetch; + if ( + defined $attrs->{prefetch} + and + $prefetch = $self->_merge_joinpref_attr( {}, delete $attrs->{prefetch} ) + ) { $self->throw_exception("Specifying prefetch in conjunction with an explicit collapse => 0 is unsupported") if defined $attrs->{collapse} and ! $attrs->{collapse}; $attrs->{collapse} = 1; - - # this is a separate structure (we don't look in {from} directly) - # as the resolver needs to shift things off the lists to work - # properly (identical-prefetches on different branches) - my $join_map = {}; - if (ref $attrs->{from} eq 'ARRAY') { - - my $start_depth = $attrs->{seen_join}{-relation_chain_depth} || 0; - - for my $j ( @{$attrs->{from}}[1 .. $#{$attrs->{from}} ] ) { - next unless $j->[0]{-alias}; - next unless $j->[0]{-join_path}; - next if ($j->[0]{-relation_chain_depth} || 0) < $start_depth; - - my @jpath = map { keys %$_ } @{$j->[0]{-join_path}}; - - my $p = $join_map; - $p = $p->{$_} ||= {} for @jpath[ ($start_depth/2) .. $#jpath]; #only even depths are actual jpath boundaries - push @{$p->{-join_aliases} }, $j->[0]{-alias}; - } - } - - my @prefetch = $source->_resolve_prefetch( $prefetch, $alias, $join_map ); - - # save these for after distinct resolution - @prefetch_select = map { $_->[0] } @prefetch; - @prefetch_as = map { $_->[1] } @prefetch; } + # run through the resulting joinstructure (starting from our current slot) # and unset collapse if proven unnecessary # @@ -3720,6 +3695,7 @@ sub _resolved_attrs { } } + # generate the distinct induced group_by before injecting the prefetched select/as parts if (delete $attrs->{distinct}) { if ($attrs->{group_by}) { @@ -3739,9 +3715,38 @@ sub _resolved_attrs { } } - # inject prefetch-bound selection (if any) - push @{$attrs->{select}}, @prefetch_select; - push @{$attrs->{as}}, @prefetch_as; + + # generate selections based on the prefetch helper + if ($prefetch) { + + $self->throw_exception("Unable to prefetch, resultset contains an unnamed selector $attrs->{_dark_selector}{string}") + if $attrs->{_dark_selector}; + + # this is a separate structure (we don't look in {from} directly) + # as the resolver needs to shift things off the lists to work + # properly (identical-prefetches on different branches) + my $joined_node_aliases_map = {}; + if (ref $attrs->{from} eq 'ARRAY') { + + my $start_depth = $attrs->{seen_join}{-relation_chain_depth} || 0; + + for my $j ( @{$attrs->{from}}[1 .. $#{$attrs->{from}} ] ) { + next unless $j->[0]{-alias}; + next unless $j->[0]{-join_path}; + next if ($j->[0]{-relation_chain_depth} || 0) < $start_depth; + + my @jpath = map { keys %$_ } @{$j->[0]{-join_path}}; + + my $p = $joined_node_aliases_map; + $p = $p->{$_} ||= {} for @jpath[ ($start_depth/2) .. 
$#jpath]; #only even depths are actual jpath boundaries + push @{$p->{-join_aliases} }, $j->[0]{-alias}; + } + } + + ( push @{$attrs->{select}}, $_->[0] ) and ( push @{$attrs->{as}}, $_->[1] ) + for $source->_resolve_selection_from_prefetch( $prefetch, $joined_node_aliases_map ); + } + $attrs->{_simple_passthrough_construction} = !( $attrs->{collapse} @@ -3749,6 +3754,7 @@ sub _resolved_attrs { grep { $_ =~ /\./ } @{$attrs->{as}} ); + # if both page and offset are specified, produce a combined offset # even though it doesn't make much sense, this is what pre 081xx has # been doing @@ -3814,8 +3820,10 @@ sub _calculate_score { if (ref $b eq 'HASH') { my ($b_key) = keys %{$b}; + $b_key = '' if ! defined $b_key; if (ref $a eq 'HASH') { my ($a_key) = keys %{$a}; + $a_key = '' if ! defined $a_key; if ($a_key eq $b_key) { return (1 + $self->_calculate_score( $a->{$a_key}, $b->{$b_key} )); } else { @@ -4082,19 +4090,34 @@ is the same as as => [qw(some_column dbic_slot)] If you want to individually retrieve related columns (in essence perform -manual prefetch) you have to make sure to specify the correct inflation slot +manual L) you have to make sure to specify the correct inflation slot chain such that it matches existing relationships: my $rs = $schema->resultset('Artist')->search({}, { # required to tell DBIC to collapse has_many relationships collapse => 1, - join => { cds => 'tracks'}, + join => { cds => 'tracks' }, '+columns' => { 'cds.cdid' => 'cds.cdid', 'cds.tracks.title' => 'tracks.title', }, }); +Like elsewhere, literal SQL or literal values can be included by using a +scalar reference or a literal bind value, and these values will be available +in the result with C (see also +L): + + # equivalent SQL: SELECT 1, 'a string', IF(my_column,?,?) ... + # bind values: $true_value, $false_value + columns => [ + { + foo => \1, + bar => \q{'a string'}, + baz => \[ 'IF(my_column,?,?)', $true_value, $false_value ], + } + ] + =head2 +columns B You B explicitly quote C<'+columns'> when using this attribute. @@ -4147,10 +4170,11 @@ names: B You will almost always need a corresponding L attribute when you use L, to instruct DBIx::Class how to store the result of the column. -Also note that the L attribute has nothing to do with the SQL-side 'AS' -identifier aliasing. You can however alias a function, so you can use it in -e.g. an C clause. This is done via the C<-as> B supplied as shown in the example above. =head2 +select @@ -4180,8 +4204,10 @@ Indicates DBIC-side names for object inflation. That is L indicates the slot name in which the column value will be stored within the L object. The value will then be accessible via this identifier by the C method (or via the object accessor B) as shown below. The L attribute has -B with the SQL-side C. See L for details. +with the same name already exists>) as shown below. + +The L attribute has B with the SQL-side identifier +aliasing C. See L for details. $rs = $schema->resultset('Employee')->search(undef, { select => [ @@ -4357,8 +4383,10 @@ For a more in-depth discussion, see L. This attribute is a shorthand for specifying a L spec, adding all columns from the joined related sources as L and setting -L to a true value. For example, the following two queries are -equivalent: +L to a true value. It can be thought of as a rough B +of the L attribute. 
+ +For example, the following two queries are equivalent: my $rs = $schema->resultset('Artist')->search({}, { prefetch => { cds => ['genre', 'tracks' ] }, @@ -4535,15 +4563,20 @@ A arrayref of columns to group by. Can include columns of joined tables. =back -HAVING is a select statement attribute that is applied between GROUP BY and -ORDER BY. It is applied to the after the grouping calculations have been -done. +The HAVING operator specifies a B condition applied to the set +after the grouping calculations have been done. In other words it is a +constraint just like L (and accepting the same +L) applied to the data +as it exists after GROUP BY has taken place. Specifying L without +L is a logical mistake, and a fatal error on most RDBMS engines. + +E.g. having => { 'count_employee' => { '>=', 100 } } or with an in-place function in which case literal SQL is required: - having => \[ 'count(employee) >= ?', [ count => 100 ] ] + having => \[ 'count(employee) >= ?', 100 ] =head2 distinct @@ -4567,19 +4600,14 @@ setting is ignored and an appropriate warning is issued. =head2 where -=over 4 - -Adds to the WHERE clause. +Adds extra conditions to the resultset, combined with the preexisting C +conditions, same as the B argument to the L # only return rows WHERE deleted IS NULL for all searches __PACKAGE__->resultset_attributes({ where => { deleted => undef } }); -Can be overridden by passing C<< { where => undef } >> as an attribute -to a resultset. - -For more complicated where clauses see L. - -=back +Note that the above example is +L. =head2 cache @@ -4785,11 +4813,15 @@ supported: [ undef, $val ] === [ {}, $val ] $val === [ {}, $val ] -=head1 AUTHOR AND CONTRIBUTORS +=head1 FURTHER QUESTIONS? -See L and L in DBIx::Class +Check the list of L. -=head1 LICENSE +=head1 COPYRIGHT AND LICENSE -You may distribute this code under the same terms as Perl itself. +This module is free software L +by the L. You can +redistribute it and/or modify it under the same terms as the +L. +=cut
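
For reference, a usage sketch of the two input shapes handled by the reworked C<populate> hunks above (illustrative only; the C<Artist> source and its columns are assumed, mirroring the method's own examples):

    ## arrayref of hashrefs style
    $schema->resultset('Artist')->populate([
      { artistid => 4, name => 'Possibly New Artist' },
      { artistid => 5, name => 'Another Possibly New Artist' },
    ]);

    ## arrayref of arrayrefs style - the first row lists the column names
    $schema->resultset('Artist')->populate([
      [ qw( artistid name ) ],
      [ 4, 'Possibly New Artist' ],
      [ 5, 'Another Possibly New Artist' ],
    ]);

As the hunks above show, void context hands the whole set to the storage layer as a single bulk insert, while list or scalar context creates each row via C<new_result> and C<insert>, wrapped in a transaction scope guard whenever more than one data row is supplied.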