X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=blobdiff_plain;f=lib%2FDBIx%2FClass%2FResultSet.pm;h=e94782f6e121060142aeb839bb04a2be143b8fad;hb=9d29cf57a7ed1b68277ff2b7ee58d46112d8c384;hp=584ec961e5cf32292917ad4d95ce433b568a57fd;hpb=a871b4be787cebd5e61411429d31a841ac227156;p=dbsrgits%2FDBIx-Class.git diff --git a/lib/DBIx/Class/ResultSet.pm b/lib/DBIx/Class/ResultSet.pm index 584ec96..e94782f 100644 --- a/lib/DBIx/Class/ResultSet.pm +++ b/lib/DBIx/Class/ResultSet.pm @@ -8,6 +8,7 @@ use overload fallback => 1; use Data::Page; use Storable; +use Data::Dumper; use Scalar::Util qw/weaken/; use DBIx::Class::ResultSetColumn; @@ -86,68 +87,6 @@ sub new { my ($source, $attrs) = @_; weaken $source; - $attrs = Storable::dclone($attrs || {}); # { %{ $attrs || {} } }; - #use Data::Dumper; warn Dumper($attrs); - my $alias = ($attrs->{alias} ||= 'me'); - - $attrs->{columns} ||= delete $attrs->{cols} if $attrs->{cols}; - delete $attrs->{as} if $attrs->{columns}; - $attrs->{columns} ||= [ $source->columns ] unless $attrs->{select}; - $attrs->{select} = [ - map { m/\./ ? $_ : "${alias}.$_" } @{delete $attrs->{columns}} - ] if $attrs->{columns}; - $attrs->{as} ||= [ - map { m/^\Q$alias.\E(.+)$/ ? $1 : $_ } @{$attrs->{select}} - ]; - if (my $include = delete $attrs->{include_columns}) { - push(@{$attrs->{select}}, @$include); - push(@{$attrs->{as}}, map { m/([^.]+)$/; $1; } @$include); - } - #use Data::Dumper; warn Dumper(@{$attrs}{qw/select as/}); - - $attrs->{from} ||= [ { $alias => $source->from } ]; - $attrs->{seen_join} ||= {}; - my %seen; - if (my $join = delete $attrs->{join}) { - foreach my $j (ref $join eq 'ARRAY' ? @$join : ($join)) { - if (ref $j eq 'HASH') { - $seen{$_} = 1 foreach keys %$j; - } else { - $seen{$j} = 1; - } - } - push(@{$attrs->{from}}, $source->resolve_join( - $join, $attrs->{alias}, $attrs->{seen_join}) - ); - } - - $attrs->{group_by} ||= $attrs->{select} if delete $attrs->{distinct}; - $attrs->{order_by} = [ $attrs->{order_by} ] if - $attrs->{order_by} and !ref($attrs->{order_by}); - $attrs->{order_by} ||= []; - - my $collapse = $attrs->{collapse} || {}; - if (my $prefetch = delete $attrs->{prefetch}) { - my @pre_order; - foreach my $p (ref $prefetch eq 'ARRAY' ? @$prefetch : ($prefetch)) { - if ( ref $p eq 'HASH' ) { - foreach my $key (keys %$p) { - push(@{$attrs->{from}}, $source->resolve_join($p, $attrs->{alias})) - unless $seen{$key}; - } - } else { - push(@{$attrs->{from}}, $source->resolve_join($p, $attrs->{alias})) - unless $seen{$p}; - } - my @prefetch = $source->resolve_prefetch( - $p, $attrs->{alias}, {}, \@pre_order, $collapse); - push(@{$attrs->{select}}, map { $_->[0] } @prefetch); - push(@{$attrs->{as}}, map { $_->[1] } @prefetch); - } - push(@{$attrs->{order_by}}, @pre_order); - } - $attrs->{collapse} = $collapse; -# use Data::Dumper; warn Dumper($collapse) if keys %{$collapse}; if ($attrs->{page}) { $attrs->{rows} ||= 10; @@ -155,12 +94,14 @@ sub new { $attrs->{offset} += ($attrs->{rows} * ($attrs->{page} - 1)); } + $attrs->{alias} ||= 'me'; + bless { result_source => $source, result_class => $attrs->{result_class} || $source->result_class, cond => $attrs->{where}, - from => $attrs->{from}, - collapse => $collapse, +# from => $attrs->{from}, +# collapse => $collapse, count => undef, page => delete $attrs->{page}, pager => undef, @@ -196,45 +137,86 @@ call it as C. sub search { my $self = shift; + my $rs = $self->search_rs( @_ ); + return (wantarray ? 
$rs->all : $rs); +} - my $rs; - if( @_ ) { - - my $attrs = { %{$self->{attrs}} }; - my $having = delete $attrs->{having}; - $attrs = { %$attrs, %{ pop(@_) } } if @_ > 1 and ref $_[$#_] eq 'HASH'; - - my $where = (@_ - ? ((@_ == 1 || ref $_[0] eq "HASH") - ? shift - : ((@_ % 2) - ? $self->throw_exception( - "Odd number of arguments to search") - : {@_})) - : undef()); - if (defined $where) { - $attrs->{where} = (defined $attrs->{where} - ? { '-and' => - [ map { ref $_ eq 'ARRAY' ? [ -or => $_ ] : $_ } - $where, $attrs->{where} ] } - : $where); - } +=head2 search_rs + +=over 4 - if (defined $having) { - $attrs->{having} = (defined $attrs->{having} - ? { '-and' => - [ map { ref $_ eq 'ARRAY' ? [ -or => $_ ] : $_ } - $having, $attrs->{having} ] } - : $having); +=item Arguments: $cond, \%attrs? + +=item Return Value: $resultset + +=back + +This method does the same exact thing as search() except it will +always return a resultset, even in list context. + +=cut + +sub search_rs { + my $self = shift; + + my $our_attrs = { %{$self->{attrs}} }; + my $having = delete $our_attrs->{having}; + my $attrs = {}; + $attrs = pop(@_) if @_ > 1 and ref $_[$#_] eq 'HASH'; + + # merge new attrs into old + foreach my $key (qw/join prefetch/) { + next unless (exists $attrs->{$key}); + if (exists $our_attrs->{$key}) { + $our_attrs->{$key} = $self->_merge_attr($our_attrs->{$key}, $attrs->{$key}); + } else { + $our_attrs->{$key} = $attrs->{$key}; } + delete $attrs->{$key}; + } - $rs = (ref $self)->new($self->result_source, $attrs); + if (exists $our_attrs->{prefetch}) { + $our_attrs->{join} = $self->_merge_attr($our_attrs->{join}, $our_attrs->{prefetch}, 1); } - else { - $rs = $self; - $rs->reset; + + my $new_attrs = { %{$our_attrs}, %{$attrs} }; + + # merge new where and having into old + my $where = (@_ + ? ((@_ == 1 || ref $_[0] eq "HASH") + ? shift + : ((@_ % 2) + ? $self->throw_exception( + "Odd number of arguments to search") + : {@_})) + : undef()); + if (defined $where) { + $new_attrs->{where} = (defined $new_attrs->{where} + ? { '-and' => + [ map { ref $_ eq 'ARRAY' ? [ -or => $_ ] : $_ } + $where, $new_attrs->{where} ] } + : $where); } - return (wantarray ? $rs->all : $rs); + + if (defined $having) { + $new_attrs->{having} = (defined $new_attrs->{having} + ? { '-and' => + [ map { ref $_ eq 'ARRAY' ? [ -or => $_ ] : $_ } + $having, $new_attrs->{having} ] } + : $having); + } + + my $rs = (ref $self)->new($self->result_source, $new_attrs); + $rs->{_parent_rs} = $self->{_parent_rs} if ($self->{_parent_rs}); #XXX - hack to pass through parent of related resultsets + + unless (@_) { # no search, effectively just a clone + my $rows = $self->get_cache; + if ($rows) { + $rs->set_cache($rows); + } + } + + return $rs; } =head2 search_literal @@ -277,8 +259,8 @@ a row by its primary key: my $cd = $schema->resultset('CD')->find(5); -You can also find a row by a specific key or unique constraint by specifying -the C attribute. For example: +You can also find a row by a specific unique constraint using the C +attribute. For example: my $cd = $schema->resultset('CD')->find('Massive Attack', 'Mezzanine', { key => 'artist_title' }); @@ -304,24 +286,31 @@ L. =cut sub find { - my ($self, @vals) = @_; - my $attrs = (@vals > 1 && ref $vals[$#vals] eq 'HASH' ? pop(@vals) : {}); + my $self = shift; + my $attrs = (@_ > 1 && ref $_[$#_] eq 'HASH' ? pop(@_) : {}); # Parse out a hash from input - my @unique_cols = exists $attrs->{key} + my @cols = exists $attrs->{key} ? 
$self->result_source->unique_constraint_columns($attrs->{key}) : $self->result_source->primary_columns; - my %hash; - if (ref $vals[0] eq 'HASH') { - %hash = %{ $vals[0] }; + my $hash; + if (ref $_[0] eq 'HASH') { + $hash = { %{$_[0]} }; + } + elsif (@_ == @cols) { + $hash = {}; + @{$hash}{@cols} = @_; } - elsif (@vals == @unique_cols) { - @hash{@unique_cols} = @vals; + elsif (@_) { + # For backwards compatibility + $hash = {@_}; } else { - # Hack for CDBI queries - %hash = @vals; + $self->throw_exception( + "Arguments to find must be a hashref or match the number of columns in the " + . (exists $attrs->{key} ? "$attrs->{key} unique constraint" : "primary key") + ); } # Check the hash we just parsed against our source's unique constraints @@ -332,58 +321,50 @@ sub find { "Can't find unless a primary key or unique constraint is defined" ) unless @constraint_names; - my @unique_hashes; + my @unique_queries; foreach my $name (@constraint_names) { my @unique_cols = $self->result_source->unique_constraint_columns($name); - my %unique_hash = $self->_unique_hash(\%hash, \@unique_cols); + my $unique_query = $self->_build_unique_query($hash, \@unique_cols); - # TODO: Check that the ResultSet defines the rest of the query - push @unique_hashes, \%unique_hash - if scalar keys %unique_hash;# == scalar @unique_cols; - } - - # Add the ResultSet's alias - foreach my $unique_hash (@unique_hashes) { - foreach my $key (grep { ! m/\./ } keys %$unique_hash) { - $unique_hash->{"$self->{attrs}{alias}.$key"} = delete $unique_hash->{$key}; + # Add the ResultSet's alias + foreach my $key (grep { ! m/\./ } keys %$unique_query) { + my $alias = $self->{attrs}->{alias}; + $unique_query->{"$alias.$key"} = delete $unique_query->{$key}; } + + push @unique_queries, $unique_query if %$unique_query; } # Handle cases where the ResultSet already defines the query - my $query = @unique_hashes ? \@unique_hashes : undef; + my $query = @unique_queries ? \@unique_queries : undef; # Run the query if (keys %$attrs) { my $rs = $self->search($query, $attrs); - return keys %{$rs->{collapse}} ? $rs->next : $rs->single; + $rs->_resolve; + return keys %{$rs->{_attrs}->{collapse}} ? $rs->next : $rs->single; } else { - return keys %{$self->{collapse}} + $self->_resolve; + return (keys %{$self->{_attrs}->{collapse}}) ? $self->search($query)->next : $self->single($query); } } -# _unique_hash +# _build_unique_query # -# Constrain the specified hash based on the specific column names. +# Constrain the specified query hash based on the specified column names. -sub _unique_hash { - my ($self, $hash, $unique_cols) = @_; +sub _build_unique_query { + my ($self, $query, $unique_cols) = @_; - # Ugh, CDBI lowercases column names - if (exists $INC{'DBIx/Class/CDBICompat/ColumnCase.pm'}) { - foreach my $key (keys %$hash) { - $hash->{lc $key} = delete $hash->{$key}; - } - } - - my %unique_hash = - map { $_ => $hash->{$_} } - grep { exists $hash->{$_} } + my %unique_query = + map { $_ => $query->{$_} } + grep { exists $query->{$_} } @$unique_cols; - return %unique_hash; + return \%unique_query; } =head2 search_related @@ -426,9 +407,11 @@ L for more information. 
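 For context, a cursor iterates the raw selected column values rather than
 inflated row objects. An illustrative sketch (assumes a 'CD' result source;
 the names are examples only):
 
   my $cursor = $schema->resultset('CD')->cursor;
   while (my @raw = $cursor->next) {
     # @raw holds the selected column values for one row, in 'select' order
   }
 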
sub cursor { my ($self) = @_; - my $attrs = { %{$self->{attrs}} }; + + $self->_resolve; + my $attrs = { %{$self->{_attrs}} }; return $self->{cursor} - ||= $self->result_source->storage->select($self->{from}, $attrs->{select}, + ||= $self->result_source->storage->select($attrs->{from}, $attrs->{select}, $attrs->{where},$attrs); } @@ -447,11 +430,16 @@ sub cursor { Inflates the first result without creating a cursor if the resultset has any records in it; if not returns nothing. Used by L as an optimisation. +Can optionally take an additional condition *only* - this is a fast-code-path +method; if you need to add extra joins or similar call ->search and then +->single without a condition on the $rs returned from that. + =cut sub single { my ($self, $where) = @_; - my $attrs = { %{$self->{attrs}} }; + $self->_resolve; + my $attrs = { %{$self->{_attrs}} }; if ($where) { if (defined $attrs->{where}) { $attrs->{where} = { @@ -463,8 +451,9 @@ sub single { $attrs->{where} = $where; } } + my @data = $self->result_source->storage->select_single( - $self->{from}, $attrs->{select}, + $attrs->{from}, $attrs->{select}, $attrs->{where},$attrs); return (@data ? $self->_construct_object(@data) : ()); } @@ -577,9 +566,9 @@ first record from the resultset. sub next { my ($self) = @_; - if (@{$self->{all_cache} || []}) { + if (my $cache = $self->get_cache) { $self->{all_cache_position} ||= 0; - return $self->{all_cache}->[$self->{all_cache_position}++]; + return $cache->[$self->{all_cache_position}++]; } if ($self->{attrs}{cache}) { $self->{all_cache_position} = 1; @@ -589,27 +578,152 @@ sub next { @{delete $self->{stashed_row}} : $self->cursor->next ); -# warn Dumper(\@row); use Data::Dumper; return unless (@row); return $self->_construct_object(@row); } +sub _resolve { + my $self = shift; + + return if(exists $self->{_attrs}); #return if _resolve has already been called + + my $attrs = $self->{attrs}; + my $source = ($self->{_parent_rs}) ? $self->{_parent_rs} : $self->{result_source}; + + # XXX - lose storable dclone + my $record_filter = delete $attrs->{record_filter} if (defined $attrs->{record_filter}); + $attrs = Storable::dclone($attrs || {}); # { %{ $attrs || {} } }; + $attrs->{record_filter} = $record_filter if ($record_filter); + $self->{attrs}->{record_filter} = $record_filter if ($record_filter); + + my $alias = $attrs->{alias}; + + $attrs->{columns} ||= delete $attrs->{cols} if $attrs->{cols}; + delete $attrs->{as} if $attrs->{columns}; + $attrs->{columns} ||= [ $self->{result_source}->columns ] unless $attrs->{select}; + my $select_alias = ($self->{_parent_rs}) ? $self->{attrs}->{_live_join} : $alias; + $attrs->{select} = [ + map { m/\./ ? $_ : "${select_alias}.$_" } @{delete $attrs->{columns}} + ] if $attrs->{columns}; + $attrs->{as} ||= [ + map { m/^\Q$alias.\E(.+)$/ ? $1 : $_ } @{$attrs->{select}} + ]; + if (my $include = delete $attrs->{include_columns}) { + push(@{$attrs->{select}}, @$include); + push(@{$attrs->{as}}, map { m/([^.]+)$/; $1; } @$include); + } + + $attrs->{from} ||= [ { $alias => $source->from } ]; + $attrs->{seen_join} ||= {}; + my %seen; + if (my $join = delete $attrs->{join}) { + foreach my $j (ref $join eq 'ARRAY' ? 
@$join : ($join)) { + if (ref $j eq 'HASH') { + $seen{$_} = 1 foreach keys %$j; + } else { + $seen{$j} = 1; + } + } + + push(@{$attrs->{from}}, $source->resolve_join($join, $attrs->{alias}, $attrs->{seen_join})); + } + $attrs->{group_by} ||= $attrs->{select} if delete $attrs->{distinct}; + $attrs->{order_by} = [ $attrs->{order_by} ] if + $attrs->{order_by} and !ref($attrs->{order_by}); + $attrs->{order_by} ||= []; + + my $collapse = $attrs->{collapse} || {}; + if (my $prefetch = delete $attrs->{prefetch}) { + my @pre_order; + foreach my $p (ref $prefetch eq 'ARRAY' ? @$prefetch : ($prefetch)) { + if ( ref $p eq 'HASH' ) { + foreach my $key (keys %$p) { + push(@{$attrs->{from}}, $source->resolve_join($p, $attrs->{alias})) + unless $seen{$key}; + } + } else { + push(@{$attrs->{from}}, $source->resolve_join($p, $attrs->{alias})) + unless $seen{$p}; + } + my @prefetch = $source->resolve_prefetch( + $p, $attrs->{alias}, {}, \@pre_order, $collapse); + push(@{$attrs->{select}}, map { $_->[0] } @prefetch); + push(@{$attrs->{as}}, map { $_->[1] } @prefetch); + } + push(@{$attrs->{order_by}}, @pre_order); + } + $attrs->{collapse} = $collapse; + $self->{_attrs} = $attrs; +} + +sub _merge_attr { + my ($self, $a, $b, $is_prefetch) = @_; + + return $b unless $a; + if (ref $b eq 'HASH' && ref $a eq 'HASH') { + foreach my $key (keys %{$b}) { + if (exists $a->{$key}) { + $a->{$key} = $self->_merge_attr($a->{$key}, $b->{$key}, $is_prefetch); + } else { + $a->{$key} = delete $b->{$key}; + } + } + return $a; + } else { + $a = [$a] unless (ref $a eq 'ARRAY'); + $b = [$b] unless (ref $b eq 'ARRAY'); + + my $hash = {}; + my $array = []; + foreach ($a, $b) { + foreach my $element (@{$_}) { + if (ref $element eq 'HASH') { + $hash = $self->_merge_attr($hash, $element, $is_prefetch); + } elsif (ref $element eq 'ARRAY') { + $array = [@{$array}, @{$element}]; + } else { + if (($b == $_) && $is_prefetch) { + $self->_merge_array($array, $element, $is_prefetch); + } else { + push(@{$array}, $element); + } + } + } + } + + if ((keys %{$hash}) && (scalar(@{$array} > 0))) { + return [$hash, @{$array}]; + } else { + return (keys %{$hash}) ? $hash : $array; + } + } +} + +sub _merge_array { + my ($self, $a, $b) = @_; + + $b = [$b] unless (ref $b eq 'ARRAY'); + # add elements from @{$b} to @{$a} which aren't already in @{$a} + foreach my $b_element (@{$b}) { + push(@{$a}, $b_element) unless grep {$b_element eq $_} @{$a}; + } +} + sub _construct_object { my ($self, @row) = @_; - my @as = @{ $self->{attrs}{as} }; - + my @as = @{ $self->{_attrs}{as} }; + my $info = $self->_collapse_result(\@as, \@row); - my $new = $self->result_class->inflate_result($self->result_source, @$info); - - $new = $self->{attrs}{record_filter}->($new) - if exists $self->{attrs}{record_filter}; + $new = $self->{_attrs}{record_filter}->($new) + if exists $self->{_attrs}{record_filter}; return $new; } sub _collapse_result { my ($self, $as, $row, $prefix) = @_; + my $live_join = $self->{attrs}->{_live_join} ||=""; my %const; my @copy = @$row; @@ -629,7 +743,7 @@ sub _collapse_result { my $info = [ {}, {} ]; foreach my $key (keys %const) { - if (length $key) { + if (length $key && $key ne $live_join) { my $target = $info; my @parts = split(/\./, $key); foreach my $p (@parts) { @@ -645,9 +759,9 @@ sub _collapse_result { if (defined $prefix) { @collapse = map { m/^\Q${prefix}.\E(.+)$/ ? 
($1) : () - } keys %{$self->{collapse}} + } keys %{$self->{_attrs}->{collapse}} } else { - @collapse = keys %{$self->{collapse}}; + @collapse = keys %{$self->{_attrs}->{collapse}}; }; if (@collapse) { @@ -657,7 +771,7 @@ sub _collapse_result { $target = $target->[1]->{$p} ||= []; } my $c_prefix = (defined($prefix) ? "${prefix}.${c}" : $c); - my @co_key = @{$self->{collapse}{$c_prefix}}; + my @co_key = @{$self->{_attrs}->{collapse}{$c_prefix}}; my %co_check = map { ($_, $target->[0]->{$_}); } @co_key; my $tree = $self->_collapse_result($as, $row, $c_prefix); my (@final, @raw); @@ -669,11 +783,10 @@ sub _collapse_result { last unless (@raw = $self->cursor->next); $row = $self->{stashed_row} = \@raw; $tree = $self->_collapse_result($as, $row, $c_prefix); - #warn Data::Dumper::Dumper($tree, $row); } - @$target = @final; + @$target = (@final ? @final : [ {}, {} ]); + # single empty result to indicate an empty prefetched has_many } - return $info; } @@ -718,7 +831,7 @@ clause. sub count { my $self = shift; return $self->search(@_)->count if @_ and defined $_[0]; - return scalar @{ $self->get_cache } if @{ $self->get_cache }; + return scalar @{ $self->get_cache } if $self->get_cache; my $count = $self->_count; return 0 unless $count; @@ -732,7 +845,9 @@ sub count { sub _count { # Separated out so pager can get the full count my $self = shift; my $select = { count => '*' }; - my $attrs = { %{ $self->{attrs} } }; + + $self->_resolve; + my $attrs = { %{ $self->{_attrs} } }; if (my $group_by = delete $attrs->{group_by}) { delete $attrs->{having}; my @distinct = (ref $group_by ? @$group_by : ($group_by)); @@ -748,7 +863,6 @@ sub _count { # Separated out so pager can get the full count } $select = { count => { distinct => \@distinct } }; - #use Data::Dumper; die Dumper $select; } $attrs->{select} = $select; @@ -756,7 +870,6 @@ sub _count { # Separated out so pager can get the full count # offset, order by and page are not needed to count. record_filter is cdbi delete $attrs->{$_} for qw/rows offset order_by page pager record_filter/; - my ($count) = (ref $self)->new($self->result_source, $attrs)->cursor->next; return $count; } @@ -795,16 +908,18 @@ is returned in list context. sub all { my ($self) = @_; - return @{ $self->get_cache } if @{ $self->get_cache }; + return @{ $self->get_cache } if $self->get_cache; my @obj; - if (keys %{$self->{collapse}}) { + # TODO: don't call resolve here + $self->_resolve; + if (keys %{$self->{_attrs}->{collapse}}) { +# if ($self->{attrs}->{prefetch}) { # Using $self->cursor->all is really just an optimisation. # If we're collapsing has_many prefetches it probably makes # very little difference, and this is cleaner than hacking # _construct_object to survive the approach - $self->cursor->reset; my @row = $self->cursor->next; while (@row) { push(@obj, $self->_construct_object(@row)); @@ -836,6 +951,8 @@ Resets the resultset's cursor, so you can iterate through the elements again. 
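 For example, an illustrative sketch (assumes a 'CD' result source):
 
   my $rs = $schema->resultset('CD')->search({ year => 2006 });
   while (my $cd = $rs->next) { ... }   # first pass over the results
   $rs->reset;                          # rewind the cursor
   while (my $cd = $rs->next) { ... }   # same results, iterated again
 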
sub reset { my ($self) = @_; + delete $self->{_attrs} if (exists $self->{_attrs}); + $self->{all_cache_position} = 0; $self->cursor->reset; return $self; @@ -890,7 +1007,7 @@ sub _cond_for_update_delete { $cond->{-and} = []; my @cond = @{$self->{cond}{-and}}; - for (my $i = 0; $i < @cond - 1; $i++) { + for (my $i = 0; $i <= @cond - 1; $i++) { my $entry = $cond[$i]; my %hash; @@ -902,7 +1019,7 @@ sub _cond_for_update_delete { } else { $entry =~ /([^.]+)$/; - $hash{$entry} = $cond[++$i]; + $hash{$1} = $cond[++$i]; } push @{$cond->{-and}}, \%hash; @@ -1246,8 +1363,7 @@ sub update_or_create { my $row = $self->find($hash, $attrs); if (defined $row) { - $row->set_columns($hash); - $row->update; + $row->update($hash); return $row; } @@ -1269,7 +1385,7 @@ Gets the contents of the cache for the resultset, if the cache is set. =cut sub get_cache { - shift->{all_cache} || []; + shift->{all_cache}; } =head2 set_cache @@ -1292,13 +1408,7 @@ than re-querying the database even if the cache attr is not set. sub set_cache { my ( $self, $data ) = @_; $self->throw_exception("set_cache requires an arrayref") - if ref $data ne 'ARRAY'; - my $result_class = $self->result_class; - foreach( @$data ) { - $self->throw_exception( - "cannot cache object of type '$_', expected '$result_class'" - ) if ref $_ ne $result_class; - } + if defined($data) && (ref $data ne 'ARRAY'); $self->{all_cache} = $data; } @@ -1317,7 +1427,7 @@ Clears the cache for the resultset. =cut sub clear_cache { - shift->set_cache([]); + shift->set_cache(undef); } =head2 related_resultset @@ -1338,28 +1448,28 @@ Returns a related resultset for the supplied relationship name. sub related_resultset { my ( $self, $rel ) = @_; + $self->{related_resultsets} ||= {}; return $self->{related_resultsets}{$rel} ||= do { - #warn "fetching related resultset for rel '$rel'"; + #warn "fetching related resultset for rel '$rel' " . $self->result_source->{name}; my $rel_obj = $self->result_source->relationship_info($rel); $self->throw_exception( "search_related: result source '" . $self->result_source->name . "' has no such relationship ${rel}") unless $rel_obj; #die Dumper $self->{attrs}; - my $rs = $self->search(undef, { join => $rel }); - my $alias = defined $rs->{attrs}{seen_join}{$rel} - && $rs->{attrs}{seen_join}{$rel} > 1 - ? join('_', $rel, $rs->{attrs}{seen_join}{$rel}) - : $rel; - - $self->result_source->schema->resultset($rel_obj->{class} + my $rs = $self->result_source->schema->resultset($rel_obj->{class} )->search( undef, - { %{$rs->{attrs}}, - alias => $alias, + { %{$self->{attrs}}, select => undef, - as => undef } + as => undef, + join => $rel, + _live_join => $rel } ); + + # keep reference of the original resultset + $rs->{_parent_rs} = $self->result_source; + return $rs; }; } @@ -1484,6 +1594,10 @@ use C instead: You can create your own accessors if required - see L for details. +Please note: This will NOT insert an C into the SQL statement +produced, it is used for internal access only. Thus attempting to use the accessor +in an C clause or similar will fail misrably. + =head2 join =over 4 @@ -1578,6 +1692,83 @@ C can be used with the following relationship types: C, C (or if you're using C, any relationship declared with an accessor type of 'single' or 'filter'). +=head2 page + +=over 4 + +=item Value: $page + +=back + +Makes the resultset paged and specifies the page to retrieve. Effectively +identical to creating a non-pages resultset and then calling ->page($page) +on it. 
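+
+For example (an illustrative sketch; assumes a 'CD' result source), the
+following two resultsets fetch the same second page of ten rows:
+
+  $schema->resultset('CD')->search(undef, { rows => 10, page => 2 });
+
+  $schema->resultset('CD')->search(undef, { rows => 10 })->page(2);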
+ +=head2 rows + +=over 4 + +=item Value: $rows + +=back + +Specifes the maximum number of rows for direct retrieval or the number of +rows per page if the page attribute or method is used. + +=head2 group_by + +=over 4 + +=item Value: \@columns + +=back + +A arrayref of columns to group by. Can include columns of joined tables. + + group_by => [qw/ column1 column2 ... /] + +=head2 having + +=over 4 + +=item Value: $condition + +=back + +HAVING is a select statement attribute that is applied between GROUP BY and +ORDER BY. It is applied to the after the grouping calculations have been +done. + + having => { 'count(employee)' => { '>=', 100 } } + +=head2 distinct + +=over 4 + +=item Value: (0 | 1) + +=back + +Set to 1 to group by all columns. + +=head2 cache + +Set to 1 to cache search results. This prevents extra SQL queries if you +revisit rows in your ResultSet: + + my $resultset = $schema->resultset('Artist')->search( undef, { cache => 1 } ); + + while( my $artist = $resultset->next ) { + ... do stuff ... + } + + $rs->first; # without cache, this would issue a query + +By default, searches are not cached. + +For more examples of using these attributes, see +L. + =head2 from =over 4 @@ -1591,21 +1782,35 @@ statements generated by L, allowing you to express custom C clauses. NOTE: Use this on your own risk. This allows you to shoot off your foot! + C will usually do what you need and it is strongly recommended that you avoid using C unless you cannot achieve the desired result using C. +And we really do mean "cannot", not just tried and failed. Attempting to use +this because you're having problems with C is like trying to use x86 +ASM because you've got a syntax error in your C. Trust us on this. + +Now, if you're still really, really sure you need to use this (and if you're +not 100% sure, ask the mailing list first), here's an explanation of how this +works. -In simple terms, C works as follows: +The syntax is as follows - + [ + { => }, [ - { => , -join_type => 'inner|left|right' } - [] # nested JOIN (optional) - { => } - ] + { => , -join_type => 'inner|left|right' }, + [], # nested JOIN (optional) + { => , ... (more conditions) }, + ], + # More of the above [ ] may follow for additional joins + ] - JOIN -
- [JOIN ...] - ON = + + JOIN + + [JOIN ...] + ON = + An easy way to follow the examples below is to remember the following: @@ -1671,83 +1876,6 @@ with a father in the person table, we could explicitly use C: # SELECT child.* FROM person child # INNER JOIN person father ON child.father_id = father.id -=head2 page - -=over 4 - -=item Value: $page - -=back - -Makes the resultset paged and specifies the page to retrieve. Effectively -identical to creating a non-pages resultset and then calling ->page($page) -on it. - -=head2 rows - -=over 4 - -=item Value: $rows - -=back - -Specifes the maximum number of rows for direct retrieval or the number of -rows per page if the page attribute or method is used. - -=head2 group_by - -=over 4 - -=item Value: \@columns - -=back - -A arrayref of columns to group by. Can include columns of joined tables. - - group_by => [qw/ column1 column2 ... /] - -=head2 having - -=over 4 - -=item Value: $condition - -=back - -HAVING is a select statement attribute that is applied between GROUP BY and -ORDER BY. It is applied to the after the grouping calculations have been -done. - - having => { 'count(employee)' => { '>=', 100 } } - -=head2 distinct - -=over 4 - -=item Value: (0 | 1) - -=back - -Set to 1 to group by all columns. - -=head2 cache - -Set to 1 to cache search results. This prevents extra SQL queries if you -revisit rows in your ResultSet: - - my $resultset = $schema->resultset('Artist')->search( undef, { cache => 1 } ); - - while( my $artist = $resultset->next ) { - ... do stuff ... - } - - $rs->first; # without cache, this would issue a query - -By default, searches are not cached. - -For more examples of using these attributes, see -L. - =cut 1;