use strict;
use warnings;
-use overload
- '0+' => "count",
- 'bool' => "_bool",
- fallback => 1;
+use base qw/DBIx::Class/;
use Carp::Clan qw/^DBIx::Class/;
use DBIx::Class::Exception;
use Data::Page;
use DBIx::Class::ResultSetColumn;
use DBIx::Class::ResultSourceHandle;
use List::Util ();
-use Scalar::Util ();
-use base qw/DBIx::Class/;
+use Scalar::Util qw/blessed weaken/;
+use Try::Tiny;
+use namespace::clean;
+
+use overload
+ '0+' => "count",
+ 'bool' => "_bool",
+ fallback => 1;
__PACKAGE__->mk_group_accessors('simple' => qw/_result_class _source_handle/);
=head1 SYNOPSIS
my $users_rs = $schema->resultset('User');
+ while( my $user = $users_rs->next) {
+ print $user->username;
+ }
+
my $registered_users_rs = $schema->resultset('User')->search({ registered => 1 });
my @cds_in_2005 = $schema->resultset('CD')->search({ year => 2005 })->all();
The query that the ResultSet represents is B<only> executed against
the database when these methods are called:
-L</find> L</next> L</all> L</first> L</single> L</count>
+L</find>, L</next>, L</all>, L</first>, L</single>, L</count>.
+
+If a resultset is used in a numeric context it returns the L</count>.
+However, if it is used in a boolean context it is B<always> true. So if
+you want to check if a resultset has any results, you must use C<if $rs
+!= 0>.
=head1 EXAMPLES
L</join>, L</prefetch>, L</+select>, L</+as> attributes are merged
into the existing ones from the original resultset.
-The L</where>, L</having> attribute, and any search conditions are
+The L</where> and L</having> attributes, and any search conditions, are
merged with an SQL C<AND> to the existing condition from the original
resultset.
See: L</search>, L</count>, L</get_column>, L</all>, L</create>.
-=head1 OVERLOADING
-
-If a resultset is used in a numeric context it returns the L</count>.
-However, if it is used in a boolean context it is always true. So if
-you want to check if a resultset has any results use C<if $rs != 0>.
-C<if $rs> will always be true.
-
=head1 METHODS
=head2 new
my $self = {
_source_handle => $source,
cond => $attrs->{where},
- count => undef,
pager => undef,
attrs => $attrs
};
my $self = shift;
my $attrs = (@_ > 1 && ref $_[$#_] eq 'HASH' ? pop(@_) : {});
- # Default to the primary key, but allow a specific key
- my @cols = exists $attrs->{key}
- ? $self->result_source->unique_constraint_columns($attrs->{key})
- : $self->result_source->primary_columns;
- $self->throw_exception(
- "Can't find unless a primary key is defined or unique constraint is specified"
- ) unless @cols;
-
- # Parse out a hashref from input
+ # Parse out a query from input
my $input_query;
if (ref $_[0] eq 'HASH') {
$input_query = { %{$_[0]} };
}
- elsif (@_ == @cols) {
- $input_query = {};
- @{$input_query}{@cols} = @_;
- }
else {
- # Compatibility: Allow e.g. find(id => $value)
- carp "Find by key => value deprecated; please use a hashref instead";
- $input_query = {@_};
- }
+ my $constraint = exists $attrs->{key} ? $attrs->{key} : 'primary';
+ my @c_cols = $self->result_source->unique_constraint_columns($constraint);
+
+ $self->throw_exception(
+ "No constraint columns, maybe a malformed '$constraint' constraint?"
+ ) unless @c_cols;
- my (%related, $info);
+ $self->throw_exception (
+ 'find() expects either a column/value hashref, or a list of values '
+ . "corresponding to the columns of the specified unique constraint '$constraint'"
+ ) unless @c_cols == @_;
- KEY: foreach my $key (keys %$input_query) {
- if (ref($input_query->{$key})
- && ($info = $self->result_source->relationship_info($key))) {
+ $input_query = {};
+ @{$input_query}{@c_cols} = @_;
+ }
+
+ my %related;
+ for my $key (keys %$input_query) {
+ if (
+ my $keyref = ref($input_query->{$key})
+ and
+ my $relinfo = $self->result_source->relationship_info($key)
+ ) {
my $val = delete $input_query->{$key};
- next KEY if (ref($val) eq 'ARRAY'); # has_many for multi_create
+
+ next if $keyref eq 'ARRAY'; # has_many for multi_create
+
my $rel_q = $self->result_source->_resolve_condition(
- $info->{cond}, $val, $key
- );
- die "Can't handle OR join condition in find" if ref($rel_q) eq 'ARRAY';
+ $relinfo->{cond}, $val, $key
+ );
+ die "Can't handle complex relationship conditions in find" if ref($rel_q) ne 'HASH';
@related{keys %$rel_q} = values %$rel_q;
}
}
- if (my @keys = keys %related) {
- @{$input_query}{@keys} = values %related;
- }
+ # relationship conditions take precedence (?)
+ @{$input_query}{keys %related} = values %related;
# Build the final query: Default to the disjunction of the unique queries,
# but allow the input query in case the ResultSet defines the query or the
: $self->_add_alias($input_query, $alias);
}
- # Run the query
+ # Run the query, passing the result_class since it should propagate for find
my $rs = $self->search ($query, {result_class => $self->result_class, %$attrs});
if (keys %{$rs->_resolved_attrs->{collapse}}) {
my $row = $rs->next;
=item Arguments: $cond?
-=item Return Value: $row_object?
+=item Return Value: $row_object | undef
=back
my $cd = $schema->resultset('CD')->single({ year => 2001 });
Inflates the first result without creating a cursor if the resultset has
-any records in it; if not returns nothing. Used by L</find> as a lean version of
-L</search>.
+any records in it; if not returns C<undef>. Used by L</find> as a lean version
+of L</search>.
While this method can take an optional search condition (just like L</search>)
being a fast-code-path it does not recognize search attributes. If you need to
}
}
-# XXX: Disabled since it doesn't infer uniqueness in all cases
-# unless ($self->_is_unique_query($attrs->{where})) {
-# carp "Query not guaranteed to return a single row"
-# . "; please declare your unique constraints or use search instead";
-# }
-
my @data = $self->result_source->storage->select_single(
$attrs->{from}, $attrs->{select},
$attrs->{where}, $attrs
}
-# _is_unique_query
-#
-# Try to determine if the specified query is guaranteed to be unique, based on
-# the declared unique constraints.
-
-sub _is_unique_query {
- my ($self, $query) = @_;
-
- my $collapsed = $self->_collapse_query($query);
- my $alias = $self->{attrs}{alias};
-
- foreach my $name ($self->result_source->unique_constraint_names) {
- my @unique_cols = map {
- "$alias.$_"
- } $self->result_source->unique_constraint_columns($name);
-
- # Count the values for each unique column
- my %seen = map { $_ => 0 } @unique_cols;
-
- foreach my $key (keys %$collapsed) {
- my $aliased = $key =~ /\./ ? $key : "$alias.$key";
- next unless exists $seen{$aliased}; # Additional constraints are okay
- $seen{$aliased} = scalar keys %{ $collapsed->{$key} };
- }
-
- # If we get 0 or more than 1 value for a column, it's not necessarily unique
- return 1 unless grep { $_ != 1 } values %seen;
- }
-
- return 0;
-}
-
# _collapse_query
#
# Recursively collapse the query, accumulating values for each column.
$attrs->{offset} = $self->{attrs}{offset} || 0;
$attrs->{offset} += $min;
$attrs->{rows} = ($max ? ($max - $min + 1) : 1);
- return $self->search(undef(), $attrs);
+ return $self->search(undef, $attrs);
#my $slice = (ref $self)->new($self->result_source, $attrs);
#return (wantarray ? $slice->all : $slice);
}
=item Arguments: none
-=item Return Value: $result?
+=item Return Value: $result | undef
=back
return $cache->[$self->{all_cache_position}++];
}
if ($self->{attrs}{cache}) {
+ delete $self->{pager};
$self->{all_cache_position} = 1;
return ($self->all)[0];
}
# without having to contruct the full hash
if (keys %collapse) {
- my %pri = map { ($_ => 1) } $self->result_source->primary_columns;
+ my %pri = map { ($_ => 1) } $self->result_source->_pri_cols;
foreach my $i (0 .. $#construct_as) {
next if defined($construct_as[$i][0]); # only self table
if (delete $pri{$construct_as[$i][1]}) {
sub result_class {
my ($self, $result_class) = @_;
if ($result_class) {
- $self->ensure_class_loaded($result_class);
+ unless (ref $result_class) { # don't fire this for an object
+ $self->ensure_class_loaded($result_class);
+ }
$self->_result_class($result_class);
+ # THIS LINE WOULD BE A BUG - this accessor specifically exists to
+ # permit the user to set result class on one result set only; it only
+ # chains if provided to search()
+ #$self->{attrs}{result_class} = $result_class if ref $self;
}
$self->_result_class;
}
$attrs ||= $self->_resolved_attrs;
my $tmp_attrs = { %$attrs };
-
- # take off any limits, record_filter is cdbi, and no point of ordering a count
- delete $tmp_attrs->{$_} for (qw/select as rows offset order_by record_filter/);
+ # take off any limits, record_filter is cdbi, and no point of ordering nor locking a count
+ delete @{$tmp_attrs}{qw/rows offset order_by record_filter for/};
# overwrite the selector (supplied by the storage)
- $tmp_attrs->{select} = $rsrc->storage->_count_select ($rsrc, $tmp_attrs);
+ $tmp_attrs->{select} = $rsrc->storage->_count_select ($rsrc, $attrs);
$tmp_attrs->{as} = 'count';
my $tmp_rs = $rsrc->resultset_class->new($rsrc, $tmp_attrs)->get_column ('count');
my ($self, $attrs) = @_;
my $rsrc = $self->result_source;
- $attrs ||= $self->_resolved_attrs_copy;
+ $attrs ||= $self->_resolved_attrs;
my $sub_attrs = { %$attrs };
-
- # extra selectors do not go in the subquery and there is no point of ordering it
- delete $sub_attrs->{$_} for qw/collapse select _prefetch_select as order_by/;
+ # extra selectors do not go in the subquery and there is no point of ordering it, nor locking it
+ delete @{$sub_attrs}{qw/collapse select _prefetch_select as order_by for/};
# if we multi-prefetch we group_by primary keys only as this is what we would
# get out of the rs via ->next/->all. We *DO WANT* to clobber old group_by regardless
$sub_attrs->{group_by} = [ map { "$attrs->{alias}.$_" } ($rsrc->_pri_cols) ]
}
- $sub_attrs->{select} = $rsrc->storage->_subq_count_select ($rsrc, $attrs);
+ # Calculate subquery selector
+ if (my $g = $sub_attrs->{group_by}) {
- # this is so that the query can be simplified e.g.
- # * ordering can be thrown away in things like Top limit
- $sub_attrs->{-for_count_only} = 1;
+ my $sql_maker = $rsrc->storage->sql_maker;
- my $sub_rs = $rsrc->resultset_class->new ($rsrc, $sub_attrs);
+ # necessary as the group_by may refer to aliased functions
+ my $sel_index;
+ for my $sel (@{$attrs->{select}}) {
+ $sel_index->{$sel->{-as}} = $sel
+ if (ref $sel eq 'HASH' and $sel->{-as});
+ }
- $attrs->{from} = [{
- -alias => 'count_subq',
- -source_handle => $rsrc->handle,
- count_subq => $sub_rs->as_query,
- }];
+ for my $g_part (@$g) {
+ my $colpiece = $sel_index->{$g_part} || $g_part;
- # the subquery replaces this
- delete $attrs->{$_} for qw/where bind collapse group_by having having_bind rows offset/;
+ # disqualify join-based group_by's. Arcane but possible query
+ # also horrible horrible hack to alias a column (not a func.)
+ # (probably need to introduce SQLA syntax)
+ if ($colpiece =~ /\./ && $colpiece !~ /^$attrs->{alias}\./) {
+ my $as = $colpiece;
+ $as =~ s/\./__/;
+ $colpiece = \ sprintf ('%s AS %s', map { $sql_maker->_quote ($_) } ($colpiece, $as) );
+ }
+ push @{$sub_attrs->{select}}, $colpiece;
+ }
+ }
+ else {
+ my @pcols = map { "$attrs->{alias}.$_" } ($rsrc->primary_columns);
+ $sub_attrs->{select} = @pcols ? \@pcols : [ 1 ];
+ }
- return $self->_count_rs ($attrs);
+ return $rsrc->resultset_class
+ ->new ($rsrc, $sub_attrs)
+ ->as_subselect_rs
+ ->search ({}, { columns => { count => $rsrc->storage->_count_select ($rsrc, $attrs) } })
+ ->get_column ('count');
}
sub _bool {
=item Arguments: none
-=item Return Value: $object?
+=item Return Value: $object | undef
=back
-Resets the resultset and returns an object for the first result (if the
-resultset returns anything).
+Resets the resultset and returns an object for the first result (or C<undef>
+if the resultset is empty).
=cut
my $cond = $rsrc->schema->storage->_strip_cond_qualifiers ($self->{cond});
my $needs_group_by_subq = $self->_has_resolved_attr (qw/collapse group_by -join/);
- my $needs_subq = $needs_group_by_subq || (not defined $cond) || $self->_has_resolved_attr(qw/row offset/);
+ my $needs_subq = $needs_group_by_subq || (not defined $cond) || $self->_has_resolved_attr(qw/rows offset/);
if ($needs_group_by_subq or $needs_subq) {
# make a new $rs selecting only the PKs (that's all we really need)
my $attrs = $self->_resolved_attrs_copy;
- delete $attrs->{$_} for qw/collapse select as/;
+
+ delete $attrs->{$_} for qw/collapse _collapse_order_by select _prefetch_select as/;
$attrs->{columns} = [ map { "$attrs->{alias}.$_" } ($self->result_source->_pri_cols) ];
if ($needs_group_by_subq) {
}
my $subrs = (ref $self)->new($rsrc, $attrs);
-
return $self->result_source->storage->_subq_update_delete($subrs, $op, $values);
}
else {
=back
Sets the specified columns in the resultset to the supplied values in a
-single query. Return value will be true if the update succeeded or false
-if no records were updated; exact type of success value is storage-dependent.
+single query. Note that this will not run any accessor/set_column/update
+triggers, nor will it update any row object instances derived from this
+resultset (this includes the contents of the L<resultset cache|/set_cache>
+if any). See L</update_all> if you need to execute any on-update
+triggers or cascades defined either by you or a
+L<result component|DBIx::Class::Manual::Component/WHAT_IS_A_COMPONENT>.
+
+The return value is a pass through of what the underlying
+storage backend returned, and may vary. See L<DBI/execute> for the most
+common case.
=cut
=back
-Fetches all objects and updates them one at a time. Note that C<update_all>
-will run DBIC cascade triggers, while L</update> will not.
+Fetches all objects and updates them one at a time via
+L<DBIx::Class::Row/update>. Note that C<update_all> will run DBIC defined
+triggers, while L</update> will not.
=cut
my ($self, $values) = @_;
$self->throw_exception('Values for update_all must be a hash')
unless ref $values eq 'HASH';
- foreach my $obj ($self->all) {
- $obj->set_columns($values)->update;
- }
+
+ my $guard = $self->result_source->schema->txn_scope_guard;
+ $_->update($values) for $self->all;
+ $guard->commit;
return 1;
}
=back
-Deletes the contents of the resultset from its result source. Note that this
-will not run DBIC cascade triggers. See L</delete_all> if you need triggers
-to run. See also L<DBIx::Class::Row/delete>.
+Deletes the rows matching this resultset in a single query. Note that this
+will not run any delete triggers, nor will it alter the
+L<in_storage|DBIx::Class::Row/in_storage> status of any row object instances
+derived from this resultset (this includes the contents of the
+L<resultset cache|/set_cache> if any). See L</delete_all> if you need to
+execute any on-delete triggers or cascades defined either by you or a
+L<result component|DBIx::Class::Manual::Component/WHAT_IS_A_COMPONENT>.
-Return value will be the amount of rows deleted; exact type of return value
-is storage-dependent.
+The return value is a pass through of what the underlying storage backend
+returned, and may vary. See L<DBI/execute> for the most common case.
=cut
=back
-Fetches all objects and deletes them one at a time. Note that C<delete_all>
-will run DBIC cascade triggers, while L</delete> will not.
+Fetches all objects and deletes them one at a time via
+L<DBIx::Class::Row/delete>. Note that C<delete_all> will run DBIC defined
+triggers, while L</delete> will not.
=cut
$self->throw_exception('delete_all does not accept any arguments')
if @_;
+ my $guard = $self->result_source->schema->txn_scope_guard;
$_->delete for $self->all;
+ $guard->commit;
return 1;
}
=cut
+# make a wizard good for both a scalar and a hashref
+my $mk_lazy_count_wizard = sub {
+ require Variable::Magic;
+
+ my $stash = { total_rs => shift };
+ my $slot = shift; # only used by the hashref magic
+
+ my $magic = Variable::Magic::wizard (
+ data => sub { $stash },
+
+ (!$slot)
+ ? (
+ # the scalar magic
+ get => sub {
+ # set value lazily, and dispell for good
+ ${$_[0]} = $_[1]{total_rs}->count;
+ Variable::Magic::dispell (${$_[0]}, $_[1]{magic_selfref});
+ return 1;
+ },
+ set => sub {
+ # an explicit set implies dispell as well
+ # the unless() is to work around "fun and giggles" below
+ Variable::Magic::dispell (${$_[0]}, $_[1]{magic_selfref})
+ unless (caller(2))[3] eq 'DBIx::Class::ResultSet::pager';
+ return 1;
+ },
+ )
+ : (
+ # the uvar magic
+ fetch => sub {
+ if ($_[2] eq $slot and !$_[1]{inactive}) {
+ my $cnt = $_[1]{total_rs}->count;
+ $_[0]->{$slot} = $cnt;
+
+ # attempting to dispell in a fetch handle (works in store), seems
+ # to invariable segfault on 5.10, 5.12, 5.13 :(
+ # so use an inactivator instead
+ #Variable::Magic::dispell (%{$_[0]}, $_[1]{magic_selfref});
+ $_[1]{inactive}++;
+ }
+ return 1;
+ },
+ store => sub {
+ if (! $_[1]{inactive} and $_[2] eq $slot) {
+ #Variable::Magic::dispell (%{$_[0]}, $_[1]{magic_selfref});
+ $_[1]{inactive}++
+ unless (caller(2))[3] eq 'DBIx::Class::ResultSet::pager';
+ }
+ return 1;
+ },
+ ),
+ );
+
+ $stash->{magic_selfref} = $magic;
+ weaken ($stash->{magic_selfref}); # this fails on 5.8.1
+
+ return $magic;
+};
+
+# the tie class for 5.8.1
+{
+ package DBIx::Class::__DBIC_LAZY_RS_COUNT__;
+ use base qw/Tie::Hash/;
+
+ sub FIRSTKEY { my $dummy = scalar keys %{$_[0]{data}}; each %{$_[0]{data}} }
+ sub NEXTKEY { each %{$_[0]{data}} }
+ sub EXISTS { exists $_[0]{data}{$_[1]} }
+ sub DELETE { delete $_[0]{data}{$_[1]} }
+ sub CLEAR { %{$_[0]{data}} = () }
+ sub SCALAR { scalar %{$_[0]{data}} }
+
+ sub TIEHASH {
+ $_[1]{data} = {%{$_[1]{selfref}}};
+ %{$_[1]{selfref}} = ();
+ Scalar::Util::weaken ($_[1]{selfref});
+ return bless ($_[1], $_[0]);
+ };
+
+ sub FETCH {
+ if ($_[1] eq $_[0]{slot}) {
+ my $cnt = $_[0]{data}{$_[1]} = $_[0]{total_rs}->count;
+ untie %{$_[0]{selfref}};
+ %{$_[0]{selfref}} = %{$_[0]{data}};
+ return $cnt;
+ }
+ else {
+ $_[0]{data}{$_[1]};
+ }
+ }
+
+ sub STORE {
+ $_[0]{data}{$_[1]} = $_[2];
+ if ($_[1] eq $_[0]{slot}) {
+ untie %{$_[0]{selfref}};
+ %{$_[0]{selfref}} = %{$_[0]{data}};
+ }
+ $_[2];
+ }
+}
+
sub pager {
my ($self) = @_;
return $self->{pager} if $self->{pager};
+ if ($self->get_cache) {
+ $self->throw_exception ('Pagers on cached resultsets are not supported');
+ }
+
my $attrs = $self->{attrs};
$self->throw_exception("Can't create pager for non-paged rs")
unless $self->{attrs}{page};
# with a subselect) to get the real total count
my $count_attrs = { %$attrs };
delete $count_attrs->{$_} for qw/rows offset page pager/;
- my $total_count = (ref $self)->new($self->result_source, $count_attrs)->count;
+ my $total_rs = (ref $self)->new($self->result_source, $count_attrs);
+
- return $self->{pager} = Data::Page->new(
- $total_count,
+### the following may seem awkward and dirty, but it's a thought-experiment
+### necessary for future development of DBIx::DS. Do *NOT* change this code
+### before talking to ribasushi/mst
+
+ my $pager = Data::Page->new(
+ 0, #start with an empty set
$attrs->{rows},
- $self->{attrs}{page}
+ $self->{attrs}{page},
);
+
+ my $data_slot = 'total_entries';
+
+ # Since we are interested in a cached value (once it's set - it's set), every
+ # technique will detach from the magic-host once the time comes to fire the
+ # ->count (or in the segfaulting case of >= 5.10 it will deactivate itself)
+
+ if ($] < 5.008003) {
+ # 5.8.1 throws 'Modification of a read-only value attempted' when one tries
+ # to weakref the magic container :(
+ # tested on 5.8.1
+ tie (%$pager, 'DBIx::Class::__DBIC_LAZY_RS_COUNT__',
+ { slot => $data_slot, total_rs => $total_rs, selfref => $pager }
+ );
+ }
+ elsif ($] < 5.010) {
+ # We can use magic on the hash value slot. It's interesting that the magic is
+ # attached to the hash-slot, and does *not* stop working once I do the dummy
+ # assignments after the cast()
+ # tested on 5.8.3 and 5.8.9
+ my $magic = $mk_lazy_count_wizard->($total_rs);
+ Variable::Magic::cast ( $pager->{$data_slot}, $magic );
+
+ # this is for fun and giggles
+ $pager->{$data_slot} = -1;
+ $pager->{$data_slot} = 0;
+
+ # this does not work for scalars, but works with
+ # uvar magic below
+ #my %vals = %$pager;
+ #%$pager = ();
+ #%{$pager} = %vals;
+ }
+ else {
+ # And the uvar magic
+ # works on 5.10.1, 5.12.1 and 5.13.4 in its current form,
+ # however see the wizard maker for more notes
+ my $magic = $mk_lazy_count_wizard->($total_rs, $data_slot);
+ Variable::Magic::cast ( %$pager, $magic );
+
+ # still works
+ $pager->{$data_slot} = -1;
+ $pager->{$data_slot} = 0;
+
+ # this now works
+ my %vals = %$pager;
+ %$pager = ();
+ %{$pager} = %vals;
+ }
+
+ return $self->{pager} = $pager;
}
=head2 page
my $value = shift;
my $ref_type = ref $value;
return 1 if $ref_type eq '' || $ref_type eq 'SCALAR';
- return 1 if Scalar::Util::blessed($value);
+ return 1 if blessed $value;
return 0;
}
);
Example of creating a new row and also creating a row in a related
-C<belongs_to>resultset. Note Hashref.
+C<belongs_to> resultset. Note Hashref.
$cd_rs->create({
title=>"Music for Silly Walks",
producer => $producer,
name => 'harry',
}, {
- key => 'primary,
+ key => 'primary',
});
=item Arguments: none
-=item Return Value: \@cache_objects?
+=item Return Value: \@cache_objects | undef
=back
=item Arguments: none
-=item Return Value: []
+=item Return Value: undef
=back
sub is_ordered {
my ($self) = @_;
- return scalar $self->result_source->storage->_parse_order_by($self->{attrs}{order_by});
+ return scalar $self->result_source->storage->_extract_order_columns($self->{attrs}{order_by});
}
=head2 related_resultset
# (the select/as attrs were deleted in the beginning), we need to flip all
# left joins to inner, so we get the expected results
# read the comment on top of the actual function to see what this does
- $attrs->{from} = $rsrc->schema->storage->_straight_join_to_node ($attrs->{from}, $alias);
+ $attrs->{from} = $rsrc->schema->storage->_inner_join_to_node ($attrs->{from}, $alias);
#XXX - temp fix for result_class bug. There likely is a more elegant fix -groditi
=cut
sub as_subselect_rs {
- my $self = shift;
+ my $self = shift;
+
+ my $attrs = $self->_resolved_attrs;
- return $self->result_source->resultset->search( undef, {
- alias => $self->current_source_alias,
- from => [{
- $self->current_source_alias => $self->as_query,
- -alias => $self->current_source_alias,
- -source_handle => $self->result_source->handle,
- }]
- });
+ my $fresh_rs = (ref $self)->new (
+ $self->result_source
+ );
+
+ # these pieces will be locked in the subquery
+ delete $fresh_rs->{cond};
+ delete @{$fresh_rs->{attrs}}{qw/where bind/};
+
+ return $fresh_rs->search( {}, {
+ from => [{
+ $attrs->{alias} => $self->as_query,
+ -alias => $attrs->{alias},
+ -source_handle => $self->result_source->handle,
+ }],
+ alias => $attrs->{alias},
+ });
}
# This code is called by search_related, and makes sure there
# ->_resolve_join as otherwise they get lost - captainL
my $join = $self->_merge_attr( $attrs->{join}, $attrs->{prefetch} );
- delete @{$attrs}{qw/join prefetch collapse distinct select as columns +select +as +columns/};
+ delete @{$attrs}{qw/join prefetch collapse group_by distinct select as columns +select +as +columns/};
my $seen = { %{ (delete $attrs->{seen_join}) || {} } };
-alias => $attrs->{alias},
$attrs->{alias} => $rs_copy->as_query,
}];
- delete @{$attrs}{@force_subq_attrs, 'where'};
+ delete @{$attrs}{@force_subq_attrs, qw/where bind/};
$seen->{-relation_chain_depth} = 0;
}
elsif ($attrs->{from}) { #shallow copy suffices
carp ("Useless use of distinct on a grouped resultset ('distinct' is ignored when a 'group_by' is present)");
}
else {
- $attrs->{group_by} = [ grep { !ref($_) || (ref($_) ne 'HASH') } @{$attrs->{select}} ];
+ my $storage = $self->result_source->schema->storage;
+ my $rs_column_list = $storage->_resolve_column_info ($attrs->{from});
+
+ my $group_spec = $attrs->{group_by} = [];
+ my %group_index;
+ for (@{$attrs->{select}}) {
+ if (! ref($_) or ref ($_) ne 'HASH' ) {
+ push @$group_spec, $_;
+ $group_index{$_}++;
+ if ($rs_column_list->{$_} and $_ !~ /\./ ) {
+ # add a fully qualified version as well
+ $group_index{"$rs_column_list->{$_}{-source_alias}.$_"}++;
+ }
+ }
+ }
# add any order_by parts that are not already present in the group_by
# we need to be careful not to add any named functions/aggregates
# i.e. select => [ ... { count => 'foo', -as 'foocount' } ... ]
- my %already_grouped = map { $_ => 1 } (@{$attrs->{group_by}});
-
- my $storage = $self->result_source->schema->storage;
+ for my $chunk ($storage->_extract_order_columns($attrs->{order_by})) {
- my $rs_column_list = $storage->_resolve_column_info ($attrs->{from});
+ # only consider real columns (for functions the user got to do an explicit group_by)
+ my $colinfo = $rs_column_list->{$chunk}
+ or next;
- for my $chunk ($storage->_parse_order_by($attrs->{order_by})) {
- if ($rs_column_list->{$chunk} && not $already_grouped{$chunk}++) {
- push @{$attrs->{group_by}}, $chunk;
- }
+ $chunk = "$colinfo->{-source_alias}.$chunk" if $chunk !~ /\./;
+ push @$group_spec, $chunk unless $group_index{$chunk}++;
}
}
}
C<select> as normal. (You may also use the C<cols> attribute, as in
earlier versions of DBIC.)
+Essentially C<columns> does the same as L</select> and L</as>.
+
+ columns => [ 'foo', { bar => 'baz' } ]
+
+is the same as
+
+ select => [qw/foo baz/],
+ as => [qw/foo bar/]
+
=head2 +columns
=over 4
select => [
'name',
{ count => 'employeeid' },
- { sum => 'salary' }
+ { max => { length => 'name' }, -as => 'longest_name' }
]
});
-When you use function/stored procedure names and do not supply an C<as>
-attribute, the column names returned are storage-dependent. E.g. MySQL would
-return a column named C<count(employeeid)> in the above example.
+ # Equivalent SQL
+ SELECT name, COUNT( employeeid ), MAX( LENGTH( name ) ) AS longest_name FROM employee
-B<NOTE:> You will almost always need a corresponding 'as' entry when you use
-'select'.
+B<NOTE:> You will almost always need a corresponding L</as> attribute when you
+use L</select>, to instruct DBIx::Class how to store the result of the column.
+Also note that the L</as> attribute has nothing to do with the SQL-side 'AS'
+identifier aliasing. You can however alias a function, so you can use it in
+e.g. an C<ORDER BY> clause. This is done via the C<-as> B<select function
+attribute> supplied as shown in the example above.
=head2 +select
=over 4
Indicates additional columns to be selected from storage. Works the same as
-L</select> but adds columns to the selection.
+L</select> but adds columns to the default selection, instead of specifying
+an explicit list.
=back
=back
-Indicates column names for object inflation. That is, C<as>
-indicates the name that the column can be accessed as via the
-C<get_column> method (or via the object accessor, B<if one already
-exists>). It has nothing to do with the SQL code C<SELECT foo AS bar>.
-
-The C<as> attribute is used in conjunction with C<select>,
-usually when C<select> contains one or more function or stored
-procedure names:
+Indicates column names for object inflation. That is L</as> indicates the
+slot name in which the column value will be stored within the
+L<Row|DBIx::Class::Row> object. The value will then be accessible via this
+identifier by the C<get_column> method (or via the object accessor B<if one
+with the same name already exists>) as shown below. The L</as> attribute has
+B<nothing to do> with the SQL-side C<AS>. See L</select> for details.
$rs = $schema->resultset('Employee')->search(undef, {
select => [
'name',
- { count => 'employeeid' }
+ { count => 'employeeid' },
+ { max => { length => 'name' }, -as => 'longest_name' }
],
- as => ['name', 'employee_count'],
+ as => [qw/
+ name
+ employee_count
+ max_name_length
+ /],
});
- my $employee = $rs->first(); # get the first Employee
-
If the object against which the search is performed already has an accessor
matching a column name specified in C<as>, the value can be retrieved using
the accessor as normal:
You can create your own accessors if required - see
L<DBIx::Class::Manual::Cookbook> for details.
-Please note: This will NOT insert an C<AS employee_count> into the SQL
-statement produced, it is used for internal access only. Thus
-attempting to use the accessor in an C<order_by> clause or similar
-will fail miserably.
-
-To get around this limitation, you can supply literal SQL to your
-C<select> attribute that contains the C<AS alias> text, e.g.
-
- select => [\'myfield AS alias']
-
=head2 join
=over 4