use DBIx::Class::Carp;
use DBIx::Class::ResultSetColumn;
use Scalar::Util qw/blessed weaken reftype/;
+use DBIx::Class::_Util 'fail_on_internal_wantarray';
use Try::Tiny;
use Data::Compare (); # no imports!!! guard against insane architecture
=head3 Resolving conditions and attributes
-When a resultset is chained from another resultset (ie:
-C<my $new_rs = $old_rs->search(\%extra_cond, \%attrs)>), conditions
+When a resultset is chained from another resultset (e.g.:
+C<< my $new_rs = $old_rs->search(\%extra_cond, \%attrs) >>), conditions
and attributes with the same keys need resolving.
If any of L</columns>, L</select>, L</as> are present, they reset the
For a list of attributes that can be passed to C<search>, see
L</ATTRIBUTES>. For more examples of using this function, see
-L<Searching|DBIx::Class::Manual::Cookbook/Searching>. For a complete
-documentation for the first argument, see L<SQL::Abstract>
+L<Searching|DBIx::Class::Manual::Cookbook/SEARCHING>. For a complete
+documentation for the first argument, see L<SQL::Abstract/"WHERE CLAUSES">
and its extension L<DBIx::Class::SQLMaker>.
For more help on using joins with search, see L<DBIx::Class::Manual::Joining>.
my $rs = $self->search_rs( @_ );
if (wantarray) {
+ DBIx::Class::_ENV_::ASSERT_NO_INTERNAL_WANTARRAY and my $sog = fail_on_internal_wantarray($rs);
return $rs->all;
}
elsif (defined wantarray) {
method. It is equivalent to calling C<< $schema->search(\[]) >>, but if you
want to ensure columns are bound correctly, use L</search>.
-See L<DBIx::Class::Manual::Cookbook/Searching> and
+See L<DBIx::Class::Manual::Cookbook/SEARCHING> and
L<DBIx::Class::Manual::FAQ/Searching> for searching techniques that do not
require C<search_literal>.
$attrs->{offset} += $min;
$attrs->{rows} = ($max ? ($max - $min + 1) : 1);
return $self->search(undef, $attrs);
- #my $slice = (ref $self)->new($self->result_source, $attrs);
- #return (wantarray ? $slice->all : $slice);
}
=head2 next
if (
$aliastypes->{multiplying}{$sel_alias}
or
- scalar grep { $aliastypes->{multiplying}{(values %$_)[0]} } @{ $aliastypes->{selecting}{$sel_alias}{-parents} }
+ $aliastypes->{premultiplied}{$sel_alias}
) {
$multiplied_selectors->{$_} = 1 for values %{$aliastypes->{selecting}{$sel_alias}{-seen_columns}}
}
: 'classic_nonpruning'
;
- # $args and $attrs to _mk_row_parser are seperated to delineate what is
+ # $args and $attrs to _mk_row_parser are separated to delineate what is
# core collapser stuff and what is dbic $rs specific
@{$self->{_row_parser}{$parser_type}}{qw(cref nullcheck)} = $rsrc->_mk_row_parser({
eval => 1,
# can't work without it). Add an explicit check for the *main*
# result, hopefully this will gradually weed out such errors
#
- # FIXME - this is a temporary kludge that reduces perfromance
+ # FIXME - this is a temporary kludge that reduces performance
# It is however necessary for the time being
my ($unrolled_non_null_cols_to_check, $err);
if (!$needs_subq and @{$attrs->{from}} > 1) {
($attrs->{from}, $join_classifications) =
- $storage->_prune_unused_joins ($attrs->{from}, $attrs->{select}, $self->{cond}, $attrs);
+ $storage->_prune_unused_joins ($attrs);
# any non-pruneable non-local restricting joins imply subq
$needs_subq = defined List::Util::first { $_ ne $attrs->{alias} } keys %{ $join_classifications->{restricting} || {} };
if (
$existing_group_by
or
+ # we do not need to check pre-multipliers, since if the premulti is there, its
+ # parent (who is multi) will be there too
keys %{ $join_classifications->{multiplying} || {} }
) {
# make sure if there is a supplied group_by it matches the columns compiled above
element should be a data value in the earlier specified column order.
For example:
- $Arstist_rs->populate([
+ $schema->resultset("Artist")->populate([
[ qw( artistid name ) ],
[ 100, 'A Formally Unknown Singer' ],
[ 101, 'A singer that jumped the shark two albums ago' ],
sub populate {
my $self = shift;
- # cruft placed in standalone method
- my $data = $self->_normalize_populate_args(@_);
+ if (defined wantarray) {
+ # cruft placed in standalone method
+ my $data = $self->_normalize_populate_to_hashref(@_);
- return unless @$data;
+ return unless @$data;
- if(defined wantarray) {
my @created = map { $self->create($_) } @$data;
return wantarray ? @created : \@created;
}
else {
- my $first = $data->[0];
+ # cruft placed in standalone method
+ my $data = $self->_normalize_populate_to_arrayref(@_);
+
+ return unless @$data;
+
+ my $first = shift @$data;
# if a column is a registered relationship, and is a non-blessed hash/array, consider
# it relationship data
my (@rels, @columns);
my $rsrc = $self->result_source;
my $rels = { map { $_ => $rsrc->relationship_info($_) } $rsrc->relationships };
- for (keys %$first) {
- my $ref = ref $first->{$_};
- $rels->{$_} && ($ref eq 'ARRAY' or $ref eq 'HASH')
- ? push @rels, $_
- : push @columns, $_
- ;
+
+ if (ref $data->[0] eq 'CODE') {
+ @columns = @$first;
+ }
+ else {
+ for my $index (0..$#$first) {
+ my $col = $first->[$index];
+ my $val = $data->[0][$index];
+ my $ref = ref $val;
+ $rels->{$col} && ($ref eq 'ARRAY' or $ref eq 'HASH')
+ ? push @rels, $col
+ : push @columns, $col
+ ;
+ }
}
my @pks = $rsrc->primary_columns;
+ my %colmap = map { $first->[$_] => $_ } (0..$#$first);
## do the belongs_to relationships
foreach my $index (0..$#$data) {
+ next if (ref $data->[$index] eq 'CODE');
+
+ # delegate to list context populate()/create() for any dataset without
+ # primary keys with specified relationships
- # delegate to create() for any dataset without primary keys with specified relationships
- if (grep { !defined $data->[$index]->{$_} } @pks ) {
+ if (grep { !defined $colmap{$_} } @pks) {
for my $r (@rels) {
- if (grep { ref $data->[$index]{$r} eq $_ } qw/HASH ARRAY/) { # a related set must be a HASH or AoH
- my @ret = $self->populate($data);
+ if (grep { ref $data->[$index][$colmap{$r}] eq $_ } qw/HASH ARRAY/) { # a related set must be a HASH or AoH
+ # we pass in @_ here as it is the pre-normalized version of $data.
+ # in list/scalar context it is expecting this form so we save a step
+ # converting it back
+ my @ret = $self->populate(@_);
return;
}
}
}
foreach my $rel (@rels) {
- next unless ref $data->[$index]->{$rel} eq "HASH";
- my $result = $self->related_resultset($rel)->create($data->[$index]->{$rel});
+ next unless ref $data->[$index][$colmap{$rel}] eq "HASH";
+ my $result = $self->related_resultset($rel)->create($data->[$index][$colmap{$rel}]);
my ($reverse_relname, $reverse_relinfo) = %{$rsrc->reverse_relationship_info($rel)};
my $related = $result->result_source->_resolve_condition(
$reverse_relinfo->{cond},
$rel,
);
- delete $data->[$index]->{$rel};
- $data->[$index] = {%{$data->[$index]}, %$related};
-
- push @columns, keys %$related if $index == 0;
+ $data->[$index][$colmap{$rel}] = $related->{$rel};
+ if ($index == 0) {
+ for my $col (keys %$related) {
+ $colmap{$col} = $colmap{$rel};
+ push @columns, $col;
+ }
+ }
}
}
## inherit the data locked in the conditions of the resultset
my ($rs_data) = $self->_merge_with_rscond({});
- delete @{$rs_data}{@columns};
## do bulk insert on current row
$rsrc->storage->insert_bulk(
$rsrc,
[@columns, keys %$rs_data],
- [ map { [ @$_{@columns}, values %$rs_data ] } @$data ],
+ [ map { [ @$_[@colmap{@columns}], values %$rs_data ] } @$data ],
);
## do the has_many relationships
foreach my $item (@$data) {
+ next if (ref $item eq 'CODE');
my $main_row;
foreach my $rel (@rels) {
- next unless ref $item->{$rel} eq "ARRAY" && @{ $item->{$rel} };
+ next unless ref $item->[$colmap{$rel}] eq "ARRAY" && @{ $item->[$colmap{$rel}] };
- $main_row ||= $self->new_result({map { $_ => $item->{$_} } @pks});
+ $main_row ||= $self->new_result({map { $_ => $item->[$colmap{$_}] } @pks});
my $child = $main_row->$rel;
$rel,
);
- my @rows_to_add = ref $item->{$rel} eq 'ARRAY' ? @{$item->{$rel}} : ($item->{$rel});
+ my @rows_to_add = do {
+ if (ref $item->[ $colmap{$rel} ] eq 'ARRAY') {
+ @{$item->[$colmap{$rel}]};
+ }
+ else {
+ ($item->[$colmap{$rel}]);
+ }
+ };
+
+ # only AoH supports multicreate per doc specs
my @populate = map { {%$_, %$related} } @rows_to_add;
- $child->populate( \@populate );
+ $child->populate(\@populate);
}
}
}
}
-
-# populate() argumnets went over several incarnations
+# populate() arguments went over several incarnations
# What we ultimately support is AoH
-sub _normalize_populate_args {
+sub _normalize_populate_to_hashref {
my ($self, $arg) = @_;
if (ref $arg eq 'ARRAY') {
$self->throw_exception('Populate expects an arrayref of hashrefs or arrayref of arrayrefs');
}
+sub _normalize_populate_to_arrayref {
+ my ($self, $args) = @_;
+
+ my @normalized;
+ my @cols;
+
+ARG: for (my $idx = 0; $idx <= $#$args; $idx++) {
+ my $arg = $args->[$idx];
+
+ if ($idx == 0) {
+ if (ref $arg eq 'ARRAY') {
+ @cols = @$arg;
+ }
+ elsif (ref $arg eq 'HASH') {
+ @cols = keys %$arg;
+ push @normalized, [@{$arg}{@cols}];
+ }
+ else {
+ $self->throw_exception('Populate expects first record to either be a hashref or arrayref of cols');
+ }
+ next ARG;
+ }
+
+ if (ref $arg eq 'ARRAY' || ref $arg eq 'CODE') {
+ push @normalized, $arg;
+ }
+ elsif (ref $arg eq 'HASH') {
+ push @normalized, [@{$arg}{@cols}];
+ }
+ else {
+ $self->throw_exception('Populate expects either arrayref, coderef, or hashref');
+ }
+ }
+
+ return [\@cols, @normalized];
+}
+
+=begin comment
+
+ # AoH
+ if (ref $arg eq 'ARRAY' && @$arg > 0 && ref $arg->[0] eq 'HASH') {
+ @cols = sort keys %{$arg->[0]} if $idx == 1;
+ push @normalized, [ @{$args->[0]}{@cols} ];
+ next ARG;
+ }
+ # AoA
+ elsif (ref $arg eq 'ARRAY' && @$arg > 0 && ref $arg->[0] eq 'ARRAY') {
+ my @ret;
+ my @colnames = @{$arg->[0]};
+ foreach my $values (@{$arg}[1 .. $#$arg]) {
+ push @ret, { map { $colnames[$_] => $values->[$_] } (0 .. $#colnames) };
+ }
+ push @normalized, \@ret;
+ next ARG;
+ }
+ # AoS, Arrayrefref (subq)
+ elsif (ref $arg eq 'ARRAY' && ref $arg->[0] eq '') {
+ push @normalized, $arg, $args->[$idx+1];
+ $idx += 1; # we are consuming the next element, skip it next time
+ }
+ # Coderef
+ elsif (ref $arg eq 'CODE') {
+ push @normalized, $arg;
+ }
+=cut
+
=head2 pager
=over 4
);
}
else {
- # precendence must be given to passed values over values inherited from
+ # precedence must be given to passed values over values inherited from
# the cond, so the order here is important.
my $collapsed_cond = $self->_collapse_cond($self->{cond});
my %implied = %{$self->_remove_alias($collapsed_cond, $alias)};
# determines if the resultset defines at least one
# of the attributes supplied
#
-# used to determine if a subquery is neccessary
+# used to determine if a subquery is necessary
#
# supports some virtual attributes:
# -join
{ artist => 'fred' }, { key => 'artists' });
$cd->cd_to_producer->find_or_new({ producer => $producer },
- { key => 'primary });
+ { key => 'primary' });
Find an existing record from this resultset using L</find>. If none exists,
instantiate a new result object and return it. The object will not be saved
#XXX - temp fix for result_class bug. There likely is a more elegant fix -groditi
delete @{$attrs}{qw(result_class alias)};
- my $related_cache;
-
- if (my $cache = $self->get_cache) {
- $related_cache = [ map
- { @{$_->related_resultset($rel)->get_cache||[]} }
- @$cache
- ];
- }
-
my $rel_source = $rsrc->related_source($rel);
my $new = do {
where => $attrs->{where},
});
};
- $new->set_cache($related_cache) if $related_cache;
+
+ if (my $cache = $self->get_cache) {
+ my @related_cache = map
+ { @{$_->related_resultset($rel)->get_cache||[]} }
+ @$cache
+ ;
+
+ $new->set_cache(\@related_cache) if @related_cache;
+ }
+
$new;
};
}
my $source = $self->result_source;
my $alias = $attrs->{alias};
+ $self->throw_exception("Specifying distinct => 1 in conjunction with collapse => 1 is unsupported")
+ if $attrs->{collapse} and $attrs->{distinct};
+
# default selection list
$attrs->{columns} = [ $source->columns ]
unless List::Util::first { exists $attrs->{$_} } qw/columns cols select as/;
$attrs->{group_by} = [ $attrs->{group_by} ];
}
- # generate the distinct induced group_by early, as prefetch will be carried via a
- # subquery (since a group_by is present)
- if (delete $attrs->{distinct}) {
- if ($attrs->{group_by}) {
- carp_unique ("Useless use of distinct on a grouped resultset ('distinct' is ignored when a 'group_by' is present)");
- }
- else {
- $attrs->{_grouped_by_distinct} = 1;
- # distinct affects only the main selection part, not what prefetch may
- # add below.
- $attrs->{group_by} = $source->storage->_group_over_selection($attrs);
- }
- }
# generate selections based on the prefetch helper
- my $prefetch;
+ my ($prefetch, @prefetch_select, @prefetch_as);
$prefetch = $self->_merge_joinpref_attr( {}, delete $attrs->{prefetch} )
if defined $attrs->{prefetch};
$self->throw_exception("Unable to prefetch, resultset contains an unnamed selector $attrs->{_dark_selector}{string}")
if $attrs->{_dark_selector};
+ $self->throw_exception("Specifying prefetch in conjunction with an explicit collapse => 0 is unsupported")
+ if defined $attrs->{collapse} and ! $attrs->{collapse};
+
$attrs->{collapse} = 1;
# this is a separate structure (we don't look in {from} directly)
my @prefetch = $source->_resolve_prefetch( $prefetch, $alias, $join_map );
- push @{ $attrs->{select} }, (map { $_->[0] } @prefetch);
- push @{ $attrs->{as} }, (map { $_->[1] } @prefetch);
- }
-
- if ( List::Util::first { $_ =~ /\./ } @{$attrs->{as}} ) {
- $attrs->{_related_results_construction} = 1;
+ # save these for after distinct resolution
+ @prefetch_select = map { $_->[0] } @prefetch;
+ @prefetch_as = map { $_->[1] } @prefetch;
}
# run through the resulting joinstructure (starting from our current slot)
- # and unset collapse if proven unnesessary
+ # and unset collapse if proven unnecessary
#
# also while we are at it find out if the current root source has
# been premultiplied by previous related_source chaining
}
}
+ # generate the distinct induced group_by before injecting the prefetched select/as parts
+ if (delete $attrs->{distinct}) {
+ if ($attrs->{group_by}) {
+ carp_unique ("Useless use of distinct on a grouped resultset ('distinct' is ignored when a 'group_by' is present)");
+ }
+ else {
+ $attrs->{_grouped_by_distinct} = 1;
+ # distinct affects only the main selection part, not what prefetch may add below
+ ($attrs->{group_by}, my $new_order) = $source->storage->_group_over_selection($attrs);
+
+ # FIXME possibly ignore a rewritten order_by (may turn out to be an issue)
+ # The thinking is: if we are collapsing the subquerying prefetch engine will
+ # rip stuff apart for us anyway, and we do not want to have a potentially
+ # function-converted external order_by
+ # ( there is an explicit if ( collapse && _grouped_by_distinct ) check in DBIHacks )
+ $attrs->{order_by} = $new_order unless $attrs->{collapse};
+ }
+ }
+
+ # inject prefetch-bound selection (if any)
+ push @{$attrs->{select}}, @prefetch_select;
+ push @{$attrs->{as}}, @prefetch_as;
+
+ # whether we can get away with the dumbest (possibly DBI-internal) collapser
+ if ( List::Util::first { $_ =~ /\./ } @{$attrs->{as}} ) {
+ $attrs->{_related_results_construction} = 1;
+ }
+
# if both page and offset are specified, produce a combined offset
# even though it doesn't make much sense, this is what pre 081xx has
# been doing
If an L</order_by> is already declared, and orders the resultset in a way that
makes collapsing as described above impossible (e.g. C<< ORDER BY
has_many_rel.column >> or C<ORDER BY RANDOM()>), DBIC will automatically
-switch to "eager" mode and slurp the entire resultset before consturcting the
+switch to "eager" mode and slurp the entire resultset before constructing the
first object returned by L</next>.
Setting this attribute on a resultset that does not join any has_many
=back
-Set to 1 to group by all columns. If the resultset already has a group_by
-attribute, this setting is ignored and an appropriate warning is issued.
+Set to 1 to automatically generate a L</group_by> clause based on the selection
+(including intelligent handling of L</order_by> contents). Note that the group
+criteria calculation takes place over the B<final> selection. This includes
+any L</+columns>, L</+select> or L</order_by> additions in subsequent
+L</search> calls, and standalone columns selected via
+L<DBIx::Class::ResultSetColumn> (L</get_column>). A notable exception are the
+extra selections specified via L</prefetch> - such selections are explicitly
+excluded from group criteria calculations.
+
+If the final ResultSet also explicitly defines a L</group_by> attribute, this
+setting is ignored and an appropriate warning is issued.
=head2 where
=item dbic_colname
Used to fill in missing sqlt_datatype and sqlt_size attributes (if they are
-explicitly specified they are never overriden). Also used by some weird DBDs,
+explicitly specified they are never overridden). Also used by some weird DBDs,
where the column name should be available at bind_param time (e.g. Oracle).
=back