if (
$aliastypes->{multiplying}{$sel_alias}
or
- scalar grep { $aliastypes->{multiplying}{(values %$_)[0]} } @{ $aliastypes->{selecting}{$sel_alias}{-parents} }
+ $aliastypes->{premultiplied}{$sel_alias}
) {
$multiplied_selectors->{$_} = 1 for values %{$aliastypes->{selecting}{$sel_alias}{-seen_columns}}
}
if (
$existing_group_by
or
+ # we do not need to check pre-multipliers, since if the premulti is there, its
+ # parent (who is multi) will be there too
keys %{ $join_classifications->{multiplying} || {} }
) {
# make sure if there is a supplied group_by it matches the columns compiled above
sub populate {
my $self = shift;
- # cruft placed in standalone method
- my $data = $self->_normalize_populate_args(@_);
+ if (defined wantarray) {
+ # cruft placed in standalone method
+ my $data = $self->_normalize_populate_to_hashref(@_);
- return unless @$data;
+ return unless @$data;
- if(defined wantarray) {
my @created = map { $self->create($_) } @$data;
return wantarray ? @created : \@created;
}
else {
- my $first = $data->[0];
+ # cruft placed in standalone method
+ my $data = $self->_normalize_populate_to_arrayref(@_);
+
+ return unless @$data;
+ use DDP; p $data;
+
+ my $first = shift @$data;
# if a column is a registered relationship, and is a non-blessed hash/array, consider
# it relationship data
my (@rels, @columns);
my $rsrc = $self->result_source;
my $rels = { map { $_ => $rsrc->relationship_info($_) } $rsrc->relationships };
- for (keys %$first) {
- my $ref = ref $first->{$_};
- $rels->{$_} && ($ref eq 'ARRAY' or $ref eq 'HASH')
- ? push @rels, $_
- : push @columns, $_
- ;
+
+ if (ref $data->[0] eq 'CODE') {
+ @columns = @$first;
+ }
+ else {
+ for my $index (0..$#$first) {
+ my $col = $first->[$index];
+ my $val = $data->[0][$index];
+ my $ref = ref $val;
+ $rels->{$col} && ($ref eq 'ARRAY' or $ref eq 'HASH')
+ ? push @rels, $col
+ : push @columns, $col
+ ;
+ }
}
my @pks = $rsrc->primary_columns;
+ my %colmap = map { $first->[$_] => $_ } (0..$#$first);
## do the belongs_to relationships
foreach my $index (0..$#$data) {
+ next if (ref $data->[$index] eq 'CODE');
- # delegate to create() for any dataset without primary keys with specified relationships
- if (grep { !defined $data->[$index]->{$_} } @pks ) {
+ # delegate to list context populate()/create() for any dataset without
+ # primary keys with specified relationships
+
+ if (grep { !defined $colmap{$_} } @pks) {
for my $r (@rels) {
- if (grep { ref $data->[$index]{$r} eq $_ } qw/HASH ARRAY/) { # a related set must be a HASH or AoH
- my @ret = $self->populate($data);
+ if (grep { ref $data->[$index][$colmap{$r}] eq $_ } qw/HASH ARRAY/) { # a related set must be a HASH or AoH
+ # we pass in @_ here as it is the pre-normalized version of $data.
+ # in list/scalar context it is expecting this form so we save a step
+ # converting it back
+ my @ret = $self->populate(@_);
return;
}
}
}
foreach my $rel (@rels) {
- next unless ref $data->[$index]->{$rel} eq "HASH";
- my $result = $self->related_resultset($rel)->create($data->[$index]->{$rel});
+ next unless ref $data->[$index][$colmap{$rel}] eq "HASH";
+ my $result = $self->related_resultset($rel)->create($data->[$index][$colmap{$rel}]);
my ($reverse_relname, $reverse_relinfo) = %{$rsrc->reverse_relationship_info($rel)};
my $related = $result->result_source->_resolve_condition(
$reverse_relinfo->{cond},
$rel,
);
- delete $data->[$index]->{$rel};
- $data->[$index] = {%{$data->[$index]}, %$related};
-
- push @columns, keys %$related if $index == 0;
+ $data->[$index][$colmap{$rel}] = $related->{$rel};
+ if ($index == 0) {
+ for my $col (keys %$related) {
+ $colmap{$col} = $colmap{$rel};
+ push @columns, $col;
+ }
+ }
}
}
## inherit the data locked in the conditions of the resultset
my ($rs_data) = $self->_merge_with_rscond({});
- delete @{$rs_data}{@columns};
## do bulk insert on current row
$rsrc->storage->insert_bulk(
$rsrc,
[@columns, keys %$rs_data],
- [ map { [ @$_{@columns}, values %$rs_data ] } @$data ],
+ [ map { [ @$_[@colmap{@columns}], values %$rs_data ] } @$data ],
);
## do the has_many relationships
foreach my $item (@$data) {
+ next if (ref $item eq 'CODE');
my $main_row;
foreach my $rel (@rels) {
- next unless ref $item->{$rel} eq "ARRAY" && @{ $item->{$rel} };
+ next unless ref $item->[$colmap{$rel}] eq "ARRAY" && @{ $item->[$colmap{$rel}] };
- $main_row ||= $self->new_result({map { $_ => $item->{$_} } @pks});
+ $main_row ||= $self->new_result({map { $_ => $item->[$colmap{$_}] } @pks});
my $child = $main_row->$rel;
$rel,
);
- my @rows_to_add = ref $item->{$rel} eq 'ARRAY' ? @{$item->{$rel}} : ($item->{$rel});
+ my @rows_to_add = do {
+ if (ref $item->[ $colmap{$rel} ] eq 'ARRAY') {
+ @{$item->[$colmap{$rel}]};
+ }
+ else {
+ ($item->[$colmap{$rel}]);
+ }
+ };
+
+ # only AoH supports multicreate per doc specs
my @populate = map { {%$_, %$related} } @rows_to_add;
- $child->populate( \@populate );
+ $child->populate(\@populate);
}
}
}
}
-
# populate() arguments went over several incarnations
# What we ultimately support is AoH
-sub _normalize_populate_args {
+sub _normalize_populate_to_hashref {
my ($self, $arg) = @_;
if (ref $arg eq 'ARRAY') {
$self->throw_exception('Populate expects an arrayref of hashrefs or arrayref of arrayrefs');
}
+sub _normalize_populate_to_arrayref {
+ my ($self, $args) = @_;
+
+ my @normalized;
+ my @cols;
+
+ARG: for (my $idx = 0; $idx <= $#$args; $idx++) {
+ my $arg = $args->[$idx];
+
+ if ($idx == 0) {
+ if (ref $arg eq 'ARRAY') {
+ @cols = @$arg;
+ }
+ elsif (ref $arg eq 'HASH') {
+ @cols = keys %$arg;
+ push @normalized, [@{$arg}{@cols}];
+ }
+ else {
+ $self->throw_exception('Populate expects first record to either be a hashref or arrayref of cols');
+ }
+ next ARG;
+ }
+
+ if (ref $arg eq 'ARRAY' || ref $arg eq 'CODE') {
+ push @normalized, $arg;
+ }
+ elsif (ref $arg eq 'HASH') {
+ push @normalized, [@{$arg}{@cols}];
+ }
+ else {
+ $self->throw_exception('Populate expects either arrayref, coderef, or hashref');
+ }
+ }
+
+ return [\@cols, @normalized];
+}
+
+=begin
+
+ # AoH
+ if (ref $arg eq 'ARRAY' && @$arg > 0 && ref $arg->[0] eq 'HASH') {
+ @cols = sort keys %{$arg->[0]} if $idx == 1;
+ push @normalized, [ @{$args->[0]}{@cols} ];
+ next ARG;
+ }
+ # AoA
+ elsif (ref $arg eq 'ARRAY' && @$arg > 0 && ref $arg->[0] eq 'ARRAY') {
+ my @ret;
+ my @colnames = @{$arg->[0]};
+ foreach my $values (@{$arg}[1 .. $#$arg]) {
+ push @ret, { map { $colnames[$_] => $values->[$_] } (0 .. $#colnames) };
+ }
+ push @normalized, \@ret;
+ next ARG;
+ }
+ # AoS, Arrayrefref (subq)
+ elsif (ref $arg eq 'ARRAY' && ref $arg->[0] eq '') {
+ push @normalized, $arg, $args->[$idx+1];
+ $idx += 1; # we are consuming the next element, skip it next time
+ }
+ # Coderef
+ elsif (ref $arg eq 'CODE') {
+ push @normalized, $arg;
+ }
+=cut
+
=head2 pager
=over 4
my $source = $self->result_source;
my $alias = $attrs->{alias};
+ $self->throw_exception("Specifying distinct => 1 in conjunction with collapse => 1 is unsupported")
+ if $attrs->{collapse} and $attrs->{distinct};
+
# default selection list
$attrs->{columns} = [ $source->columns ]
unless List::Util::first { exists $attrs->{$_} } qw/columns cols select as/;
$attrs->{group_by} = [ $attrs->{group_by} ];
}
- # generate the distinct induced group_by early, as prefetch will be carried via a
- # subquery (since a group_by is present)
- if (delete $attrs->{distinct}) {
- if ($attrs->{group_by}) {
- carp_unique ("Useless use of distinct on a grouped resultset ('distinct' is ignored when a 'group_by' is present)");
- }
- else {
- $attrs->{_grouped_by_distinct} = 1;
- # distinct affects only the main selection part, not what prefetch may
- # add below.
- $attrs->{group_by} = $source->storage->_group_over_selection($attrs);
- }
- }
# generate selections based on the prefetch helper
- my $prefetch;
+ my ($prefetch, @prefetch_select, @prefetch_as);
$prefetch = $self->_merge_joinpref_attr( {}, delete $attrs->{prefetch} )
if defined $attrs->{prefetch};
$self->throw_exception("Unable to prefetch, resultset contains an unnamed selector $attrs->{_dark_selector}{string}")
if $attrs->{_dark_selector};
+ $self->throw_exception("Specifying prefetch in conjunction with an explicit collapse => 0 is unsupported")
+ if defined $attrs->{collapse} and ! $attrs->{collapse};
+
$attrs->{collapse} = 1;
# this is a separate structure (we don't look in {from} directly)
my @prefetch = $source->_resolve_prefetch( $prefetch, $alias, $join_map );
- push @{ $attrs->{select} }, (map { $_->[0] } @prefetch);
- push @{ $attrs->{as} }, (map { $_->[1] } @prefetch);
- }
-
- if ( List::Util::first { $_ =~ /\./ } @{$attrs->{as}} ) {
- $attrs->{_related_results_construction} = 1;
+ # save these for after distinct resolution
+ @prefetch_select = map { $_->[0] } @prefetch;
+ @prefetch_as = map { $_->[1] } @prefetch;
}
# run through the resulting joinstructure (starting from our current slot)
}
}
+ # generate the distinct induced group_by before injecting the prefetched select/as parts
+ if (delete $attrs->{distinct}) {
+ if ($attrs->{group_by}) {
+ carp_unique ("Useless use of distinct on a grouped resultset ('distinct' is ignored when a 'group_by' is present)");
+ }
+ else {
+ $attrs->{_grouped_by_distinct} = 1;
+ # distinct affects only the main selection part, not what prefetch may add below
+ ($attrs->{group_by}, my $new_order) = $source->storage->_group_over_selection($attrs);
+
+ # FIXME possibly ignore a rewritten order_by (may turn out to be an issue)
+ # The thinking is: if we are collapsing the subquerying prefetch engine will
+ # rip stuff apart for us anyway, and we do not want to have a potentially
+ # function-converted external order_by
+ # ( there is an explicit if ( collapse && _grouped_by_distinct ) check in DBIHacks )
+ $attrs->{order_by} = $new_order unless $attrs->{collapse};
+ }
+ }
+
+ # inject prefetch-bound selection (if any)
+ push @{$attrs->{select}}, @prefetch_select;
+ push @{$attrs->{as}}, @prefetch_as;
+
+ # whether we can get away with the dumbest (possibly DBI-internal) collapser
+ if ( List::Util::first { $_ =~ /\./ } @{$attrs->{as}} ) {
+ $attrs->{_related_results_construction} = 1;
+ }
+
# if both page and offset are specified, produce a combined offset
# even though it doesn't make much sense, this is what pre 081xx has
# been doing
=back
-Set to 1 to group by all columns. If the resultset already has a group_by
-attribute, this setting is ignored and an appropriate warning is issued.
+Set to 1 to automatically generate a L</group_by> clause based on the selection
+(including intelligent handling of L</order_by> contents). Note that the group
+criteria calculation takes place over the B<final> selection. This includes
+any L</+columns>, L</+select> or L</order_by> additions in subsequent
+L</search> calls, and standalone columns selected via
+L<DBIx::Class::ResultSetColumn> (L</get_column>). A notable exception are the
+extra selections specified via L</prefetch> - such selections are explicitly
+excluded from group criteria calculations.
+
+If the final ResultSet also explicitly defines a L</group_by> attribute, this
+setting is ignored and an appropriate warning is issued.
=head2 where