. "corresponding to the columns of the specified unique constraint '$constraint_name'"
) unless @c_cols == @_;
- $call_cond = {};
@{$call_cond}{@c_cols} = @_;
}
- my %related;
+ # process relationship data if any
for my $key (keys %$call_cond) {
if (
- my $keyref = ref($call_cond->{$key})
+ length ref($call_cond->{$key})
and
my $relinfo = $rsrc->relationship_info($key)
+ and
+      # implicitly skip has_many's (likely part of a multi-create)
+ (ref (my $val = delete $call_cond->{$key}) ne 'ARRAY' )
) {
- my $val = delete $call_cond->{$key};
-
- next if $keyref eq 'ARRAY'; # has_many for multi_create
-
my ($rel_cond, $crosstable) = $rsrc->_resolve_condition(
$relinfo->{cond}, $val, $key, $key
);
$self->throw_exception("Complex condition via relationship '$key' is unsupported in find()")
if $crosstable or ref($rel_cond) ne 'HASH';
- # supplement
- @related{keys %$rel_cond} = values %$rel_cond;
+ # supplement condition
+ # relationship conditions take precedence (?)
+ @{$call_cond}{keys %$rel_cond} = values %$rel_cond;
}
}
- # relationship conditions take precedence (?)
- @{$call_cond}{keys %related} = values %related;
-
my $alias = exists $attrs->{alias} ? $attrs->{alias} : $self->{attrs}{alias};
my $final_cond;
if (defined $constraint_name) {
$final_cond = $self->_qualify_cond_columns (
- $self->_build_unique_cond (
- $constraint_name,
- $call_cond,
+ $self->result_source->_minimal_valueset_satisfying_constraint(
+ constraint_name => $constraint_name,
+ values => ($self->_merge_with_rscond($call_cond))[0],
+ carp_on_nulls => 1,
),
$alias,
# relationship
}
else {
+ my (@unique_queries, %seen_column_combinations, $ci, @fc_exceptions);
+
# no key was specified - fall down to heuristics mode:
# run through all unique queries registered on the resultset, and
# 'OR' all qualifying queries together
- my (@unique_queries, %seen_column_combinations);
- for my $c_name ($rsrc->unique_constraint_names) {
+ #
+ # always start from 'primary' if it exists at all
+ for my $c_name ( sort {
+ $a eq 'primary' ? -1
+ : $b eq 'primary' ? 1
+ : $a cmp $b
+ } $rsrc->unique_constraint_names) {
+
next if $seen_column_combinations{
join "\x00", sort $rsrc->unique_constraint_columns($c_name)
}++;
- push @unique_queries, try {
- $self->_build_unique_cond ($c_name, $call_cond, 'croak_on_nulls')
- } || ();
+ try {
+ push @unique_queries, $self->_qualify_cond_columns(
+ $self->result_source->_minimal_valueset_satisfying_constraint(
+ constraint_name => $c_name,
+ values => ($self->_merge_with_rscond($call_cond))[0],
+ columns_info => ($ci ||= $self->result_source->columns_info),
+ ),
+ $alias
+ );
+ }
+ catch {
+ push @fc_exceptions, $_ if $_ =~ /\bFilterColumn\b/;
+ };
}
- $final_cond = @unique_queries
- ? [ map { $self->_qualify_cond_columns($_, $alias) } @unique_queries ]
- : $self->_non_unique_find_fallback ($call_cond, $attrs)
+ $final_cond =
+ @unique_queries ? \@unique_queries
+ : @fc_exceptions ? $self->throw_exception(join "; ", map { $_ =~ /(.*) at .+ line \d+$/s } @fc_exceptions )
+ : $self->_non_unique_find_fallback ($call_cond, $attrs)
;
}
}
sub _build_unique_cond {
- my ($self, $constraint_name, $extra_cond, $croak_on_null) = @_;
-
- my @c_cols = $self->result_source->unique_constraint_columns($constraint_name);
-
- # combination may fail if $self->{cond} is non-trivial
- my ($final_cond) = try {
- $self->_merge_with_rscond ($extra_cond)
- } catch {
- +{ %$extra_cond }
- };
-
- # trim out everything not in $columns
- $final_cond = { map {
- exists $final_cond->{$_}
- ? ( $_ => $final_cond->{$_} )
- : ()
- } @c_cols };
-
- if (my @missing = grep
- { ! ($croak_on_null ? defined $final_cond->{$_} : exists $final_cond->{$_}) }
- (@c_cols)
- ) {
- $self->throw_exception( sprintf ( "Unable to satisfy requested constraint '%s', no values for column(s): %s",
- $constraint_name,
- join (', ', map { "'$_'" } @missing),
- ) );
- }
-
- if (
- !$croak_on_null
- and
- !$ENV{DBIC_NULLABLE_KEY_NOWARN}
- and
- my @undefs = sort grep { ! defined $final_cond->{$_} } (keys %$final_cond)
- ) {
- carp_unique ( sprintf (
- "NULL/undef values supplied for requested unique constraint '%s' (NULL "
- . 'values in column(s): %s). This is almost certainly not what you wanted, '
- . 'though you can set DBIC_NULLABLE_KEY_NOWARN to disable this warning.',
- $constraint_name,
- join (', ', map { "'$_'" } @undefs),
- ));
- }
-
- return $final_cond;
+ carp_unique sprintf
+ '_build_unique_cond is a private method, and moreover is about to go '
+ . 'away. Please contact the development team at %s if you believe you '
+ . 'have a genuine use for this method, in order to discuss alternatives.',
+ DBIx::Class::_ENV_::HELP_URL,
+ ;
+
+ my ($self, $constraint_name, $cond, $croak_on_null) = @_;
+
+ $self->result_source->_minimal_valueset_satisfying_constraint(
+ constraint_name => $constraint_name,
+ values => $cond,
+ carp_on_nulls => !$croak_on_null
+ );
}
=head2 search_related
#
# crude unscientific benchmarking indicated the shortcut eval is not worth it for
# this particular resultset size
- elsif (@$rows < 60) {
+ elsif ( $self->{_result_inflator}{is_core_row} and @$rows < 60 ) {
for my $r (@$rows) {
$r = $inflator_cref->($res_class, $rsrc, { map { $infmap->[$_] => $r->[$_] } (0..$#$infmap) } );
}
}
else {
eval sprintf (
- '$_ = $inflator_cref->($res_class, $rsrc, { %s }) for @$rows',
- join (', ', map { "\$infmap->[$_] => \$_->[$_]" } 0..$#$infmap )
+ ( $self->{_result_inflator}{is_core_row}
+ ? '$_ = $inflator_cref->($res_class, $rsrc, { %s }) for @$rows'
+ # a custom inflator may be a multiplier/reductor - put it in direct list ctx
+ : '@$rows = map { $inflator_cref->($res_class, $rsrc, { %s } ) } @$rows'
+ ),
+ ( join (', ', map { "\$infmap->[$_] => \$_->[$_]" } 0..$#$infmap ) )
);
}
}
$next_cref ? ( $next_cref, $self->{_stashed_rows} = [] ) : (),
);
- # Special-case multi-object HRI - there is no $inflator_cref pass
- unless ($self->{_result_inflator}{is_hri}) {
+ # simple in-place substitution, does not regrow $rows
+ if ($self->{_result_inflator}{is_core_row}) {
$_ = $inflator_cref->($res_class, $rsrc, @$_) for @$rows
}
+ # Special-case multi-object HRI - there is no $inflator_cref pass at all
+ elsif ( ! $self->{_result_inflator}{is_hri} ) {
+ # the inflator may be a multiplier/reductor - put it in list ctx
+ @$rows = map { $inflator_cref->($res_class, $rsrc, @$_) } @$rows;
+ }
}
# The @$rows check seems odd at first - why wouldn't we want to warn
sub populate {
my $self = shift;
- my ($data, $guard);
-
# this is naive and just a quick check
# the types will need to be checked more thoroughly when the
# multi-source populate gets added
- if (ref $_[0] eq 'ARRAY') {
- return unless @{$_[0]};
-
- $data = $_[0] if (ref $_[0][0] eq 'HASH' or ref $_[0][0] eq 'ARRAY');
- }
-
- $self->throw_exception('Populate expects an arrayref of hashrefs or arrayref of arrayrefs')
- unless $data;
+ my $data = (
+ ref $_[0] eq 'ARRAY'
+ and
+ ( @{$_[0]} or return )
+ and
+ ( ref $_[0][0] eq 'HASH' or ref $_[0][0] eq 'ARRAY' )
+ and
+ $_[0]
+ ) or $self->throw_exception('Populate expects an arrayref of hashrefs or arrayref of arrayrefs');
# FIXME - no cref handling
# At this point assume either hashes or arrays
if(defined wantarray) {
- my @results;
-
- $guard = $self->result_source->schema->storage->txn_scope_guard
- if ( @$data > 2 or ( @$data == 2 and ref $data->[0] eq 'ARRAY' ) );
+ my (@results, $guard);
if (ref $data->[0] eq 'ARRAY') {
+ # column names only, nothing to do
+ return if @$data == 1;
+
+ $guard = $self->result_source->schema->storage->txn_scope_guard
+ if @$data > 2;
+
@results = map
{ my $vals = $_; $self->new_result({ map { $data->[0][$_] => $vals->[$_] } 0..$#{$data->[0]} })->insert }
@{$data}[1 .. $#$data]
;
}
else {
+
+ $guard = $self->result_source->schema->storage->txn_scope_guard
+ if @$data > 1;
+
@results = map { $self->new_result($_)->insert } @$data;
}
# positional(!) explicit column list
if ($i == 0) {
+ # column names only, nothing to do
+ return if @$data == 1;
$colinfo->{$data->[0][$_]} = { pos => $_, name => $data->[0][$_] } and push @$colnames, $data->[0][$_]
for 0 .. $#{$data->[0]};
}
### start work
+ my $guard;
$guard = $rsrc->schema->storage->txn_scope_guard
if $slices_with_rels;
### main source data
# FIXME - need to switch entirely to a coderef-based thing,
# so that large sets aren't copied several times... I think
- $rsrc->storage->insert_bulk(
+ $rsrc->storage->_insert_bulk(
$rsrc,
[ @$colnames, sort keys %$rs_data ],
[ map {
B<NOTE:> You will almost always need a corresponding L</as> attribute when you
use L</select>, to instruct DBIx::Class how to store the result of the column.
-Also note that the L</as> attribute has nothing to do with the SQL-side 'AS'
-identifier aliasing. You can however alias a function, so you can use it in
-e.g. an C<ORDER BY> clause. This is done via the C<-as> B<select function
-attribute> supplied as shown in the example above.
+
+Also note that the L</as> attribute has B<nothing to do> with the SQL-side
+C<AS> identifier aliasing. You B<can> alias a function (so you can use it
+e.g. in an C<ORDER BY> clause); however, this is done via the C<-as>
+B<select function attribute> supplied as shown in the example above.
=head2 +select
slot name in which the column value will be stored within the
L<Row|DBIx::Class::Row> object. The value will then be accessible via this
identifier by the C<get_column> method (or via the object accessor B<if one
-with the same name already exists>) as shown below. The L</as> attribute has
-B<nothing to do> with the SQL-side C<AS>. See L</select> for details.
+with the same name already exists>) as shown below.
+
+The L</as> attribute has B<nothing to do> with the SQL-side identifier
+aliasing C<AS>. See L</select> for details.
$rs = $schema->resultset('Employee')->search(undef, {
select => [
This attribute is a shorthand for specifying a L</join> spec, adding all
columns from the joined related sources as L</+columns> and setting
-L</collapse> to a true value. For example, the following two queries are
-equivalent:
+L</collapse> to a true value. It can be thought of as a rough B<superset>
+of the L</join> attribute.
+
+For example, the following two queries are equivalent:
my $rs = $schema->resultset('Artist')->search({}, {
prefetch => { cds => ['genre', 'tracks' ] },
=back
-HAVING is a select statement attribute that is applied between GROUP BY and
-ORDER BY. It is applied to the after the grouping calculations have been
-done.
+The HAVING operator specifies a B<secondary> condition applied to the set
+after the grouping calculations have been done. In other words it is a
+constraint just like L</where> (and accepts the same
+L<SQL::Abstract syntax|SQL::Abstract/WHERE CLAUSES>) applied to the data
+as it exists after GROUP BY has taken place. Specifying L</having> without
+L</group_by> is a logical mistake, and a fatal error on most RDBMS engines.
+
+E.g.
having => { 'count_employee' => { '>=', 100 } }
or with an in-place function in which case literal SQL is required:
- having => \[ 'count(employee) >= ?', [ count => 100 ] ]
+ having => \[ 'count(employee) >= ?', 100 ]
=head2 distinct