'0+' => \&count,
'bool' => sub { 1; },
fallback => 1;
+use Carp::Clan qw/^DBIx::Class/;
use Data::Page;
use Storable;
use Scalar::Util qw/weaken/;
use DBIx::Class::ResultSetColumn;
}
delete $attrs->{$key};
}
+
+ if (exists $our_attrs->{prefetch}) {
+ $our_attrs->{join} = $self->_merge_attr($our_attrs->{join}, $our_attrs->{prefetch}, 1);
+ }
+
my $new_attrs = { %{$our_attrs}, %{$attrs} };
# merge new where and having into old
{ key => 'artist_title' }
);
-If no C<key> is specified and you explicitly name columns, it searches on all
-unique constraints defined on the source, including the primary key.
-
If the C<key> is specified as C<primary>, it searches only on the primary key.
+If no C<key> is specified, it searches on all unique constraints defined on the
+source, including the primary key.
+
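+For example (assuming a C<CD> source whose primary key is the single column
+C<cdid>):
+
+  my $cd = $schema->resultset('CD')->find(5);
+  my $cd = $schema->resultset('CD')->find(5, { key => 'primary' }); # restrict to the primary key
+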
See also L</find_or_create> and L</update_or_create>. For information on how to
declare unique constraints, see
L<DBIx::Class::ResultSource/add_unique_constraint>.
my $self = shift;
my $attrs = (@_ > 1 && ref $_[$#_] eq 'HASH' ? pop(@_) : {});
- # Parse out a hash from input
+ # Default to the primary key, but allow a specific key
my @cols = exists $attrs->{key}
? $self->result_source->unique_constraint_columns($attrs->{key})
: $self->result_source->primary_columns;
+ $self->throw_exception(
+ "Can't find unless a primary key or unique constraint is defined"
+ ) unless @cols;
- my $hash;
+ # Parse out a hashref from input
+ my $input_query;
if (ref $_[0] eq 'HASH') {
- $hash = { %{$_[0]} };
+ $input_query = { %{$_[0]} };
}
elsif (@_ == @cols) {
- $hash = {};
- @{$hash}{@cols} = @_;
- }
- elsif (@_) {
- # For backwards compatibility
- $hash = {@_};
+ $input_query = {};
+ @{$input_query}{@cols} = @_;
}
else {
- $self->throw_exception(
- "Arguments to find must be a hashref or match the number of columns in the "
- . (exists $attrs->{key} ? "$attrs->{key} unique constraint" : "primary key")
- );
+ # Compatibility: Allow e.g. find(id => $value)
+ carp "Find by key => value deprecated; please use a hashref instead";
+ $input_query = {@_};
}
- # Check the hash we just parsed against our source's unique constraints
- my @constraint_names = exists $attrs->{key}
- ? ($attrs->{key})
- : $self->result_source->unique_constraint_names;
- $self->throw_exception(
- "Can't find unless a primary key or unique constraint is defined"
- ) unless @constraint_names;
-
- my @unique_queries;
- foreach my $name (@constraint_names) {
- my @unique_cols = $self->result_source->unique_constraint_columns($name);
- my $unique_query = $self->_build_unique_query($hash, \@unique_cols);
+ my @unique_queries = $self->_unique_queries($input_query, $attrs);
- # Add the ResultSet's alias
- foreach my $key (grep { ! m/\./ } keys %$unique_query) {
- my $alias = $self->{attrs}->{alias};
- $unique_query->{"$alias.$key"} = delete $unique_query->{$key};
- }
-
- push @unique_queries, $unique_query if %$unique_query;
+ # Verify the query
+ my $query = \@unique_queries;
+ if (scalar @unique_queries == 0) {
+ # Handle cases where the ResultSet defines the query, or where the user is
+ # abusing find
+ $query = $input_query;
}
- # Handle cases where the ResultSet already defines the query
- my $query = @unique_queries ? \@unique_queries : undef;
-
# Run the query
-
if (keys %$attrs) {
my $rs = $self->search($query, $attrs);
$rs->_resolve;
}
}
+# _unique_queries
+#
+# Build a list of queries which satisfy unique constraints.
+
+sub _unique_queries {
+ my ($self, $query, $attrs) = @_;
+
+ my @constraint_names = exists $attrs->{key}
+ ? ($attrs->{key})
+ : $self->result_source->unique_constraint_names;
+
+ my @unique_queries;
+ foreach my $name (@constraint_names) {
+ my @unique_cols = $self->result_source->unique_constraint_columns($name);
+ my $unique_query = $self->_build_unique_query($query, \@unique_cols);
+
+ next unless scalar keys %$unique_query;
+
+ # Add the ResultSet's alias
+ foreach my $key (grep { ! m/\./ } keys %$unique_query) {
+ $unique_query->{"$self->{attrs}->{alias}.$key"} = delete $unique_query->{$key};
+ }
+
+ push @unique_queries, $unique_query;
+ }
+
+ return @unique_queries;
+}
+
# _build_unique_query
#
# Constrain the specified query hash based on the specified column names.
Inflates the first result without creating a cursor if the resultset has
any records in it; if not returns nothing. Used by L</find> as an optimisation.
+Can optionally take an additional condition B<only> - this is a fast code path
+and accepts no other attributes; if you need to add extra joins or similar,
+call ->search and then ->single without a condition on the $rs returned from
+that.
+
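+For example (the column and relationship names here are illustrative):
+
+  my $cd = $rs->single({ year => 2001 });
+
+  # if extra joins or other attributes are needed, search first:
+  my $cd = $rs->search({ year => 2001 }, { join => 'artist' })->single;
+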
=cut
sub single {
}
}
+ unless ($self->_is_unique_query($attrs->{where})) {
+ carp "Query not guarnteed to return a single row"
+ . "; please declare your unique constraints or use search instead";
+ }
+
my @data = $self->result_source->storage->select_single(
$attrs->{from}, $attrs->{select},
$attrs->{where},$attrs);
return (@data ? $self->_construct_object(@data) : ());
}
+# _is_unique_query
+#
+# Try to determine if the specified query is guaranteed to be unique, based on
+# the declared unique constraints.
+
+sub _is_unique_query {
+ my ($self, $query) = @_;
+
+ my $collapsed = $self->_collapse_query($query);
+
+ foreach my $name ($self->result_source->unique_constraint_names) {
+ my @unique_cols = map { "$self->{attrs}->{alias}.$_" }
+ $self->result_source->unique_constraint_columns($name);
+
+ # Count the values for each unique column
+ my %seen = map { $_ => 0 } @unique_cols;
+
+ foreach my $key (keys %$collapsed) {
+ my $aliased = $key;
+ $aliased = "$self->{attrs}->{alias}.$key" unless $key =~ /\./;
+
+ next unless exists $seen{$aliased}; # Additional constraints are okay
+ $seen{$aliased} = scalar @{ $collapsed->{$key} };
+ }
+
+ # If we get 0 or more than 1 value for a column, it's not necessarily unique
+ return 1 unless grep { $_ != 1 } values %seen;
+ }
+
+ return 0;
+}
+
+# _collapse_query
+#
+# Recursively collapse the query, accumulating values for each column.
+
+sub _collapse_query {
+ my ($self, $query, $collapsed) = @_;
+
+ # Accumulate fields in the AST
+ $collapsed ||= {};
+
+ if (ref $query eq 'ARRAY') {
+ foreach my $subquery (@$query) {
+ next unless ref $subquery; # -or
+# warn "ARRAY: " . Dumper $subquery;
+ $collapsed = $self->_collapse_query($subquery, $collapsed);
+ }
+ }
+ elsif (ref $query eq 'HASH') {
+ if (keys %$query and (keys %$query)[0] eq '-and') {
+ foreach my $subquery (@{$query->{-and}}) {
+# warn "HASH: " . Dumper $subquery;
+ $collapsed = $self->_collapse_query($subquery, $collapsed);
+ }
+ }
+ else {
+# warn "LEAF: " . Dumper $query;
+ foreach my $key (keys %$query) {
+ push @{$collapsed->{$key}}, $query->{$key};
+ }
+ }
+ }
+
+ return $collapsed;
+}
+
=head2 get_column
=over 4
return $self->_construct_object(@row);
}
-# XXX - this is essentially just the old new(). rewrite / tidy up?
sub _resolve {
my $self = shift;
my $attrs = $self->{attrs};
my $source = ($self->{_parent_rs}) ? $self->{_parent_rs} : $self->{result_source};
- # XXX - this is a hack to prevent dclone dieing because of the code ref, get's put back in $attrs afterwards
+ # XXX - lose storable dclone
my $record_filter = delete $attrs->{record_filter} if (defined $attrs->{record_filter});
$attrs = Storable::dclone($attrs || {}); # { %{ $attrs || {} } };
$attrs->{record_filter} = $record_filter if ($record_filter);
$attrs->{order_by} = [ $attrs->{order_by} ] if
$attrs->{order_by} and !ref($attrs->{order_by});
$attrs->{order_by} ||= [];
+
+ if(my $seladds = delete($attrs->{'+select'})) {
+ my @seladds = (ref($seladds) eq 'ARRAY' ? @$seladds : ($seladds));
+ $attrs->{select} = [
+ @{ $attrs->{select} },
+      map { (m/\./ || ref($_)) ? $_ : "${alias}.$_" } @seladds
+ ];
+ }
+ if(my $asadds = delete($attrs->{'+as'})) {
+ my @asadds = (ref($asadds) eq 'ARRAY' ? @$asadds : ($asadds));
+ $attrs->{as} = [ @{ $attrs->{as} }, @asadds ];
+ }
my $collapse = $attrs->{collapse} || {};
if (my $prefetch = delete $attrs->{prefetch}) {
}
sub _merge_attr {
- my ($self, $a, $b) = @_;
+ my ($self, $a, $b, $is_prefetch) = @_;
+ return $b unless $a;
if (ref $b eq 'HASH' && ref $a eq 'HASH') {
- return $self->_merge_hash($a, $b);
+ foreach my $key (keys %{$b}) {
+ if (exists $a->{$key}) {
+ $a->{$key} = $self->_merge_attr($a->{$key}, $b->{$key}, $is_prefetch);
+ } else {
+ $a->{$key} = delete $b->{$key};
+ }
+ }
+ return $a;
} else {
- $a = [$a] unless (ref $a eq 'ARRAY');
- $b = [$b] unless (ref $b eq 'ARRAY');
- my @new_array = (@{$a}, @{$b});
- foreach my $a_element (@new_array) {
- my $i = 0;
- foreach my $b_element (@new_array) {
- if ((ref $a_element eq 'HASH') && (ref $b_element eq 'HASH') && ($a_element ne $b_element)) {
- $a_element = $self->_merge_hash($a_element, $b_element);
- $new_array[$i] = undef;
- }
- $i++;
- }
- }
- @new_array = grep($_, @new_array);
- return \@new_array;
- }
+ $a = [$a] unless (ref $a eq 'ARRAY');
+ $b = [$b] unless (ref $b eq 'ARRAY');
+
+ my $hash = {};
+ my $array = [];
+ foreach ($a, $b) {
+ foreach my $element (@{$_}) {
+ if (ref $element eq 'HASH') {
+ $hash = $self->_merge_attr($hash, $element, $is_prefetch);
+ } elsif (ref $element eq 'ARRAY') {
+ $array = [@{$array}, @{$element}];
+ } else {
+ if (($b == $_) && $is_prefetch) {
+ $self->_merge_array($array, $element, $is_prefetch);
+ } else {
+ push(@{$array}, $element);
+ }
+ }
+ }
+ }
+
+    if ((keys %{$hash}) && (scalar(@{$array}) > 0)) {
+ return [$hash, @{$array}];
+ } else {
+ return (keys %{$hash}) ? $hash : $array;
+ }
+ }
}
-sub _merge_hash {
- my ($self, $a, $b) = @_;
-
- foreach my $key (keys %{$b}) {
- if (exists $a->{$key}) {
- $a->{$key} = $self->_merge_attr($a->{$key}, $b->{$key});
- } else {
- $a->{$key} = delete $b->{$key};
- }
- }
- return $a;
+sub _merge_array {
+ my ($self, $a, $b) = @_;
+
+ $b = [$b] unless (ref $b eq 'ARRAY');
+ # add elements from @{$b} to @{$a} which aren't already in @{$a}
+ foreach my $b_element (@{$b}) {
+ push(@{$a}, $b_element) unless grep {$b_element eq $_} @{$a};
+ }
}
sub _construct_object {
$self->_resolve;
my $attrs = { %{ $self->{_attrs} } };
- if ($attrs->{distinct} && (my $group_by = $attrs->{group_by} || $attrs->{select})) {
+ if (my $group_by = delete $attrs->{group_by}) {
delete $attrs->{having};
my @distinct = (ref $group_by ? @$group_by : ($group_by));
# todo: try CONCAT for multi-column pk
}
$select = { count => { distinct => \@distinct } };
- #use Data::Dumper; die Dumper $select;
}
$attrs->{select} = $select;
$cond->{-and} = [];
my @cond = @{$self->{cond}{-and}};
- for (my $i = 0; $i < @cond - 1; $i++) {
+ for (my $i = 0; $i <= @cond - 1; $i++) {
my $entry = $cond[$i];
my %hash;
}
else {
$entry =~ /([^.]+)$/;
- $hash{$entry} = $cond[++$i];
+ $hash{$1} = $cond[++$i];
}
push @{$cond->{-and}}, \%hash;
$class->find_or_create({ key => $val, ... });
-Searches for a record matching the search condition; if it doesn't find one,
-creates one and returns that instead.
+Tries to find a record based on its primary key or unique constraint; if none
+is found, creates one and returns that instead.
my $cd = $schema->resultset('CD')->find_or_create({
cdid => 5,
sub update_or_create {
my $self = shift;
my $attrs = (@_ > 1 && ref $_[$#_] eq 'HASH' ? pop(@_) : {});
- my $hash = ref $_[0] eq 'HASH' ? shift : {@_};
+ my $cond = ref $_[0] eq 'HASH' ? shift : {@_};
- my $row = $self->find($hash, $attrs);
+  my $row = $self->find($cond, $attrs);
if (defined $row) {
- $row->update($hash);
+ $row->update($cond);
return $row;
}
- return $self->create($hash);
+ return $self->create($cond);
}
=head2 get_cache
through directly to SQL, so you can give e.g. C<year DESC> for a
descending order on the column `year'.
+Please note that if you have quoting enabled (see
+L<DBIx::Class::Storage/quote_char>) you will need to do C<\'year DESC'> to
+specify an order. (The scalar ref causes it to be passed as raw SQL to the DB,
+so you will need to manually quote things as appropriate.)
+
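+For example, passing the literal C<year DESC> through as raw SQL:
+
+  $rs->search({}, { order_by => \'year DESC' });
+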
=head2 columns
=over 4
attribute, the column names returned are storage-dependent. E.g. MySQL would
return a column named C<count(employeeid)> in the above example.
+=head2 +select
+
+=over 4
+
+Indicates additional columns to be selected from storage. Works the same as
+L</select> but adds columns to the default selection instead of replacing it;
+see the combined example after L</+as> below.
+
+=back
+
+=head2 +as
+
+=over 4
+
+Indicates additional column names for those added via L</+select>.
+
+=back
+
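+For example, to add a computed column to the default selection (the function
+and accessor name here are illustrative):
+
+  $rs = $rs->search(undef, {
+    '+select' => [ { count => 'employeeid' } ],
+    '+as'     => [ 'employee_count' ],
+  });
+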
=head2 as
=over 4
You can create your own accessors if required - see
L<DBIx::Class::Manual::Cookbook> for details.
+Please note: This will NOT insert an C<AS employee_count> into the SQL
+statement produced; it is used for internal access only. Thus attempting to
+use the accessor in an C<order_by> clause or similar will fail miserably.
+
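+For example (accessor name as above):
+
+  # fine - the accessor is created for you:
+  my $count = $rs->first->get_column('employee_count');
+
+  # will NOT work - 'employee_count' never appears in the generated SQL:
+  $rs->search({}, { order_by => 'employee_count' });
+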
=head2 join
=over 4
C<has_one> (or if you're using C<add_relationship>, any relationship declared
with an accessor type of 'single' or 'filter').
+=head2 page
+
+=over 4
+
+=item Value: $page
+
+=back
+
+Makes the resultset paged and specifies the page to retrieve. Effectively
+identical to creating a non-paged resultset and then calling ->page($page)
+on it.
+
+If the L</rows> attribute is not specified it defaults to 10 rows per page.
+
+=head2 rows
+
+=over 4
+
+=item Value: $rows
+
+=back
+
+Specifies the maximum number of rows for direct retrieval or the number of
+rows per page if the page attribute or method is used.
+
+=head2 offset
+
+=over 4
+
+=item Value: $offset
+
+=back
+
+Specifies the (zero-based) row number for the first row to be returned, or the
+row number of the first row of the first page if paging is used.
+
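+For example, to fetch rows 21 through 30 (a minimal sketch):
+
+  my $rs = $schema->resultset('CD')->search(undef, {
+    page => 3,
+    rows => 10,
+  });
+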
+=head2 group_by
+
+=over 4
+
+=item Value: \@columns
+
+=back
+
+An arrayref of columns to group by. Can include columns of joined tables.
+
+ group_by => [qw/ column1 column2 ... /]
+
+=head2 having
+
+=over 4
+
+=item Value: $condition
+
+=back
+
+HAVING is a select statement attribute that is applied between GROUP BY and
+ORDER BY. It is applied after the grouping calculations have been done.
+
+ having => { 'count(employee)' => { '>=', 100 } }
+
+=head2 distinct
+
+=over 4
+
+=item Value: (0 | 1)
+
+=back
+
+Set to 1 to group by all columns.
+
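+For example:
+
+  $rs->search({}, { distinct => 1 });   # equivalent to grouping by all selected columns
+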
+=head2 cache
+
+Set to 1 to cache search results. This prevents extra SQL queries if you
+revisit rows in your ResultSet:
+
+ my $resultset = $schema->resultset('Artist')->search( undef, { cache => 1 } );
+
+ while( my $artist = $resultset->next ) {
+ ... do stuff ...
+ }
+
+ $rs->first; # without cache, this would issue a query
+
+By default, searches are not cached.
+
+For more examples of using these attributes, see
+L<DBIx::Class::Manual::Cookbook>.
+
=head2 from
=over 4
clauses.
NOTE: Use this on your own risk. This allows you to shoot off your foot!
+
C<join> will usually do what you need and it is strongly recommended that you
avoid using C<from> unless you cannot achieve the desired result using C<join>.
+And we really do mean "cannot", not just tried and failed. Attempting to use
+this because you're having problems with C<join> is like trying to use x86
+ASM because you've got a syntax error in your C. Trust us on this.
-In simple terms, C<from> works as follows:
+Now, if you're still really, really sure you need to use this (and if you're
+not 100% sure, ask the mailing list first), here's an explanation of how this
+works.
+The syntax is as follows -
+
+ [
+ { <alias1> => <table1> },
[
- { <alias> => <table>, -join_type => 'inner|left|right' }
- [] # nested JOIN (optional)
- { <table.column> => <foreign_table.foreign_key> }
- ]
+ { <alias2> => <table2>, -join_type => 'inner|left|right' },
+ [], # nested JOIN (optional)
+ { <table1.column1> => <table2.column2>, ... (more conditions) },
+ ],
+ # More of the above [ ] may follow for additional joins
+ ]
- JOIN
- <alias> <table>
- [JOIN ...]
- ON <table.column> = <foreign_table.foreign_key>
+ <table1> <alias1>
+ JOIN
+ <table2> <alias2>
+ [JOIN ...]
+ ON <table1.column1> = <table2.column2>
+ <more joins may follow>
An easy way to follow the examples below is to remember the following:
# SELECT child.* FROM person child
# INNER JOIN person father ON child.father_id = father.id
-=head2 page
-
-=over 4
-
-=item Value: $page
-
-=back
-
-Makes the resultset paged and specifies the page to retrieve. Effectively
-identical to creating a non-pages resultset and then calling ->page($page)
-on it.
-
-=head2 rows
-
-=over 4
-
-=item Value: $rows
-
-=back
-
-Specifes the maximum number of rows for direct retrieval or the number of
-rows per page if the page attribute or method is used.
-
-=head2 group_by
-
-=over 4
-
-=item Value: \@columns
-
-=back
-
-A arrayref of columns to group by. Can include columns of joined tables.
-
- group_by => [qw/ column1 column2 ... /]
-
-=head2 having
-
-=over 4
-
-=item Value: $condition
-
-=back
-
-HAVING is a select statement attribute that is applied between GROUP BY and
-ORDER BY. It is applied to the after the grouping calculations have been
-done.
-
- having => { 'count(employee)' => { '>=', 100 } }
-
-=head2 distinct
-
-=over 4
-
-=item Value: (0 | 1)
-
-=back
-
-Set to 1 to group by all columns.
-
-=head2 cache
-
-Set to 1 to cache search results. This prevents extra SQL queries if you
-revisit rows in your ResultSet:
-
- my $resultset = $schema->resultset('Artist')->search( undef, { cache => 1 } );
-
- while( my $artist = $resultset->next ) {
- ... do stuff ...
- }
-
- $rs->first; # without cache, this would issue a query
-
-By default, searches are not cached.
-
-For more examples of using these attributes, see
-L<DBIx::Class::Manual::Cookbook>.
-
=cut
1;