DBIx::Class::Storage::DBIHacks;
#
-# This module contains code that should never have seen the light of day,
-# does not belong in the Storage, or is otherwise unfit for public
-# display. The arrival of SQLA2 should immediately obsolete 90% of this
+# This module contains code supporting a battery of special cases and tests for
+# many corner cases pushing the envelope of what DBIC can do. When work on
+# these utilities began in mid 2009 (51a296b402c) it wasn't immediately obvious
+# that these pieces, despite their misleading on-first-sight-flakiness, will
+# become part of the generic query rewriting machinery of DBIC, allowing it to
+# both generate and process queries representing incredibly complex sets with
+# reasonable efficiency.
+#
+# Now (end of 2015), more than 6 years later the routines in this class have
+# stabilized enough, and are meticulously covered with tests, to a point where
+# an effort to formalize them into user-facing APIs might be worthwhile.
+#
+# An implementor working on publicizing and/or replacing the routines with a
+# more modern SQL generation framework should keep in mind that pretty much all
+# existing tests are constructed on the basis of real-world code used in
+# production somewhere.
+#
+# Please hack on this responsibly ;)
#
use strict;
use base 'DBIx::Class::Storage';
use mro 'c3';
-use List::Util 'first';
use Scalar::Util 'blessed';
-use Sub::Name 'subname';
-use DBIx::Class::_Util qw(is_plain_value is_literal_value UNRESOLVABLE_CONDITION);
+use DBIx::Class::_Util qw(
+ dump_value fail_on_internal_call
+);
+use DBIx::Class::SQLMaker::Util 'extract_equality_conditions';
+use DBIx::Class::ResultSource::FromSpec::Util qw(
+ fromspec_columns_info
+ find_join_path_to_alias
+);
+use DBIx::Class::Carp;
use namespace::clean;
#
$self->_use_join_optimizer
);
- my $orig_aliastypes = $self->_resolve_aliastypes_from_select_args($attrs);
+ my $orig_aliastypes =
+ $attrs->{_precalculated_aliastypes}
+ ||
+ $self->_resolve_aliastypes_from_select_args($attrs)
+ ;
my $new_aliastypes = { %$orig_aliastypes };
my $outer_attrs = { %$attrs };
delete @{$outer_attrs}{qw(from bind rows offset group_by _grouped_by_distinct having)};
- my $inner_attrs = { %$attrs };
- delete @{$inner_attrs}{qw(for collapse select as _related_results_construction)};
+ my $inner_attrs = { %$attrs, _simple_passthrough_construction => 1 };
+ delete @{$inner_attrs}{qw(for collapse select as)};
# there is no point of ordering the insides if there is no limit
delete $inner_attrs->{order_by} if (
unless $root_node;
# use the heavy duty resolver to take care of aliased/nonaliased naming
- my $colinfo = $self->_resolve_column_info($inner_attrs->{from});
+ my $colinfo = fromspec_columns_info($inner_attrs->{from});
my $selected_root_columns;
for my $i (0 .. $#{$outer_attrs->{select}}) {
push @{$inner_attrs->{as}}, $attrs->{as}[$i];
}
- # We will need to fetch all native columns in the inner subquery, which may
+ my $inner_aliastypes = $self->_resolve_aliastypes_from_select_args($inner_attrs);
+
+ # In the inner subq we will need to fetch *only* native columns which may
# be a part of an *outer* join condition, or an order_by (which needs to be
# preserved outside), or wheres. In other words everything but the inner
# selector
# We can not just fetch everything because a potential has_many restricting
# join collapse *will not work* on heavy data types.
- my $connecting_aliastypes = $self->_resolve_aliastypes_from_select_args({
- %$inner_attrs,
- select => [],
- });
- for (sort map { keys %{$_->{-seen_columns}||{}} } map { values %$_ } values %$connecting_aliastypes) {
+ # essentially a map of all non-selecting seen columns
+ # the sort is there for a nicer select list
+ for (
+ sort
+ map
+ { keys %{$_->{-seen_columns}||{}} }
+ map
+ { values %{$inner_aliastypes->{$_}} }
+ grep
+ { $_ ne 'selecting' }
+ keys %$inner_aliastypes
+ ) {
my $ci = $colinfo->{$_} or next;
if (
$ci->{-source_alias} eq $root_alias
my $inner_subq = do {
# must use it here regardless of user requests (vastly gentler on optimizer)
- local $self->{_use_join_optimizer} = 1;
+ local $self->{_use_join_optimizer} = 1
+ unless $self->{_use_join_optimizer};
# throw away multijoins since we def. do not care about those inside the subquery
- ($inner_attrs->{from}, my $inner_aliastypes) = $self->_prune_unused_joins ({
- %$inner_attrs, _force_prune_multiplying_joins => 1
+ # $inner_aliastypes *will* be redefined at this point
+ ($inner_attrs->{from}, $inner_aliastypes ) = $self->_prune_unused_joins ({
+ %$inner_attrs,
+ _force_prune_multiplying_joins => 1,
+ _precalculated_aliastypes => $inner_aliastypes,
});
# uh-oh a multiplier (which is not us) left in, this is a problem for limits
) {
push @outer_from, $j
}
- elsif (first { $_->{$alias} } @outer_nonselecting_chains ) {
+ elsif (grep { $_->{$alias} } @outer_nonselecting_chains ) {
push @outer_from, $j;
$may_need_outer_group_by ||= $outer_aliastypes->{multiplying}{$alias} ? 1 : 0;
}
});
}
- # This is totally horrific - the {where} ends up in both the inner and outer query
- # Unfortunately not much can be done until SQLA2 introspection arrives, and even
- # then if where conditions apply to the *right* side of the prefetch, you may have
- # to both filter the inner select (e.g. to apply a limit) and then have to re-filter
- # the outer select to exclude joins you didn't want in the first place
+ # FIXME: The {where} ends up in both the inner and outer query, i.e. *twice*
+ #
+ # This is rather horrific, and while we currently *do* have enough
+ # introspection tooling available to attempt a stab at properly deciding
+ # whether or not to include the where condition on the outside, the
+ # machinery is still too slow to apply it here.
+ # Thus for the time being we do not attempt any sanitation of the where
+ # clause and just pass it through on both sides of the subquery. This *will*
+ # be addressed at a later stage, most likely after folding the SQL generator
+ # into SQLMaker proper
#
# OTOH it can be seen as a plus: <ash> (notes that this query would make a DBA cry ;)
+ #
return $outer_attrs;
}
+# This is probably the ickiest, yet most relied upon part of the codebase:
+# this is the place where we take arbitrary SQL input and break it into its
+# constituent parts, making sure we know which *sources* are used in what
+# *capacity* ( selecting / restricting / grouping / ordering / joining, etc )
+# Although the method is pretty horrific, the worst thing that can happen is
+# for a classification failure, which in turn will result in a vocal exception,
+# and will lead to a relatively prompt fix.
+# The code has been slowly improving and is covered with a formidable battery
+# of tests, so can be considered "reliably stable" at this point (Oct 2015).
#
-# I KNOW THIS SUCKS! GET SQLA2 OUT THE DOOR SO THIS CAN DIE!
+# A note to implementors attempting to "replace" this - keep in mind that while
+# there are multiple optimization avenues, the actual "scan literal elements"
+# part *MAY NEVER BE REMOVED*, even if it is limited only to the (future) AST
+# nodes that are deemed opaque (i.e. contain literal expressions). The use of
+# blackbox literals is at this point firmly a user-facing API, and is one of
+# *the* reasons DBIC remains as flexible as it is. In other words, when working
+# on this keep in mind that the following is a widespread and *encouraged* way
+# of using DBIC in the wild when push comes to shove:
+#
+# $rs->search( {}, {
+# select => \[ $random, @stuff],
+# from => \[ $random, @stuff ],
+# where => \[ $random, @stuff ],
+# group_by => \[ $random, @stuff ],
+# order_by => \[ $random, @stuff ],
+# } )
+#
+# Various incarnations of the above are reflected in many of the tests. If one
+# gets to fail, you get to fix it. A "this is crazy, nobody does that" is not
+# acceptable going forward.
#
-# Due to a lack of SQLA2 we fall back to crude scans of all the
-# select/where/order/group attributes, in order to determine what
-# aliases are needed to fulfill the query. This information is used
-# throughout the code to prune unnecessary JOINs from the queries
-# in an attempt to reduce the execution time.
-# Although the method is pretty horrific, the worst thing that can
-# happen is for it to fail due to some scalar SQL, which in turn will
-# result in a vocal exception.
sub _resolve_aliastypes_from_select_args {
my ( $self, $attrs ) = @_;
}
# get a column to source/alias map (including unambiguous unqualified ones)
- my $colinfo = $self->_resolve_column_info ($attrs->{from});
+ my $colinfo = fromspec_columns_info($attrs->{from});
# set up a botched SQLA
my $sql_maker = $self->sql_maker;
# generate sql chunks
my $to_scan = {
restricting => [
- $sql_maker->_recurse_where ($attrs->{where}),
+ ($sql_maker->_recurse_where ($attrs->{where}))[0],
$sql_maker->_parse_rs_attrs ({ having => $attrs->{having} }),
],
grouping => [
),
],
selecting => [
- map { ($sql_maker->_recurse_fields($_))[0] } @{$attrs->{select}},
+ # kill all selectors which look like a proper subquery
+ # this is a sucky heuristic *BUT* - if we get it wrong the query will simply
+ # fail to run, so we are relatively safe
+ grep
+ { $_ !~ / \A \s* \( \s* SELECT \s+ .+? \s+ FROM \s+ .+? \) \s* \z /xsi }
+ map
+ {
+ length ref $_
+ ? ($sql_maker->_recurse_fields($_))[0]
+ : $sql_maker->_quote($_)
+ }
+ @{$attrs->{select}}
],
- ordering => [
- map { $_->[0] } $self->_extract_order_criteria ($attrs->{order_by}, $sql_maker),
+ ordering => [ map
+ {
+ ( my $sql = (ref $_ ? $_->[0] : $_) ) =~ s/ \s+ (?: ASC | DESC ) \s* \z //xi;
+ $sql;
+ }
+ $sql_maker->_order_by_chunks( $attrs->{order_by} ),
],
};
- # throw away empty chunks and all 2-value arrayrefs: the thinking is that these are
- # bind value specs left in by the sloppy renderer above. It is ok to do this
- # at this point, since we are going to end up rewriting this crap anyway
- for my $v (values %$to_scan) {
- my @nv;
- for (@$v) {
- next if (
- ! defined $_
- or
- (
- ref $_ eq 'ARRAY'
- and
- ( @$_ == 0 or @$_ == 2 )
- )
- );
-
- if (ref $_) {
- require Data::Dumper::Concise;
- $self->throw_exception("Unexpected ref in scan-plan: " . Data::Dumper::Concise::Dumper($v) );
- }
+ # we will be bulk-scanning anyway - pieces will not matter in that case,
+ # thus join everything up
+ # throw away empty-string chunks, and make sure no binds snuck in
+ # note that we operate over @{$to_scan->{$type}}, hence the
+ # semi-mindbending ... map ... for values ...
+ ( $_ = join ' ', map {
+
+ ( ! defined $_ ) ? ()
+ : ( length ref $_ ) ? $self->throw_exception(
+ "Unexpected ref in scan-plan: " . dump_value $_
+ )
+ : ( $_ =~ /^\s*$/ ) ? ()
+ : $_
+
+ } @$_ ) for values %$to_scan;
+
+ # throw away empty to-scan's
+ (
+ length $to_scan->{$_}
+ or
+ delete $to_scan->{$_}
+ ) for keys %$to_scan;
- push @nv, $_;
- }
- $v = \@nv;
- }
- # kill all selectors which look like a proper subquery
- # this is a sucky heuristic *BUT* - if we get it wrong the query will simply
- # fail to run, so we are relatively safe
- $to_scan->{selecting} = [ grep {
- $_ !~ / \A \s* \( \s* SELECT \s+ .+? \s+ FROM \s+ .+? \) \s* \z /xsi
- } @{ $to_scan->{selecting} || [] } ];
+ # these will be used for matching in the loop below
+ my $all_aliases = join ' | ', map { quotemeta $_ } keys %$alias_list;
+ my $fq_col_re = qr/
+ $lquote ( $all_aliases ) $rquote $sep (?: $lquote ([^$rquote]+) $rquote )?
+ |
+ \b ( $all_aliases ) \. ( [^\s\)\($rquote]+ )?
+ /x;
- # first see if we have any exact matches (qualified or unqualified)
- for my $type (keys %$to_scan) {
- for my $piece (@{$to_scan->{$type}}) {
- if ($colinfo->{$piece} and my $alias = $colinfo->{$piece}{-source_alias}) {
- $aliases_by_type->{$type}{$alias} ||= { -parents => $alias_list->{$alias}{-join_path}||[] };
- $aliases_by_type->{$type}{$alias}{-seen_columns}{$colinfo->{$piece}{-fq_colname}} = $piece;
- }
- }
- }
- # now loop through all fully qualified columns and get the corresponding
- # alias (should work even if they are in scalarrefs)
- for my $alias (keys %$alias_list) {
- my $al_re = qr/
- $lquote $alias $rquote $sep (?: $lquote ([^$rquote]+) $rquote )?
+ my $all_unq_columns = join ' | ',
+ map
+ { quotemeta $_ }
+ grep
+ # using a regex here shows up on profiles, boggle
+ { index( $_, '.') < 0 }
+ keys %$colinfo
+ ;
+ my $unq_col_re = $all_unq_columns
+ ? qr/
+ $lquote ( $all_unq_columns ) $rquote
|
- \b $alias \. ([^\s\)\($rquote]+)?
- /x;
-
- for my $type (keys %$to_scan) {
- for my $piece (@{$to_scan->{$type}}) {
- if (my @matches = $piece =~ /$al_re/g) {
- $aliases_by_type->{$type}{$alias} ||= { -parents => $alias_list->{$alias}{-join_path}||[] };
- $aliases_by_type->{$type}{$alias}{-seen_columns}{"$alias.$_"} = "$alias.$_"
- for grep { defined $_ } @matches;
- }
+ (?: \A | \s ) ( $all_unq_columns ) (?: \s | \z )
+ /x
+ : undef
+ ;
+
+
+ # the actual scan, per type
+ for my $type (keys %$to_scan) {
+
+
+ # now loop through all fully qualified columns and get the corresponding
+ # alias (should work even if they are in scalarrefs)
+ #
+ # The regex captures in multiples of 4, with one of the two pairs being
+ # undef. There may be a *lot* of matches, hence the convoluted loop
+ my @matches = $to_scan->{$type} =~ /$fq_col_re/g;
+ my $i = 0;
+ while( $i < $#matches ) {
+
+ if (
+ defined $matches[$i]
+ ) {
+ $aliases_by_type->{$type}{$matches[$i]} ||= { -parents => $alias_list->{$matches[$i]}{-join_path}||[] };
+
+ $aliases_by_type->{$type}{$matches[$i]}{-seen_columns}{"$matches[$i].$matches[$i+1]"} = "$matches[$i].$matches[$i+1]"
+ if defined $matches[$i+1];
+
+ $i += 2;
}
+
+ $i += 2;
}
- }
- # now loop through unqualified column names, and try to locate them within
- # the chunks
- for my $col (keys %$colinfo) {
- next if $col =~ / \. /x; # if column is qualified it was caught by the above
-
- my $col_re = qr/ $lquote ($col) $rquote /x;
-
- for my $type (keys %$to_scan) {
- for my $piece (@{$to_scan->{$type}}) {
- if ( my @matches = $piece =~ /$col_re/g) {
- my $alias = $colinfo->{$col}{-source_alias};
- $aliases_by_type->{$type}{$alias} ||= { -parents => $alias_list->{$alias}{-join_path}||[] };
- $aliases_by_type->{$type}{$alias}{-seen_columns}{"$alias.$_"} = $_
- for grep { defined $_ } @matches;
- }
- }
+
+ # now loop through unqualified column names, and try to locate them within
+ # the chunks, if there are any unqualified columns in the 1st place
+ next unless $unq_col_re;
+
+ # The regex captures in multiples of 2, one of the two being undef
+ for ( $to_scan->{$type} =~ /$unq_col_re/g ) {
+ defined $_ or next;
+ my $alias = $colinfo->{$_}{-source_alias} or next;
+ $aliases_by_type->{$type}{$alias} ||= { -parents => $alias_list->{$alias}{-join_path}||[] };
+ $aliases_by_type->{$type}{$alias}{-seen_columns}{"$alias.$_"} = $_
}
}
+
# Add any non-left joins to the restriction list (such joins are indeed restrictions)
- for my $j (values %$alias_list) {
- my $alias = $j->{-alias} or next;
- $aliases_by_type->{restricting}{$alias} ||= { -parents => $j->{-join_path}||[] } if (
- (not $j->{-join_type})
+ (
+ $_->{-alias}
+ and
+ ! $aliases_by_type->{restricting}{ $_->{-alias} }
+ and
+ (
+ not $_->{-join_type}
or
- ($j->{-join_type} !~ /^left (?: \s+ outer)? $/xi)
- );
- }
+ $_->{-join_type} !~ /^left (?: \s+ outer)? $/xi
+ )
+ and
+ $aliases_by_type->{restricting}{ $_->{-alias} } = { -parents => $_->{-join_path}||[] }
+ ) for values %$alias_list;
+
+
+ # final cleanup
+ (
+ keys %{$aliases_by_type->{$_}}
+ or
+ delete $aliases_by_type->{$_}
+ ) for keys %$aliases_by_type;
- for (keys %$aliases_by_type) {
- delete $aliases_by_type->{$_} unless keys %{$aliases_by_type->{$_}};
- }
- return $aliases_by_type;
+ $aliases_by_type;
}
# This is the engine behind { distinct => 1 } and the general
sub _group_over_selection {
my ($self, $attrs) = @_;
- my $colinfos = $self->_resolve_column_info ($attrs->{from});
+ my $colinfos = fromspec_columns_info($attrs->{from});
my (@group_by, %group_index);
# of the external order and convert them to MIN(X) for ASC or MAX(X)
# for DESC, and group_by the root columns. The end result should be
# exactly what we expect
+ #
- # FIXME - this code is a joke, will need to be completely rewritten in
- # the DQ branch. But I need to push a POC here, otherwise the
- # pesky tests won't pass
- # wrap any part of the order_by that "responds" to an ordering alias
- # into a MIN/MAX
+ # both populated on the first loop over $o_idx
$sql_maker ||= $self->sql_maker;
$order_chunks ||= [
map { ref $_ eq 'ARRAY' ? $_ : [ $_ ] } $sql_maker->_order_by_chunks($attrs->{order_by})
my ($chunk, $is_desc) = $sql_maker->_split_order_chunk($order_chunks->[$o_idx][0]);
+ # we reached that far - wrap any part of the order_by that "responded"
+ # to an ordering alias into a MIN/MAX
$new_order_by[$o_idx] = \[
sprintf( '%s( %s )%s',
- ($is_desc ? 'MAX' : 'MIN'),
+ $self->_minmax_operator_for_datatype($chunk_ci->{data_type}, $is_desc),
$chunk,
($is_desc ? ' DESC' : ''),
),
);
}
-sub _resolve_ident_sources {
- my ($self, $ident) = @_;
-
- my $alias2source = {};
-
- # the reason this is so contrived is that $ident may be a {from}
- # structure, specifying multiple tables to join
- if ( blessed $ident && $ident->isa("DBIx::Class::ResultSource") ) {
- # this is compat mode for insert/update/delete which do not deal with aliases
- $alias2source->{me} = $ident;
- }
- elsif (ref $ident eq 'ARRAY') {
-
- for (@$ident) {
- my $tabinfo;
- if (ref $_ eq 'HASH') {
- $tabinfo = $_;
- }
- if (ref $_ eq 'ARRAY' and ref $_->[0] eq 'HASH') {
- $tabinfo = $_->[0];
- }
-
- $alias2source->{$tabinfo->{-alias}} = $tabinfo->{-rsrc}
- if ($tabinfo->{-rsrc});
- }
- }
-
- return $alias2source;
-}
-
-# Takes $ident, \@column_names
-#
-# returns { $column_name => \%column_info, ... }
-# also note: this adds -result_source => $rsrc to the column info
-#
-# If no columns_names are supplied returns info about *all* columns
-# for all sources
-sub _resolve_column_info {
- my ($self, $ident, $colnames) = @_;
-
- return {} if $colnames and ! @$colnames;
-
- my $alias2src = $self->_resolve_ident_sources($ident);
-
- my (%seen_cols, @auto_colnames);
-
- # compile a global list of column names, to be able to properly
- # disambiguate unqualified column names (if at all possible)
- for my $alias (keys %$alias2src) {
- my $rsrc = $alias2src->{$alias};
- for my $colname ($rsrc->columns) {
- push @{$seen_cols{$colname}}, $alias;
- push @auto_colnames, "$alias.$colname" unless $colnames;
- }
- }
-
- $colnames ||= [
- @auto_colnames,
- grep { @{$seen_cols{$_}} == 1 } (keys %seen_cols),
- ];
-
- my (%return, $colinfos);
- foreach my $col (@$colnames) {
- my ($source_alias, $colname) = $col =~ m/^ (?: ([^\.]+) \. )? (.+) $/x;
-
- # if the column was seen exactly once - we know which rsrc it came from
- $source_alias ||= $seen_cols{$colname}[0]
- if ($seen_cols{$colname} and @{$seen_cols{$colname}} == 1);
-
- next unless $source_alias;
-
- my $rsrc = $alias2src->{$source_alias}
- or next;
-
- $return{$col} = {
- %{
- ( $colinfos->{$source_alias} ||= $rsrc->columns_info )->{$colname}
- ||
- $self->throw_exception(
- "No such column '$colname' on source " . $rsrc->source_name
- );
- },
- -result_source => $rsrc,
- -source_alias => $source_alias,
- -fq_colname => $col eq $colname ? "$source_alias.$col" : $col,
- -colname => $colname,
- };
-
- $return{"$source_alias.$colname"} = $return{$col} if $col eq $colname;
- }
+sub _minmax_operator_for_datatype {
+ #my ($self, $datatype, $want_max) = @_;
- return \%return;
-}
-
-# The DBIC relationship chaining implementation is pretty simple - every
-# new related_relationship is pushed onto the {from} stack, and the {select}
-# window simply slides further in. This means that when we count somewhere
-# in the middle, we got to make sure that everything in the join chain is an
-# actual inner join, otherwise the count will come back with unpredictable
-# results (a resultset may be generated with _some_ rows regardless of if
-# the relation which the $rs currently selects has rows or not). E.g.
-# $artist_rs->cds->count - normally generates:
-# SELECT COUNT( * ) FROM artist me LEFT JOIN cd cds ON cds.artist = me.artistid
-# which actually returns the number of artists * (number of cds || 1)
-#
-# So what we do here is crawl {from}, determine if the current alias is at
-# the top of the stack, and if not - make sure the chain is inner-joined down
-# to the root.
-#
-sub _inner_join_to_node {
- my ($self, $from, $alias) = @_;
-
- my $switch_branch = $self->_find_join_path_to_node($from, $alias);
-
- return $from unless @{$switch_branch||[]};
-
- # So it looks like we will have to switch some stuff around.
- # local() is useless here as we will be leaving the scope
- # anyway, and deep cloning is just too fucking expensive
- # So replace the first hashref in the node arrayref manually
- my @new_from = ($from->[0]);
- my $sw_idx = { map { (values %$_), 1 } @$switch_branch }; #there's one k/v per join-path
-
- for my $j (@{$from}[1 .. $#$from]) {
- my $jalias = $j->[0]{-alias};
-
- if ($sw_idx->{$jalias}) {
- my %attrs = %{$j->[0]};
- delete $attrs{-join_type};
- push @new_from, [
- \%attrs,
- @{$j}[ 1 .. $#$j ],
- ];
- }
- else {
- push @new_from, $j;
- }
- }
-
- return \@new_from;
-}
-
-sub _find_join_path_to_node {
- my ($self, $from, $target_alias) = @_;
-
- # subqueries and other oddness are naturally not supported
- return undef if (
- ref $from ne 'ARRAY'
- ||
- ref $from->[0] ne 'HASH'
- ||
- ! defined $from->[0]{-alias}
- );
-
- # no path - the head is the alias
- return [] if $from->[0]{-alias} eq $target_alias;
-
- for my $i (1 .. $#$from) {
- return $from->[$i][0]{-join_path} if ( ($from->[$i][0]{-alias}||'') eq $target_alias );
- }
-
- # something else went quite wrong
- return undef;
+ $_[2] ? 'MAX' : 'MIN';
}
sub _extract_order_criteria {
my @cols = (
( map { $_->[0] } $self->_extract_order_criteria($order_by) ),
- ( $where ? keys %{ $self->_extract_fixed_condition_columns($where) } : () ),
+ ( $where ? keys %{ extract_equality_conditions( $where ) } : () ),
) or return 0;
- my $colinfo = $self->_resolve_column_info($ident, \@cols);
+ my $colinfo = fromspec_columns_info($ident, \@cols);
return keys %$colinfo
? $self->_columns_comprise_identifying_set( $colinfo, \@cols )
sub _extract_colinfo_of_stable_main_source_order_by_portion {
my ($self, $attrs) = @_;
- my $nodes = $self->_find_join_path_to_node($attrs->{from}, $attrs->{alias});
+ my $nodes = find_join_path_to_alias($attrs->{from}, $attrs->{alias});
return unless defined $nodes;
map { values %$_ } @$nodes,
) };
- my $colinfos = $self->_resolve_column_info($attrs->{from});
+ my $colinfos = fromspec_columns_info($attrs->{from});
my ($colinfos_to_return, $seen_main_src_cols);
if $colinfo->{-source_alias} eq $attrs->{alias};
}
- # FIXME the condition may be singling out things on its own, so we
- # conceivable could come back wi "stable-ordered by nothing"
- # not confient enough in the parser yet, so punt for the time being
+ # FIXME: the condition may be singling out things on its own, so we
+ # conceivably could come back with "stable-ordered by nothing"
+ # not confident enough in the parser yet, so punt for the time being
return unless $seen_main_src_cols;
my $main_src_fixed_cols_from_cond = [ $attrs->{where}
? $colinfos->{$_}{-colname}
: ()
}
- keys %{ $self->_extract_fixed_condition_columns($attrs->{where}) }
+ keys %{ extract_equality_conditions( $attrs->{where} ) }
)
: ()
];
]) ? $colinfos_to_return : ();
}
-# Attempts to flatten a passed in SQLA condition as much as possible towards
-# a plain hashref, *without* altering its semantics. Required by
-# create/populate being able to extract definitive conditions from preexisting
-# resultset {where} stacks
-#
-# FIXME - while relatively robust, this is still imperfect, one of the first
-# things to tackle with DQ
-sub _collapse_cond {
- my ($self, $where, $where_is_anded_array) = @_;
-
- if (! $where) {
- return;
- }
- elsif ($where_is_anded_array or ref $where eq 'HASH') {
-
- my @pairs;
-
- my @pieces = $where_is_anded_array ? @$where : $where;
- while (@pieces) {
- my $chunk = shift @pieces;
-
- if (ref $chunk eq 'HASH') {
- push @pairs, map { [ $_ => $chunk->{$_} ] } sort keys %$chunk;
- }
- elsif (ref $chunk eq 'ARRAY') {
- push @pairs, [ -or => $chunk ]
- if @$chunk;
- }
- elsif ( ! ref $chunk) {
- push @pairs, [ $chunk, shift @pieces ];
- }
- else {
- push @pairs, [ '', $chunk ];
- }
- }
-
- return unless @pairs;
-
- my @conds = $self->_collapse_cond_unroll_pairs(\@pairs)
- or return;
-
- # Consolidate various @conds back into something more compact
- my $fin;
-
- for my $c (@conds) {
- if (ref $c ne 'HASH') {
- push @{$fin->{-and}}, $c;
- }
- else {
- for my $col (sort keys %$c) {
- if (exists $fin->{$col}) {
- my ($l, $r) = ($fin->{$col}, $c->{$col});
-
- (ref $_ ne 'ARRAY' or !@$_) and $_ = [ -and => $_ ] for ($l, $r);
-
- if (@$l and @$r and $l->[0] eq $r->[0] and $l->[0] eq '-and') {
- $fin->{$col} = [ -and => map { @$_[1..$#$_] } ($l, $r) ];
- }
- else {
- $fin->{$col} = [ -and => $fin->{$col}, $c->{$col} ];
- }
- }
- else {
- $fin->{$col} = $c->{$col};
- }
- }
- }
- }
+sub _resolve_column_info :DBIC_method_is_indirect_sugar {
+ DBIx::Class::_ENV_::ASSERT_NO_INTERNAL_INDIRECT_CALLS and fail_on_internal_call;
+ carp_unique("_resolve_column_info() is deprecated, ask on IRC for a better alternative");
- if ( ref $fin->{-and} eq 'ARRAY' and @{$fin->{-and}} == 1 ) {
- my $piece = (delete $fin->{-and})->[0];
- if (ref $piece eq 'ARRAY') {
- $fin->{-or} = $fin->{-or} ? [ $piece, $fin->{-or} ] : $piece;
- }
- elsif (! exists $fin->{''}) {
- $fin->{''} = $piece;
- }
- }
-
- return $fin;
- }
- elsif (ref $where eq 'ARRAY') {
- my @w = @$where;
-
- while ( @w and (
- (ref $w[0] eq 'ARRAY' and ! @{$w[0]} )
- or
- (ref $w[0] eq 'HASH' and ! keys %{$w[0]})
- )) { shift @w };
-
- return unless @w;
-
- if ( @w == 1 ) {
- return ( ref $w[0] )
- ? $self->_collapse_cond($w[0])
- : { $w[0] => undef }
- ;
- }
- elsif ( @w == 2 and ! ref $w[0]) {
- if ( ( $w[0]||'' ) =~ /^\-and$/i ) {
- return (ref $w[1] eq 'HASH' or ref $w[1] eq 'ARRAY')
- ? $self->_collapse_cond($w[1], (ref $w[1] eq 'ARRAY') )
- : $self->throw_exception("Unsupported top-level op/arg pair: [ $w[0] => $w[1] ]")
- ;
- }
- else {
- return $self->_collapse_cond({ @w });
- }
- }
- else {
- return { -or => \@w };
- }
- }
- else {
- # not a hash not an array
- return { '' => $where };
- }
-
- die 'should not get here';
+ fromspec_columns_info( @_[1,2] );
}
-sub _collapse_cond_unroll_pairs {
- my ($self, $pairs) = @_;
+sub _find_join_path_to_node :DBIC_method_is_indirect_sugar {
+ DBIx::Class::_ENV_::ASSERT_NO_INTERNAL_INDIRECT_CALLS and fail_on_internal_call;
+ carp_unique("_find_join_path_to_node() is deprecated, ask on IRC for a better alternative");
- my @conds;
+ find_join_path_to_alias( @_[1,2] );
+}
- while (@$pairs) {
- my ($lhs, $rhs) = @{ shift @$pairs };
+sub _collapse_cond :DBIC_method_is_indirect_sugar {
+ DBIx::Class::_ENV_::ASSERT_NO_INTERNAL_INDIRECT_CALLS and fail_on_internal_call;
+ carp_unique("_collapse_cond() is deprecated, ask on IRC for a better alternative");
- if ($lhs eq '') {
- push @conds, $self->_collapse_cond($rhs);
- }
- elsif ( $lhs =~ /^\-and$/i ) {
- push @conds, $self->_collapse_cond($rhs, (ref $rhs eq 'ARRAY'));
- }
- elsif ( $lhs =~ /^\-or$/i ) {
- push @conds, $self->_collapse_cond(
- (ref $rhs eq 'HASH') ? [ map { $_ => $rhs->{$_} } sort keys %$rhs ] : $rhs
- );
- }
- else {
- if (ref $rhs eq 'HASH' and ! keys %$rhs) {
- # FIXME - SQLA seems to be doing... nothing...?
- }
- elsif (ref $rhs eq 'HASH' and keys %$rhs == 1 and exists $rhs->{'='}) {
- for my $p ($self->_collapse_cond_unroll_pairs([ [ $lhs => $rhs->{'='} ] ])) {
-
- # extra sanity check
- if (keys %$p > 1) {
- require Data::Dumper::Concise;
- local $Data::Dumper::Deepcopy = 1;
- $self->throw_exception(
- "Internal error: unexpected collapse unroll:"
- . Data::Dumper::Concise::Dumper { in => { $lhs => $rhs }, out => $p }
- );
- }
+ shift;
+ DBIx::Class::SQLMaker::Util::normalize_sqla_condition(@_);
+}
- my ($l, $r) = %$p;
+sub _extract_fixed_condition_columns :DBIC_method_is_indirect_sugar {
+ DBIx::Class::_ENV_::ASSERT_NO_INTERNAL_INDIRECT_CALLS and fail_on_internal_call;
+ carp_unique("_extract_fixed_condition_columns() is deprecated, ask on IRC for a better alternative");
- push @conds, ( ! length ref $r or is_plain_value($r) )
- ? { $l => $r }
- : { $l => { '=' => $r } }
- ;
- }
- }
- elsif (ref $rhs eq 'ARRAY') {
- # some of these conditionals encounter multi-values - roll them out using
- # an unshift, which will cause extra looping in the while{} above
- if (! @$rhs ) {
- push @conds, { $lhs => [] };
- }
- elsif ( ($rhs->[0]||'') =~ /^\-(?:and|or)$/i ) {
- $self->throw_exception("Value modifier not followed by any values: $lhs => [ $rhs->[0] ] ")
- if @$rhs == 1;
-
- if( $rhs->[0] =~ /^\-and$/i ) {
- unshift @$pairs, map { [ $lhs => $_ ] } @{$rhs}[1..$#$rhs];
- }
- # if not an AND then it's an OR
- elsif(@$rhs == 2) {
- unshift @$pairs, [ $lhs => $rhs->[1] ];
- }
- else {
- push @conds, { $lhs => $rhs };
- }
- }
- elsif (@$rhs == 1) {
- unshift @$pairs, [ $lhs => $rhs->[0] ];
- }
- else {
- push @conds, { $lhs => $rhs };
- }
- }
- else {
- push @conds, { $lhs => $rhs };
- }
- }
- }
-
- return @conds;
+ shift;
+ extract_equality_conditions(@_);
}
-# Analyzes a given condition and attempts to extract all columns
-# with a definitive fixed-condition criteria. Returns a hashref
-# of k/v pairs suitable to be passed to set_columns(), with a
-# MAJOR CAVEAT - multi-value (contradictory) equalities are still
-# represented as a reference to the UNRESOVABLE_CONDITION constant
-# The reason we do this is that some codepaths only care about the
-# codition being stable, as opposed to actually making sense
-#
-# The normal mode is used to figure out if a resultset is constrained
-# to a column which is part of a unique constraint, which in turn
-# allows us to better predict how ordering will behave etc.
-#
-# With the optional "consider_nulls" boolean argument, the function
-# is instead used to infer inambiguous values from conditions
-# (e.g. the inheritance of resultset conditions on new_result)
-#
-my $undef_marker = \ do{ my $x = 'undef' };
-sub _extract_fixed_condition_columns {
- my ($self, $where, $consider_nulls) = @_;
- my $where_hash = $self->_collapse_cond($_[1]);
-
- my $res = {};
- my ($c, $v);
- for $c (keys %$where_hash) {
- my $vals;
-
- if (!defined ($v = $where_hash->{$c}) ) {
- $vals->{$undef_marker} = $v if $consider_nulls
- }
- elsif (
- ! length ref $v
- or
- is_plain_value ($v)
- ) {
- $vals->{$v} = $v;
- }
- elsif (
- ref $v eq 'HASH'
- and
- keys %$v == 1
- and
- ref $v->{'='}
- and
- # do not need to check for plain values - _collapse_cond did it for us
- is_literal_value($v->{'='})
- ) {
- $vals->{$v->{'='}} = $v->{'='};
- }
- elsif (ref $v eq 'ARRAY' and ($v->[0]||'') eq '-and') {
- for ( @{$v}[1..$#$v] ) {
- my $subval = $self->_extract_fixed_condition_columns({ $c => $_ }, 'consider nulls'); # always fish nulls out on recursion
- next unless exists $subval->{$c}; # didn't find anything
- $vals->{defined $subval->{$c} ? $subval->{$c} : $undef_marker} = $subval->{$c};
- }
- }
-
- if (keys %$vals == 1) {
- ($res->{$c}) = (values %$vals)
- unless !$consider_nulls and exists $vals->{$undef_marker};
- }
- elsif (keys %$vals > 1) {
- $res->{$c} = UNRESOLVABLE_CONDITION;
- }
- }
+sub _resolve_ident_sources :DBIC_method_is_indirect_sugar {
+ DBIx::Class::Exception->throw(
+ '_resolve_ident_sources() has been removed with no replacement, '
+ . 'ask for advice on IRC if this affected you'
+ );
+}
- $res;
+sub _inner_join_to_node :DBIC_method_is_indirect_sugar {
+ DBIx::Class::Exception->throw(
+ '_inner_join_to_node() has been removed with no replacement, '
+ . 'ask for advice on IRC if this affected you'
+ );
}
1;