From: Peter Rabbitson Date: Thu, 14 Feb 2013 04:58:09 +0000 (+0100) Subject: Merge branch 'master' into topic/constructor_rewrite X-Git-Tag: v0.08240~23 X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=commitdiff_plain;h=0077982b2edc8273ab4b6ea59921177667008cb3;p=dbsrgits%2FDBIx-Class.git Merge branch 'master' into topic/constructor_rewrite --- 0077982b2edc8273ab4b6ea59921177667008cb3 diff --cc lib/DBIx/Class/ResultSet.pm index d4c271a,10905b2..5ec88d0 --- a/lib/DBIx/Class/ResultSet.pm +++ b/lib/DBIx/Class/ResultSet.pm @@@ -1036,11 -1046,13 +1046,11 @@@ sub single $self->throw_exception('single() only takes search conditions, no attributes. You want ->search( $cond, $attrs )->single()'); } - my $attrs = $self->_resolved_attrs_copy; + my $attrs = { %{$self->_resolved_attrs} }; - if (keys %{$attrs->{collapse}}) { - $self->throw_exception( - 'single() can not be used on resultsets prefetching has_many. Use find( \%cond ) or next() instead' - ); - } + $self->throw_exception( + 'single() can not be used on resultsets prefetching has_many. Use find( \%cond ) or next() instead' + ) if $attrs->{collapse}; if ($where) { if (defined $attrs->{where}) { @@@ -1228,145 -1237,152 +1238,146 @@@ sub next $self->{all_cache_position} = 1; return ($self->all)[0]; } - if ($self->{stashed_objects}) { - my $obj = shift(@{$self->{stashed_objects}}); - delete $self->{stashed_objects} unless @{$self->{stashed_objects}}; - return $obj; - } - my @row = ( - exists $self->{stashed_row} - ? @{delete $self->{stashed_row}} - : $self->cursor->next - ); - return undef unless (@row); - my ($row, @more) = $self->_construct_object(@row); - $self->{stashed_objects} = \@more if @more; - return $row; -} -sub _construct_object { - my ($self, @row) = @_; + return shift(@{$self->{stashed_objects}}) if @{ $self->{stashed_objects}||[] }; - my $info = $self->_collapse_result($self->{_attrs}{as}, \@row) - or return (); - my @new = $self->result_class->inflate_result($self->result_source, @$info); - @new = $self->{_attrs}{record_filter}->(@new) - if exists $self->{_attrs}{record_filter}; - return @new; -} - -sub _collapse_result { - my ($self, $as_proto, $row) = @_; - - my @copy = @$row; - - # 'foo' => [ undef, 'foo' ] - # 'foo.bar' => [ 'foo', 'bar' ] - # 'foo.bar.baz' => [ 'foo.bar', 'baz' ] - - my @construct_as = map { [ (/^(?:(.*)\.)?([^.]+)$/) ] } @$as_proto; + $self->{stashed_objects} = $self->_construct_objects + or return undef; - my %collapse = %{$self->{_attrs}{collapse}||{}}; - - my @pri_index; - - # if we're doing collapsing (has_many prefetch) we need to grab records - # until the PK changes, so fill @pri_index. if not, we leave it empty so - # we know we don't have to bother. + return shift @{$self->{stashed_objects}}; +} - # the reason for not using the collapse stuff directly is because if you - # had for e.g. two artists in a row with no cds, the collapse info for - # both would be NULL (undef) so you'd lose the second artist +# Constructs as many objects as it can in one pass while respecting +# cursor laziness. 
Several modes of operation: +# +# * Always builds everything present in @{$self->{stashed_rows}} +# * If called with $fetch_all true - pulls everything off the cursor and +# builds all objects in one pass +# * If $self->_resolved_attrs->{collapse} is true, checks the order_by +# and if the resultset is ordered properly by the left side: +# * Fetches stuff off the cursor until the "master object" changes, +# and saves the last extra row (if any) in @{$self->{stashed_rows}} +# OR +# * Just fetches, and collapses/constructs everything as if $fetch_all +# was requested (there is no other way to collapse except for an +# eager cursor) +# * If no collapse is requested - just get the next row, construct and +# return +sub _construct_objects { + my ($self, $fetch_all) = @_; - # store just the index so we can check the array positions from the row - # without having to contruct the full hash + my $rsrc = $self->result_source; + my $attrs = $self->_resolved_attrs; + my $cursor = $self->cursor; - if (keys %collapse) { - my %pri = map { ($_ => 1) } $self->result_source->_pri_cols; - foreach my $i (0 .. $#construct_as) { - next if defined($construct_as[$i][0]); # only self table - if (delete $pri{$construct_as[$i][1]}) { - push(@pri_index, $i); - } - last unless keys %pri; # short circuit (Johnny Five Is Alive!) - } + # this will be used as both initial raw-row collector AND as a RV of + # _construct_objects. Not regrowing the array twice matters a lot... + # a suprising amount actually + my $rows = (delete $self->{stashed_rows}) || []; + if ($fetch_all) { + # FIXME SUBOPTIMAL - we can do better, cursor->next/all (well diff. methods) should return a ref + $rows = [ @$rows, $cursor->all ]; } + elsif (!$attrs->{collapse}) { + # FIXME SUBOPTIMAL - we can do better, cursor->next/all (well diff. methods) should return a ref + push @$rows, do { my @r = $cursor->next; @r ? \@r : () } + unless @$rows; + } + else { + $attrs->{_ordered_for_collapse} ||= (!$attrs->{order_by}) ? undef : do { + my $st = $rsrc->schema->storage; + my @ord_cols = map + { $_->[0] } + ( $st->_extract_order_criteria($attrs->{order_by}) ) + ; - # no need to do an if, it'll be empty if @pri_index is empty anyway - - my %pri_vals = map { ($_ => $copy[$_]) } @pri_index; - - my @const_rows; + my $colinfos = $st->_resolve_column_info($attrs->{from}, \@ord_cols); - do { # no need to check anything at the front, we always want the first row + for (0 .. $#ord_cols) { + if ( + ! $colinfos->{$ord_cols[$_]} + or + $colinfos->{$ord_cols[$_]}{-result_source} != $rsrc + ) { + splice @ord_cols, $_; + last; + } + } - my %const; + # since all we check here are the start of the order_by belonging to the + # top level $rsrc, a present identifying set will mean that the resultset + # is ordered by its leftmost table in a tsable manner + (@ord_cols and $rsrc->_identifying_column_set({ map + { $colinfos->{$_}{-colname} => $colinfos->{$_} } + @ord_cols + })) ? 1 : 0; + }; - foreach my $this_as (@construct_as) { - $const{$this_as->[0]||''}{$this_as->[1]} = shift(@copy); + if ($attrs->{_ordered_for_collapse}) { + push @$rows, do { my @r = $cursor->next; @r ? \@r : () }; } + # instead of looping over ->next, use ->all in stealth mode ++ # *without* calling a ->reset afterwards + # FIXME - encapsulation breach, got to be a better way - elsif (! $cursor->{done}) { ++ elsif (! 
$cursor->{_done}) { + push @$rows, $cursor->all; - $cursor->{done} = 1; ++ $cursor->{_done} = 1; + $fetch_all = 1; + } + } - push(@const_rows, \%const); - - } until ( # no pri_index => no collapse => drop straight out - !@pri_index - or - do { # get another row, stash it, drop out if different PK + return undef unless @$rows; - @copy = $self->cursor->next; - $self->{stashed_row} = \@copy; + my $res_class = $self->result_class; + my $inflator = $res_class->can ('inflate_result') + or $self->throw_exception("Inflator $res_class does not provide an inflate_result() method"); - # last thing in do block, counts as true if anything doesn't match + my $infmap = $attrs->{as}; - # check xor defined first for NULL vs. NOT NULL then if one is - # defined the other must be so check string equality + if (!$attrs->{collapse} and $attrs->{_single_object_inflation}) { + # construct a much simpler array->hash folder for the one-table cases right here - grep { - (defined $pri_vals{$_} ^ defined $copy[$_]) - || (defined $pri_vals{$_} && ($pri_vals{$_} ne $copy[$_])) - } @pri_index; + # FIXME SUBOPTIMAL this is a very very very hot spot + # while rather optimal we can *still* do much better, by + # building a smarter [Row|HRI]::inflate_result(), and + # switch to feeding it data via a much leaner interface + # + # crude unscientific benchmarking indicated the shortcut eval is not worth it for + # this particular resultset size + if (@$rows < 60) { + my @as_idx = 0..$#$infmap; + for my $r (@$rows) { + $r = $inflator->($res_class, $rsrc, { map { $infmap->[$_] => $r->[$_] } @as_idx } ); } - ); - - my $alias = $self->{attrs}{alias}; - my $info = []; + } + else { + eval sprintf ( + '$_ = $inflator->($res_class, $rsrc, { %s }) for @$rows', + join (', ', map { "\$infmap->[$_] => \$_->[$_]" } 0..$#$infmap ) + ); + } + } + else { + ($self->{_row_parser} ||= eval sprintf 'sub { %s }', $rsrc->_mk_row_parser({ + inflate_map => $infmap, + selection => $attrs->{select}, + collapse => $attrs->{collapse}, + }) or die $@)->($rows, $fetch_all ? () : ( + # FIXME SUBOPTIMAL - we can do better, cursor->next/all (well diff. 
methods) should return a ref + sub { my @r = $cursor->next or return; \@r }, # how the collapser gets more rows + ($self->{stashed_rows} = []), # where does it stuff excess + )); # modify $rows in-place, shrinking/extending as necessary - my %collapse_pos; + $_ = $inflator->($res_class, $rsrc, @$_) for @$rows; - my @const_keys; + } - foreach my $const (@const_rows) { - scalar @const_keys or do { - @const_keys = sort { length($a) <=> length($b) } keys %$const; - }; - foreach my $key (@const_keys) { - if (length $key) { - my $target = $info; - my @parts = split(/\./, $key); - my $cur = ''; - my $data = $const->{$key}; - foreach my $p (@parts) { - $target = $target->[1]->{$p} ||= []; - $cur .= ".${p}"; - if ($cur eq ".${key}" && (my @ckey = @{$collapse{$cur}||[]})) { - # collapsing at this point and on final part - my $pos = $collapse_pos{$cur}; - CK: foreach my $ck (@ckey) { - if (!defined $pos->{$ck} || $pos->{$ck} ne $data->{$ck}) { - $collapse_pos{$cur} = $data; - delete @collapse_pos{ # clear all positioning for sub-entries - grep { m/^\Q${cur}.\E/ } keys %collapse_pos - }; - push(@$target, []); - last CK; - } - } - } - if (exists $collapse{$cur}) { - $target = $target->[-1]; - } - } - $target->[0] = $data; - } else { - $info->[0] = $const->{$key}; - } - } + # CDBI compat stuff + if ($attrs->{record_filter}) { + $_ = $attrs->{record_filter}->($_) for @$rows; } - return $info; + return $rows; } =head2 result_source @@@ -1688,9 -1724,6 +1706,8 @@@ another query sub reset { my ($self) = @_; + - delete @{$self}{qw/_attrs stashed_rows stashed_objects/}; - ++ delete @{$self}{qw/stashed_rows stashed_objects/}; $self->{all_cache_position} = 0; $self->cursor->reset; return $self; @@@ -1731,148 -1763,141 +1747,141 @@@ sub _rs_update_delete my $attrs = { %{$self->_resolved_attrs} }; - # "needs" is a strong word here - if the subquery is part of an IN clause - no point of - # even adding the group_by. It will really be used only when composing a poor-man's - # multicolumn-IN equivalent OR set - my $needs_group_by_subq = defined $attrs->{group_by}; - - # simplify the joinmap and maybe decide if a grouping (and thus subquery) is necessary - my $relation_classifications; - if (ref($attrs->{from}) eq 'ARRAY') { - $attrs->{from} = $storage->_prune_unused_joins ($attrs->{from}, $attrs->{select}, $cond, $attrs); - - $relation_classifications = $storage->_resolve_aliastypes_from_select_args ( - [ @{$attrs->{from}}[1 .. $#{$attrs->{from}}] ], - $attrs->{select}, - $cond, - $attrs - ) unless $needs_group_by_subq; # we already know we need a group, no point of resolving them - } - else { - $needs_group_by_subq ||= 1; # if {from} is unparseable assume the worst - } - - $needs_group_by_subq ||= exists $relation_classifications->{multiplying}; + my $join_classifications; + my $existing_group_by = delete $attrs->{group_by}; - # if no subquery - life is easy-ish - unless ( - $needs_group_by_subq + # do we need a subquery for any reason? + my $needs_subq = ( + defined $existing_group_by or - keys %$relation_classifications # if any joins at all - need to wrap a subq + # if {from} is unparseable wrap a subq + ref($attrs->{from}) ne 'ARRAY' or - $self->_has_resolved_attr(qw/rows offset/) # limits call for a subq - ) { - # Most databases do not allow aliasing of tables in UPDATE/DELETE. Thus - # a condition containing 'me' or other table prefixes will not work - # at all. 
What this code tries to do (badly) is to generate a condition - # with the qualifiers removed, by exploiting the quote mechanism of sqla - # - # this is atrocious and should be replaced by normal sqla introspection - # one sunny day - my ($sql, @bind) = do { - my $sqla = $rsrc->storage->sql_maker; - local $sqla->{_dequalify_idents} = 1; - $sqla->_recurse_where($self->{cond}); - } if $self->{cond}; + # limits call for a subq + $self->_has_resolved_attr(qw/rows offset/) + ); - return $rsrc->storage->$op( - $rsrc, - $op eq 'update' ? $values : (), - $self->{cond} ? \[$sql, @bind] : (), - ); + # simplify the joinmap, so we can further decide if a subq is necessary + if (!$needs_subq and @{$attrs->{from}} > 1) { + $attrs->{from} = $storage->_prune_unused_joins ($attrs->{from}, $attrs->{select}, $self->{cond}, $attrs); + + # check if there are any joins left after the prune + if ( @{$attrs->{from}} > 1 ) { + $join_classifications = $storage->_resolve_aliastypes_from_select_args ( + [ @{$attrs->{from}}[1 .. $#{$attrs->{from}}] ], + $attrs->{select}, + $self->{cond}, + $attrs + ); + + # any non-pruneable joins imply subq + $needs_subq = scalar keys %{ $join_classifications->{restricting} || {} }; + } } - # we got this far - means it is time to wrap a subquery - my $idcols = $rsrc->_identifying_column_set || $self->throw_exception( - sprintf( - "Unable to perform complex resultset %s() without an identifying set of columns on source '%s'", - $op, - $rsrc->source_name, - ) + # check if the head is composite (by now all joins are thrown out unless $needs_subq) + $needs_subq ||= ( + (ref $attrs->{from}[0]) ne 'HASH' + or + ref $attrs->{from}[0]{ $attrs->{from}[0]{-alias} } ); - my $existing_group_by = delete $attrs->{group_by}; - - # make a new $rs selecting only the PKs (that's all we really need for the subq) - delete @{$attrs}{qw/collapse select _prefetch_selector_range as/}; - $attrs->{columns} = [ map { "$attrs->{alias}.$_" } @$idcols ]; - $attrs->{group_by} = \ ''; # FIXME - this is an evil hack, it causes the optimiser to kick in and throw away the LEFT joins - my $subrs = (ref $self)->new($rsrc, $attrs); - if (@$idcols == 1) { - return $storage->$op ( - $rsrc, - $op eq 'update' ? $values : (), - { $idcols->[0] => { -in => $subrs->as_query } }, - ); + my ($cond, $guard); + # do we need anything like a subquery? + if (! $needs_subq) { + # Most databases do not allow aliasing of tables in UPDATE/DELETE. Thus + # a condition containing 'me' or other table prefixes will not work + # at all. Tell SQLMaker to dequalify idents via a gross hack. + $cond = do { + my $sqla = $rsrc->storage->sql_maker; + local $sqla->{_dequalify_idents} = 1; + \[ $sqla->_recurse_where($self->{cond}) ]; + }; } - elsif ($storage->_use_multicolumn_in) { - # This is hideously ugly, but SQLA does not understand multicol IN expressions - my $sql_maker = $storage->sql_maker; - my ($sql, @bind) = @${$subrs->as_query}; - $sql = sprintf ('(%s) IN %s', # the as_query already comes with a set of parenthesis - join (', ', map { $sql_maker->_quote ($_) } @$idcols), - $sql, + else { + # we got this far - means it is time to wrap a subquery + my $idcols = $rsrc->_identifying_column_set || $self->throw_exception( + sprintf( + "Unable to perform complex resultset %s() without an identifying set of columns on source '%s'", + $op, + $rsrc->source_name, + ) ); - return $storage->$op ( - $rsrc, - $op eq 'update' ? 
$values : (), - \[$sql, @bind], - ); - } - else { - # if all else fails - get all primary keys and operate over a ORed set - # wrap in a transaction for consistency - # this is where the group_by starts to matter - my $subq_group_by; - if ($needs_group_by_subq) { - $subq_group_by = $attrs->{columns}; - - # make sure if there is a supplied group_by it matches the columns compiled above - # perfectly. Anything else can not be sanely executed on most databases so croak - # right then and there - if ($existing_group_by) { - my @current_group_by = map - { $_ =~ /\./ ? $_ : "$attrs->{alias}.$_" } - @$existing_group_by - ; + # make a new $rs selecting only the PKs (that's all we really need for the subq) - delete $attrs->{$_} for qw/collapse _collapse_order_by select _prefetch_selector_range as/; ++ delete $attrs->{$_} for qw/collapse select _prefetch_selector_range as/; + $attrs->{columns} = [ map { "$attrs->{alias}.$_" } @$idcols ]; + $attrs->{group_by} = \ ''; # FIXME - this is an evil hack, it causes the optimiser to kick in and throw away the LEFT joins + my $subrs = (ref $self)->new($rsrc, $attrs); - if ( - join ("\x00", sort @current_group_by) - ne - join ("\x00", sort @$subq_group_by ) - ) { - $self->throw_exception ( - "You have just attempted a $op operation on a resultset which does group_by" - . ' on columns other than the primary keys, while DBIC internally needs to retrieve' - . ' the primary keys in a subselect. All sane RDBMS engines do not support this' - . ' kind of queries. Please retry the operation with a modified group_by or' - . ' without using one at all.' - ); + if (@$idcols == 1) { + $cond = { $idcols->[0] => { -in => $subrs->as_query } }; + } + elsif ($storage->_use_multicolumn_in) { + # no syntax for calling this properly yet + # !!! EXPERIMENTAL API !!! WILL CHANGE !!! + $cond = $storage->sql_maker->_where_op_multicolumn_in ( + $idcols, # how do I convey a list of idents...? can binds reside on lhs? + $subrs->as_query + ), + } + else { + # if all else fails - get all primary keys and operate over a ORed set + # wrap in a transaction for consistency + # this is where the group_by/multiplication starts to matter + if ( + $existing_group_by + or + keys %{ $join_classifications->{multiplying} || {} } + ) { + # make sure if there is a supplied group_by it matches the columns compiled above + # perfectly. Anything else can not be sanely executed on most databases so croak + # right then and there + if ($existing_group_by) { + my @current_group_by = map + { $_ =~ /\./ ? $_ : "$attrs->{alias}.$_" } + @$existing_group_by + ; + + if ( + join ("\x00", sort @current_group_by) + ne + join ("\x00", sort @{$attrs->{columns}} ) + ) { + $self->throw_exception ( + "You have just attempted a $op operation on a resultset which does group_by" + . ' on columns other than the primary keys, while DBIC internally needs to retrieve' + . ' the primary keys in a subselect. All sane RDBMS engines do not support this' + . ' kind of queries. Please retry the operation with a modified group_by or' + . ' without using one at all.' + ); + } } + + $subrs = $subrs->search({}, { group_by => $attrs->{columns} }); } - } - my $guard = $storage->txn_scope_guard; + $guard = $storage->txn_scope_guard; - my @op_condition; - for my $row ($subrs->search({}, { group_by => $subq_group_by })->cursor->all) { - push @op_condition, { map - { $idcols->[$_] => $row->[$_] } - (0 .. $#$idcols) - }; + $cond = []; + for my $row ($subrs->cursor->all) { + push @$cond, { map + { $idcols->[$_] => $row->[$_] } + (0 .. 
$#$idcols) + }; + } } + } - my $res = $storage->$op ( - $rsrc, - $op eq 'update' ? $values : (), - \@op_condition, - ); + my $res = $storage->$op ( + $rsrc, + $op eq 'update' ? $values : (), + $cond, + ); - $guard->commit; + $guard->commit if $guard; - return $res; - } + return $res; } =head2 update @@@ -3457,39 -3463,9 +3444,40 @@@ sub _resolved_attrs push @{ $attrs->{select} }, (map { $_->[0] } @prefetch); push @{ $attrs->{as} }, (map { $_->[1] } @prefetch); + } + + $attrs->{_single_object_inflation} = ! List::Util::first { $_ =~ /\./ } @{$attrs->{as}}; + + # run through the resulting joinstructure (starting from our current slot) + # and unset collapse if proven unnesessary + if ($attrs->{collapse} && ref $attrs->{from} eq 'ARRAY') { + + if (@{$attrs->{from}} > 1) { + + # find where our table-spec starts and consider only things after us + my @fromlist = @{$attrs->{from}}; + while (@fromlist) { + my $t = shift @fromlist; + $t = $t->[0] if ref $t eq 'ARRAY'; #me vs join from-spec mismatch + last if ($t->{-alias} && $t->{-alias} eq $alias); + } + + for (@fromlist) { + $attrs->{collapse} = ! $_->[0]{-is_single} + and last; + } + } + else { + # no joins - no collapse + $attrs->{collapse} = 0; + } + } - push( @{$attrs->{order_by}}, @$prefetch_ordering ); - $attrs->{_collapse_order_by} = \@$prefetch_ordering; + if (! $attrs->{order_by} and $attrs->{collapse}) { + # default order for collapsing unless the user asked for something + $attrs->{order_by} = [ map { "$alias.$_" } $source->primary_columns ]; + $attrs->{_ordered_for_collapse} = 1; ++ $attrs->{_order_is_artificial} = 1; } # if both page and offset are specified, produce a combined offset diff --cc lib/DBIx/Class/Storage/DBIHacks.pm index 3efd488,47189c9..a8eca16 --- a/lib/DBIx/Class/Storage/DBIHacks.pm +++ b/lib/DBIx/Class/Storage/DBIHacks.pm @@@ -77,9 -76,18 +76,12 @@@ sub _adjust_select_args_for_complex_pre my $outer_attrs = { %$attrs }; delete $outer_attrs->{$_} for qw/where bind rows offset group_by having/; - my $inner_attrs = { %$attrs, _is_internal_subuery => 1 }; + my $inner_attrs = { %$attrs }; - delete $inner_attrs->{$_} for qw/for collapse _prefetch_selector_range _collapse_order_by select as/; - - # bring over all non-collapse-induced order_by into the inner query (if any) - # the outer one will have to keep them all - delete $inner_attrs->{order_by}; - if (my $ord_cnt = @{$outer_attrs->{order_by}} - @{$outer_attrs->{_collapse_order_by}} ) { - $inner_attrs->{order_by} = [ - @{$outer_attrs->{order_by}}[ 0 .. 
$ord_cnt - 1] - ]; - } + delete $inner_attrs->{$_} for qw/for collapse _prefetch_selector_range select as/; + ++ # if the user did not request it, there is no point using it inside ++ delete $inner_attrs->{order_by} if delete $inner_attrs->{_order_is_artificial}; + # generate the inner/outer select lists # for inside we consider only stuff *not* brought in by the prefetch # on the outside we substitute any function for its alias diff --cc t/52leaks.t index a5ad085,f1d11af..9a9a570 --- a/t/52leaks.t +++ b/t/52leaks.t @@@ -362,19 -371,6 +371,16 @@@ for my $slot (keys %$weak_registry) delete $weak_registry->{$slot} unless $cleared->{hash_merge_singleton}{$weak_registry->{$slot}{weakref}{behavior}}++; } + elsif ( + $slot =~ /^Data::Dumper/ + and + $weak_registry->{$slot}{stacktrace} =~ /\QDBIx::Class::ResultSource::RowParser::_mk_row_parser/ + ) { + # there should be only one D::D object (used to construct the rowparser) + # more would indicate trouble + delete $weak_registry->{$slot} + unless $cleared->{mk_row_parser_dd_singleton}++; + } - elsif (DBIx::Class::_ENV_::INVISIBLE_DOLLAR_AT and $slot =~ /^__TxnScopeGuard__FIXUP__/) { - delete $weak_registry->{$slot} - } elsif ($slot =~ /^DateTime::TimeZone/) { # DT is going through a refactor it seems - let it leak zones for now delete $weak_registry->{$slot}; diff --cc t/lib/sqlite.sql index 9d49210,c52ef7b..64ddc33 --- a/t/lib/sqlite.sql +++ b/t/lib/sqlite.sql @@@ -1,466 -1,371 +1,373 @@@ - -- - -- Created by SQL::Translator::Producer::SQLite - -- Created on Fri Mar 2 18:22:33 2012 - -- - - -- - -- Table: artist - -- - CREATE TABLE artist ( - artistid INTEGER PRIMARY KEY NOT NULL, - name varchar(100), - rank integer NOT NULL DEFAULT 13, - charfield char(10) - ); - - CREATE INDEX artist_name_hookidx ON artist (name); - - CREATE UNIQUE INDEX artist_name ON artist (name); - - CREATE UNIQUE INDEX u_nullable ON artist (charfield, rank); - - -- - -- Table: bindtype_test - -- - CREATE TABLE bindtype_test ( - id INTEGER PRIMARY KEY NOT NULL, - bytea blob, - blob blob, - clob clob, - a_memo memo - ); - - -- - -- Table: collection - -- - CREATE TABLE collection ( - collectionid INTEGER PRIMARY KEY NOT NULL, - name varchar(100) NOT NULL - ); - - -- - -- Table: encoded - -- - CREATE TABLE encoded ( - id INTEGER PRIMARY KEY NOT NULL, - encoded varchar(100) - ); - - -- - -- Table: event - -- - CREATE TABLE event ( - id INTEGER PRIMARY KEY NOT NULL, - starts_at date NOT NULL, - created_on timestamp NOT NULL, - varchar_date varchar(20), - varchar_datetime varchar(20), - skip_inflation datetime, - ts_without_tz datetime - ); - - -- - -- Table: fourkeys - -- - CREATE TABLE fourkeys ( - foo integer NOT NULL, - bar integer NOT NULL, - hello integer NOT NULL, - goodbye integer NOT NULL, - sensors character(10) NOT NULL, - read_count int, - PRIMARY KEY (foo, bar, hello, goodbye) - ); - - -- - -- Table: genre - -- - CREATE TABLE genre ( - genreid INTEGER PRIMARY KEY NOT NULL, - name varchar(100) NOT NULL - ); - - CREATE UNIQUE INDEX genre_name ON genre (name); - - -- - -- Table: link - -- - CREATE TABLE link ( - id INTEGER PRIMARY KEY NOT NULL, - url varchar(100), - title varchar(100) - ); - - -- - -- Table: money_test - -- - CREATE TABLE money_test ( - id INTEGER PRIMARY KEY NOT NULL, - amount money - ); - - -- - -- Table: noprimarykey - -- - CREATE TABLE noprimarykey ( - foo integer NOT NULL, - bar integer NOT NULL, - baz integer NOT NULL - ); - - CREATE UNIQUE INDEX foo_bar ON noprimarykey (foo, bar); + CREATE TABLE "artist" ( + "artistid" INTEGER PRIMARY KEY NOT 
NULL, + "name" varchar(100), + "rank" integer NOT NULL DEFAULT 13, + "charfield" char(10) + ); + + CREATE INDEX "artist_name_hookidx" ON "artist" ("name"); + + CREATE UNIQUE INDEX "artist_name" ON "artist" ("name"); + + CREATE UNIQUE INDEX "u_nullable" ON "artist" ("charfield", "rank"); + + CREATE TABLE "bindtype_test" ( + "id" INTEGER PRIMARY KEY NOT NULL, + "bytea" blob, + "blob" blob, + "clob" clob, + "a_memo" memo + ); + + CREATE TABLE "collection" ( + "collectionid" INTEGER PRIMARY KEY NOT NULL, + "name" varchar(100) NOT NULL + ); + + CREATE TABLE "encoded" ( + "id" INTEGER PRIMARY KEY NOT NULL, + "encoded" varchar(100) + ); + + CREATE TABLE "event" ( + "id" INTEGER PRIMARY KEY NOT NULL, + "starts_at" date NOT NULL, + "created_on" timestamp NOT NULL, + "varchar_date" varchar(20), + "varchar_datetime" varchar(20), + "skip_inflation" datetime, + "ts_without_tz" datetime + ); + + CREATE TABLE "fourkeys" ( + "foo" integer NOT NULL, + "bar" integer NOT NULL, + "hello" integer NOT NULL, + "goodbye" integer NOT NULL, + "sensors" character(10) NOT NULL, + "read_count" int, + PRIMARY KEY ("foo", "bar", "hello", "goodbye") + ); + + CREATE TABLE "genre" ( + "genreid" INTEGER PRIMARY KEY NOT NULL, + "name" varchar(100) NOT NULL + ); - -- - -- Table: onekey - -- - CREATE TABLE onekey ( - id INTEGER PRIMARY KEY NOT NULL, - artist integer NOT NULL, - cd integer NOT NULL - ); - - -- - -- Table: owners - -- - CREATE TABLE owners ( - id INTEGER PRIMARY KEY NOT NULL, - name varchar(100) NOT NULL - ); - - CREATE UNIQUE INDEX owners_name ON owners (name); - - -- - -- Table: producer - -- - CREATE TABLE producer ( - producerid INTEGER PRIMARY KEY NOT NULL, - name varchar(100) NOT NULL + CREATE UNIQUE INDEX "genre_name" ON "genre" ("name"); + + CREATE TABLE "link" ( + "id" INTEGER PRIMARY KEY NOT NULL, + "url" varchar(100), + "title" varchar(100) + ); + + CREATE TABLE "money_test" ( + "id" INTEGER PRIMARY KEY NOT NULL, + "amount" money + ); + + CREATE TABLE "noprimarykey" ( + "foo" integer NOT NULL, + "bar" integer NOT NULL, + "baz" integer NOT NULL + ); + + CREATE UNIQUE INDEX "foo_bar" ON "noprimarykey" ("foo", "bar"); + + CREATE TABLE "onekey" ( + "id" INTEGER PRIMARY KEY NOT NULL, + "artist" integer NOT NULL, + "cd" integer NOT NULL ); - CREATE UNIQUE INDEX prod_name ON producer (name); - - -- - -- Table: self_ref - -- - CREATE TABLE self_ref ( - id INTEGER PRIMARY KEY NOT NULL, - name varchar(100) NOT NULL + CREATE TABLE "owners" ( + "id" INTEGER PRIMARY KEY NOT NULL, + "name" varchar(100) NOT NULL ); - - -- - -- Table: sequence_test - -- - CREATE TABLE sequence_test ( - pkid1 integer NOT NULL, - pkid2 integer NOT NULL, - nonpkid integer NOT NULL, - name varchar(100), - PRIMARY KEY (pkid1, pkid2) + + CREATE UNIQUE INDEX "owners_name" ON "owners" ("name"); + + CREATE TABLE "producer" ( + "producerid" INTEGER PRIMARY KEY NOT NULL, + "name" varchar(100) NOT NULL + ); + + CREATE UNIQUE INDEX "prod_name" ON "producer" ("name"); + + CREATE TABLE "self_ref" ( + "id" INTEGER PRIMARY KEY NOT NULL, + "name" varchar(100) NOT NULL + ); + + CREATE TABLE "sequence_test" ( + "pkid1" integer NOT NULL, + "pkid2" integer NOT NULL, + "nonpkid" integer NOT NULL, + "name" varchar(100), + PRIMARY KEY ("pkid1", "pkid2") ); - -- - -- Table: serialized - -- - CREATE TABLE serialized ( - id INTEGER PRIMARY KEY NOT NULL, - serialized text NOT NULL + CREATE TABLE "serialized" ( + "id" INTEGER PRIMARY KEY NOT NULL, + "serialized" text NOT NULL ); - -- - -- Table: timestamp_primary_key_test - -- - CREATE TABLE 
timestamp_primary_key_test ( - id timestamp NOT NULL DEFAULT current_timestamp, - PRIMARY KEY (id) + CREATE TABLE "timestamp_primary_key_test" ( + "id" timestamp NOT NULL DEFAULT current_timestamp, + PRIMARY KEY ("id") ); - -- - -- Table: treelike - -- - CREATE TABLE treelike ( - id INTEGER PRIMARY KEY NOT NULL, - parent integer, - name varchar(100) NOT NULL + CREATE TABLE "treelike" ( + "id" INTEGER PRIMARY KEY NOT NULL, + "parent" integer, + "name" varchar(100) NOT NULL, + FOREIGN KEY ("parent") REFERENCES "treelike"("id") ON DELETE CASCADE ON UPDATE CASCADE ); - CREATE INDEX treelike_idx_parent ON treelike (parent); + CREATE INDEX "treelike_idx_parent" ON "treelike" ("parent"); - -- - -- Table: twokeytreelike - -- - CREATE TABLE twokeytreelike ( - id1 integer NOT NULL, - id2 integer NOT NULL, - parent1 integer NOT NULL, - parent2 integer NOT NULL, - name varchar(100) NOT NULL, - PRIMARY KEY (id1, id2) + CREATE TABLE "twokeytreelike" ( + "id1" integer NOT NULL, + "id2" integer NOT NULL, + "parent1" integer NOT NULL, + "parent2" integer NOT NULL, + "name" varchar(100) NOT NULL, + PRIMARY KEY ("id1", "id2"), + FOREIGN KEY ("parent1", "parent2") REFERENCES "twokeytreelike"("id1", "id2") ); - CREATE INDEX twokeytreelike_idx_parent1_parent2 ON twokeytreelike (parent1, parent2); + CREATE INDEX "twokeytreelike_idx_parent1_parent2" ON "twokeytreelike" ("parent1", "parent2"); - CREATE UNIQUE INDEX tktlnameunique ON twokeytreelike (name); + CREATE UNIQUE INDEX "tktlnameunique" ON "twokeytreelike" ("name"); - -- - -- Table: typed_object - -- - CREATE TABLE typed_object ( - objectid INTEGER PRIMARY KEY NOT NULL, - type varchar(100) NOT NULL, - value varchar(100) NOT NULL + CREATE TABLE "typed_object" ( + "objectid" INTEGER PRIMARY KEY NOT NULL, + "type" varchar(100) NOT NULL, + "value" varchar(100) NOT NULL ); - -- - -- Table: artist_undirected_map - -- - CREATE TABLE artist_undirected_map ( - id1 integer NOT NULL, - id2 integer NOT NULL, - PRIMARY KEY (id1, id2) + CREATE TABLE "artist_undirected_map" ( + "id1" integer NOT NULL, + "id2" integer NOT NULL, + PRIMARY KEY ("id1", "id2"), + FOREIGN KEY ("id1") REFERENCES "artist"("artistid") ON DELETE RESTRICT ON UPDATE CASCADE, + FOREIGN KEY ("id2") REFERENCES "artist"("artistid") ); - CREATE INDEX artist_undirected_map_idx_id1 ON artist_undirected_map (id1); + CREATE INDEX "artist_undirected_map_idx_id1" ON "artist_undirected_map" ("id1"); - CREATE INDEX artist_undirected_map_idx_id2 ON artist_undirected_map (id2); + CREATE INDEX "artist_undirected_map_idx_id2" ON "artist_undirected_map" ("id2"); - -- - -- Table: bookmark - -- - CREATE TABLE bookmark ( - id INTEGER PRIMARY KEY NOT NULL, - link integer + CREATE TABLE "bookmark" ( + "id" INTEGER PRIMARY KEY NOT NULL, + "link" integer, + FOREIGN KEY ("link") REFERENCES "link"("id") ON DELETE SET NULL ON UPDATE CASCADE ); - CREATE INDEX bookmark_idx_link ON bookmark (link); + CREATE INDEX "bookmark_idx_link" ON "bookmark" ("link"); - -- - -- Table: books - -- - CREATE TABLE books ( - id INTEGER PRIMARY KEY NOT NULL, - source varchar(100) NOT NULL, - owner integer NOT NULL, - title varchar(100) NOT NULL, - price integer + CREATE TABLE "books" ( + "id" INTEGER PRIMARY KEY NOT NULL, + "source" varchar(100) NOT NULL, + "owner" integer NOT NULL, + "title" varchar(100) NOT NULL, + "price" integer, + FOREIGN KEY ("owner") REFERENCES "owners"("id") ON DELETE CASCADE ON UPDATE CASCADE ); - CREATE INDEX books_idx_owner ON books (owner); + CREATE INDEX "books_idx_owner" ON "books" ("owner"); - CREATE UNIQUE INDEX 
books_title ON books (title); + CREATE UNIQUE INDEX "books_title" ON "books" ("title"); - -- - -- Table: employee - -- - CREATE TABLE employee ( - employee_id INTEGER PRIMARY KEY NOT NULL, - position integer NOT NULL, - group_id integer, - group_id_2 integer, - group_id_3 integer, - name varchar(100), - encoded integer + CREATE TABLE "employee" ( + "employee_id" INTEGER PRIMARY KEY NOT NULL, + "position" integer NOT NULL, + "group_id" integer, + "group_id_2" integer, + "group_id_3" integer, + "name" varchar(100), + "encoded" integer, + FOREIGN KEY ("encoded") REFERENCES "encoded"("id") ON DELETE CASCADE ON UPDATE CASCADE ); - CREATE INDEX employee_idx_encoded ON employee (encoded); + CREATE INDEX "employee_idx_encoded" ON "employee" ("encoded"); - -- - -- Table: forceforeign - -- - CREATE TABLE forceforeign ( - artist INTEGER PRIMARY KEY NOT NULL, - cd integer NOT NULL + CREATE TABLE "forceforeign" ( + "artist" INTEGER PRIMARY KEY NOT NULL, + "cd" integer NOT NULL, + FOREIGN KEY ("artist") REFERENCES "artist"("artistid") ); - -- - -- Table: self_ref_alias - -- - CREATE TABLE self_ref_alias ( - self_ref integer NOT NULL, - alias integer NOT NULL, - PRIMARY KEY (self_ref, alias) + CREATE TABLE "self_ref_alias" ( + "self_ref" integer NOT NULL, + "alias" integer NOT NULL, + PRIMARY KEY ("self_ref", "alias"), + FOREIGN KEY ("alias") REFERENCES "self_ref"("id"), + FOREIGN KEY ("self_ref") REFERENCES "self_ref"("id") ON DELETE CASCADE ON UPDATE CASCADE ); - CREATE INDEX self_ref_alias_idx_alias ON self_ref_alias (alias); + CREATE INDEX "self_ref_alias_idx_alias" ON "self_ref_alias" ("alias"); - CREATE INDEX self_ref_alias_idx_self_ref ON self_ref_alias (self_ref); + CREATE INDEX "self_ref_alias_idx_self_ref" ON "self_ref_alias" ("self_ref"); - -- - -- Table: track - -- - CREATE TABLE track ( - trackid INTEGER PRIMARY KEY NOT NULL, - cd integer NOT NULL, - position int NOT NULL, - title varchar(100) NOT NULL, - last_updated_on datetime, - last_updated_at datetime + CREATE TABLE "track" ( + "trackid" INTEGER PRIMARY KEY NOT NULL, + "cd" integer NOT NULL, + "position" int NOT NULL, + "title" varchar(100) NOT NULL, + "last_updated_on" datetime, + "last_updated_at" datetime, + FOREIGN KEY ("cd") REFERENCES "cd"("cdid") ON DELETE CASCADE ON UPDATE CASCADE ); - CREATE INDEX track_idx_cd ON track (cd); + CREATE INDEX "track_idx_cd" ON "track" ("cd"); - CREATE UNIQUE INDEX track_cd_position ON track (cd, position); + CREATE UNIQUE INDEX "track_cd_position" ON "track" ("cd", "position"); - CREATE UNIQUE INDEX track_cd_title ON track (cd, title); + CREATE UNIQUE INDEX "track_cd_title" ON "track" ("cd", "title"); - -- - -- Table: cd - -- - CREATE TABLE cd ( - cdid INTEGER PRIMARY KEY NOT NULL, - artist integer NOT NULL, - title varchar(100) NOT NULL, - year varchar(100) NOT NULL, - genreid integer, - single_track integer + CREATE TABLE "cd" ( + "cdid" INTEGER PRIMARY KEY NOT NULL, + "artist" integer NOT NULL, + "title" varchar(100) NOT NULL, + "year" varchar(100) NOT NULL, + "genreid" integer, + "single_track" integer, + FOREIGN KEY ("artist") REFERENCES "artist"("artistid") ON DELETE CASCADE ON UPDATE CASCADE, - FOREIGN KEY ("genreid") REFERENCES "genre"("genreid") ON DELETE SET NULL ON UPDATE CASCADE, - FOREIGN KEY ("single_track") REFERENCES "track"("trackid") ON DELETE CASCADE ++ FOREIGN KEY ("single_track") REFERENCES "track"("trackid") ON DELETE CASCADE, ++ FOREIGN KEY ("genreid") REFERENCES "genre"("genreid") ON DELETE SET NULL ON UPDATE CASCADE ); - CREATE INDEX cd_idx_artist ON cd (artist); + CREATE 
INDEX "cd_idx_artist" ON "cd" ("artist"); - CREATE INDEX cd_idx_genreid ON cd (genreid); -CREATE INDEX "cd_idx_genreid" ON "cd" ("genreid"); - + CREATE INDEX "cd_idx_single_track" ON "cd" ("single_track"); - CREATE INDEX cd_idx_single_track ON cd (single_track); ++CREATE INDEX "cd_idx_genreid" ON "cd" ("genreid"); + - CREATE UNIQUE INDEX cd_artist_title ON cd (artist, title); + CREATE UNIQUE INDEX "cd_artist_title" ON "cd" ("artist", "title"); - -- - -- Table: collection_object - -- - CREATE TABLE collection_object ( - collection integer NOT NULL, - object integer NOT NULL, - PRIMARY KEY (collection, object) + CREATE TABLE "collection_object" ( + "collection" integer NOT NULL, + "object" integer NOT NULL, + PRIMARY KEY ("collection", "object"), + FOREIGN KEY ("collection") REFERENCES "collection"("collectionid") ON DELETE CASCADE ON UPDATE CASCADE, + FOREIGN KEY ("object") REFERENCES "typed_object"("objectid") ON DELETE CASCADE ON UPDATE CASCADE ); - CREATE INDEX collection_object_idx_collection ON collection_object (collection); + CREATE INDEX "collection_object_idx_collection" ON "collection_object" ("collection"); - CREATE INDEX collection_object_idx_object ON collection_object (object); + CREATE INDEX "collection_object_idx_object" ON "collection_object" ("object"); - -- - -- Table: lyrics - -- - CREATE TABLE lyrics ( - lyric_id INTEGER PRIMARY KEY NOT NULL, - track_id integer NOT NULL + CREATE TABLE "lyrics" ( + "lyric_id" INTEGER PRIMARY KEY NOT NULL, + "track_id" integer NOT NULL, + FOREIGN KEY ("track_id") REFERENCES "track"("trackid") ON DELETE CASCADE ); - CREATE INDEX lyrics_idx_track_id ON lyrics (track_id); + CREATE INDEX "lyrics_idx_track_id" ON "lyrics" ("track_id"); - -- - -- Table: cd_artwork - -- - CREATE TABLE cd_artwork ( - cd_id INTEGER PRIMARY KEY NOT NULL + CREATE TABLE "cd_artwork" ( + "cd_id" INTEGER PRIMARY KEY NOT NULL, + FOREIGN KEY ("cd_id") REFERENCES "cd"("cdid") ON DELETE CASCADE ); - -- - -- Table: liner_notes - -- - CREATE TABLE liner_notes ( - liner_id INTEGER PRIMARY KEY NOT NULL, - notes varchar(100) NOT NULL + CREATE TABLE "liner_notes" ( + "liner_id" INTEGER PRIMARY KEY NOT NULL, + "notes" varchar(100) NOT NULL, + FOREIGN KEY ("liner_id") REFERENCES "cd"("cdid") ON DELETE CASCADE ); - -- - -- Table: lyric_versions - -- - CREATE TABLE lyric_versions ( - id INTEGER PRIMARY KEY NOT NULL, - lyric_id integer NOT NULL, - text varchar(100) NOT NULL + CREATE TABLE "lyric_versions" ( + "id" INTEGER PRIMARY KEY NOT NULL, + "lyric_id" integer NOT NULL, + "text" varchar(100) NOT NULL, + FOREIGN KEY ("lyric_id") REFERENCES "lyrics"("lyric_id") ON DELETE CASCADE ON UPDATE CASCADE ); - CREATE INDEX lyric_versions_idx_lyric_id ON lyric_versions (lyric_id); + CREATE INDEX "lyric_versions_idx_lyric_id" ON "lyric_versions" ("lyric_id"); + ++CREATE UNIQUE INDEX "lyric_versions_lyric_id_text" ON "lyric_versions" ("lyric_id", "text"); + - -- - -- Table: tags - -- - CREATE TABLE tags ( - tagid INTEGER PRIMARY KEY NOT NULL, - cd integer NOT NULL, - tag varchar(100) NOT NULL + CREATE TABLE "tags" ( + "tagid" INTEGER PRIMARY KEY NOT NULL, + "cd" integer NOT NULL, + "tag" varchar(100) NOT NULL, + FOREIGN KEY ("cd") REFERENCES "cd"("cdid") ON DELETE CASCADE ON UPDATE CASCADE ); - CREATE INDEX tags_idx_cd ON tags (cd); + CREATE INDEX "tags_idx_cd" ON "tags" ("cd"); - CREATE UNIQUE INDEX tagid_cd ON tags (tagid, cd); + CREATE UNIQUE INDEX "tagid_cd" ON "tags" ("tagid", "cd"); - CREATE UNIQUE INDEX tagid_cd_tag ON tags (tagid, cd, tag); + CREATE UNIQUE INDEX "tagid_cd_tag" ON 
"tags" ("tagid", "cd", "tag"); - CREATE UNIQUE INDEX tags_tagid_tag ON tags (tagid, tag); + CREATE UNIQUE INDEX "tags_tagid_tag" ON "tags" ("tagid", "tag"); - CREATE UNIQUE INDEX tags_tagid_tag_cd ON tags (tagid, tag, cd); + CREATE UNIQUE INDEX "tags_tagid_tag_cd" ON "tags" ("tagid", "tag", "cd"); - -- - -- Table: cd_to_producer - -- - CREATE TABLE cd_to_producer ( - cd integer NOT NULL, - producer integer NOT NULL, - attribute integer, - PRIMARY KEY (cd, producer) + CREATE TABLE "cd_to_producer" ( + "cd" integer NOT NULL, + "producer" integer NOT NULL, + "attribute" integer, + PRIMARY KEY ("cd", "producer"), + FOREIGN KEY ("cd") REFERENCES "cd"("cdid") ON DELETE CASCADE ON UPDATE CASCADE, + FOREIGN KEY ("producer") REFERENCES "producer"("producerid") ); - CREATE INDEX cd_to_producer_idx_cd ON cd_to_producer (cd); + CREATE INDEX "cd_to_producer_idx_cd" ON "cd_to_producer" ("cd"); - CREATE INDEX cd_to_producer_idx_producer ON cd_to_producer (producer); + CREATE INDEX "cd_to_producer_idx_producer" ON "cd_to_producer" ("producer"); - -- - -- Table: images - -- - CREATE TABLE images ( - id INTEGER PRIMARY KEY NOT NULL, - artwork_id integer NOT NULL, - name varchar(100) NOT NULL, - data blob + CREATE TABLE "images" ( + "id" INTEGER PRIMARY KEY NOT NULL, + "artwork_id" integer NOT NULL, + "name" varchar(100) NOT NULL, + "data" blob, + FOREIGN KEY ("artwork_id") REFERENCES "cd_artwork"("cd_id") ON DELETE CASCADE ON UPDATE CASCADE ); - CREATE INDEX images_idx_artwork_id ON images (artwork_id); + CREATE INDEX "images_idx_artwork_id" ON "images" ("artwork_id"); - -- - -- Table: twokeys - -- - CREATE TABLE twokeys ( - artist integer NOT NULL, - cd integer NOT NULL, - PRIMARY KEY (artist, cd) + CREATE TABLE "twokeys" ( + "artist" integer NOT NULL, + "cd" integer NOT NULL, + PRIMARY KEY ("artist", "cd"), + FOREIGN KEY ("artist") REFERENCES "artist"("artistid") ON DELETE CASCADE ON UPDATE CASCADE, + FOREIGN KEY ("cd") REFERENCES "cd"("cdid") ); - CREATE INDEX twokeys_idx_artist ON twokeys (artist); + CREATE INDEX "twokeys_idx_artist" ON "twokeys" ("artist"); - -- - -- Table: artwork_to_artist - -- - CREATE TABLE artwork_to_artist ( - artwork_cd_id integer NOT NULL, - artist_id integer NOT NULL, - PRIMARY KEY (artwork_cd_id, artist_id) + CREATE TABLE "artwork_to_artist" ( + "artwork_cd_id" integer NOT NULL, + "artist_id" integer NOT NULL, + PRIMARY KEY ("artwork_cd_id", "artist_id"), + FOREIGN KEY ("artist_id") REFERENCES "artist"("artistid") ON DELETE CASCADE ON UPDATE CASCADE, + FOREIGN KEY ("artwork_cd_id") REFERENCES "cd_artwork"("cd_id") ON DELETE CASCADE ON UPDATE CASCADE ); - CREATE INDEX artwork_to_artist_idx_artist_id ON artwork_to_artist (artist_id); + CREATE INDEX "artwork_to_artist_idx_artist_id" ON "artwork_to_artist" ("artist_id"); - CREATE INDEX artwork_to_artist_idx_artwork_cd_id ON artwork_to_artist (artwork_cd_id); + CREATE INDEX "artwork_to_artist_idx_artwork_cd_id" ON "artwork_to_artist" ("artwork_cd_id"); - -- - -- Table: fourkeys_to_twokeys - -- - CREATE TABLE fourkeys_to_twokeys ( - f_foo integer NOT NULL, - f_bar integer NOT NULL, - f_hello integer NOT NULL, - f_goodbye integer NOT NULL, - t_artist integer NOT NULL, - t_cd integer NOT NULL, - autopilot character NOT NULL, - pilot_sequence integer, - PRIMARY KEY (f_foo, f_bar, f_hello, f_goodbye, t_artist, t_cd) + CREATE TABLE "fourkeys_to_twokeys" ( + "f_foo" integer NOT NULL, + "f_bar" integer NOT NULL, + "f_hello" integer NOT NULL, + "f_goodbye" integer NOT NULL, + "t_artist" integer NOT NULL, + "t_cd" integer NOT NULL, + 
"autopilot" character NOT NULL, + "pilot_sequence" integer, + PRIMARY KEY ("f_foo", "f_bar", "f_hello", "f_goodbye", "t_artist", "t_cd"), + FOREIGN KEY ("f_foo", "f_bar", "f_hello", "f_goodbye") REFERENCES "fourkeys"("foo", "bar", "hello", "goodbye") ON DELETE CASCADE ON UPDATE CASCADE, + FOREIGN KEY ("t_artist", "t_cd") REFERENCES "twokeys"("artist", "cd") ON DELETE CASCADE ON UPDATE CASCADE ); - CREATE INDEX fourkeys_to_twokeys_idx_f_foo_f_bar_f_hello_f_goodbye ON fourkeys_to_twokeys (f_foo, f_bar, f_hello, f_goodbye); + CREATE INDEX "fourkeys_to_twokeys_idx_f_foo_f_bar_f_hello_f_goodbye" ON "fourkeys_to_twokeys" ("f_foo", "f_bar", "f_hello", "f_goodbye"); - CREATE INDEX fourkeys_to_twokeys_idx_t_artist_t_cd ON fourkeys_to_twokeys (t_artist, t_cd); + CREATE INDEX "fourkeys_to_twokeys_idx_t_artist_t_cd" ON "fourkeys_to_twokeys" ("t_artist", "t_cd"); - -- - -- View: year2000cds - -- - CREATE VIEW year2000cds AS + CREATE VIEW "year2000cds" AS SELECT cdid, artist, title, year, genreid, single_track FROM cd WHERE year = "2000"; diff --cc t/prefetch/grouped.t index c50b7ef,ffe94b8..760e381 --- a/t/prefetch/grouped.t +++ b/t/prefetch/grouped.t @@@ -294,10 -294,8 +294,9 @@@ for ($cd_rs->all) FROM cd me JOIN artist artist ON artist.artistid = me.artist GROUP BY me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track - ORDER BY me.cdid ) me JOIN artist artist ON artist.artistid = me.artist + ORDER BY me.cdid )', [], ); diff --cc t/sqlmaker/limit_dialects/torture.t index 7bb116b,44df440..3b72154 --- a/t/sqlmaker/limit_dialects/torture.t +++ b/t/sqlmaker/limit_dialects/torture.t @@@ -53,6 -55,23 +55,23 @@@ my $tests = [ { sqlt_datatype => 'integer' } => 3 ], ], ], + limit_offset_prefetch => [ + '( + SELECT me.name, books.id, books.source, books.owner, books.title, books.price + FROM ( + SELECT me.name, me.id + FROM owners me + LIMIT ? OFFSET ? + ) me + LEFT JOIN books books + ON books.owner = me.id - ORDER BY books.owner ++ ORDER BY me.id + )', + [ + [ { sqlt_datatype => 'integer' } => 3 ], + [ { sqlt_datatype => 'integer' } => 1 ], + ] + ], }, LimitXY => { @@@ -78,6 -97,23 +97,23 @@@ [ { sqlt_datatype => 'integer' } => 4 ], ], ], + limit_offset_prefetch => [ + '( + SELECT me.name, books.id, books.source, books.owner, books.title, books.price + FROM ( + SELECT me.name, me.id + FROM owners me + LIMIT ?,? + ) me + LEFT JOIN books books + ON books.owner = me.id - ORDER BY books.owner ++ ORDER BY me.id + )', + [ + [ { sqlt_datatype => 'integer' } => 1 ], + [ { sqlt_datatype => 'integer' } => 3 ], + ] + ], }, SkipFirst => { @@@ -102,6 -138,22 +138,22 @@@ @order_bind, ], ], + limit_offset_prefetch => [ + '( + SELECT me.name, books.id, books.source, books.owner, books.title, books.price + FROM ( + SELECT SKIP ? FIRST ? me.name, me.id + FROM owners me + ) me + LEFT JOIN books books + ON books.owner = me.id - ORDER BY books.owner ++ ORDER BY me.id + )', + [ + [ { sqlt_datatype => 'integer' } => 1 ], + [ { sqlt_datatype => 'integer' } => 3 ], + ] + ], }, FirstSkip => { @@@ -126,6 -178,22 +178,22 @@@ @order_bind, ], ], + limit_offset_prefetch => [ + '( + SELECT me.name, books.id, books.source, books.owner, books.title, books.price + FROM ( + SELECT FIRST ? SKIP ? 
me.name, me.id + FROM owners me + ) me + LEFT JOIN books books + ON books.owner = me.id - ORDER BY books.owner ++ ORDER BY me.id + )', + [ + [ { sqlt_datatype => 'integer' } => 3 ], + [ { sqlt_datatype => 'integer' } => 1 ], + ] + ], }, RowNumberOver => do { @@@ -207,6 -275,28 +275,28 @@@ [ { sqlt_datatype => 'integer' } => 7 ], ], ], + limit_offset_prefetch => [ + '( + SELECT me.name, books.id, books.source, books.owner, books.title, books.price + FROM ( + SELECT me.name, me.id + FROM ( + SELECT me.name, me.id, ROW_NUMBER() OVER() AS rno__row__index + FROM ( + SELECT me.name, me.id FROM owners me + ) me + ) me + WHERE rno__row__index >= ? AND rno__row__index <= ? + ) me + LEFT JOIN books books + ON books.owner = me.id - ORDER BY books.owner ++ ORDER BY me.id + )', + [ + [ { sqlt_datatype => 'integer' } => 2 ], + [ { sqlt_datatype => 'integer' } => 4 ], + ] + ], }; }, @@@ -303,6 -393,28 +393,28 @@@ [ { sqlt_datatype => 'integer' } => 4 ], ], ], + limit_offset_prefetch => [ + '( + SELECT me.name, books.id, books.source, books.owner, books.title, books.price + FROM ( + SELECT me.name, me.id + FROM ( + SELECT me.name, me.id, ROWNUM rownum__index + FROM ( + SELECT me.name, me.id + FROM owners me + ) me + ) me WHERE rownum__index BETWEEN ? AND ? + ) me + LEFT JOIN books books + ON books.owner = me.id - ORDER BY books.owner ++ ORDER BY me.id + )', + [ + [ { sqlt_datatype => 'integer' } => 2 ], + [ { sqlt_datatype => 'integer' } => 4 ], + ] + ], }; }, @@@ -399,6 -511,26 +511,26 @@@ (map { [ @$_ ] } @order_bind), # without this is_deeply throws a fit ], ], + limit_offset_prefetch => [ + '( + SELECT me.name, books.id, books.source, books.owner, books.title, books.price + FROM ( + SELECT me.name, me.id + FROM ( + SELECT me.name, me.id + FROM owners me + ORDER BY me.id + FETCH FIRST 4 ROWS ONLY + ) me + ORDER BY me.id DESC + FETCH FIRST 3 ROWS ONLY + ) me + LEFT JOIN books books + ON books.owner = me.id - ORDER BY books.owner ++ ORDER BY me.id + )', + [], + ], }, Top => { @@@ -488,6 -620,24 +620,24 @@@ (map { [ @$_ ] } @order_bind), # without this is_deeply throws a fit ], ], + limit_offset_prefetch => [ + '( + SELECT me.name, books.id, books.source, books.owner, books.title, books.price + FROM ( + SELECT TOP 3 me.name, me.id + FROM ( + SELECT TOP 4 me.name, me.id + FROM owners me + ORDER BY me.id + ) me + ORDER BY me.id DESC + ) me + LEFT JOIN books books + ON books.owner = me.id - ORDER BY books.owner ++ ORDER BY me.id + )', + [], + ], }, RowCountOrGenericSubQ => { @@@ -597,6 -747,30 +747,30 @@@ [ { sqlt_datatype => 'integer' } => 6 ], ], ], + limit_offset_prefetch => [ + '( + SELECT me.name, books.id, books.source, books.owner, books.title, books.price + FROM ( + SELECT me.name, me.id + FROM ( + SELECT me.name, me.id FROM owners me + ) me + WHERE ( + SELECT COUNT(*) + FROM owners rownum__emulation + WHERE rownum__emulation.id < me.id + ) BETWEEN ? AND ? + ORDER BY me.id + ) me + LEFT JOIN books books + ON books.owner = me.id - ORDER BY me.id, books.owner ++ ORDER BY me.id + )', + [ + [ { sqlt_datatype => 'integer' } => 1 ], + [ { sqlt_datatype => 'integer' } => 3 ], + ], + ], } };
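
The comment block above _construct_objects in the lib/DBIx/Class/ResultSet.pm hunk describes fetching rows off the cursor until the "master object" changes, saving the one extra row that belongs to the next object in @{$self->{stashed_rows}}. Below is a minimal standalone Perl sketch of that pattern; it is not DBIx::Class code, and the subroutine name, arguments and toy data are all invented for illustration.

#!/usr/bin/env perl
use strict;
use warnings;

# Gather the raw rows belonging to one "master object" from an ordered
# cursor, stashing the first row of the *next* object so it is not lost
# between calls (hypothetical helper, not part of DBIx::Class).
#
#   $next_row - coderef returning the next row as an arrayref, undef at EOF
#   $id_idx   - array indexes of the identifying (primary key) columns
#   $stash    - arrayref carrying the lookahead row across calls
sub collect_one_group {
    my ($next_row, $id_idx, $stash) = @_;

    # resume from the stashed lookahead row if there is one
    my $first = @$stash ? shift @$stash : $next_row->()
        or return;                       # cursor exhausted

    my @group = ($first);
    my @key   = @{$first}[@$id_idx];     # identifying values of this object

    while ( defined( my $row = $next_row->() ) ) {
        my @this_key = @{$row}[@$id_idx];

        if ( join("\0", map { defined $_ ? $_ : '' } @key)
              eq join("\0", map { defined $_ ? $_ : '' } @this_key) ) {
            push @group, $row;           # same master object - keep collecting
        }
        else {
            push @$stash, $row;          # first row of the next object - stash it
            last;
        }
    }

    return \@group;
}

# toy "cursor": artist id in column 0, cd title in column 1
my @raw = ( [1, 'a'], [1, 'b'], [2, 'c'], [3, 'd'], [3, 'e'] );
my $cursor = sub { shift @raw };

my @stash;
while ( my $g = collect_one_group($cursor, [0], \@stash) ) {
    printf "artist %s collapsed from %d raw row(s)\n", $g->[0][0], scalar @$g;
}

Without the stash the lookahead row would simply be dropped; when the resultset is not ordered suitably for this, the patch instead slurps the whole cursor, exactly as the comment block states (there is no other way to collapse except with an eager cursor).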
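
The reworked _rs_update_delete in the same file ends with a last-resort branch: when the operation needs a subquery, the identifying key is composite, and the storage has no usable multicolumn IN, it fetches the key columns of every affected row and ORs the per-row conditions together. A minimal standalone sketch of that data structure follows; the column names and rows are made up and only core Data::Dumper is used, so this is illustrative rather than DBIx::Class API.

#!/usr/bin/env perl
use strict;
use warnings;
use Data::Dumper;

my @id_cols = qw(artist cd);                        # stand-in for the identifying column set
my @pk_rows = ( [ 1, 10 ], [ 1, 11 ], [ 2, 12 ] );  # stand-in for the PK-only subselect rows

my @or_set;
for my $row (@pk_rows) {
    # one hashref of column => value pairs per affected row
    push @or_set, { map { ( $id_cols[$_] => $row->[$_] ) } 0 .. $#id_cols };
}

# Handed to the SQL generator, an arrayref of hashrefs ORs the conditions:
#   WHERE (artist = 1 AND cd = 10) OR (artist = 1 AND cd = 11) OR (artist = 2 AND cd = 12)
print Dumper(\@or_set);

In the patch this loop runs over $subrs->cursor->all inside a txn_scope_guard, so the fetched key set and the subsequent update/delete stay consistent with each other.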