$rs_attrs->{order_by}
and
$rs_attrs->{_rsroot_rsrc}->storage->_order_by_is_stable(
- $rs_attrs->{from}, $rs_attrs->{order_by}
+ @{$rs_attrs}{qw/from order_by where/}
)
) {
push @{$self->{limit_bind}}, [ $self->__total_bindtype => $offset + $rows ], [ $self->__offset_bindtype => $offset + 1 ];
if ($sq_attrs->{order_by_requested}) {
$self->throw_exception (
'Unable to safely perform "skimming type" limit with supplied unstable order criteria'
- ) unless $rs_attrs->{_rsroot_rsrc}->schema->storage->_order_by_is_stable(
+ ) unless ($rs_attrs->{_rsroot_rsrc}->schema->storage->_order_by_is_stable(
$rs_attrs->{from},
- $requested_order
- );
+ $requested_order,
+ $rs_attrs->{where},
+ ));
$inner_order = $requested_order;
}
databases. It works by ordering the set by some unique column, and calculating
the number of rows that have a lesser value (thus emulating a L</RowNum>-like
index). Of course this implies the set can only be ordered by a single unique
-column. Also note that this technique can be and often is B<excruciatingly
-slow>.
+column.
+
+Also note that this technique can be and often is B<excruciatingly slow>. You
+may have much better luck using L<DBIx::Class::ResultSet/software_limit>
+instead.
Currently used by B<Sybase ASE>, due to lack of any other option.
next if $in_sel_index->{$chunk};
$extra_order_sel->{$chunk} ||= $self->_quote (
- 'ORDER__BY__' . scalar keys %{$extra_order_sel||{}}
+ 'ORDER__BY__' . sprintf '%03d', scalar keys %{$extra_order_sel||{}}
);
}