X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=blobdiff_plain;f=lib%2FDBIx%2FClass%2FStorage%2FDBI%2FSybase%2FASE.pm;h=eceef2016cc4c4e12b97d307ff963402e30238b0;hb=5529838f7afff91467ef2664087999ab222da48d;hp=8d1419fb884068b6e6885186c2e419545ed07dff;hpb=9c510ba5d0481e77302d8689b4ae5cb63548b200;p=dbsrgits%2FDBIx-Class.git

diff --git a/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm b/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm
index 8d1419f..eceef20 100644
--- a/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm
+++ b/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm
@@ -16,9 +16,10 @@ use Sub::Name();
 use Data::Dumper::Concise 'Dumper';
 use Try::Tiny;
 use Context::Preserve 'preserve_context';
+use DBIx::Class::_Util 'sigwarn_silencer';
 use namespace::clean;
 
-__PACKAGE__->sql_limit_dialect ('RowCountOrGenericSubQ');
+__PACKAGE__->sql_limit_dialect ('GenericSubQ');
 __PACKAGE__->sql_quote_char ([qw/[ ]/]);
 __PACKAGE__->datetime_parser_type(
   'DBIx::Class::Storage::DBI::Sybase::ASE::DateTime::Format'
@@ -178,11 +179,10 @@ sub disconnect {
 
 # Even though we call $sth->finish for uses off the bulk API, there's still an
 # "active statement" warning on disconnect, which we throw away here.
-# This is due to the bug described in insert_bulk.
+# This is due to the bug described in _insert_bulk.
 # Currently a noop because 'prepare' is used instead of 'prepare_cached'.
-  local $SIG{__WARN__} = sub {
-    warn $_[0] unless $_[0] =~ /active statement/i;
-  } if $self->_is_bulk_storage;
+  local $SIG{__WARN__} = sigwarn_silencer(qr/active statement/i)
+    if $self->_is_bulk_storage;
 
 # so that next transaction gets a dbh
   $self->_began_bulk_work(0) if $self->_is_bulk_storage;
@@ -233,7 +233,7 @@ Also sets the C<log_on_update> value for blob write operations. The default is
 C<1>, but C<0> is better if your database is configured for it. See
-L.
+L.
 
 =cut
 
@@ -254,8 +254,7 @@ sub _is_lob_column {
 }
 
 sub _prep_for_execute {
-  my $self = shift;
-  my ($op, $ident) = @_;
+  my ($self, $op, $ident, $args) = @_;
 
   #
 ### This is commented out because all tests pass. However I am leaving it
@@ -263,6 +262,8 @@ sub _prep_for_execute {
 ### BTW it doesn't currently work exactly - need better sensitivity to
 # currently set value
 #
+#my ($op, $ident) = @_;
+#
 # inherit these from the parent for the duration of _prep_for_execute
 # Don't know how to make a localizing loop with if's, otherwise I would
 #local $self->{_autoinc_supplied_for_op}
@@ -272,7 +273,20 @@ sub _prep_for_execute {
 #  = $self->_parent_storage->_perform_autoinc_retrieval
 #if ($op eq 'insert' or $op eq 'update') and $self->_parent_storage;
 
-  my ($sql, $bind) = $self->next::method (@_);
+  my $limit;  # extract and use shortcut on limit without offset
+  if ($op eq 'select' and ! $args->[4] and $limit = $args->[3]) {
+    $args = [ @$args ];
+    $args->[3] = undef;
+  }
+
+  my ($sql, $bind) = $self->next::method($op, $ident, $args);
+
+  # $limit is already sanitized by now
+  $sql = join( "\n",
+    "SET ROWCOUNT $limit",
+    $sql,
+    "SET ROWCOUNT 0",
+  ) if $limit;
 
   if (my $identity_col = $self->_perform_autoinc_retrieval) {
     $sql .= "\n" .
       $self->_fetch_identity_sql($ident, $identity_col)
@@ -322,8 +336,6 @@ sub _native_data_type {
 
 sub _execute {
   my $self = shift;
-  my ($op) = @_;
-
   my ($rv, $sth, @bind) = $self->next::method(@_);
 
   $self->_identity( ($sth->fetchall_arrayref)->[0][0] )
@@ -489,7 +501,7 @@ sub update {
   }
 }
 
-sub insert_bulk {
+sub _insert_bulk {
   my $self = shift;
   my ($source, $cols, $data) = @_;
 
@@ -595,7 +607,7 @@ sub insert_bulk {
 # This ignores any data conversion errors detected by the client side libs, as
 # they are usually harmless.
   my $orig_cslib_cb = DBD::Sybase::set_cslib_cb(
-    Sub::Name::subname insert_bulk => sub {
+    Sub::Name::subname _insert_bulk_cslib_errhandler => sub {
       my ($layer, $origin, $severity, $errno, $errmsg, $osmsg, $blkmsg) = @_;
 
       return 1 if $errno == 36;
@@ -673,7 +685,7 @@ sub insert_bulk {
       $self->_bulk_storage(undef);
       unshift @_, $self;
 
-      goto \&insert_bulk;
+      goto \&_insert_bulk;
     }
     elsif ($exception) {
 # rollback makes the bulkLogin connection unusable
@@ -705,7 +717,7 @@ sub _remove_blob_cols {
   return %blob_cols ? \%blob_cols : undef;
 }
 
-# same for insert_bulk
+# same for _insert_bulk
 sub _remove_blob_cols_array {
   my ($self, $source, $cols, $data) = @_;
 
@@ -736,7 +748,7 @@ sub _update_blobs {
   my ($self, $source, $blob_cols, $where) = @_;
 
   my @primary_cols = try
-    { $source->_pri_cols }
+    { $source->_pri_cols_or_die }
     catch {
       $self->throw_exception("Cannot update TEXT/IMAGE column(s): $_")
     };
@@ -771,7 +783,7 @@ sub _insert_blobs {
   my %row = %$row;
 
   my @primary_cols = try
-    { $source->_pri_cols }
+    { $source->_pri_cols_or_die }
     catch {
       $self->throw_exception("Cannot update TEXT/IMAGE column(s): $_")
     };
@@ -1021,9 +1033,9 @@ For example, this will not work:
 
   $schema->txn_do(sub {
     my $rs = $schema->resultset('Book');
-    while (my $row = $rs->next) {
+    while (my $result = $rs->next) {
       $schema->resultset('MetaData')->create({
-        book_id => $row->id,
+        book_id => $result->id,
         ...
       });
     }
@@ -1070,15 +1082,15 @@ for L.
 
 =head1 LIMITED QUERIES
 
-Because ASE does not have a good way to limit results in SQL that works for all
-types of queries, the limit dialect is set to
-L<RowCountOrGenericSubQ|DBIx::Class::SQLMaker::LimitDialects/RowCountOrGenericSubQ>.
+Because ASE does not have a good way to limit results in SQL that works for
+all types of queries, the limit dialect is set to
+L<GenericSubQ|DBIx::Class::SQLMaker::LimitDialects/GenericSubQ>.
 
 Fortunately, ASE and L<DBD::Sybase> support cursors properly, so when
-L<RowCountOrGenericSubQ|DBIx::Class::SQLMaker::LimitDialects/RowCountOrGenericSubQ> is too slow you can use
-the L<software_limit|DBIx::Class::ResultSet/software_limit>
-L<DBIx::Class::ResultSet> attribute to simulate limited queries by skipping over
-records.
+L<GenericSubQ|DBIx::Class::SQLMaker::LimitDialects/GenericSubQ> is too slow
+you can use the L<software_limit|DBIx::Class::ResultSet/software_limit>
+L<DBIx::Class::ResultSet> attribute to simulate limited queries by skipping
+over records.
 
 =head1 TEXT/IMAGE COLUMNS
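The two changes worth studying in this patch are the SET ROWCOUNT limit shortcut in _prep_for_execute and the sigwarn_silencer cleanup in disconnect. Below are minimal, self-contained Perl sketches of those two techniques, written for this page; neither is the actual DBIx::Class code, and the helper names (wrap_limit_with_rowcount, make_sigwarn_silencer) are invented for illustration.

The _prep_for_execute hunk strips the limit out of a select's arguments when no offset is present, lets the SQL maker build a plain statement, and then brackets it with SET ROWCOUNT, avoiding the expensive GenericSubQ rewrite entirely:

  use strict;
  use warnings;

  # Cap a SELECT at $limit rows using ASE's SET ROWCOUNT.
  # $limit must already be validated as a positive integer (the patch
  # notes "$limit is already sanitized by now"), because it is
  # interpolated directly into raw SQL.
  sub wrap_limit_with_rowcount {
    my ($sql, $limit) = @_;

    return $sql unless $limit;

    return join "\n",
      "SET ROWCOUNT $limit",  # caps every following statement
      $sql,
      "SET ROWCOUNT 0";       # reset: ROWCOUNT is session-scoped in ASE
  }

  print wrap_limit_with_rowcount("SELECT name FROM artist", 10), "\n";

The reset to 0 is essential: SET ROWCOUNT persists for the session, so without it every later statement on the same connection would silently return at most $limit rows.

The disconnect hunk replaces a hand-rolled $SIG{__WARN__} filter with sigwarn_silencer from DBIx::Class::_Util. Judging only from the inline code being removed, a factory of that shape can be sketched as follows (make_sigwarn_silencer is a stand-in, not the real _Util function):

  use strict;
  use warnings;

  # Build a __WARN__ handler that swallows warnings matching $pattern
  # and lets everything else through unchanged. Perl does not re-enter
  # the __WARN__ hook for warn() calls made inside the handler itself.
  sub make_sigwarn_silencer {
    my ($pattern) = @_;
    return sub { warn $_[0] unless $_[0] =~ $pattern };
  }

  {
    # as in the patched disconnect(): suppress only the known-harmless
    # "active statement" warning, and only within this scope
    local $SIG{__WARN__} = make_sigwarn_silencer(qr/active statement/i);
    warn "there is an active statement pending\n";   # silenced
    warn "something else went wrong\n";              # still emitted
  }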