From: Rafael Kitover
Date: Tue, 4 Aug 2009 21:32:56 +0000 (+0000)
Subject: Merge 'trunk' into 'sybase'
X-Git-Tag: v0.08112~14^2~75
X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=commitdiff_plain;h=7eee5c151525ef17674dac9b914acbd98f5f2f97;hp=4c46fa18f51e7f5d3b1e711a8ece1308607295a4;p=dbsrgits%2FDBIx-Class.git

Merge 'trunk' into 'sybase'

 r9037@hlagh (orig r7166):  castaway | 2009-08-02 06:41:25 -0400
 Mention ResultSet, ResultSource and Row in synopsis
 r9038@hlagh (orig r7167):  castaway | 2009-08-02 08:10:53 -0400
 Docs: Explanations of result sources and how to find them
 r9042@hlagh (orig r7172):  ribasushi | 2009-08-03 05:01:44 -0400
 Disable Pod::Inherit makefile calls, until we get to version 0.02
 r9046@hlagh (orig r7176):  ribasushi | 2009-08-03 05:51:42 -0400
  r6983@Thesaurus (orig r6982):  ribasushi | 2009-07-04 11:46:57 +0200
  New branch to experiment with a sanifying mysql on_connect_call
  r6984@Thesaurus (orig r6983):  ribasushi | 2009-07-04 11:49:44 +0200
  Initial set_ansi_mode code - make sure to utilize _do_query instead of dbh->do, so the result is visible in the trace
  r6987@Thesaurus (orig r6986):  ribasushi | 2009-07-04 12:40:47 +0200
  Fix POD
  r7178@Thesaurus (orig r7175):  ribasushi | 2009-08-03 11:51:15 +0200
  Wrap up set_strict_mode for mysql
 r9048@hlagh (orig r7178):  ribasushi | 2009-08-03 06:41:32 -0400
 Sanify unqualified column bindtype handling
 Silence a warning when using a custom {from}
 r9068@hlagh (orig r7198):  caelum | 2009-08-04 16:18:27 -0400
 update Changes
---

diff --git a/Makefile.PL b/Makefile.PL
index d21951b..da7c940 100644
--- a/Makefile.PL
+++ b/Makefile.PL
@@ -111,6 +111,12 @@ my %force_requires_if_author = (
       'DateTime::Format::Oracle' => 0,
     ) : ()
   ,
+
+  $ENV{DBICTEST_SYBASE_DSN}
+    ? (
+      'DateTime::Format::Sybase' => 0,
+    ) : ()
+  ,
 );
 
 if ($Module::Install::AUTHOR) {
diff --git a/lib/DBIx/Class/ResultSet.pm b/lib/DBIx/Class/ResultSet.pm
index 9fba9fc..21bc62d 100644
--- a/lib/DBIx/Class/ResultSet.pm
+++ b/lib/DBIx/Class/ResultSet.pm
@@ -2780,7 +2780,14 @@ sub _resolved_attrs {
               : "${alias}.$_"
           )
         }
-    } ( ref($attrs->{columns}) eq 'ARRAY' ) ? @{ delete $attrs->{columns}} : (delete $attrs->{columns} || $source->columns );
+    } ( ref($attrs->{columns}) eq 'ARRAY' ) ?
+ @{ delete $attrs->{columns}} : + (delete $attrs->{columns} || + $source->storage->order_columns_for_select( + $source, + [ $source->columns ] + ) + ); } # add the additional columns on foreach ( 'include_columns', '+columns' ) { diff --git a/lib/DBIx/Class/Storage/DBI.pm b/lib/DBIx/Class/Storage/DBI.pm index 5a97bb3..22ac30c 100644 --- a/lib/DBIx/Class/Storage/DBI.pm +++ b/lib/DBIx/Class/Storage/DBI.pm @@ -629,7 +629,8 @@ sub disconnect { $self->_do_connection_actions(disconnect_call_ => $_) for @actions; - $self->_dbh->rollback unless $self->_dbh_autocommit; + $self->_dbh_rollback unless $self->_dbh_autocommit; + $self->_dbh->disconnect; $self->_dbh(undef); $self->{_dbh_gen}++; @@ -986,20 +987,25 @@ sub txn_begin { # this isn't ->_dbh-> because # we should reconnect on begin_work # for AutoCommit users - $self->dbh->begin_work; + $self->_dbh_begin_work; } elsif ($self->auto_savepoint) { $self->svp_begin; } $self->{transaction_depth}++; } +sub _dbh_begin_work { + my $self = shift; + $self->dbh->begin_work; +} + sub txn_commit { my $self = shift; if ($self->{transaction_depth} == 1) { my $dbh = $self->_dbh; $self->debugobj->txn_commit() if ($self->debug); - $dbh->commit; + $self->_dbh_commit; $self->{transaction_depth} = 0 if $self->_dbh_autocommit; } @@ -1010,6 +1016,11 @@ sub txn_commit { } } +sub _dbh_commit { + my $self = shift; + $self->_dbh->commit; +} + sub txn_rollback { my $self = shift; my $dbh = $self->_dbh; @@ -1019,7 +1030,7 @@ sub txn_rollback { if ($self->debug); $self->{transaction_depth} = 0 if $self->_dbh_autocommit; - $dbh->rollback; + $self->_dbh_rollback; } elsif($self->{transaction_depth} > 1) { $self->{transaction_depth}--; @@ -1042,6 +1053,11 @@ sub txn_rollback { } } +sub _dbh_rollback { + my $self = shift; + $self->_dbh->rollback; +} + # This used to be the top-half of _execute. It was split out to make it # easier to override in NoBindVars without duping the rest. It takes up # all of _execute's args, and emits $sql, @bind. @@ -2290,6 +2306,23 @@ sub lag_behind_master { return; } +=head2 order_columns_for_select + +Returns an ordered list of column names for use with a C. + +But your queries will be cached. + +A recommended L setting: + + on_connect_call => [['datetime_setup'], ['blob_setup', log_on_update => 0]] + +=head1 METHODS + +=cut sub _rebless { - my $self = shift; + my $self = shift; + + if (ref($self) eq 'DBIx::Class::Storage::DBI::Sybase') { + my $dbtype = eval { + @{$self->dbh->selectrow_arrayref(qq{sp_server_info \@attribute_id=1})}[2] + } || ''; + + my $exception = $@; + $dbtype =~ s/\W/_/gi; + my $subclass = "DBIx::Class::Storage::DBI::Sybase::${dbtype}"; + + if (!$exception && $dbtype && $self->load_optional_class($subclass)) { + bless $self, $subclass; + $self->_rebless; + } else { # real Sybase + my $no_bind_vars = 'DBIx::Class::Storage::DBI::Sybase::NoBindVars'; + +# This is reset to 0 in ::NoBindVars, only necessary because we use max(col) to +# get the identity. + $self->_insert_txn(1); + + if ($self->using_freetds) { + carp <<'EOF' unless $ENV{DBIC_SYBASE_FREETDS_NOWARN}; + +You are using FreeTDS with Sybase. - my $dbtype = eval { @{$self->dbh->selectrow_arrayref(qq{sp_server_info \@attribute_id=1})}[2] }; - unless ( $@ ) { - $dbtype =~ s/\W/_/gi; - my $subclass = "DBIx::Class::Storage::DBI::Sybase::${dbtype}"; - if ($self->load_optional_class($subclass) && !$self->isa($subclass)) { - bless $self, $subclass; +We will do our best to support this configuration, but please consider this +support experimental. 
+ +TEXT/IMAGE columns will definitely not work. + +You are encouraged to recompile DBD::Sybase with the Sybase Open Client libraries +instead. + +See perldoc DBIx::Class::Storage::DBI::Sybase for more details. + +To turn off this warning set the DBIC_SYBASE_FREETDS_NOWARN environment +variable. +EOF + if (not $self->placeholders_with_type_conversion_supported) { + if ($self->placeholders_supported) { + $self->auto_cast(1); + } else { + $self->ensure_class_loaded($no_bind_vars); + bless $self, $no_bind_vars; $self->_rebless; + } } + + $self->set_textsize; # based on LongReadLen in connect_info + + } elsif (not $self->dbh->{syb_dynamic_supported}) { +# not necessarily FreeTDS, but no placeholders nevertheless + $self->ensure_class_loaded($no_bind_vars); + bless $self, $no_bind_vars; + $self->_rebless; + } + + $self->_set_max_connect(256); } + } } -sub _dbh_last_insert_id { - my ($self, $dbh, $source, $col) = @_; - return ($dbh->selectrow_array('select @@identity'))[0]; +# Make sure we have CHAINED mode turned on if AutoCommit is off in non-FreeTDS +# DBD::Sybase (since we don't know how DBD::Sybase was compiled.) If however +# we're using FreeTDS, CHAINED mode turns on an implicit transaction which we +# only want when AutoCommit is off. +sub _populate_dbh { + my $self = shift; + + $self->next::method(@_); + + if (not $self->using_freetds) { + $self->_dbh->{syb_chained_txn} = 1; + } else { + if ($self->_dbh_autocommit) { + $self->_dbh->do('SET CHAINED OFF'); + } else { + $self->_dbh->do('SET CHAINED ON'); + } + } +} + +=head2 connect_call_blob_setup + +Used as: + + on_connect_call => [ [ 'blob_setup', log_on_update => 0 ] ] + +Does C<< $dbh->{syb_binary_images} = 1; >> to return C data as raw binary +instead of as a hex string. + +Recommended. + +Also sets the C value for blob write operations. The default is +C<1>, but C<0> is better if your database is configured for it. + +See +L. + +=cut + +sub connect_call_blob_setup { + my $self = shift; + my %args = @_; + my $dbh = $self->_dbh; + $dbh->{syb_binary_images} = 1; + + $self->_blob_log_on_update($args{log_on_update}) + if exists $args{log_on_update}; +} + +=head2 connect_call_set_auto_cast + +In some configurations (usually with L) statements with values bound +to columns or conditions that are not strings will throw implicit type +conversion errors. For L this is automatically detected, and this +option is set. + +It converts placeholders to: + + CAST(? as $type) + +the type is taken from the L setting from +your Result class, and mapped to a Sybase type using a mapping based on +L if necessary. + +This setting can also be set outside of +L at any time using: + + $schema->storage->auto_cast(1); + +=cut + +sub connect_call_set_auto_cast { + my $self = shift; + $self->auto_cast(1); +} + +sub _is_lob_type { + my $self = shift; + my $type = shift; + $type && $type =~ /(?:text|image|lob|bytea|binary|memo)/i; +} + +# The select-piggybacking-on-insert trick stolen from odbc/mssql +sub _prep_for_execute { + my $self = shift; + my ($op, $extra_bind, $ident, $args) = @_; + + my ($sql, $bind) = $self->next::method (@_); + +# Some combinations of FreeTDS and Sybase throw implicit conversion errors for +# all placeeholders, so we convert them into CASTs here. +# Based on code in ::DBI::NoBindVars . +# +# If we're using ::NoBindVars, there are no binds by this point so this code +# gets skippeed. 
+ if ($self->auto_cast && @$bind) { + my $new_sql; + my @sql_part = split /\?/, $sql; + my $col_info = $self->_resolve_column_info($ident,[ map $_->[0], @$bind ]); + + foreach my $bound (@$bind) { + my $col = $bound->[0]; + my $syb_type = $self->_syb_base_type($col_info->{$col}{data_type}); + + foreach my $data (@{$bound}[1..$#$bound]) { + $new_sql .= shift(@sql_part) . + ($syb_type ? "CAST(? AS $syb_type)" : '?'); + } + } + $new_sql .= join '', @sql_part; + $sql = $new_sql; + } + + if ($op eq 'insert') { + my $table = $ident->from; + + my $bind_info = $self->_resolve_column_info( + $ident, [map $_->[0], @{$bind}] + ); + my $identity_col = +List::Util::first { $bind_info->{$_}{is_auto_increment} } (keys %$bind_info); + + if ($identity_col) { + $sql = +"SET IDENTITY_INSERT $table ON\n" . +"$sql\n" . +"SET IDENTITY_INSERT $table OFF" + } else { + $identity_col = List::Util::first { + $ident->column_info($_)->{is_auto_increment} + } $ident->columns; + } + + if ($identity_col) { + $sql = + "$sql\n" . + $self->_fetch_identity_sql($ident, $identity_col); + } + } + + return ($sql, $bind); +} + +# Stolen from SQLT, with some modifications. This will likely change when the +# SQLT Sybase stuff is redone/fixed-up. +my %TYPE_MAPPING = ( + number => 'numeric', + money => 'money', + varchar => 'varchar', + varchar2 => 'varchar', + timestamp => 'datetime', + text => 'varchar', + real => 'double precision', + comment => 'text', + bit => 'bit', + tinyint => 'smallint', + float => 'double precision', + serial => 'numeric', + bigserial => 'numeric', + boolean => 'varchar', + long => 'varchar', +); + +sub _syb_base_type { + my ($self, $type) = @_; + + $type = lc $type; + $type =~ s/ identity//; + + return uc($TYPE_MAPPING{$type} || $type); +} + +sub _fetch_identity_sql { + my ($self, $source, $col) = @_; + + return "SELECT MAX($col) FROM ".$source->from; +} + +sub _execute { + my $self = shift; + my ($op) = @_; + + my ($rv, $sth, @bind) = $self->dbh_do($self->can('_dbh_execute'), @_); + + if ($op eq 'insert') { + $self->_identity($sth->fetchrow_array); + $sth->finish; + } + + return wantarray ? ($rv, $sth, @bind) : $rv; +} + +sub last_insert_id { shift->_identity } + +# override to handle TEXT/IMAGE and to do a transaction if necessary +sub insert { + my ($self, $source, $to_insert) = splice @_, 0, 3; + my $dbh = $self->_dbh; + + my $blob_cols = $self->_remove_blob_cols($source, $to_insert); + +# We have to do the insert in a transaction to avoid race conditions with the +# SELECT MAX(COL) identity method used when placeholders are enabled. + my $updated_cols = do { + if ($self->_insert_txn && (not $self->{transaction_depth})) { + my $args = \@_; + my $method = $self->next::can; + $self->txn_do( + sub { $self->$method($source, $to_insert, @$args) } + ); + } else { + $self->next::method($source, $to_insert, @_); + } + }; + + $self->_insert_blobs($source, $blob_cols, $to_insert) if %$blob_cols; + + return $updated_cols; +} + +sub update { + my ($self, $source) = splice @_, 0, 2; + my ($fields, $where) = @_; + my $wantarray = wantarray; + + my $blob_cols = $self->_remove_blob_cols($source, $fields); + + my @res; + if ($wantarray) { + @res = $self->next::method($source, @_); + } else { + $res[0] = $self->next::method($source, @_); + } + + $self->_update_blobs($source, $blob_cols, $where) if %$blob_cols; + + return $wantarray ? 
@res : $res[0]; +} + +sub _remove_blob_cols { + my ($self, $source, $fields) = @_; + + my %blob_cols; + + for my $col (keys %$fields) { + if ($self->_is_lob_type($source->column_info($col)->{data_type})) { + $blob_cols{$col} = delete $fields->{$col}; + $fields->{$col} = \"''"; + } + } + + return \%blob_cols; +} + +sub _update_blobs { + my ($self, $source, $blob_cols, $where) = @_; + + my (@primary_cols) = $source->primary_columns; + + croak "Cannot update TEXT/IMAGE column(s) without a primary key" + unless @primary_cols; + +# check if we're updating a single row by PK + my $pk_cols_in_where = 0; + for my $col (@primary_cols) { + $pk_cols_in_where++ if defined $where->{$col}; + } + my @rows; + + if ($pk_cols_in_where == @primary_cols) { + my %row_to_update; + @row_to_update{@primary_cols} = @{$where}{@primary_cols}; + @rows = \%row_to_update; + } else { + my $rs = $source->resultset->search( + $where, + { + result_class => 'DBIx::Class::ResultClass::HashRefInflator', + select => \@primary_cols + } + ); + @rows = $rs->all; # statement must finish + } + + for my $row (@rows) { + $self->_insert_blobs($source, $blob_cols, $row); + } +} + +sub _insert_blobs { + my ($self, $source, $blob_cols, $row) = @_; + my $dbh = $self->dbh; + + my $table = $source->from; + + my %row = %$row; + my (@primary_cols) = $source->primary_columns; + + croak "Cannot update TEXT/IMAGE column(s) without a primary key" + unless @primary_cols; + + if ((grep { defined $row{$_} } @primary_cols) != @primary_cols) { + if (@primary_cols == 1) { + my $col = $primary_cols[0]; + $row{$col} = $self->last_insert_id($source, $col); + } else { + croak "Cannot update TEXT/IMAGE column(s) without primary key values"; + } + } + + for my $col (keys %$blob_cols) { + my $blob = $blob_cols->{$col}; + + my %where = map { ($_, $row{$_}) } @primary_cols; + my $cursor = $source->resultset->search(\%where, { + select => [$col] + })->cursor; + $cursor->next; + my $sth = $cursor->sth; + + eval { + do { + $sth->func('CS_GET', 1, 'ct_data_info') or die $sth->errstr; + } while $sth->fetch; + + $sth->func('ct_prepare_send') or die $sth->errstr; + + my $log_on_update = $self->_blob_log_on_update; + $log_on_update = 1 if not defined $log_on_update; + + $sth->func('CS_SET', 1, { + total_txtlen => length($blob), + log_on_update => $log_on_update + }, 'ct_data_info') or die $sth->errstr; + + $sth->func($blob, length($blob), 'ct_send_data') or die $sth->errstr; + + $sth->func('ct_finish_send') or die $sth->errstr; + }; + my $exception = $@; + $sth->finish if $sth; + if ($exception) { + if ($self->using_freetds) { + croak +"TEXT/IMAGE operation failed, probably because you're using FreeTDS: " . +$exception; + } else { + croak $exception; + } + } + } +} + +=head2 connect_call_datetime_setup + +Used as: + + on_connect_call => 'datetime_setup' + +In L to set: + + $dbh->syb_date_fmt('ISO_strict'); # output fmt: 2004-08-21T14:36:48.080Z + $dbh->do('set dateformat mdy'); # input fmt: 08/13/1979 18:08:55.080 + +On connection for use with L, using +L, which you will need to install. + +This works for both C and C columns, although +C columns only have minute precision. + +=cut + +{ + my $old_dbd_warned = 0; + + sub connect_call_datetime_setup { + my $self = shift; + my $dbh = $self->_dbh; + + if ($dbh->can('syb_date_fmt')) { +# amazingly, this works with FreeTDS + $dbh->syb_date_fmt('ISO_strict'); + } elsif (not $old_dbd_warned) { + carp "Your DBD::Sybase is too old to support ". 
+ "DBIx::Class::InflateColumn::DateTime, please upgrade!"; + $old_dbd_warned = 1; + } + + $dbh->do('SET DATEFORMAT mdy'); + + 1; + } +} + +sub datetime_parser_type { "DateTime::Format::Sybase" } + +# ->begin_work and such have no effect with FreeTDS but we run them anyway to +# let the DBD keep any state it needs to. +# +# If they ever do start working, the extra statements will do no harm (because +# Sybase supports nested transactions.) + +sub _dbh_begin_work { + my $self = shift; + $self->next::method(@_); + if ($self->using_freetds) { + $self->dbh->do('BEGIN TRAN'); + } +} + +sub _dbh_commit { + my $self = shift; + if ($self->using_freetds) { + $self->_dbh->do('COMMIT'); + } + return $self->next::method(@_); +} + +sub _dbh_rollback { + my $self = shift; + if ($self->using_freetds) { + $self->_dbh->do('ROLLBACK'); + } + return $self->next::method(@_); +} + +# savepoint support using ASE syntax + +sub _svp_begin { + my ($self, $name) = @_; + + $self->dbh->do("SAVE TRANSACTION $name"); +} + +# A new SAVE TRANSACTION with the same name releases the previous one. +sub _svp_release { 1 } + +sub _svp_rollback { + my ($self, $name) = @_; + + $self->dbh->do("ROLLBACK TRANSACTION $name"); } 1; -=head1 NAME +=head1 FreeTDS -DBIx::Class::Storage::DBI::Sybase - Storage::DBI subclass for Sybase +This driver supports L compiled against FreeTDS +(L) to the best of our ability, however it is +recommended that you recompile L against the Sybase Open Client +libraries. They are a part of the Sybase ASE distribution: -=head1 SYNOPSIS +The Open Client FAQ is here: +L. -This subclass supports L for real Sybase databases. If -you are using an MSSQL database via L, see -L. +Sybase ASE for Linux (which comes with the Open Client libraries) may be +downloaded here: L. -=head1 CAVEATS +To see if you're using FreeTDS check C<< $schema->storage->using_freetds >>, or run: -This storage driver uses L as a base. -This means that bind variables will be interpolated (properly quoted of course) -into the SQL query itself, without using bind placeholders. + perl -MDBI -le 'my $dbh = DBI->connect($dsn, $user, $pass); print $dbh->{syb_oc_version}' -More importantly this means that caching of prepared statements is explicitly -disabled, as the interpolation renders it useless. +Some versions of the libraries involved will not support placeholders, in which +case the storage will be reblessed to +L. -=head1 AUTHORS +In some configurations, placeholders will work but will throw implicit +conversion errors for anything that's not expecting a string. In such a case, +the C option is automatically set, which you may enable yourself with +L (see the description of that method for more +details.) + +In other configurations, placeholers will work just as they do with the Sybase +Open Client libraries. + +Inserts or updates of TEXT/IMAGE columns will B work with FreeTDS. + +=head1 MAXIMUM CONNECTIONS + +The TDS protocol makes separate connections to the server for active statements +in the background. By default the number of such connections is limited to 25, +on both the client side and the server side. + +This is a bit too low for a complex L application, so on connection +the client side setting is set to C<256> (see L.) You +can override it to whatever setting you like in the DSN. + +See +L +for information on changing the setting on the server side. + +=head1 DATES -Brandon L Black +See L to setup date formats +for L. + +=head1 TEXT/IMAGE COLUMNS + +L compiled with FreeTDS will B allow you to insert or update +C columns. 
+ +Setting C<< $dbh->{LongReadLen} >> will also not work with FreeTDS use either: + + $schema->storage->dbh->do("SET TEXTSIZE $bytes"); + +or + + $schema->storage->set_textsize($bytes); + +instead. + +However, the C you pass in +L is used to execute the equivalent +C command on connection. + +See L for a L +setting you need to work with C columns. + +=head1 AUTHORS -Justin Hunter +See L. =head1 LICENSE You may distribute this code under the same terms as Perl itself. =cut +# vim:sts=2 sw=2: diff --git a/lib/DBIx/Class/Storage/DBI/Sybase/Base.pm b/lib/DBIx/Class/Storage/DBI/Sybase/Base.pm index be57610..05c2336 100644 --- a/lib/DBIx/Class/Storage/DBI/Sybase/Base.pm +++ b/lib/DBIx/Class/Storage/DBI/Sybase/Base.pm @@ -1,5 +1,4 @@ -package # hide from PAUSE - DBIx::Class::Storage::DBI::Sybase::Base; +package DBIx::Class::Storage::DBI::Sybase::Base; use strict; use warnings; @@ -12,6 +11,15 @@ use mro 'c3'; DBIx::Class::Storage::DBI::Sybase::Base - Common functionality for drivers using DBD::Sybase +=head1 DESCRIPTION + +This is the base class for L and +L. It provides some +utility methods related to L and the supported functions of the +database you are connecting to. + +=head1 METHODS + =cut sub _ping { @@ -27,6 +35,93 @@ sub _ping { return $@ ? 0 : 1; } +=head2 placeholders_supported + +Whether or not string placeholders work. Does not check for implicit conversion +errors, see L. + +=cut + +sub placeholders_supported { + my $self = shift; + my $dbh = $self->_dbh; + + return eval { +# There's also $dbh->{syb_dynamic_supported} but it can be inaccurate for this +# purpose. + local $dbh->{PrintError} = 0; + local $dbh->{RaiseError} = 1; + $dbh->selectrow_array('select ?', {}, 1); + }; +} + +=head2 placeholders_with_type_conversion_supported + +=cut + +sub placeholders_with_type_conversion_supported { + my $self = shift; + my $dbh = $self->_dbh; + + return eval { + local $dbh->{PrintError} = 0; + local $dbh->{RaiseError} = 1; +# this specifically tests a bind that is NOT a string + $dbh->selectrow_array('select 1 where 1 = ?', {}, 1); + }; +} + +sub _set_max_connect { + my $self = shift; + my $val = shift || 256; + + my $dsn = $self->_dbi_connect_info->[0]; + + return if ref($dsn) eq 'CODE'; + + if ($dsn !~ /maxConnect=/) { + $self->_dbi_connect_info->[0] = "$dsn;maxConnect=$val"; + my $connected = defined $self->_dbh; + $self->disconnect; + $self->ensure_connected if $connected; + } +} + +=head2 using_freetds + +Whether or not L was compiled against FreeTDS. If false, it means +the Sybase OpenClient libraries were used. + +=cut + +sub using_freetds { + my $self = shift; + + return $self->_dbh->{syb_oc_version} =~ /freetds/i; +} + +=head2 set_textsize + +When using FreeTDS and/or MSSQL, C<< $dbh->{LongReadLen} >> is not available, +use this function instead. It does: + + $dbh->do("SET TEXTSIZE $bytes"); + +Takes the number of bytes, or uses the C value from your +L if omitted. 
+ +=cut + +sub set_textsize { + my $self = shift; + my $text_size = shift || + eval { $self->_dbi_connect_info->[-1]->{LongReadLen} }; + + return unless defined $text_size; + + $self->_dbh->do("SET TEXTSIZE $text_size"); +} + 1; =head1 AUTHORS diff --git a/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server.pm b/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server.pm index 600db7a..1fe11f0 100644 --- a/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server.pm +++ b/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server.pm @@ -6,9 +6,23 @@ use warnings; use base qw/ DBIx::Class::Storage::DBI::Sybase::Base DBIx::Class::Storage::DBI::ODBC::Microsoft_SQL_Server + DBIx::Class::Storage::DBI::NoBindVars /; use mro 'c3'; +sub _rebless { + my $self = shift; + $self->disable_sth_caching(1); + +# LongReadLen doesn't work with MSSQL through DBD::Sybase, and the default is +# huge on some versions of SQL server and can cause memory problems, so we +# fix it up here. + $self->set_textsize( + eval { $self->_dbi_connect_info->[-1]->{LongReadLen} } || + 32768 # the DBD::Sybase default + ); +} + 1; =head1 NAME @@ -29,11 +43,12 @@ into the SQL query itself, without using bind placeholders. More importantly this means that caching of prepared statements is explicitly disabled, as the interpolation renders it useless. -=head1 AUTHORS +The actual driver code for MSSQL is in +L. -Brandon L Black +=head1 AUTHORS -Justin Hunter +See L. =head1 LICENSE diff --git a/lib/DBIx/Class/Storage/DBI/Sybase/NoBindVars.pm b/lib/DBIx/Class/Storage/DBI/Sybase/NoBindVars.pm new file mode 100644 index 0000000..78bf807 --- /dev/null +++ b/lib/DBIx/Class/Storage/DBI/Sybase/NoBindVars.pm @@ -0,0 +1,106 @@ +package DBIx::Class::Storage::DBI::Sybase::NoBindVars; + +use Class::C3; +use base qw/ + DBIx::Class::Storage::DBI::NoBindVars + DBIx::Class::Storage::DBI::Sybase +/; +use List::Util (); +use Scalar::Util (); + +sub _rebless { + my $self = shift; + $self->disable_sth_caching(1); + $self->_insert_txn(0); +} + +# this works when NOT using placeholders +sub _fetch_identity_sql { 'SELECT @@IDENTITY' } + +my $number = sub { Scalar::Util::looks_like_number($_[0]) }; + +my $decimal = sub { $_[0] =~ /^ [-+]? \d+ (?:\.\d*)? \z/x }; + +my %noquote = ( + int => sub { $_[0] =~ /^ [-+]? \d+ \z/x }, + bit => => sub { $_[0] =~ /^[01]\z/ }, + money => sub { $_[0] =~ /^\$ \d+ (?:\.\d*)? \z/x }, + float => $number, + real => $number, + double => $number, + decimal => $decimal, + numeric => $decimal, +); + +sub should_quote_value { + my $self = shift; + my ($type, $value) = @_; + + return $self->next::method(@_) if not defined $value or not defined $type; + + if (my $key = List::Util::first { $type =~ /$_/i } keys %noquote) { + return 0 if $noquote{$key}->($value); + } elsif ($self->is_datatype_numeric($type) && $number->($value)) { + return 0; + } + +## try to guess based on value +# elsif (not $type) { +# return 0 if $number->($value) || $noquote->{money}->($value); +# } + + return $self->next::method(@_); +} + +sub transform_unbound_value { + my ($self, $type, $value) = @_; + + if ($type =~ /money/i && defined $value) { + $value =~ s/^\$//; + $value = '$' . $value; + } + + return $value; +} + +1; + +=head1 NAME + +DBIx::Class::Storage::DBI::Sybase::NoBindVars - Storage::DBI subclass for Sybase +without placeholder support + +=head1 DESCRIPTION + +If you're using this driver than your version of Sybase, or the libraries you +use to connect to it, do not support placeholders. 
+ +You can also enable this driver explicitly using: + + my $schema = SchemaClass->clone; + $schema->storage_type('::DBI::Sybase::NoBindVars'); + $schema->connect($dsn, $user, $pass, \%opts); + +See the discussion in L<< DBD::Sybase/Using ? Placeholders & bind parameters to +$sth->execute >> for details on the pros and cons of using placeholders. + +One advantage of not using placeholders is that C in a transaction as the base Sybase driver does. + +When using this driver, bind variables will be interpolated (properly quoted of +course) into the SQL query itself, without using placeholders. + +The caching of prepared statements is also explicitly disabled, as the +interpolation renders it useless. + +=head1 AUTHORS + +See L. + +=head1 LICENSE + +You may distribute this code under the same terms as Perl itself. + +=cut +# vim:sts=2 sw=2: diff --git a/t/746mssql.t b/t/746mssql.t index fa8f137..0dbd479 100644 --- a/t/746mssql.t +++ b/t/746mssql.t @@ -33,7 +33,6 @@ $schema->storage->dbh_do (sub { my ($storage, $dbh) = @_; eval { $dbh->do("DROP TABLE artist") }; $dbh->do(<<'SQL'); - CREATE TABLE artist ( artistid INT IDENTITY NOT NULL, name VARCHAR(100), @@ -41,7 +40,6 @@ CREATE TABLE artist ( charfield CHAR(10) NULL, primary key(artistid) ) - SQL }); @@ -80,14 +78,11 @@ $schema->storage->dbh_do (sub { my ($storage, $dbh) = @_; eval { $dbh->do("DROP TABLE money_test") }; $dbh->do(<<'SQL'); - CREATE TABLE money_test ( id INT IDENTITY PRIMARY KEY, amount MONEY NULL ) - SQL - }); my $rs = $schema->resultset('Money'); @@ -116,8 +111,6 @@ $schema->storage->dbh_do (sub { eval { $dbh->do("DROP TABLE Owners") }; eval { $dbh->do("DROP TABLE Books") }; $dbh->do(<<'SQL'); - - CREATE TABLE Books ( id INT IDENTITY (1, 1) NOT NULL, source VARCHAR(100), @@ -130,7 +123,6 @@ CREATE TABLE Owners ( id INT IDENTITY (1, 1) NOT NULL, name VARCHAR(100), ) - SQL }); @@ -268,11 +260,9 @@ $schema->storage->_sql_maker->{name_sep} = '.'; # clean up our mess END { - if (my $dbh = eval { $schema->storage->_dbh }) { - $dbh->do('DROP TABLE artist'); - $dbh->do('DROP TABLE money_test'); - $dbh->do('DROP TABLE Books'); - $dbh->do('DROP TABLE Owners'); - } + if (my $dbh = eval { $schema->storage->_dbh }) { + eval { $dbh->do("DROP TABLE $_") } + for qw/artist money_test Books Owners/; + } } # vim:sw=2 sts=2 diff --git a/t/746sybase.t b/t/746sybase.t index 9fc87f0..9f52894 100644 --- a/t/746sybase.t +++ b/t/746sybase.t @@ -1,5 +1,6 @@ use strict; use warnings; +no warnings 'uninitialized'; use Test::More; use Test::Exception; @@ -8,84 +9,267 @@ use DBICTest; my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_SYBASE_${_}" } qw/DSN USER PASS/}; -plan skip_all => 'Set $ENV{DBICTEST_SYBASE_DSN}, _USER and _PASS to run this test' - unless ($dsn && $user); +my $TESTS = 35 + 2; -plan tests => 13; +if (not ($dsn && $user)) { + plan skip_all => + 'Set $ENV{DBICTEST_SYBASE_DSN}, _USER and _PASS to run this test' . + "\nWarning: This test drops and creates the tables " . 
+ "'artist' and 'bindtype_test'"; +} else { + plan tests => $TESTS*2; +} + +my @storage_types = ( + 'DBI::Sybase', + 'DBI::Sybase::NoBindVars', +); +my $schema; +my $storage_idx = -1; + +for my $storage_type (@storage_types) { + $storage_idx++; +# this is so we can set ->storage_type before connecting + my $schema = DBICTest::Schema->clone; -my $schema = DBICTest::Schema->connect($dsn, $user, $pass, {AutoCommit => 1}); + unless ($storage_type eq 'DBI::Sybase') { # autodetect + $schema->storage_type("::$storage_type"); + } -# start disconnected to test reconnection -$schema->storage->ensure_connected; -$schema->storage->_dbh->disconnect; + $schema->connection($dsn, $user, $pass, { + AutoCommit => 1, + on_connect_call => [ + [ blob_setup => log_on_update => 1 ], # this is a safer option + ], + }); -isa_ok( $schema->storage, 'DBIx::Class::Storage::DBI::Sybase' ); + $schema->storage->ensure_connected; -my $dbh; -lives_ok (sub { - $dbh = $schema->storage->dbh; -}, 'reconnect works'); + if ($storage_idx == 0 && + $schema->storage->isa('DBIx::Class::Storage::DBI::Sybase::NoBindVars')) { +# no placeholders in this version of Sybase or DBD::Sybase (or using FreeTDS) + my $tb = Test::More->builder; + $tb->skip('no placeholders') for 1..$TESTS; + next; + } -$schema->storage->dbh_do (sub { - my ($storage, $dbh) = @_; - eval { $dbh->do("DROP TABLE artist") }; - $dbh->do(<<'SQL'); + isa_ok( $schema->storage, "DBIx::Class::Storage::$storage_type" ); + $schema->storage->_dbh->disconnect; + lives_ok (sub { $schema->storage->dbh }, 'reconnect works'); + + $schema->storage->dbh_do (sub { + my ($storage, $dbh) = @_; + eval { $dbh->do("DROP TABLE artist") }; + $dbh->do(<<'SQL'); CREATE TABLE artist ( - artistid INT IDENTITY NOT NULL, + artistid INT IDENTITY PRIMARY KEY, name VARCHAR(100), rank INT DEFAULT 13 NOT NULL, - charfield CHAR(10) NULL, - primary key(artistid) + charfield CHAR(10) NULL ) - SQL + }); -}); + my %seen_id; -my %seen_id; +# so we start unconnected + $schema->storage->disconnect; -# fresh $schema so we start unconnected -$schema = DBICTest::Schema->connect($dsn, $user, $pass, {AutoCommit => 1}); +# inserts happen in a txn, so we make sure it still works inside a txn too + $schema->txn_begin; # test primary key handling -my $new = $schema->resultset('Artist')->create({ name => 'foo' }); -ok($new->artistid > 0, "Auto-PK worked"); + my $new = $schema->resultset('Artist')->create({ name => 'foo' }); + ok($new->artistid > 0, "Auto-PK worked"); -$seen_id{$new->artistid}++; + $seen_id{$new->artistid}++; -# test LIMIT support -for (1..6) { + for (1..6) { $new = $schema->resultset('Artist')->create({ name => 'Artist ' . 
$_ }); is ( $seen_id{$new->artistid}, undef, "id for Artist $_ is unique" ); $seen_id{$new->artistid}++; -} + } + + $schema->txn_commit; -my $it; +# test simple count + is ($schema->resultset('Artist')->count, 7, 'count(*) of whole table ok'); -$it = $schema->resultset('Artist')->search( {}, { +# test LIMIT support + my $it = $schema->resultset('Artist')->search({ + artistid => { '>' => 0 } + }, { rows => 3, order_by => 'artistid', -}); + }); -TODO: { - local $TODO = 'Sybase is very very fucked in the limit department'; + is( $it->count, 3, "LIMIT count ok" ); - is( $it->count, 3, "LIMIT count ok" ); -} + is( $it->next->name, "foo", "iterator->next ok" ); + $it->next; + is( $it->next->name, "Artist 2", "iterator->next ok" ); + is( $it->next, undef, "next past end of resultset ok" ); + +# now try with offset + $it = $schema->resultset('Artist')->search({}, { + rows => 3, + offset => 3, + order_by => 'artistid', + }); + + is( $it->count, 3, "LIMIT with offset count ok" ); + + is( $it->next->name, "Artist 3", "iterator->next ok" ); + $it->next; + is( $it->next->name, "Artist 5", "iterator->next ok" ); + is( $it->next, undef, "next past end of resultset ok" ); + +# now try a grouped count + $schema->resultset('Artist')->create({ name => 'Artist 6' }) + for (1..6); + + $it = $schema->resultset('Artist')->search({}, { + group_by => 'name' + }); + + is( $it->count, 7, 'COUNT of GROUP_BY ok' ); + +# mostly stolen from the blob stuff Nniuq wrote for t/73oracle.t + SKIP: { + skip 'TEXT/IMAGE support does not work with FreeTDS', 12 + if $schema->storage->using_freetds; + + my $dbh = $schema->storage->dbh; + { + local $SIG{__WARN__} = sub {}; + eval { $dbh->do('DROP TABLE bindtype_test') }; + + $dbh->do(qq[ + CREATE TABLE bindtype_test + ( + id INT IDENTITY PRIMARY KEY, + bytea INT NULL, + blob IMAGE NULL, + clob TEXT NULL + ) + ],{ RaiseError => 1, PrintError => 0 }); + } -# The iterator still works correctly with rows => 3, even though the sql is -# fucked, very interesting. + my %binstr = ( 'small' => join('', map { chr($_) } ( 1 .. 
127 )) ); + $binstr{'large'} = $binstr{'small'} x 1024; -is( $it->next->name, "foo", "iterator->next ok" ); -$it->next; -is( $it->next->name, "Artist 2", "iterator->next ok" ); -is( $it->next, undef, "next past end of resultset ok" ); + my $maxloblen = length $binstr{'large'}; + + if (not $schema->storage->using_freetds) { + $dbh->{'LongReadLen'} = $maxloblen * 2; + } else { + $dbh->do("set textsize ".($maxloblen * 2)); + } + my $rs = $schema->resultset('BindType'); + my $last_id; + + foreach my $type (qw(blob clob)) { + foreach my $size (qw(small large)) { + no warnings 'uninitialized'; + + my $created = eval { $rs->create( { $type => $binstr{$size} } ) }; + ok(!$@, "inserted $size $type without dying"); + diag $@ if $@; + + $last_id = $created->id if $created; + + my $got = eval { + $rs->find($last_id)->$type + }; + diag $@ if $@; + ok($got eq $binstr{$size}, "verified inserted $size $type"); + } + } + + # blob insert with explicit PK + # also a good opportunity to test IDENTITY_INSERT + { + local $SIG{__WARN__} = sub {}; + eval { $dbh->do('DROP TABLE bindtype_test') }; + + $dbh->do(qq[ + CREATE TABLE bindtype_test + ( + id INT IDENTITY PRIMARY KEY, + bytea INT NULL, + blob IMAGE NULL, + clob TEXT NULL + ) + ],{ RaiseError => 1, PrintError => 0 }); + } + my $created = eval { $rs->create( { id => 1, blob => $binstr{large} } ) }; + ok(!$@, "inserted large blob without dying with manual PK"); + diag $@ if $@; + + my $got = eval { + $rs->find(1)->blob + }; + diag $@ if $@; + ok($got eq $binstr{large}, "verified inserted large blob with manual PK"); + + # try a blob update + my $new_str = $binstr{large} . 'mtfnpy'; + eval { $rs->search({ id => 1 })->update({ blob => $new_str }) }; + ok !$@, 'updated blob successfully'; + diag $@ if $@; + $got = eval { + $rs->find(1)->blob + }; + diag $@ if $@; + ok($got eq $new_str, "verified updated blob"); + } + +# test MONEY column support + $schema->storage->dbh_do (sub { + my ($storage, $dbh) = @_; + eval { $dbh->do("DROP TABLE money_test") }; + $dbh->do(<<'SQL'); +CREATE TABLE money_test ( + id INT IDENTITY PRIMARY KEY, + amount MONEY NULL +) +SQL + }); + + my $rs = $schema->resultset('Money'); + + my $row; + lives_ok { + $row = $rs->create({ amount => 100 }); + } 'inserted a money value'; + + is eval { $rs->find($row->id)->amount }, 100, 'money value round-trip'; + + lives_ok { + $row->update({ amount => 200 }); + } 'updated a money value'; + + is eval { $rs->find($row->id)->amount }, + 200, 'updated money value round-trip'; + + lives_ok { + $row->update({ amount => undef }); + } 'updated a money value to NULL'; + + my $null_amount = eval { $rs->find($row->id)->amount }; + ok( + (($null_amount == undef) && (not $@)), + 'updated money value to NULL round-trip' + ); + diag $@ if $@; +} # clean up our mess END { - my $dbh = eval { $schema->storage->_dbh }; - $dbh->do('DROP TABLE artist') if $dbh; + if (my $dbh = eval { $schema->storage->_dbh }) { + eval { $dbh->do("DROP TABLE $_") } + for qw/artist bindtype_test money_test/; + } } - diff --git a/t/inflate/datetime_sybase.t b/t/inflate/datetime_sybase.t new file mode 100644 index 0000000..24d0f07 --- /dev/null +++ b/t/inflate/datetime_sybase.t @@ -0,0 +1,85 @@ +use strict; +use warnings; + +use Test::More; +use Test::Exception; +use lib qw(t/lib); +use DBICTest; + +my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_SYBASE_${_}" } qw/DSN USER PASS/}; + +if (not ($dsn && $user)) { + plan skip_all => + 'Set $ENV{DBICTEST_SYBASE_DSN}, _USER and _PASS to run this test' . 
+ "\nWarning: This test drops and creates a table called 'track'"; +} else { + eval "use DateTime; use DateTime::Format::Sybase;"; + if ($@) { + plan skip_all => 'needs DateTime and DateTime::Format::Sybase for testing'; + } + else { + plan tests => (4 * 2 * 2) + 2; # (tests * dt_types * storage_types) + storage_tests + } +} + +my @storage_types = ( + 'DBI::Sybase', + 'DBI::Sybase::NoBindVars', +); +my $schema; + +for my $storage_type (@storage_types) { + $schema = DBICTest::Schema->clone; + + unless ($storage_type eq 'DBI::Sybase') { # autodetect + $schema->storage_type("::$storage_type"); + } + $schema->connection($dsn, $user, $pass, { + AutoCommit => 1, + on_connect_call => [ 'datetime_setup' ], + }); + + $schema->storage->ensure_connected; + + isa_ok( $schema->storage, "DBIx::Class::Storage::$storage_type" ); + +# coltype, col, date + my @dt_types = ( + ['DATETIME', 'last_updated_at', '2004-08-21T14:36:48.080Z'], +# minute precision + ['SMALLDATETIME', 'small_dt', '2004-08-21T14:36:00.000Z'], + ); + + for my $dt_type (@dt_types) { + my ($type, $col, $sample_dt) = @$dt_type; + + eval { $schema->storage->dbh->do("DROP TABLE track") }; + $schema->storage->dbh->do(<<"SQL"); +CREATE TABLE track ( + trackid INT IDENTITY PRIMARY KEY, + cd INT, + position INT, + $col $type, +) +SQL + ok(my $dt = DateTime::Format::Sybase->parse_datetime($sample_dt)); + + my $row; + ok( $row = $schema->resultset('Track')->create({ + $col => $dt, + cd => 1, + })); + ok( $row = $schema->resultset('Track') + ->search({ trackid => $row->trackid }, { select => [$col] }) + ->first + ); + is( $row->$col, $dt, 'DateTime roundtrip' ); + } +} + +# clean up our mess +END { + if (my $dbh = eval { $schema->storage->_dbh }) { + $dbh->do('DROP TABLE track'); + } +} diff --git a/t/lib/DBICTest/Schema/Track.pm b/t/lib/DBICTest/Schema/Track.pm index 4966800..a6de595 100644 --- a/t/lib/DBICTest/Schema/Track.pm +++ b/t/lib/DBICTest/Schema/Track.pm @@ -30,6 +30,10 @@ __PACKAGE__->add_columns( data_type => 'datetime', is_nullable => 1 }, + small_dt => { # for mssql and sybase DT tests + data_type => 'smalldatetime', + is_nullable => 1 + }, ); __PACKAGE__->set_primary_key('trackid'); diff --git a/t/lib/sqlite.sql b/t/lib/sqlite.sql index 463c2c6..a74d91a 100644 --- a/t/lib/sqlite.sql +++ b/t/lib/sqlite.sql @@ -1,6 +1,6 @@ -- -- Created by SQL::Translator::Producer::SQLite --- Created on Thu Jul 30 08:44:22 2009 +-- Created on Thu Jul 30 09:36:16 2009 -- @@ -284,7 +284,8 @@ CREATE TABLE track ( position integer NOT NULL, title varchar(100) NOT NULL, last_updated_on datetime, - last_updated_at datetime + last_updated_at datetime, + small_dt smalldatetime ); CREATE INDEX track_idx_cd ON track (cd);