From: Peter Rabbitson
Date: Wed, 26 Jan 2011 13:03:22 +0000 (+0100)
Subject: Massive rewrite of bind handling, and overall simplification of ::Storage::DBI
X-Git-Tag: v0.08191~47
X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=commitdiff_plain;h=0e773352;p=dbsrgits%2FDBIx-Class.git

Massive rewrite of bind handling, and overall simplification of ::Storage::DBI

There's no practical way to split this into smaller pieces, so here it goes:

Bind attribute handling was badly integrated into dbic almost from the start.
Until now the only information about a bind value was the column name,
contained as the first element of the bind arrayref. The column name was then
resolved to the proper colinfo (deep in ::Storage::DBI), and a match was run
on the data type to try to find an appropriate set of bind attributes.

Besides being fragile and inefficient, this method also broke down completely
when:

* No column name could be associated with a bind (arguments to complex
  literal functions)

* as_query results would encode column names that could no longer be
  resolved, since the inner result sources are no longer visible

To fix this all up and provide more flexibility the standard [ $col => $val ]
was replaced with [ \%args => $val ]. The format of \%args is currently:

{
  dbd_attrs => '
    If present (in any form) this is what is being passed directly to
    bind_param. Note that different DBDs expect different bind args,
    e.g. DBD::SQLite takes a single numerical type, while DBD::Pg takes
    a hashref of bind options. If this is specified all other bind options
    described below are ignored
  ',

  sqlt_datatype => '
    If present it is used to infer the actual bind attribute by passing it to
    $resolved_storage->bind_attribute_by_data_type(). Note that the data type
    is somewhat freeform (hence the sqlt_ prefix) - currently drivers are
    expected to DTRT when given a common datatype name (not ideal, but that is
    what we got at this point). Defaults to the "data_type" from the
    add_columns colinfo.
  ',

  sqlt_size => '
    Currently used to correctly allocate buffers for bind_param_inout().
    Defaults to "size" from the add_columns colinfo, or to a sensible value
    based on the "data_type"
  ',

  dbic_colname => '
    Used to fill in missing sqlt_datatype and sqlt_size attributes (if they
    are explicitly specified they are never overridden). Also used by some
    weird DBDs where the column name should be available at bind_param time
    (hello Oracle).
  ',
}

For backcompat/convenience the following shortcuts are supported:

  [ $name => $val ] === [ { dbic_colname => $name }, $val ]
  [ \$dt => $val ]  === [ { sqlt_datatype => $dt }, $val ]
  [ undef => $val ] === [ {}, $val ]
  ( pending in the next patch: [ $val ] === [ {}, $val ] )

On each passage through the storages (either for execute or for as_query
formatting) the information is filled in whenever available, so that by the
time the final bind_param takes place ::Storage::DBI::_dbi_attrs_for_bind has
all the available information about a particular bind value (no matter where
it came from).

A side effect of this is that as_query now always returns resolved
[ \%args => $val ] forms of bind values (hence the huge amount of test changes
in this patchset). While it should not be a major concern, it could
potentially throw off tests that expect a specific output of as_query. If this
becomes a problem a "compat mode as_query" flag will be introduced asap.
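
Purely as an illustration (this snippet is not part of the patch - the
$schema handle, the resultset, the column and the values are made up), the
same typed binds for a literal SQL condition can now be spelled in any of the
following roughly equivalent ways:

  # old style: only a (possibly dummy) column name travels with each bind
  my $rs_old = $schema->resultset('CD')->search(
    \[ 'year + ? > ?', [ dummy => 10 ], [ year => 2000 ] ]
  );

  # new style: an explicit \%args hashref carries the bind metadata
  my $rs_new = $schema->resultset('CD')->search(
    \[ 'year + ? > ?',
       [ { sqlt_datatype => 'integer' } => 10 ],
       [ { dbic_colname => 'year' }     => 2000 ],
    ]
  );

  # shortcut form: a scalarref in the name slot is taken as a datatype
  my $rs_short = $schema->resultset('CD')->search(
    \[ 'year + ? > ?', [ \'integer' => 10 ], [ year => 2000 ] ]
  );

By the time ::Storage::DBI::_dbi_attrs_for_bind sees them, all of the above
have been upgraded to the resolved [ \%args => $val ] form.
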
Additional changes in this patchset are:

* The signatures of pretty much the entire execution chain changed. Luckily
  everything that required changing was private. All drivers were adjusted
  appropriately (though something could have been missed). Affected methods
  on ::Storage::DBI are:

    _prep_for_execute
    _dbh_execute
    _execute
    _select_args_to_query
    _max_column_bytesize

  Additionally the invocation of _prep_for_execute moved from _dbh_execute to
  _execute, and the return of _select_args also changed

* source_bind_attributes was deprecated. Luckily it was never documented in
  the main documentation. Sadly it was documented in individual storage
  drivers. As such it was necessary to provide a compat shim that would invoke
  the thing if it is detected (with the appropriate warning)

* _fix_bind_params was renamed to _format_for_trace

---
diff --git a/Changes b/Changes index 922bd92..7bc45a9 100644 --- a/Changes +++ b/Changes @@ -9,6 +9,8 @@ Revision history for DBIx::Class plain ::Storage::DBI - ::Storage::DBI::sth was mistakenly marked/documented as public, privatize and warn on deprecated use + - Massive overhaul of bind values/attributes handling - slightly + changes the output of as_query (should not cause compat issues) * Fixes - Fix ::Storage::DBI::* MRO problems on 5.8.x perls diff --git a/lib/DBIx/Class/Storage/DBI.pm b/lib/DBIx/Class/Storage/DBI.pm index 189f4fb..cdaac30 100644 --- a/lib/DBIx/Class/Storage/DBI.pm +++ b/lib/DBIx/Class/Storage/DBI.pm @@ -1280,9 +1280,9 @@ sub _do_query { my $attrs = shift @do_args; my @bind = map { [ undef, $_ ] } @do_args; - $self->_query_start($sql, @bind); + $self->_query_start($sql, \@bind); $self->_get_dbh->do($sql, $attrs, @do_args); - $self->_query_end($sql, @bind); + $self->_query_end($sql, \@bind); } return $self; @@ -1581,93 +1581,163 @@ sub _dbh_rollback { # easier to override in NoBindVars without duping the rest. It takes up # all of _execute's args, and emits $sql, @bind. sub _prep_for_execute { - my ($self, $op, $extra_bind, $ident, $args) = @_; + my ($self, $op, $ident, $args) = @_; - if( blessed $ident && $ident->isa("DBIx::Class::ResultSource") ) { - $ident = $ident->from(); - } + my ($sql, @bind) = $self->sql_maker->$op( + blessed($ident) ? $ident->from : $ident, + @$args, + ); - my ($sql, @bind) = $self->sql_maker->$op($ident, @$args); + my (@final_bind, $colinfos); + my $resolve_bindinfo = sub { + $colinfos ||= $self->_resolve_column_info($ident); + if (my $col = $_[1]->{dbic_colname}) { + $_[1]->{sqlt_datatype} ||= $colinfos->{$col}{data_type} + if $colinfos->{$col}{data_type}; + $_[1]->{sqlt_size} ||= $colinfos->{$col}{size} + if $colinfos->{$col}{size}; + } + $_[1]; + }; - unshift(@bind, - map { ref $_ eq 'ARRAY' ? $_ : [ '!!dummy', $_ ] } @$extra_bind) - if $extra_bind; - return ($sql, \@bind); + for my $e (@{$args->[2]{bind}||[]}, @bind) { + push @final_bind, [ do { + if (ref $e ne 'ARRAY') { + ({}, $e) + } + elsif (! defined $e->[0]) { + ({}, $e->[1]) + } + elsif (ref $e->[0] eq 'HASH') { + ( + (first { $e->[0]{$_} } qw/dbd_attrs sqlt_datatype/) ?
$e->[0] : $self->$resolve_bindinfo($e->[0]), + $e->[1] + ) + } + elsif (ref $e->[0] eq 'SCALAR') { + ( { sqlt_datatype => ${$e->[0]} }, $e->[1] ) + } + else { + ( $self->$resolve_bindinfo({ dbic_colname => $e->[0] }), $e->[1] ) + } + }]; + } + + ($sql, \@final_bind); } +sub _format_for_trace { + #my ($self, $bind) = @_; -sub _fix_bind_params { - my ($self, @bind) = @_; + ### Turn @bind from something like this: + ### ( [ "artist", 1 ], [ \%attrs, 3 ] ) + ### to this: + ### ( "'1'", "'3'" ) - ### Turn @bind from something like this: - ### ( [ "artist", 1 ], [ "cdid", 1, 3 ] ) - ### to this: - ### ( "'1'", "'1'", "'3'" ) - return - map { - if ( defined( $_ && $_->[1] ) ) { - map { qq{'$_'}; } @{$_}[ 1 .. $#$_ ]; - } - else { q{NULL}; } - } @bind; + map { + defined( $_ && $_->[1] ) + ? qq{'$_->[1]'} + : q{NULL} + } @{$_[1] || []}; } sub _query_start { - my ( $self, $sql, @bind ) = @_; - - if ( $self->debug ) { - @bind = $self->_fix_bind_params(@bind); + my ( $self, $sql, $bind ) = @_; - $self->debugobj->query_start( $sql, @bind ); - } + $self->debugobj->query_start( $sql, $self->_format_for_trace($bind) ) + if $self->debug; } sub _query_end { - my ( $self, $sql, @bind ) = @_; + my ( $self, $sql, $bind ) = @_; - if ( $self->debug ) { - @bind = $self->_fix_bind_params(@bind); - $self->debugobj->query_end( $sql, @bind ); - } + $self->debugobj->query_end( $sql, $self->_format_for_trace($bind) ) + if $self->debug; } -sub _dbh_execute { - my ($self, $dbh, $op, $extra_bind, $ident, $bind_attributes, @args) = @_; - - my ($sql, $bind) = $self->_prep_for_execute($op, $extra_bind, $ident, \@args); +my $sba_compat; +sub _dbi_attrs_for_bind { + my ($self, $ident, $bind) = @_; - $self->_query_start( $sql, @$bind ); + if (! defined $sba_compat) { + $self->_determine_driver; + $sba_compat = $self->can('source_bind_attributes') == \&source_bind_attributes + ? 0 + : 1 + ; + } - my $sth = $self->_sth($sql,$op); + my $sba_attrs; + if ($sba_compat) { + my $class = ref $self; + carp_unique ( + "The source_bind_attributes() override in $class relies on a deprecated codepath. " + .'You are strongly advised to switch your code to override bind_attribute_by_datatype() ' + .'instead. 
This legacy compat shim will also disappear some time before DBIC 0.09' + ); - my $placeholder_index = 1; + my $sba_attrs = $self->source_bind_attributes + } - foreach my $bound (@$bind) { - my $attributes = {}; - my($column_name, @data) = @$bound; + my @attrs; - if ($bind_attributes) { - $attributes = $bind_attributes->{$column_name} - if defined $bind_attributes->{$column_name}; + for (map { $_->[0] } @$bind) { + push @attrs, do { + if ($_->{dbd_attrs}) { + $_->{dbd_attrs} + } + elsif($_->{sqlt_datatype}) { + $self->bind_attribute_by_data_type($_->{sqlt_datatype}) || undef; + } + elsif ($sba_attrs and $_->{dbic_colname}) { + $sba_attrs->{$_->{dbic_colname}} || undef; + } + else { + undef; # always push something at this position + } } + } - foreach my $data (@data) { - my $ref = ref $data; + return \@attrs; +} - if ($ref and overload::Method($data, '""') ) { - $data = "$data"; - } - elsif ($ref eq 'SCALAR') { # any scalarrefs are assumed to be bind_inouts - $sth->bind_param_inout( - $placeholder_index++, - $data, - $self->_max_column_bytesize($ident, $column_name), - $attributes - ); - next; - } +sub _execute { + my ($self, $op, $ident, @args) = @_; + + my ($sql, $bind) = $self->_prep_for_execute($op, $ident, \@args); + + shift->dbh_do( # retry over disconnects + '_dbh_execute', + $sql, + $bind, + $self->_dbi_attrs_for_bind($ident, $bind) + ); +} - $sth->bind_param($placeholder_index++, $data, $attributes); +sub _dbh_execute { + my ($self, undef, $sql, $bind, $bind_attrs) = @_; + + $self->_query_start( $sql, $bind ); + my $sth = $self->_sth($sql); + + for my $i (0 .. $#$bind) { + if (ref $bind->[$i][1] eq 'SCALAR') { # any scalarrefs are assumed to be bind_inouts + $sth->bind_param_inout( + $i + 1, # bind params counts are 1-based + $bind->[$i][1], + $bind->[$i][0]{dbd_size} || $self->_max_column_bytesize($bind->[$i][0]), # size + $bind_attrs->[$i], + ); + } + else { + $sth->bind_param( + $i + 1, + (ref $bind->[$i][1] and overload::Method($bind->[$i][1], '""')) + ? "$bind->[$i][1]" + : $bind->[$i][1] + , + $bind_attrs->[$i], + ); } } @@ -1677,16 +1747,11 @@ sub _dbh_execute { $sth->errstr || $sth->err || 'Unknown error: execute() returned false, but error flags were not set...' ) if !$rv; - $self->_query_end( $sql, @$bind ); + $self->_query_end( $sql, $bind ); return (wantarray ? ($rv, $sth, @$bind) : $rv); } -sub _execute { - my $self = shift; - $self->dbh_do('_dbh_execute', @_); # retry over disconnects -} - sub _prefetch_autovalues { my ($self, $source, $to_insert) = @_; @@ -1742,9 +1807,7 @@ sub insert { } } - my $bind_attributes = $self->source_bind_attributes($source); - - my ($rv, $sth) = $self->_execute('insert' => [], $source, $bind_attributes, $to_insert, $sqla_opts); + my ($rv, $sth) = $self->_execute('insert', $source, $to_insert, $sqla_opts); my %returned_cols; @@ -1830,7 +1893,7 @@ sub insert_bulk { } my ($sql, $bind) = $self->_prep_for_execute ( - 'insert', undef, $source, [\%colvalues] + 'insert', $source, [\%colvalues] ); if (! @$bind) { @@ -1848,7 +1911,7 @@ sub insert_bulk { # scope guard my $guard = $self->txn_scope_guard; - $self->_query_start( $sql, @$bind ? [ dummy => '__BULK_INSERT__' ] : () ); + $self->_query_start( $sql, @$bind ? [[undef => '__BULK_INSERT__' ]] : () ); my $sth = $self->_sth($sql); my $rv = do { if (@$bind) { @@ -1861,7 +1924,7 @@ sub insert_bulk { } }; - $self->_query_end( $sql, @$bind ? [ dummy => '__BULK_INSERT__' ] : () ); + $self->_query_end( $sql, @$bind ? 
[[ undef => '__BULK_INSERT__' ]] : () ); $guard->commit; @@ -1874,30 +1937,18 @@ sub _execute_array { ## This must be an arrayref, else nothing works! my $tuple_status = []; - ## Get the bind_attributes, if any exist - my $bind_attributes = $self->source_bind_attributes($source); - - ## Bind the values and execute - my $placeholder_index = 1; + # $bind contains colnames as keys and dbic-col-index as values + my $bind_attrs = $self->_dbi_attrs_for_bind($source, $bind); - foreach my $bound (@$bind) { - - my $attributes = {}; - my ($column_name, $data_index) = @$bound; - - if( $bind_attributes ) { - $attributes = $bind_attributes->{$column_name} - if defined $bind_attributes->{$column_name}; - } - - my @data = map { $_->[$data_index] } @$data; + # Bind the values by column slices + for my $i (0 .. $#$bind) { + my $dbic_data_index = $bind->[$i][1]; $sth->bind_param_array( - $placeholder_index, - [@data], - (%$attributes ? $attributes : ()), + $i+1, # DBI bind indexes are 1-based + [ map { $_->[$dbic_data_index] } @$data ], + defined $bind_attrs->[$i] ? $bind_attrs->[$i] : (), # some DBDs throw up when given an undef ); - $placeholder_index++; } my ($rv, $err); @@ -1976,20 +2027,14 @@ sub _dbh_execute_inserts_with_no_binds { } sub update { - my ($self, $source, @args) = @_; - - my $bind_attrs = $self->source_bind_attributes($source); - - return $self->_execute('update' => [], $source, $bind_attrs, @args); + #my ($self, $source, @args) = @_; + shift->_execute('update', @_); } sub delete { - my ($self, $source, @args) = @_; - - my $bind_attrs = $self->source_bind_attributes($source); - - return $self->_execute('delete' => [], $source, $bind_attrs, @args); + #my ($self, $source, @args) = @_; + shift->_execute('delete', @_); } # We were sent here because the $rs contains a complex search @@ -2097,17 +2142,17 @@ sub _select { sub _select_args_to_query { my $self = shift; - # my ($op, $bind, $ident, $bind_attrs, $select, $cond, $rs_attrs, $rows, $offset) + # my ($op, $ident, $select, $cond, $rs_attrs, $rows, $offset) # = $self->_select_args($ident, $select, $cond, $attrs); - my ($op, $bind, $ident, $bind_attrs, @args) = + my ($op, $ident, @args) = $self->_select_args(@_); - # my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, [ $select, $cond, $rs_attrs, $rows, $offset ]); - my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, \@args); + # my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $ident, [ $select, $cond, $rs_attrs, $rows, $offset ]); + my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $ident, \@args); $prepared_bind ||= []; return wantarray - ? ($sql, $prepared_bind, $bind_attrs) + ? ($sql, $prepared_bind) : \[ "($sql)", @$prepared_bind ] ; } @@ -2129,40 +2174,12 @@ sub _select_args { , }; - # calculate bind_attrs before possible $ident mangling - my $bind_attrs = {}; - for my $alias (keys %$alias2source) { - my $bindtypes = $self->source_bind_attributes ($alias2source->{$alias}) || {}; - for my $col (keys %$bindtypes) { - - my $fqcn = join ('.', $alias, $col); - $bind_attrs->{$fqcn} = $bindtypes->{$col} if $bindtypes->{$col}; - - # Unqialified column names are nice, but at the same time can be - # rather ambiguous. What we do here is basically go along with - # the loop, adding an unqualified column slot to $bind_attrs, - # alongside the fully qualified name. 
As soon as we encounter - # another column by that name (which would imply another table) - # we unset the unqualified slot and never add any info to it - # to avoid erroneous type binding. If this happens the users - # only choice will be to fully qualify his column name - - if (exists $bind_attrs->{$col}) { - $bind_attrs->{$col} = {}; - } - else { - $bind_attrs->{$col} = $bind_attrs->{$fqcn}; - } - } - } - # Sanity check the attributes (SQLMaker does it too, but # in case of a software_limit we'll never reach there) if (defined $attrs->{offset}) { $self->throw_exception('A supplied offset attribute must be a non-negative integer') if ( $attrs->{offset} =~ /\D/ or $attrs->{offset} < 0 ); } - $attrs->{offset} ||= 0; if (defined $attrs->{rows}) { $self->throw_exception("The rows attribute must be a positive integer if present") @@ -2193,7 +2210,10 @@ sub _select_args { = $self->_adjust_select_args_for_complex_prefetch ($ident, $select, $where, $attrs); } elsif (! $attrs->{software_limit} ) { - push @limit, $attrs->{rows}, $attrs->{offset}; + push @limit, ( + $attrs->{rows} || (), + $attrs->{offset} || (), + ); } # try to simplify the joinmap further (prune unreferenced type-single joins) @@ -2209,7 +2229,7 @@ sub _select_args { # invoked, and that's just bad... ### - return ('select', $attrs->{bind}, $ident, $bind_attrs, $select, $where, $attrs, @limit); + return ('select', $ident, $select, $where, $attrs, @limit); } # Returns a counting SELECT for a simple count @@ -2221,21 +2241,13 @@ sub _count_select { return { count => '*' }; } - sub source_bind_attributes { - my ($self, $source) = @_; - - my $bind_attributes; - - my $colinfo = $source->columns_info; - - for my $col (keys %$colinfo) { - if (my $dt = $colinfo->{$col}{data_type} ) { - $bind_attributes->{$col} = $self->bind_attribute_by_data_type($dt) - } - } - - return $bind_attributes; + shift->throw_exception( + 'source_bind_attributes() was never meant to be a callable public method - ' + .'please contact the DBIC dev-team and describe your use case so that a reasonable ' + .'solution can be provided' + ."\nhttp://search.cpan.org/dist/DBIx-Class/lib/DBIx/Class.pm#GETTING_HELP/SUPPORT" + ); } =head2 select @@ -2513,11 +2525,11 @@ be performed instead of the usual C. =cut sub is_datatype_numeric { - my ($self, $dt) = @_; + #my ($self, $dt) = @_; - return 0 unless $dt; + return 0 unless $_[1]; - return $dt =~ /^ (?: + $_[1] =~ /^ (?: numeric | int(?:eger)? | (?:tiny|small|medium|big)int | dec(?:imal)? | real | float | double (?: \s+ precision)? | (?:big)?serial ) $/ix; } @@ -2909,45 +2921,50 @@ sub relname_to_table_alias { # version and it may be necessary to amend or override it for a specific storage # if such binds are necessary. sub _max_column_bytesize { - my ($self, $source, $col) = @_; + my ($self, $attr) = @_; - my $inf = $source->column_info($col); - return $inf->{_max_bytesize} ||= do { + my $max_size; - my $max_size; + if ($attr->{sqlt_datatype}) { + my $data_type = lc($attr->{sqlt_datatype}); - if (my $data_type = $inf->{data_type}) { - $data_type = lc($data_type); + if ($attr->{sqlt_size}) { # String/sized-binary types - if ($data_type =~ /^(?:l?(?:var)?char(?:acter)?(?:\s*varying)? - |(?:var)?binary(?:\s*varying)?|raw)\b/x + if ($data_type =~ /^(?: + l? (?:var)? char(?:acter)? (?:\s*varying)? + | + (?:var)? binary (?:\s*varying)? 
+ | + raw + )\b/x ) { - $max_size = $inf->{size}; + $max_size = $attr->{sqlt_size}; } # Other charset/unicode types, assume scale of 4 - elsif ($data_type =~ /^(?:national\s*character(?:\s*varying)?|nchar - |univarchar - |nvarchar)\b/x + elsif ($data_type =~ /^(?: + national \s* character (?:\s*varying)? + | + nchar + | + univarchar + | + nvarchar + )\b/x ) { - $max_size = $inf->{size} * 4 if $inf->{size}; - } - # Blob types - elsif ($self->_is_lob_type($data_type)) { - # default to longreadlen - } - else { - $max_size = 100; # for all other (numeric?) datatypes + $max_size = $attr->{sqlt_size} * 4; } } - $max_size ||= $self->_get_dbh->{LongReadLen} || 8000; - }; + if (!$max_size and !$self->_is_lob_type($data_type)) { + $max_size = 100 # for all other (numeric?) datatypes + } + } + + $max_size || $self->_dbic_connect_attributes->{LongReadLen} || $self->_get_dbh->{LongReadLen} || 8000; } # Determine if a data_type is some type of BLOB -# FIXME: these regexes are expensive, result of these checks should be cached in -# the column_info . sub _is_lob_type { my ($self, $data_type) = @_; $data_type && ($data_type =~ /lob|bfile|text|image|bytea|memo/i diff --git a/lib/DBIx/Class/Storage/DBI/ADO/Microsoft_SQL_Server.pm b/lib/DBIx/Class/Storage/DBI/ADO/Microsoft_SQL_Server.pm index efe32b6..7e08098 100644 --- a/lib/DBIx/Class/Storage/DBI/ADO/Microsoft_SQL_Server.pm +++ b/lib/DBIx/Class/Storage/DBI/ADO/Microsoft_SQL_Server.pm @@ -66,17 +66,16 @@ sub _rebless { $self->_identity_method('@@identity'); } -sub source_bind_attributes { - my $self = shift; - my ($source) = @_; - - my $bind_attributes = $self->next::method(@_); +# work around a bug in the ADO driver - use the max VARCHAR size for all +# binds that do not specify one via bind_attributes_by_data_type() +sub _dbi_attrs_for_bind { + my $attrs = shift->next::method(@_); - foreach my $column ($source->columns) { - $bind_attributes->{$column}{ado_size} ||= 8000; # max VARCHAR + for (@$attrs) { + $_->{ado_size} ||= 8000 if $_; } - return $bind_attributes; + $attrs; } sub bind_attribute_by_data_type { diff --git a/lib/DBIx/Class/Storage/DBI/AutoCast.pm b/lib/DBIx/Class/Storage/DBI/AutoCast.pm index f099bc5..b7f28a6 100644 --- a/lib/DBIx/Class/Storage/DBI/AutoCast.pm +++ b/lib/DBIx/Class/Storage/DBI/AutoCast.pm @@ -38,7 +38,6 @@ L as: sub _prep_for_execute { my $self = shift; - my ($op, $extra_bind, $ident, $args) = @_; my ($sql, $bind) = $self->next::method (@_); @@ -46,20 +45,12 @@ sub _prep_for_execute { # gets skippeed. if ($self->auto_cast && @$bind) { my $new_sql; - my @sql_part = split /\?/, $sql; - my $col_info = $self->_resolve_column_info($ident,[ map $_->[0], @$bind ]); - - foreach my $bound (@$bind) { - my $col = $bound->[0]; - my $type = $self->_native_data_type($col_info->{$col}{data_type}); - - foreach my $data (@{$bound}[1..$#$bound]) { - $new_sql .= shift(@sql_part) . - ($type ? "CAST(? AS $type)" : '?'); - } + my @sql_part = split /\?/, $sql, scalar @$bind + 1; + for (@$bind) { + my $cast_type = $self->_native_data_type($_->[0]{sqlt_datatype}); + $new_sql .= shift(@sql_part) . ($cast_type ? "CAST(? AS $cast_type)" : '?'); } - $new_sql .= join '', @sql_part; - $sql = $new_sql; + $sql = $new_sql . 
shift @sql_part; } return ($sql, $bind); diff --git a/lib/DBIx/Class/Storage/DBI/MSSQL.pm b/lib/DBIx/Class/Storage/DBI/MSSQL.pm index 07d3a4f..46f5828 100644 --- a/lib/DBIx/Class/Storage/DBI/MSSQL.pm +++ b/lib/DBIx/Class/Storage/DBI/MSSQL.pm @@ -71,7 +71,7 @@ sub insert { sub _prep_for_execute { my $self = shift; - my ($op, $extra_bind, $ident, $args) = @_; + my ($op, $ident, $args) = @_; # cast MONEY values properly if ($op eq 'insert' || $op eq 'update') { @@ -113,7 +113,7 @@ sub _execute { my $self = shift; my ($op) = @_; - my ($rv, $sth, @bind) = $self->dbh_do($self->can('_dbh_execute'), @_); + my ($rv, $sth, @bind) = $self->next::method(@_); if ($op eq 'insert') { diff --git a/lib/DBIx/Class/Storage/DBI/NoBindVars.pm b/lib/DBIx/Class/Storage/DBI/NoBindVars.pm index 9f84702..71de5b9 100644 --- a/lib/DBIx/Class/Storage/DBI/NoBindVars.pm +++ b/lib/DBIx/Class/Storage/DBI/NoBindVars.pm @@ -43,31 +43,30 @@ sub _prep_for_execute { my ($sql, $bind) = $self->next::method(@_); # stringify bind args, quote via $dbh, and manually insert - #my ($op, $extra_bind, $ident, $args) = @_; - my $ident = $_[2]; + #my ($op, $ident, $args) = @_; + my $ident = $_[1]; my @sql_part = split /\?/, $sql; my $new_sql; - my $col_info = $self->_resolve_column_info($ident, [ map $_->[0], @$bind ]); + my $col_info = $self->_resolve_column_info( + $ident, [ map { $_->[0]{dbic_colname} || () } @$bind ] + ); - foreach my $bound (@$bind) { - my $col = shift @$bound; + for (@$bind) { + my $datatype = $col_info->{ $_->[0]{dbic_colname}||'' }{data_type}; - my $datatype = $col_info->{$col}{data_type}; + my $data = (ref $_->[1]) ? "$_->[1]" : $_->[1]; # always stringify - foreach my $data (@$bound) { - $data = ''.$data if ref $data; + $data = $self->_prep_interpolated_value($datatype, $data) + if $datatype; - $data = $self->_prep_interpolated_value($datatype, $data) - if $datatype; + $data = $self->_get_dbh->quote($data) + unless $self->interpolate_unquoted($datatype, $data); - $data = $self->_dbh->quote($data) - unless $self->interpolate_unquoted($datatype, $data); - - $new_sql .= shift(@sql_part) . $data; - } + $new_sql .= shift(@sql_part) . 
$data; } + $new_sql .= join '', @sql_part; return ($new_sql, []); diff --git a/lib/DBIx/Class/Storage/DBI/ODBC/ACCESS.pm b/lib/DBIx/Class/Storage/DBI/ODBC/ACCESS.pm index 087f0c2..b41b1f3 100644 --- a/lib/DBIx/Class/Storage/DBI/ODBC/ACCESS.pm +++ b/lib/DBIx/Class/Storage/DBI/ODBC/ACCESS.pm @@ -16,8 +16,7 @@ sub insert { my $self = shift; my ( $source, $to_insert ) = @_; - my $bind_attributes = $self->source_bind_attributes( $source ); - my ( undef, $sth ) = $self->_execute( 'insert' => [], $source, $bind_attributes, $to_insert ); + my ( undef, $sth ) = $self->_execute( 'insert', $source, $to_insert ); #store the identity here since @@IDENTITY is connection global and this prevents #possibility that another insert to a different table overwrites it for this resultsource diff --git a/lib/DBIx/Class/Storage/DBI/Oracle/Generic.pm b/lib/DBIx/Class/Storage/DBI/Oracle/Generic.pm index 8e769b6..8c6b9d3 100644 --- a/lib/DBIx/Class/Storage/DBI/Oracle/Generic.pm +++ b/lib/DBIx/Class/Storage/DBI/Oracle/Generic.pm @@ -272,15 +272,14 @@ sub _ping { } sub _dbh_execute { - my $self = shift; - my ($dbh, $op, $extra_bind, $ident, $bind_attributes, @args) = @_; + my ($self, $dbh, $sql, @args) = @_; my (@res, $tried); my $want = wantarray; my $next = $self->next::can; do { try { - my $exec = sub { $self->$next($dbh, $op, $extra_bind, $ident, $bind_attributes, @args) }; + my $exec = sub { $self->$next($dbh, $sql, @args) }; if (!defined $want) { $exec->(); @@ -298,7 +297,6 @@ sub _dbh_execute { if (! $tried and $_ =~ /ORA-01003/) { # ORA-01003: no statement parsed (someone changed the table somehow, # invalidating your cursor.) - my ($sql, $bind) = $self->_prep_for_execute($op, $extra_bind, $ident, \@args); delete $dbh->{CachedKids}{$sql}; } else { @@ -384,55 +382,57 @@ sub connect_call_datetime_setup { ); } -=head2 source_bind_attributes - -Handle LOB types in Oracle. Under a certain size (4k?), you can get away -with the driver assuming your input is the deprecated LONG type if you -encode it as a hex string. That ain't gonna fly at larger values, where -you'll discover you have to do what this does. - -This method had to be overridden because we need to set ora_field to the -actual column, and that isn't passed to the call (provided by Storage) to -bind_attribute_by_data_type. - -According to L, the ora_field isn't always necessary, but -adding it doesn't hurt, and will save your bacon if you're modifying a -table with more than one LOB column. - -=cut - -sub source_bind_attributes -{ - require DBD::Oracle; - my $self = shift; - my($source) = @_; - - my %bind_attributes = %{ $self->next::method(@_) }; - - foreach my $column ($source->columns) { - my %column_bind_attrs = %{ $bind_attributes{$column} || {} }; +### Note originally by Ron "Quinn" Straight +### http://git.shadowcat.co.uk/gitweb/gitweb.cgi?p=dbsrgits/DBIx-Class.git;a=commitdiff;h=5db2758de644d53e07cd3e05f0e9037bf40116fc +# +# Handle LOB types in Oracle. Under a certain size (4k?), you can get away +# with the driver assuming your input is the deprecated LONG type if you +# encode it as a hex string. That ain't gonna fly at larger values, where +# you'll discover you have to do what this does. +# +# This method had to be overridden because we need to set ora_field to the +# actual column, and that isn't passed to the call (provided by Storage) to +# bind_attribute_by_data_type. 
+# +# According to L, the ora_field isn't always necessary, but +# adding it doesn't hurt, and will save your bacon if you're modifying a +# table with more than one LOB column. +# +sub _dbi_attrs_for_bind { + my ($self, $ident, $bind) = @_; + my $attrs = $self->next::method($ident, $bind); + + for my $i (0 .. $#$attrs) { + if (keys %{$attrs->[$i]||{}} and my $col = $bind->[$i][0]{dbic_colname}) { + $attrs->[$i]{ora_field} = $col; + } + } - my $data_type = $source->column_info($column)->{data_type}; + $attrs; +} - if ($self->_is_lob_type($data_type)) { - if ($DBD::Oracle::VERSION eq '1.23') { - $self->throw_exception( -"BLOB/CLOB support in DBD::Oracle == 1.23 is broken, use an earlier or later ". -"version.\n\nSee: https://rt.cpan.org/Public/Bug/Display.html?id=46016\n" - ); - } +my $dbd_loaded; +sub bind_attribute_by_data_type { + my ($self, $dt) = @_; + + $dbd_loaded ||= do { + require DBD::Oracle; + if ($DBD::Oracle::VERSION eq '1.23') { + $self->throw_exception( + "BLOB/CLOB support in DBD::Oracle == 1.23 is broken, use an earlier or later ". + "version.\n\nSee: https://rt.cpan.org/Public/Bug/Display.html?id=46016\n" + ); + } + 1; + }; - $column_bind_attrs{'ora_type'} = $self->_is_text_lob_type($data_type) + if ($self->_is_lob_type($dt)) { + return { + ora_type => $self->_is_text_lob_type($dt) ? DBD::Oracle::ORA_CLOB() : DBD::Oracle::ORA_BLOB() - ; - $column_bind_attrs{'ora_field'} = $column; - } - - $bind_attributes{$column} = \%column_bind_attrs; + }; } - - return \%bind_attributes; } sub _svp_begin { diff --git a/lib/DBIx/Class/Storage/DBI/Replicated.pm b/lib/DBIx/Class/Storage/DBI/Replicated.pm index 0523bb7..9a9e05f 100644 --- a/lib/DBIx/Class/Storage/DBI/Replicated.pm +++ b/lib/DBIx/Class/Storage/DBI/Replicated.pm @@ -280,26 +280,26 @@ my $method_dispatch = { svp_release relname_to_table_alias _dbh_last_insert_id - _fix_bind_params _default_dbi_connect_attributes _dbi_connect_info _dbic_connect_attributes auto_savepoint + _query_start _query_end + _format_for_trace + _dbi_attrs_for_bind bind_attribute_by_data_type transaction_depth _dbh _select_args _dbh_execute_array _sql_maker - _query_start _per_row_update_delete _dbh_begin_work _dbh_execute_inserts_with_no_binds _select_args_to_query _svp_generate_name _multipk_update_delete - source_bind_attributes _normalize_connect_info _parse_connect_do _dbh_commit @@ -336,6 +336,8 @@ my $method_dispatch = { _arm_global_destructor _verify_pid + source_bind_attributes + get_use_dbms_capability set_use_dbms_capability get_dbms_capability diff --git a/lib/DBIx/Class/Storage/DBI/SQLite.pm b/lib/DBIx/Class/Storage/DBI/SQLite.pm index 0fa8e75..15e70ba 100644 --- a/lib/DBIx/Class/Storage/DBI/SQLite.pm +++ b/lib/DBIx/Class/Storage/DBI/SQLite.pm @@ -64,6 +64,13 @@ sub deployment_statements { $self->next::method($schema, $type, $version, $dir, $sqltargs, @rest); } +sub bind_attribute_by_data_type { + $_[1] =~ /^ (?: int(?:eger)? | (?:tiny|small|medium|big)int ) $/ix + ? do { require DBI; DBI::SQL_INTEGER() } + : undef + ; +} + =head2 connect_call_use_foreign_keys Used as: diff --git a/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm b/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm index 24b3ab1..0e57f02 100644 --- a/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm +++ b/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm @@ -243,14 +243,14 @@ sub _is_lob_column { sub _prep_for_execute { my $self = shift; - my ($op, $extra_bind, $ident, $args) = @_; + my ($op, $ident, $args) = @_; my ($sql, $bind) = $self->next::method (@_); my $table = blessed $ident ? 
$ident->from : $ident; my $bind_info = $self->_resolve_column_info( - $ident, [map $_->[0], @{$bind}] + $ident, [map { $_->[0]{dbic_colname} || () } @{$bind}] ); my $bound_identity_col = first { $bind_info->{$_}{is_auto_increment} } @@ -333,7 +333,7 @@ sub _execute { my $self = shift; my ($op) = @_; - my ($rv, $sth, @bind) = $self->dbh_do($self->can('_dbh_execute'), @_); + my ($rv, $sth, @bind) = $self->next::method(@_); if ($op eq 'insert') { $self->_identity($sth->fetchrow_array); @@ -634,10 +634,7 @@ EOF } ); - my @bind = do { - my $idx = 0; - map [ $_, $idx++ ], @source_columns; - }; + my @bind = map { [ $source_columns[$_] => $_ ] } (0 .. $#source_columns); $self->_execute_array( $source, $sth, \@bind, \@source_columns, \@new_data, sub { diff --git a/t/73oracle_hq.t b/t/73oracle_hq.t index 397a97f..1025f69 100644 --- a/t/73oracle_hq.t +++ b/t/73oracle_hq.t @@ -116,7 +116,8 @@ do_creates($dbh); START WITH name = ? CONNECT BY parentid = PRIOR artistid )', - [ [ name => 'root'] ], + [ [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'name', 'sqlt_size' => 100 } + => 'root'] ], ); is_deeply ( [ $rs->get_column ('name')->all ], @@ -132,7 +133,8 @@ do_creates($dbh); START WITH name = ? CONNECT BY parentid = PRIOR artistid )', - [ [ name => 'root'] ], + [ [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'name', 'sqlt_size' => 100 } + => 'root'] ], ); is( $rs->count, 5, 'Connect By count ok' ); @@ -159,7 +161,8 @@ do_creates($dbh); CONNECT BY parentid = PRIOR artistid ORDER SIBLINGS BY name DESC )', - [ [ name => 'root'] ], + [ [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'name', 'sqlt_size' => 100 } + => 'root'] ], ); is_deeply ( @@ -185,7 +188,8 @@ do_creates($dbh); START WITH name = ? CONNECT BY parentid = PRIOR artistid )', - [ [ name => 'root'] ], + [ [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'name', 'sqlt_size' => 100 } + => 'root'] ], ); is_deeply( @@ -220,7 +224,12 @@ do_creates($dbh); START WITH me.name = ? CONNECT BY parentid = PRIOR artistid )', - [ [ 'cds.title' => '%cd' ], [ 'me.name' => 'root' ] ], + [ + [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'cds.title', 'sqlt_size' => 100 } + => '%cd'], + [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'me.name', 'sqlt_size' => 100 } + => 'root'], + ], ); is_deeply( @@ -239,7 +248,12 @@ do_creates($dbh); START WITH me.name = ? CONNECT BY parentid = PRIOR artistid )', - [ [ 'cds.title' => '%cd' ], [ 'me.name' => 'root' ] ], + [ + [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'cds.title', 'sqlt_size' => 100 } + => '%cd'], + [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'me.name', 'sqlt_size' => 100 } + => 'root'], + ], ); is( $rs->count, 1, 'Connect By with a join; count ok' ); @@ -262,7 +276,10 @@ do_creates($dbh); CONNECT BY parentid = PRIOR artistid ORDER BY LEVEL ASC, name ASC )', - [ [ name => 'root' ] ], + [ + [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'name', 'sqlt_size' => 100 } + => 'root'], + ], ); @@ -312,7 +329,10 @@ do_creates($dbh); ) me WHERE ROWNUM <= 2 )', - [ [ name => 'root' ] ], + [ + [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'name', 'sqlt_size' => 100 } + => 'root'], + ], ); is_deeply ( @@ -336,7 +356,10 @@ do_creates($dbh); WHERE ROWNUM <= 2 ) me )', - [ [ name => 'root' ] ], + [ + [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'name', 'sqlt_size' => 100 } + => 'root'], + ], ); is( $rs->count, 2, 'Connect By; LIMIT count ok' ); @@ -364,10 +387,14 @@ do_creates($dbh); GROUP BY( rank + ? ) HAVING count(rank) < ? 
)', [ - [ __cbind => 3 ], - [ name => 'root' ], - [ __gbind => 1 ], - [ cnt => 2 ] + [ { dbic_colname => '__cbind' } + => 3 ], + [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'name', 'sqlt_size' => 100 } + => 'root'], + [ { dbic_colname => '__gbind' } + => 1 ], + [ { dbic_colname => 'cnt' } + => 2 ], ], ); @@ -411,7 +438,10 @@ do_creates($dbh); START WITH name = ? CONNECT BY NOCYCLE parentid = PRIOR artistid )', - [ [ name => 'cycle-root'] ], + [ + [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'name', 'sqlt_size' => 100 } + => 'cycle-root'], + ], ); is_deeply ( [ $rs->get_column ('name')->all ], @@ -432,7 +462,10 @@ do_creates($dbh); START WITH name = ? CONNECT BY NOCYCLE parentid = PRIOR artistid )', - [ [ name => 'cycle-root'] ], + [ + [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'name', 'sqlt_size' => 100 } + => 'cycle-root'], + ], ); is( $rs->count, 4, 'Connect By Nocycle count ok' ); diff --git a/t/746mssql.t b/t/746mssql.t index bb0b254..3c2a8c3 100644 --- a/t/746mssql.t +++ b/t/746mssql.t @@ -375,11 +375,16 @@ SQL ); my ($sql, @bind) = @${$owners->page(3)->as_query}; - is_deeply ( + is_same_bind ( \@bind, [ - $dialect eq 'Top' ? [ test => 'xxx' ] : (), # the extra re-order bind - ([ 'me.name' => 'somebogusstring' ], [ test => 'xxx' ]) x 2 # double because of the prefetch subq + $dialect eq 'Top' ? [ { dbic_colname => 'test' } => 'xxx' ] : (), # the extra re-order bind + (map { + [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'me.name' } + => 'somebogusstring' ], + [ { dbic_colname => 'test' } + => 'xxx' ], + } (1,2)), # double because of the prefetch subq ], ); @@ -411,13 +416,26 @@ SQL ); ($sql, @bind) = @${$books->page(3)->as_query}; - is_deeply ( + is_same_bind ( \@bind, [ # inner - [ 'owner.name' => 'wiggle' ], [ 'owner.name' => 'woggle' ], [ source => 'Library' ], [ test => '1' ], + [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'owner.name' } + => 'wiggle' ], + [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'owner.name' } + => 'woggle' ], + [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'source' } + => 'Library' ], + [ { dbic_colname => 'test' } + => '1' ], + # outer - [ 'owner.name' => 'wiggle' ], [ 'owner.name' => 'woggle' ], [ source => 'Library' ], + [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'owner.name' } + => 'wiggle' ], + [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'owner.name' } + => 'woggle' ], + [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'source' } + => 'Library' ], ], ); diff --git a/t/93autocast.t b/t/93autocast.t index 0a146a7..a0eb9d3 100644 --- a/t/93autocast.t +++ b/t/93autocast.t @@ -33,14 +33,18 @@ my $rs = $schema->resultset ('CD')->search ({ 'tracks.last_updated_at' => { '!=', undef }, 'tracks.last_updated_on' => { '<', 2009 }, 'tracks.position' => 4, - 'tracks.single_track' => \[ '= ?', [ single_track => [1, 2, 3 ] ] ], + 'me.single_track' => \[ '= ?', [ single_track => [1, 2, 3 ] ] ], }, { join => 'tracks' }); my $bind = [ - [ cdid => 5 ], - [ 'tracks.last_updated_on' => 2009 ], - [ 'tracks.position' => 4 ], - [ 'single_track' => [ 1, 2, 3] ], + [ { sqlt_datatype => 'integer', dbic_colname => 'cdid' } + => 5 ], + [ { sqlt_datatype => 'integer', dbic_colname => 'single_track' } + => [ 1, 2, 3] ], + [ { sqlt_datatype => 'datetime', dbic_colname => 'tracks.last_updated_on' } + => 2009 ], + [ { sqlt_datatype => 'int', dbic_colname => 'tracks.position' } + => 4 ], ]; is_same_sql_bind ( @@ -51,10 
+55,10 @@ is_same_sql_bind ( LEFT JOIN track tracks ON tracks.cd = me.cdid WHERE cdid > ? + AND me.single_track = ? AND tracks.last_updated_at IS NOT NULL AND tracks.last_updated_on < ? AND tracks.position = ? - AND tracks.single_track = ? )', $bind, 'expected sql with casting off', @@ -70,10 +74,10 @@ is_same_sql_bind ( LEFT JOIN track tracks ON tracks.cd = me.cdid WHERE cdid > CAST(? AS INT) + AND me.single_track = CAST(? AS INT) AND tracks.last_updated_at IS NOT NULL AND tracks.last_updated_on < CAST (? AS DateTime) AND tracks.position = ? - AND tracks.single_track = CAST(? AS INT) )', $bind, 'expected sql with casting on', diff --git a/t/count/count_rs.t b/t/count/count_rs.t index 30ca0ca..af0f036 100644 --- a/t/count/count_rs.t +++ b/t/count/count_rs.t @@ -54,7 +54,12 @@ my $schema = DBICTest->init_schema(); LIMIT 3 OFFSET 8 ) tracks )', - [ [ position => 1 ], [ position => 2 ] ], + [ + [ { sqlt_datatype => 'int', dbic_colname => 'position' } + => 1 ], + [ { sqlt_datatype => 'int', dbic_colname => 'position' } + => 2 ], + ], 'count_rs db-side limit applied', ); } @@ -109,7 +114,12 @@ my $schema = DBICTest->init_schema(); LIMIT 3 OFFSET 4 ) cds )', - [ [ 'tracks.position' => 1 ], [ 'tracks.position' => 2 ] ], + [ + [ { sqlt_datatype => 'int', dbic_colname => 'tracks.position' } + => 1 ], + [ { sqlt_datatype => 'int', dbic_colname => 'tracks.position' } + => 2 ], + ], 'count_rs db-side limit applied', ); } @@ -140,7 +150,8 @@ my $schema = DBICTest->init_schema(); HAVING newest_cd_year = ? ) me )', - [ [ 'newest_cd_year' => '2001' ],], + [ [ { dbic_colname => 'newest_cd_year' } + => '2001' ] ], 'count with having clause keeps sql as alias', ); diff --git a/t/count/prefetch.t b/t/count/prefetch.t index f3818c1..25ae856 100644 --- a/t/count/prefetch.t +++ b/t/count/prefetch.t @@ -33,7 +33,7 @@ my $schema = DBICTest->init_schema(); GROUP BY cds.cdid ) cds )', - [ map { [ 'tracks.position' => $_ ] } (1, 2) ], + [ map { [ { sqlt_datatype => 'int', dbic_colname => 'tracks.position' } => $_ ] } (1, 2) ], ); } @@ -65,7 +65,9 @@ my $schema = DBICTest->init_schema(); ) genre )', - [ [ 'genre.name' => 'emo' ] ], + [ [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'genre.name' } + => 'emo' ] + ], ); } @@ -91,7 +93,7 @@ my $schema = DBICTest->init_schema(); LEFT JOIN lyrics lyrics ON lyrics.track_id = tracks.trackid WHERE lyrics.lyric_id IS NULL AND (position = ? OR position = ?) 
)', - [ map { [ position => $_ ] } (1, 2) ], + [ map { [ { sqlt_datatype => 'int', dbic_colname => 'position' } => $_ ] } (1, 2) ], ); } diff --git a/t/lib/DBICTest/Schema/FourKeys.pm b/t/lib/DBICTest/Schema/FourKeys.pm index 9966cfb..def6ade 100644 --- a/t/lib/DBICTest/Schema/FourKeys.pm +++ b/t/lib/DBICTest/Schema/FourKeys.pm @@ -10,7 +10,7 @@ __PACKAGE__->add_columns( 'hello' => { data_type => 'integer' }, 'goodbye' => { data_type => 'integer' }, 'sensors' => { data_type => 'character', size => 10 }, - 'read_count' => { data_type => 'integer', is_nullable => 1 }, + 'read_count' => { data_type => 'int', is_nullable => 1 }, ); __PACKAGE__->set_primary_key(qw/foo bar hello goodbye/); diff --git a/t/prefetch/correlated.t b/t/prefetch/correlated.t index 7e7690d..fd5ef1d 100644 --- a/t/prefetch/correlated.t +++ b/t/prefetch/correlated.t @@ -24,7 +24,7 @@ my $c_rs = $cdrs->search ({}, { '+columns' => { sibling_count => $cdrs->search( { 'siblings.artist' => { -ident => 'me.artist' }, - 'siblings.cdid' => { '!=' => ['-and', { -ident => 'me.cdid' }, 'bogus condition'] }, + 'siblings.cdid' => { '!=' => ['-and', { -ident => 'me.cdid' }, 23414] }, }, { alias => 'siblings' }, )->count_rs->as_query, }, @@ -51,11 +51,15 @@ is_same_sql_bind( [ # subselect - [ 'siblings.cdid' => 'bogus condition' ], - [ 'me.artist' => 2 ], + [ { sqlt_datatype => 'integer', dbic_colname => 'siblings.cdid' } + => 23414 ], + + [ { sqlt_datatype => 'integer', dbic_colname => 'me.artist' } + => 2 ], # outher WHERE - [ 'me.artist' => 2 ], + [ { sqlt_datatype => 'integer', dbic_colname => 'me.artist' } + => 2 ], ], 'Expected SQL on correlated realiased subquery' ); @@ -85,7 +89,7 @@ $schema->storage->debugcb(undef); # first add a lone non-as-ed select # it should be reordered to appear at the end without throwing prefetch/bind off -$c_rs = $c_rs->search({}, { '+select' => \[ 'me.cdid + ?', [ __add => 1 ] ] }); +$c_rs = $c_rs->search({}, { '+select' => \[ 'me.cdid + ?', [ \ 'inTEger' => 1 ] ] }); # now add an unbalanced select/as pair $c_rs = $c_rs->search ({}, { @@ -127,17 +131,23 @@ is_same_sql_bind( [ # first subselect - [ 'siblings.cdid' => 'bogus condition' ], - [ 'me.artist' => 2 ], + [ { sqlt_datatype => 'integer', dbic_colname => 'siblings.cdid' } + => 23414 ], + + [ { sqlt_datatype => 'integer', dbic_colname => 'me.artist' } + => 2 ], # second subselect - [ 'me.artist' => 2 ], + [ { sqlt_datatype => 'integer', dbic_colname => 'me.artist' } + => 2 ], # the addition - [ __add => 1 ], + [ { sqlt_datatype => 'inTEger' } + => 1 ], # outher WHERE - [ 'me.artist' => 2 ], + [ { sqlt_datatype => 'integer', dbic_colname => 'me.artist' } + => 2 ], ], 'Expected SQL on correlated realiased subquery' ); diff --git a/t/prefetch/count.t b/t/prefetch/count.t index 49370a4..ef2f88b 100644 --- a/t/prefetch/count.t +++ b/t/prefetch/count.t @@ -73,7 +73,8 @@ is_same_sql_bind ( JOIN track tracks ON tracks.cd = cds.cdid WHERE ( me.artistid = ? 
) )', - [ [ 'me.artistid' => 4 ] ], + [ [ { sqlt_datatype => 'integer', dbic_colname => 'me.artistid' } + => 4 ] ], ); diff --git a/t/prefetch/grouped.t b/t/prefetch/grouped.t index d0b8e6c..c8c3e87 100644 --- a/t/prefetch/grouped.t +++ b/t/prefetch/grouped.t @@ -78,7 +78,8 @@ for ($cd_rs->all) { ) me )', - [ map { [ 'me.cd' => $_] } ($cd_rs->get_column ('cdid')->all) ], + [ map { [ { sqlt_datatype => 'integer', dbic_colname => 'me.cd' } + => $_ ] } ($cd_rs->get_column ('cdid')->all) ], 'count() query generated expected SQL', ); @@ -96,7 +97,8 @@ for ($cd_rs->all) { JOIN cd cd ON cd.cdid = me.cd WHERE ( me.cd IN ( ?, ?, ?, ?, ? ) ) )', - [ map { [ 'me.cd' => $_] } ( ($cd_rs->get_column ('cdid')->all) x 2 ) ], + [ map { [ { sqlt_datatype => 'integer', dbic_colname => 'me.cd' } + => $_ ] } ( ($cd_rs->get_column ('cdid')->all) x 2 ) ], 'next() query generated expected SQL', ); @@ -264,7 +266,8 @@ for ($cd_rs->all) { ) me )', - [ map { [ 'me.cd' => $_] } ($cd_rs->get_column ('cdid')->all) ], + [ map { [ { sqlt_datatype => 'integer', dbic_colname => 'me.cd' } + => $_ ] } ($cd_rs->get_column ('cdid')->all) ], 'count() query generated expected SQL', ); } @@ -323,7 +326,9 @@ for ($cd_rs->all) { GROUP BY me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track, artist.artistid, artist.name, artist.rank, artist.charfield )', - [ map { [ 'tracks.title' => 'ugabuganoexist' ] } (1 .. 2) ], + [ map { [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'tracks.title' } + => 'ugabuganoexist' ] } (1,2) + ], ); } diff --git a/t/prefetch/o2m_o2m_order_by_with_limit.t b/t/prefetch/o2m_o2m_order_by_with_limit.t index 4aead92..a4476c3 100644 --- a/t/prefetch/o2m_o2m_order_by_with_limit.t +++ b/t/prefetch/o2m_o2m_order_by_with_limit.t @@ -44,7 +44,9 @@ is_same_sql_bind( WHERE ( me.rank = ? 
) ORDER BY me.name ASC, me.artistid DESC, tracks.cd )}, - [ [ 'me.rank' => 13 ], [ 'me.rank' => 13 ] ], + [ map { [ { sqlt_datatype => 'integer', dbic_colname => 'me.rank' } + => 13 ] } (1,2) + ], 'correct SQL on limited prefetch over search_related ordered by root', ); diff --git a/t/prefetch/standard.t b/t/prefetch/standard.t index 66479b0..4c1c004 100644 --- a/t/prefetch/standard.t +++ b/t/prefetch/standard.t @@ -148,10 +148,11 @@ $rs = $schema->resultset("CD")->search( cmp_ok( $rs->count, '==', 3, "count() ok after group_by on related column" ); -$rs = $schema->resultset("Artist")->search( - {}, - { join => [qw/ cds /], group_by => [qw/ me.name /], having =>{ 'MAX(cds.cdid)'=> \'< 5' } } -); +$rs = $schema->resultset("Artist")->search({}, { + join => [qw/ cds /], + group_by => [qw/ me.name /], + having => \[ 'MAX(cds.cdid) < ?', [ \'int' => 5 ] ], +}); cmp_ok( $rs->all, '==', 2, "results ok after group_by on related column with a having" ); diff --git a/t/prefetch/with_limit.t b/t/prefetch/with_limit.t index f6729b1..977a3f9 100644 --- a/t/prefetch/with_limit.t +++ b/t/prefetch/with_limit.t @@ -31,7 +31,7 @@ my $use_prefetch = $no_prefetch->search( } ); -# add a floating +select to make sure it does nto throw things off +# add a floating +select to make sure it does not throw things off # we also expect it to appear in both selectors, as we can not know # for sure which part of the query it applies to (may be order_by, # maybe something else) @@ -39,11 +39,15 @@ my $use_prefetch = $no_prefetch->search( # we use a reference to the same array in bind vals, because # is_deeply picks up this difference too (not sure if bug or # feature) -my $bind_one = [ __add => 1 ]; $use_prefetch = $use_prefetch->search({}, { - '+select' => \[ 'me.artistid + ?', $bind_one ], + '+select' => \[ 'me.artistid + ?', [ \ 'inTEger' => 1 ] ], }); +my $bind_int_resolved = sub { [ { sqlt_datatype => 'inTEger' } => 1 ] }; +my $bind_vc_resolved = sub { [ + { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'tracks.title' } + => 'blah-blah-1234568' +] }; is_same_sql_bind ( $use_prefetch->as_query, '( @@ -77,12 +81,12 @@ is_same_sql_bind ( ORDER BY name DESC, cds.artist, cds.year ASC )', [ - $bind_one, # outer select - $bind_one, # inner select - [ 'tracks.title' => 'blah-blah-1234568' ], # inner where - $bind_one, # inner group_by - [ 'tracks.title' => 'blah-blah-1234568' ], # outer where - $bind_one, # outer group_by + $bind_int_resolved->(), # outer select + $bind_int_resolved->(), # inner select + $bind_vc_resolved->(), # inner where + $bind_int_resolved->(), # inner group_by + $bind_vc_resolved->(), # outer where + $bind_int_resolved->(), # outer group_by ], 'Expected SQL on complex limited prefetch' ); @@ -184,12 +188,12 @@ is_same_sql_bind ( WHERE ( ( artist.name = ? AND me.year = ? ) ) ORDER BY tracks.cd )', - [ - [ 'artist.name' => 'foo' ], - [ 'me.year' => 2010 ], - [ 'artist.name' => 'foo' ], - [ 'me.year' => 2010 ], - ], + [ map { + [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'artist.name' } + => 'foo' ], + [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'me.year' } + => 2010 ], + } (1,2)], 'No grouping of non-multiplying resultsets', ); diff --git a/t/relationship/core.t b/t/relationship/core.t index d6cb3a3..44a61a3 100644 --- a/t/relationship/core.t +++ b/t/relationship/core.t @@ -271,7 +271,8 @@ is_same_sql_bind ( ON artist_undirected_maps.id1 = me.artistid OR artist_undirected_maps.id2 = me.artistid WHERE ( artistid = ? 
) )', - [[artistid => 1]], + [[ { sqlt_datatype => 'integer', dbic_colname => 'artistid' } + => 1 ]], 'expected join sql produced', ); diff --git a/t/resultset/as_query.t b/t/resultset/as_query.t index bdc907d..0e77078 100644 --- a/t/resultset/as_query.t +++ b/t/resultset/as_query.t @@ -22,21 +22,31 @@ my $cdrs = $schema->resultset('CD'); $art_rs = $art_rs->search({ name => 'Billy Joel' }); +my $name_resolved_bind = [ + { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'name' } + => 'Billy Joel' +]; + { is_same_sql_bind( $art_rs->as_query, "(SELECT me.artistid, me.name, me.rank, me.charfield FROM artist me WHERE ( name = ? ))", - [ [ name => 'Billy Joel' ] ], + [ $name_resolved_bind ], ); } $art_rs = $art_rs->search({ rank => 2 }); +my $rank_resolved_bind = [ + { sqlt_datatype => 'integer', dbic_colname => 'rank' } + => 2 +]; + { is_same_sql_bind( $art_rs->as_query, "(SELECT me.artistid, me.name, me.rank, me.charfield FROM artist me WHERE ( ( ( rank = ? ) AND ( name = ? ) ) ) )", - [ [ rank => 2 ], [ name => 'Billy Joel' ] ], + [ $rank_resolved_bind, $name_resolved_bind ], ); } @@ -46,7 +56,7 @@ my $rscol = $art_rs->get_column( 'charfield' ); is_same_sql_bind( $rscol->as_query, "(SELECT me.charfield FROM artist me WHERE ( ( ( rank = ? ) AND ( name = ? ) ) ) )", - [ [ rank => 2 ], [ name => 'Billy Joel' ] ], + [ $rank_resolved_bind, $name_resolved_bind ], ); } diff --git a/t/resultset/as_subselect_rs.t b/t/resultset/as_subselect_rs.t index 1453f63..61acc59 100644 --- a/t/resultset/as_subselect_rs.t +++ b/t/resultset/as_subselect_rs.t @@ -35,7 +35,8 @@ is_same_sql_bind ( WHERE ( source = ? ) ) me )', - [ [ source => 'Library' ] ], + [ [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'source' } + => 'Library' ] ], 'Resultset-class attributes do not seep outside of the subselect', ); diff --git a/t/resultset/bind_attr.t b/t/resultset/bind_attr.t index ca00c30..e3fccc9 100644 --- a/t/resultset/bind_attr.t +++ b/t/resultset/bind_attr.t @@ -71,8 +71,8 @@ TODO: { $rs->as_query, "(SELECT me.artistid, me.name, me.rank, me.charfield FROM (SELECT a.*, cd.cdid AS cdid, cd.title AS title, cd.year AS year FROM artist a JOIN cd ON cd.artist = a.artistid WHERE cd.year = ?) me WHERE title LIKE ?)", [ - [ '!!dummy' => '1999' ], - [ '!!dummy' => 'Spoon%' ] + [ {} => '1999' ], + [ {} => 'Spoon%' ] ], 'got correct SQL' ); @@ -100,8 +100,8 @@ TODO: { $rs->as_query, "(SELECT me.artistid, me.name, me.rank, me.charfield FROM (SELECT a.*, cd.cdid AS cdid, cd.title AS title, cd.year AS year FROM artist a JOIN cd ON cd.artist = a.artistid WHERE cd.year = ?) me WHERE title LIKE ?)", [ - [ '!!dummy' => '1999' ], - [ '!!dummy' => 'Spoon%' ] + [ {} => '1999' ], + [ {} => 'Spoon%' ] ], 'got correct SQL (cookbook arbitrary SQL, in separate file)' ); diff --git a/t/search/related_strip_prefetch.t b/t/search/related_strip_prefetch.t index 419fd32..0745baf 100644 --- a/t/search/related_strip_prefetch.t +++ b/t/search/related_strip_prefetch.t @@ -11,7 +11,7 @@ use DBICTest; my $schema = DBICTest->init_schema(); my $rs = $schema->resultset('CD')->search ( - { 'tracks.id' => { '!=', 666 }}, + { 'tracks.trackid' => { '!=', 666 }}, { join => 'artist', prefetch => 'tracks', rows => 2 } ); @@ -26,7 +26,7 @@ is_same_sql_bind ( FROM cd me JOIN artist artist ON artist.artistid = me.artist LEFT JOIN track tracks ON tracks.cd = me.cdid - WHERE ( tracks.id != ? ) + WHERE ( tracks.trackid != ? 
) LIMIT 2 ) me JOIN artist artist ON artist.artistid = me.artist @@ -35,7 +35,9 @@ is_same_sql_bind ( GROUP BY tags.tagid, tags.cd, tags.tag )', - [ [ 'tracks.id' => 666 ] ], + [ [ { sqlt_datatype => 'integer', dbic_colname => 'tracks.trackid' } + => 666 ] + ], 'Prefetch spec successfully stripped on search_related' ); diff --git a/t/search/subquery.t b/t/search/subquery.t index be0febf..c371e66 100644 --- a/t/search/subquery.t +++ b/t/search/subquery.t @@ -18,18 +18,20 @@ my @tests = ( attrs => { rows => 5 }, sqlbind => \[ "( SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE (title = ? AND year LIKE ?) LIMIT 5)", - [ title => 'buahaha' ], - [ year => '20%' ], + [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'title' } + => 'buahaha' ], + [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'year' } + => '20%' ], ], }, { rs => $cdrs, search => { - artist_id => { 'in' => $art_rs->search({}, { rows => 1 })->get_column( 'id' )->as_query }, + artistid => { 'in' => $art_rs->search({}, { rows => 1 })->get_column( 'artistid' )->as_query }, }, sqlbind => \[ - "( SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE artist_id IN ( SELECT me.id FROM artist me LIMIT 1 ) )", + "( SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE artistid IN ( SELECT me.artistid FROM artist me LIMIT 1 ) )", ], }, @@ -62,15 +64,15 @@ my @tests = ( attrs => { alias => 'cd2', from => [ - { cd2 => $cdrs->search({ id => { '>' => 20 } })->as_query }, + { cd2 => $cdrs->search({ artist => { '>' => 20 } })->as_query }, ], }, sqlbind => \[ "( SELECT cd2.cdid, cd2.artist, cd2.title, cd2.year, cd2.genreid, cd2.single_track FROM ( - SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE id > ? + SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE artist > ? ) cd2 )", - [ 'id', 20 ] + [ { sqlt_datatype => 'integer', dbic_colname => 'artist' } => 20 ] ], }, @@ -96,11 +98,11 @@ my @tests = ( alias => 'cd2', from => [ { cd2 => $cdrs->search( - { id => { '>' => 20 } }, + { artist => { '>' => 20 } }, { alias => 'cd3', from => [ - { cd3 => $cdrs->search( { id => { '<' => 40 } } )->as_query } + { cd3 => $cdrs->search( { artist => { '<' => 40 } } )->as_query } ], }, )->as_query }, ], @@ -111,11 +113,11 @@ my @tests = ( (SELECT cd3.cdid, cd3.artist, cd3.title, cd3.year, cd3.genreid, cd3.single_track FROM (SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track - FROM cd me WHERE id < ?) cd3 - WHERE id > ?) cd2 + FROM cd me WHERE artist < ?) cd3 + WHERE artist > ?) cd2 )", - [ 'id', 40 ], - [ 'id', 20 ] + [ { sqlt_datatype => 'integer', dbic_colname => 'artist' } => 40 ], + [ { dbic_colname => 'artist' } => 20 ], # no rsrc in outer manual from - hence no resolution ], }, @@ -147,8 +149,8 @@ my @tests = ( SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE title = ? 
) cd2 )", - [ 'title', - 'Thriller' + [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'title' } + => 'Thriller' ] ], }, diff --git a/t/sqlmaker/bind_transport.t b/t/sqlmaker/bind_transport.t index 98baa4f..493dd62 100644 --- a/t/sqlmaker/bind_transport.t +++ b/t/sqlmaker/bind_transport.t @@ -8,10 +8,9 @@ use DBIC::SqlMakerTest; my $schema = DBICTest->init_schema(); -my $ne_bind = [ _ne => 'bar' ]; my $rs = $schema->resultset('CD')->search({ -and => [ - 'me.artist' => { '!=', 'foo' }, - 'me.artist' => { '!=', \[ '?', $ne_bind ] }, + 'me.artist' => { '!=', '666' }, + 'me.artist' => { '!=', \[ '?', [ _ne => 'bar' ] ] }, ]}); # bogus sql query to make sure bind composition happens properly @@ -40,14 +39,16 @@ for (1,2) { LIMIT 1 OFFSET 2 )', [ - [ 'me.artist' => 'foo' ], - $ne_bind, - [ _add => 1 ], - [ 'me.artist' => 'foo' ], - $ne_bind, - [ _sub => 2 ], - [ _lt => 3 ], - [ _mu => 4 ], + [ { sqlt_datatype => 'integer', dbic_colname => 'me.artist' } + => 666 ], + [ { dbic_colname => '_ne' } => 'bar' ], + [ { dbic_colname => '_add' } => 1 ], + [ { sqlt_datatype => 'integer', dbic_colname => 'me.artist' } + => 666 ], + [ { dbic_colname => '_ne' } => 'bar' ], + [ { dbic_colname => '_sub' } => 2 ], + [ { dbic_colname => '_lt' } => 3 ], + [ { dbic_colname => '_mu' } => 4 ], ], 'Correct crazy sql', ); diff --git a/t/sqlmaker/limit_dialects/generic_subq.t b/t/sqlmaker/limit_dialects/generic_subq.t index 9e771a9..8907808 100644 --- a/t/sqlmaker/limit_dialects/generic_subq.t +++ b/t/sqlmaker/limit_dialects/generic_subq.t @@ -37,7 +37,8 @@ is_same_sql_bind( ) < 2 ORDER BY me.title )', - [ [ 'source', 'Library' ] ], + [ [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'source' } + => 'Library' ] ], ); is_deeply ( @@ -78,7 +79,8 @@ is_same_sql_bind( ) BETWEEN 1 AND 3 ORDER BY "title" DESC )', - [ [ 'source', 'Library' ] ], + [ [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'source' } + => 'Library' ] ], ); is_deeply ( @@ -113,7 +115,8 @@ is_same_sql_bind( ) BETWEEN 1 AND 4294967295 ORDER BY "title" )', - [ [ 'source', 'Library' ] ], + [ [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'source' } + => 'Library' ] ], ); is_deeply ( diff --git a/t/sqlmaker/limit_dialects/rno.t b/t/sqlmaker/limit_dialects/rno.t index 4b96a65..04fb045 100644 --- a/t/sqlmaker/limit_dialects/rno.t +++ b/t/sqlmaker/limit_dialects/rno.t @@ -36,7 +36,8 @@ is_same_sql_bind( ) me WHERE rno__row__index BETWEEN 1 AND 1 )', - [ [ 'source', 'Library' ] ], + [ [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'source' } + => 'Library' ] ], ); $schema->storage->_sql_maker->quote_char ([qw/ [ ] /]); @@ -68,7 +69,8 @@ is_same_sql_bind( ) [me] WHERE [rno__row__index] BETWEEN 1 AND 1 )', - [ [ 'source', 'Library' ] ], + [ [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'source' } + => 'Library' ] ], ); { diff --git a/t/sqlmaker/limit_dialects/toplimit.t b/t/sqlmaker/limit_dialects/toplimit.t index 29ef966..630f32d 100644 --- a/t/sqlmaker/limit_dialects/toplimit.t +++ b/t/sqlmaker/limit_dialects/toplimit.t @@ -38,7 +38,8 @@ for my $null_order ( ) me ORDER BY me.id DESC )', - [ [ source => 'Library' ] ], + [ [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'source' } + => 'Library' ] ], ); } @@ -141,7 +142,8 @@ for my $ord_set ( ) me ORDER BY $ord_set->{order_req} )", - [ [ source => 'Library' ] ], + [ [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'source' } + => 'Library' ] ], ); } @@ -171,7 +173,10 @@ 
is_same_sql_bind ( WHERE ( source = ? ) ORDER BY title )', - [ [ source => 'Library' ], [ source => 'Library' ] ], + [ map { [ + { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'source' } + => 'Library' ] + } (1,2) ], ); # test deprecated column mixing over join boundaries @@ -190,8 +195,9 @@ is_same_sql_bind( $rs_selectas_top->search({})->as_query, JOIN owners owner ON owner.id = me.owner WHERE ( source = ? ) ORDER BY me.id - )', - [ [ 'source', 'Library' ] ], + )', + [ [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'source' } + => 'Library' ] ], ); { diff --git a/t/sqlmaker/order_by_bindtransport.t b/t/sqlmaker/order_by_bindtransport.t index f884739..095f72d 100644 --- a/t/sqlmaker/order_by_bindtransport.t +++ b/t/sqlmaker/order_by_bindtransport.t @@ -37,11 +37,14 @@ sub test_order { ORDER BY $args->{order_req} )", [ - [qw(foo bar)], - [qw(read_count 5)], - [qw(read_count 8)], + [ { sqlt_datatype => 'integer', dbic_colname => 'foo' } + => 'bar' ], + [ { sqlt_datatype => 'int', dbic_colname => 'read_count' } + => 5 ], + [ { sqlt_datatype => 'int', dbic_colname => 'read_count' } + => 8 ], $args->{bind} - ? @{ $args->{bind} } + ? map { [ { dbic_colname => $_->[0] } => $_->[1] ] } @{ $args->{bind} } : () ], ) || diag Dumper $args->{order_by}; diff --git a/t/storage/source_bind_compat.t b/t/storage/source_bind_compat.t new file mode 100644 index 0000000..268f6a8 --- /dev/null +++ b/t/storage/source_bind_compat.t @@ -0,0 +1,49 @@ +use strict; +use warnings; + +use Test::More; +use Test::Warn; +use Test::Exception; +use lib qw(t/lib); +use DBICTest; + +{ + package DBICTest::Legacy::Storage; + use base 'DBIx::Class::Storage::DBI::SQLite'; + + use Data::Dumper::Concise; + + sub source_bind_attributes { return {} } +} + + +my $schema = DBICTest::Schema->clone; +$schema->storage_type('DBICTest::Legacy::Storage'); +$schema->connection('dbi:SQLite::memory:'); + +$schema->storage->dbh_do( sub { $_[1]->do(<<'EOS') } ); +CREATE TABLE artist ( + artistid INTEGER PRIMARY KEY NOT NULL, + name varchar(100), + rank integer NOT NULL DEFAULT 13, + charfield char(10) +) +EOS + +my $legacy = sub { $schema->resultset('Artist')->search({ name => 'foo'})->next }; +if (DBIx::Class->VERSION >= 0.09) { + &throws_ok( + $legacy, + qr/XXXXXXXXX not sure what error to put here yet XXXXXXXXXXXXXXX/, + 'deprecated use of source_bind_attributes throws', + ); +} +else { + &warnings_exist ( + $legacy, + qr/\QThe source_bind_attributes() override in DBICTest::Legacy::Storage relies on a deprecated codepath/, + 'Warning issued during invocation of legacy storage codepath', + ); +} + +done_testing;