X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=blobdiff_plain;f=lib%2FDBIx%2FClass%2FStorage%2FDBI.pm;h=749c7901d44a50c801382f47942497bc09dd91c5;hb=d9e53b85a871e0a793d6b164a6ad977341c07b27;hp=e6586c240171c45ec16db42b346dc09275ce0608;hpb=92fe218153fcfdb968bfa929ed7f31389738335f;p=dbsrgits%2FDBIx-Class-Historic.git diff --git a/lib/DBIx/Class/Storage/DBI.pm b/lib/DBIx/Class/Storage/DBI.pm index e6586c2..749c790 100644 --- a/lib/DBIx/Class/Storage/DBI.pm +++ b/lib/DBIx/Class/Storage/DBI.pm @@ -7,20 +7,22 @@ use strict; use warnings; use Carp::Clan qw/^DBIx::Class/; use DBI; -use SQL::Abstract::Limit; use DBIx::Class::Storage::DBI::Cursor; use DBIx::Class::Storage::Statistics; -use Scalar::Util qw/blessed weaken/; +use Scalar::Util(); +use List::Util(); __PACKAGE__->mk_group_accessors('simple' => qw/_connect_info _dbi_connect_info _dbh _sql_maker _sql_maker_opts - _conn_pid _conn_tid transaction_depth _dbh_autocommit savepoints/ + _conn_pid _conn_tid transaction_depth _dbh_autocommit _on_connect_do + _on_disconnect_do _on_connect_do_store _on_disconnect_do_store + savepoints/ ); # the values for these accessors are picked out (and deleted) from # the attribute hashref passed to connect_info my @storage_options = qw/ - on_connect_do on_disconnect_do disable_sth_caching unsafe auto_savepoint + on_connect_call on_disconnect_call disable_sth_caching unsafe auto_savepoint /; __PACKAGE__->mk_group_accessors('simple' => @storage_options); @@ -29,292 +31,8 @@ __PACKAGE__->mk_group_accessors('simple' => @storage_options); __PACKAGE__->cursor_class('DBIx::Class::Storage::DBI::Cursor'); __PACKAGE__->mk_group_accessors('inherited' => qw/sql_maker_class/); -__PACKAGE__->sql_maker_class('DBIC::SQL::Abstract'); +__PACKAGE__->sql_maker_class('DBIx::Class::SQLAHacks'); -BEGIN { - -package # Hide from PAUSE - DBIC::SQL::Abstract; # Would merge upstream, but nate doesn't reply :( - -use base qw/SQL::Abstract::Limit/; - -# This prevents the caching of $dbh in S::A::L, I believe -sub new { - my $self = shift->SUPER::new(@_); - - # If limit_dialect is a ref (like a $dbh), go ahead and replace - # it with what it resolves to: - $self->{limit_dialect} = $self->_find_syntax($self->{limit_dialect}) - if ref $self->{limit_dialect}; - - $self; -} - -sub _RowNumberOver { - my ($self, $sql, $order, $rows, $offset ) = @_; - - $offset += 1; - my $last = $rows + $offset; - my ( $order_by ) = $self->_order_by( $order ); - - $sql = <<""; -SELECT * FROM -( - SELECT Q1.*, ROW_NUMBER() OVER( ) AS ROW_NUM FROM ( - $sql - $order_by - ) Q1 -) Q2 -WHERE ROW_NUM BETWEEN $offset AND $last - - return $sql; -} - - -# While we're at it, this should make LIMIT queries more efficient, -# without digging into things too deeply -use Scalar::Util 'blessed'; -sub _find_syntax { - my ($self, $syntax) = @_; - my $dbhname = blessed($syntax) ? 
$syntax->{Driver}{Name} : $syntax; - if(ref($self) && $dbhname && $dbhname eq 'DB2') { - return 'RowNumberOver'; - } - - $self->{_cached_syntax} ||= $self->SUPER::_find_syntax($syntax); -} - -sub select { - my ($self, $table, $fields, $where, $order, @rest) = @_; - $table = $self->_quote($table) unless ref($table); - local $self->{rownum_hack_count} = 1 - if (defined $rest[0] && $self->{limit_dialect} eq 'RowNum'); - @rest = (-1) unless defined $rest[0]; - die "LIMIT 0 Does Not Compute" if $rest[0] == 0; - # and anyway, SQL::Abstract::Limit will cause a barf if we don't first - local $self->{having_bind} = []; - my ($sql, @ret) = $self->SUPER::select( - $table, $self->_recurse_fields($fields), $where, $order, @rest - ); - $sql .= - $self->{for} ? - ( - $self->{for} eq 'update' ? ' FOR UPDATE' : - $self->{for} eq 'shared' ? ' FOR SHARE' : - '' - ) : - '' - ; - return wantarray ? ($sql, @ret, @{$self->{having_bind}}) : $sql; -} - -sub insert { - my $self = shift; - my $table = shift; - $table = $self->_quote($table) unless ref($table); - $self->SUPER::insert($table, @_); -} - -sub update { - my $self = shift; - my $table = shift; - $table = $self->_quote($table) unless ref($table); - $self->SUPER::update($table, @_); -} - -sub delete { - my $self = shift; - my $table = shift; - $table = $self->_quote($table) unless ref($table); - $self->SUPER::delete($table, @_); -} - -sub _emulate_limit { - my $self = shift; - if ($_[3] == -1) { - return $_[1].$self->_order_by($_[2]); - } else { - return $self->SUPER::_emulate_limit(@_); - } -} - -sub _recurse_fields { - my ($self, $fields, $params) = @_; - my $ref = ref $fields; - return $self->_quote($fields) unless $ref; - return $$fields if $ref eq 'SCALAR'; - - if ($ref eq 'ARRAY') { - return join(', ', map { - $self->_recurse_fields($_) - .(exists $self->{rownum_hack_count} && !($params && $params->{no_rownum_hack}) - ? ' AS col'.$self->{rownum_hack_count}++ - : '') - } @$fields); - } elsif ($ref eq 'HASH') { - foreach my $func (keys %$fields) { - return $self->_sqlcase($func) - .'( '.$self->_recurse_fields($fields->{$func}).' )'; - } - } -} - -sub _order_by { - my $self = shift; - my $ret = ''; - my @extra; - if (ref $_[0] eq 'HASH') { - if (defined $_[0]->{group_by}) { - $ret = $self->_sqlcase(' group by ') - .$self->_recurse_fields($_[0]->{group_by}, { no_rownum_hack => 1 }); - } - if (defined $_[0]->{having}) { - my $frag; - ($frag, @extra) = $self->_recurse_where($_[0]->{having}); - push(@{$self->{having_bind}}, @extra); - $ret .= $self->_sqlcase(' having ').$frag; - } - if (defined $_[0]->{order_by}) { - $ret .= $self->_order_by($_[0]->{order_by}); - } - } elsif (ref $_[0] eq 'SCALAR') { - $ret = $self->_sqlcase(' order by ').${ $_[0] }; - } elsif (ref $_[0] eq 'ARRAY' && @{$_[0]}) { - my @order = @{+shift}; - $ret = $self->_sqlcase(' order by ') - .join(', ', map { - my $r = $self->_order_by($_, @_); - $r =~ s/^ ?ORDER BY //i; - $r; - } @order); - } else { - $ret = $self->SUPER::_order_by(@_); - } - return $ret; -} - -sub _order_directions { - my ($self, $order) = @_; - $order = $order->{order_by} if ref $order eq 'HASH'; - return $self->SUPER::_order_directions($order); -} - -sub _table { - my ($self, $from) = @_; - if (ref $from eq 'ARRAY') { - return $self->_recurse_from(@$from); - } elsif (ref $from eq 'HASH') { - return $self->_make_as($from); - } else { - return $from; # would love to quote here but _table ends up getting called - # twice during an ->select without a limit clause due to - # the way S::A::Limit->select works. 
should maybe consider - # bypassing this and doing S::A::select($self, ...) in - # our select method above. meantime, quoting shims have - # been added to select/insert/update/delete here - } -} - -sub _recurse_from { - my ($self, $from, @join) = @_; - my @sqlf; - push(@sqlf, $self->_make_as($from)); - foreach my $j (@join) { - my ($to, $on) = @$j; - - # check whether a join type exists - my $join_clause = ''; - my $to_jt = ref($to) eq 'ARRAY' ? $to->[0] : $to; - if (ref($to_jt) eq 'HASH' and exists($to_jt->{-join_type})) { - $join_clause = ' '.uc($to_jt->{-join_type}).' JOIN '; - } else { - $join_clause = ' JOIN '; - } - push(@sqlf, $join_clause); - - if (ref $to eq 'ARRAY') { - push(@sqlf, '(', $self->_recurse_from(@$to), ')'); - } else { - push(@sqlf, $self->_make_as($to)); - } - push(@sqlf, ' ON ', $self->_join_condition($on)); - } - return join('', @sqlf); -} - -sub _make_as { - my ($self, $from) = @_; - return join(' ', map { (ref $_ eq 'SCALAR' ? $$_ : $self->_quote($_)) } - reverse each %{$self->_skip_options($from)}); -} - -sub _skip_options { - my ($self, $hash) = @_; - my $clean_hash = {}; - $clean_hash->{$_} = $hash->{$_} - for grep {!/^-/} keys %$hash; - return $clean_hash; -} - -sub _join_condition { - my ($self, $cond) = @_; - if (ref $cond eq 'HASH') { - my %j; - for (keys %$cond) { - my $v = $cond->{$_}; - if (ref $v) { - # XXX no throw_exception() in this package and croak() fails with strange results - Carp::croak(ref($v) . qq{ reference arguments are not supported in JOINS - try using \"..." instead'}) - if ref($v) ne 'SCALAR'; - $j{$_} = $v; - } - else { - my $x = '= '.$self->_quote($v); $j{$_} = \$x; - } - }; - return scalar($self->_recurse_where(\%j)); - } elsif (ref $cond eq 'ARRAY') { - return join(' OR ', map { $self->_join_condition($_) } @$cond); - } else { - die "Can't handle this yet!"; - } -} - -sub _quote { - my ($self, $label) = @_; - return '' unless defined $label; - return "*" if $label eq '*'; - return $label unless $self->{quote_char}; - if(ref $self->{quote_char} eq "ARRAY"){ - return $self->{quote_char}->[0] . $label . $self->{quote_char}->[1] - if !defined $self->{name_sep}; - my $sep = $self->{name_sep}; - return join($self->{name_sep}, - map { $self->{quote_char}->[0] . $_ . $self->{quote_char}->[1] } - split(/\Q$sep\E/,$label)); - } - return $self->SUPER::_quote($label); -} - -sub limit_dialect { - my $self = shift; - $self->{limit_dialect} = shift if @_; - return $self->{limit_dialect}; -} - -sub quote_char { - my $self = shift; - $self->{quote_char} = shift if @_; - return $self->{quote_char}; -} - -sub name_sep { - my $self = shift; - $self->{name_sep} = shift if @_; - return $self->{name_sep}; -} - -} # End of BEGIN block =head1 NAME @@ -322,6 +40,15 @@ DBIx::Class::Storage::DBI - DBI storage handler =head1 SYNOPSIS + my $schema = MySchema->connect('dbi:SQLite:my.db'); + + $schema->storage->debug(1); + $schema->dbh_do("DROP TABLE authors"); + + $schema->resultset('Book')->search({ + written_on => $schema->storage->datetime_parser(DateTime->now) + }); + =head1 DESCRIPTION This class represents the connection to an RDBMS via L. 
See
@@ -355,34 +82,37 @@ The argument list may contain:
 
 =item *
 
-The same 4-element argument set one would normally pass to L<DBI/connect>,
-optionally followed by L<extra attributes|/DBIx::Class specific connection attributes>
+The same 4-element argument set one would normally pass to
+L<DBI/connect>, optionally followed by
+L<extra attributes|/DBIx::Class specific connection attributes>
 recognized by DBIx::Class:
 
-  $connect_info_args = [ $dsn, $user, $pass, \%dbi_attributes, \%extra_attributes ];
+  $connect_info_args = [ $dsn, $user, $password, \%dbi_attributes?, \%extra_attributes? ];
 
 =item *
 
-A lone code reference which returns a connected L<DBI database handle|DBI/connect>
-optinally followed by L<extra attributes|/DBIx::Class specific connection attributes>
-recognized by DBIx::Class:
+A single code reference which returns a connected
+L<DBI database handle|DBI/connect> optionally followed by
+L<extra attributes|/DBIx::Class specific connection attributes> recognized
+by DBIx::Class:
 
-  $connect_info_args = [ sub { DBI->connect (...) }, \%extra_attributes ];
+  $connect_info_args = [ sub { DBI->connect (...) }, \%extra_attributes? ];
 
 =item *
 
-A lone hashref with all the attributes and the dsn/user/pass mixed together:
+A single hashref with all the attributes and the dsn/user/password
+mixed together:
 
   $connect_info_args = [{
     dsn => $dsn,
     user => $user,
-    pass => $pass,
+    password => $pass,
    %dbi_attributes,
    %extra_attributes,
  }];
 
 This is particularly useful for L<Catalyst> based applications, allowing the
-following config:
+following config (L<Config::General> style):
 
   <Model::DB>
     schema_class   App::DB
     <connect_info>
@@ -396,13 +126,12 @@ following config:
 
 =back
 
-Please note that the L<DBI> docs
-recommend that you always explicitly set C<AutoCommit> to either
-C<0> or C<1>. L<DBIx::Class> further recommends that it be set
-to C<1>, and that you perform transactions via our L<DBIx::Class::Schema/txn_do>
-method. L<DBIx::Class> will set it to C<1> if you do not do explicitly
-set it to zero. This is the default for most DBDs. See
-L</DBIx::Class and AutoCommit> for details.
+Please note that the L<DBI> docs recommend that you always explicitly
+set C<AutoCommit> to either I<0> or I<1>. L<DBIx::Class> further
+recommends that it be set to I<1>, and that you perform transactions
+via our L<DBIx::Class::Schema/txn_do> method. L<DBIx::Class> will set it
+to I<1> if you do not explicitly set it to zero. This is the default
+for most DBDs. See L</DBIx::Class and AutoCommit> for details.
 
 =head3 DBIx::Class specific connection attributes
 
@@ -417,7 +146,7 @@ these options will be cleared before setting the new ones, regardless of
 whether any options are specified in the new C<connect_info>.
 
 
-=over 4
+=over
 
 =item on_connect_do
 
@@ -426,6 +155,10 @@ the database. Its value may contain:
 
 =over
 
+=item a scalar
+
+This contains one SQL statement to execute.
+
 =item an array reference
 
 This contains SQL statements to execute in order. Each element contains
@@ -440,10 +173,95 @@ array reference, its return value is ignored.
 
 =item on_disconnect_do
 
-Takes arguments in the same form as L<on_connect_do> and executes them
+Takes arguments in the same form as L</on_connect_do> and executes them
 immediately before disconnecting from the database.
 
-Note, this only runs if you explicitly call L<disconnect> on the
+Note, this only runs if you explicitly call L</disconnect> on the
+storage object.
+
+=item on_connect_call
+
+A more generalized form of L</on_connect_do> that calls the specified
+C<connect_call_METHOD> methods in your storage driver.
+
+  on_connect_do => 'select 1'
+
+is equivalent to:
+
+  on_connect_call => [ [ do_sql => 'select 1' ] ]
+
+Its values may contain:
+
+=over
+
+=item a scalar
+
+Will call the C<connect_call_METHOD> method.
+
+=item a code reference
+
+Will execute C<< $code->($storage) >>
+
+=item an array reference
+
+Each value can be a method name or code reference.
+
+=item an array of arrays
+
+For each array, the first item is taken to be the C<connect_call_METHOD>
+method name or code reference, and the rest are parameters to it.
+
+=back
+
+Some predefined storage methods you may use:
+
+=over
+
+=item do_sql
+
+Executes a SQL string or a code reference that returns a SQL string. This is
+what L</on_connect_do> and L</on_disconnect_do> use.
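As an illustration only (the SQL and search path here are hypothetical),
C<do_sql> entries are usually declared through L</on_connect_call> like so,
with a code reference returning an arrayref of statements to run:

  on_connect_call => [
    [ do_sql => 'SET search_path TO myapp' ],
    [ do_sql => sub { my $storage = shift; [ 'select 1' ] } ],
  ]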
+
+It can take:
+
+=over
+
+=item a scalar
+
+Will execute the scalar as SQL.
+
+=item an arrayref
+
+Taken to be arguments to L<DBI/do>, the SQL string optionally followed by the
+attributes hashref and bind values.
+
+=item a code reference
+
+Will execute C<< $code->($storage) >> and execute the return array refs as
+above.
+
+=back
+
+=item datetime_setup
+
+Execute any statements necessary to initialize the database session to return
+and accept datetime/timestamp values used with
+L<DBIx::Class::InflateColumn::DateTime>.
+
+Only necessary for some databases, see your specific storage driver for
+implementation details.
+
+=back
+
+=item on_disconnect_call
+
+Takes arguments in the same form as L</on_connect_call> and executes them
+immediately before disconnecting from the database.
+
+Calls the C<disconnect_call_METHOD> methods as opposed to the
+C<connect_call_METHOD> methods called by L</on_connect_call>.
+
+Note, this only runs if you explicitly call L</disconnect> on the
 storage object.
 
 =item disable_sth_caching
 
@@ -455,26 +273,31 @@ statement handles via L<DBI/prepare_cached>.
 
 Sets the limit dialect. This is useful for JDBC-bridge among others
 where the remote SQL-dialect cannot be determined by the name of the
-driver alone.
+driver alone. See also L<SQL::Abstract::Limit>.
 
 =item quote_char
 
 Specifies what characters to use to quote table and column names. If
-you use this you will want to specify L<name_sep|/name_sep> as well.
+you use this you will want to specify L</name_sep> as well.
 
-quote_char expects either a single character, in which case is it is placed
-on either side of the table/column, or an arrayref of length 2 in which case the
-table/column name is placed between the elements.
+C<quote_char> expects either a single character, in which case it is
+placed on either side of the table/column name, or an arrayref of length
+2 in which case the table/column name is placed between the elements.
 
-For example under MySQL you'd use C<quote_char =E<gt> '`'>, and user SQL Server you'd
-use C<quote_char =E<gt> [qw/[ ]/]>.
+For example under MySQL you should use C<< quote_char => '`' >>, and for
+SQL Server you should use C<< quote_char => [qw/[ ]/] >>.
 
 =item name_sep
 
-This only needs to be used in conjunction with L<quote_char|/quote_char>, and is used to
+This only needs to be used in conjunction with C<quote_char>, and is used to
 specify the character that separates elements (schemas, tables, columns) from
 each other. In most cases this is simply a C<.>.
 
+The consequence of not supplying this value is that L<SQL::Abstract>
+will assume DBIx::Class' uses of aliases to be complete column
+names. The output will look like I<"me.name"> when it should actually
+be I<"me"."name">.
+
 =item unsafe
 
 This Storage driver normally installs its own C<HandleError>, sets
@@ -497,9 +320,15 @@ If this option is true, L<DBIx::Class> will use savepoints when nesting
 transactions, making it possible to recover from failure in the inner
 transaction without having to abort all outer transactions.
 
+=item cursor_class
+
+Use this argument to supply a cursor class other than the default
+L<DBIx::Class::Storage::DBI::Cursor>.
+ =back -Some real-life examples of arguments to L and L +Some real-life examples of arguments to L and +L # Simple SQLite connection ->connect_info([ 'dbi:SQLite:./foo.db' ]); @@ -529,7 +358,7 @@ Some real-life examples of arguments to L and L for explanation + # See parse_connect_info for explanation ->connect_info( [{ dsn => 'dbi:Pg:dbname=foo', @@ -581,7 +410,7 @@ sub connect_info { unshift @args, delete $attrs{$_}; } } - else { # otherwise assume dsn/user/pass + \%attrs + \%extra_attrs + else { # otherwise assume dsn/user/password + \%attrs + \%extra_attrs %attrs = ( % { $args[3] || {} }, % { $args[4] || {} }, @@ -605,6 +434,11 @@ sub connect_info { $self->_sql_maker_opts->{$sql_maker_opt} = $opt_val; } } + for my $connect_do_opt (qw/on_connect_do on_disconnect_do/) { + if(my $opt_val = delete $attrs{$connect_do_opt}) { + $self->$connect_do_opt($opt_val); + } + } } %attrs = () if (ref $args[0] eq 'CODE'); # _connect() never looks past $args[0] in this case @@ -615,8 +449,57 @@ sub connect_info { =head2 on_connect_do -This method is deprecated in favor of setting via L. +This method is deprecated in favour of setting via L. +=cut + +sub on_connect_do { + my $self = shift; + $self->_setup_connect_do(on_connect_do => @_); +} + +=head2 on_disconnect_do + +This method is deprecated in favour of setting via L. + +=cut + +sub on_disconnect_do { + my $self = shift; + $self->_setup_connect_do(on_disconnect_do => @_); +} + +sub _setup_connect_do { + my ($self, $opt) = (shift, shift); + + my $accessor = "_$opt"; + my $store = "_${opt}_store"; + + return $self->$accessor if not @_; + + my $val = shift; + + if (not defined $val) { + $self->$accessor(undef); + $self->$store(undef); + return; + } + + my @store; + + if (not ref($val)) { + push @store, [ 'do_sql', $val ]; + } elsif (ref($val) eq 'CODE') { + push @store, $val; + } elsif (ref($val) eq 'ARRAY') { + push @store, map [ 'do_sql', $_ ], @$val; + } else { + $self->throw_exception("Invalid type for $opt ".ref($val)); + } + + $self->$store(\@store); + $self->$accessor($val); +} =head2 dbh_do @@ -764,8 +647,12 @@ sub disconnect { my ($self) = @_; if( $self->connected ) { - my $connection_do = $self->on_disconnect_do; - $self->_do_connection_actions($connection_do) if ref($connection_do); + if (my $connection_call = $self->on_disconnect_call) { + $self->_do_connection_actions(disconnect_call_ => $connection_call) + } + if (my $connection_do = $self->_on_disconnect_do_store) { + $self->_do_connection_actions(disconnect_call_ => $connection_do) + } $self->_dbh->rollback unless $self->_dbh_autocommit; $self->_dbh->disconnect; @@ -853,13 +740,14 @@ sub dbh { sub _sql_maker_args { my ($self) = @_; - return ( bindtype=>'columns', limit_dialect => $self->dbh, %{$self->_sql_maker_opts} ); + return ( bindtype=>'columns', array_datatypes => 1, limit_dialect => $self->dbh, %{$self->_sql_maker_opts} ); } sub sql_maker { my ($self) = @_; unless ($self->_sql_maker) { my $sql_maker_class = $self->sql_maker_class; + $self->ensure_class_loaded ($sql_maker_class); $self->_sql_maker($sql_maker_class->new( $self->_sql_maker_args )); } return $self->_sql_maker; @@ -872,39 +760,80 @@ sub _populate_dbh { my @info = @{$self->_dbi_connect_info || []}; $self->_dbh($self->_connect(@info)); + $self->_conn_pid($$); + $self->_conn_tid(threads->tid) if $INC{'threads.pm'}; + + $self->_determine_driver; + # Always set the transaction depth on connect, since # there is no transaction in progress by definition $self->{transaction_depth} = $self->_dbh_autocommit ? 
0 : 1; - if(ref $self eq 'DBIx::Class::Storage::DBI') { - my $driver = $self->_dbh->{Driver}->{Name}; + if (my $connection_call = $self->on_connect_call) { + $self->_do_connection_actions(connect_call_ => $connection_call) + } + if (my $connection_do = $self->_on_connect_do_store) { + $self->_do_connection_actions(connect_call_ => $connection_do) + } +} + +sub _determine_driver { + my ($self) = @_; + + if (ref $self eq 'DBIx::Class::Storage::DBI') { + my $driver; + + if ($self->_dbh) { # we are connected + $driver = $self->_dbh->{Driver}{Name}; + } else { + # try to use dsn to not require being connected, the driver may still + # force a connection in _rebless to determine version + ($driver) = $self->_dbi_connect_info->[0] =~ /dbi:([^:]+):/i; + } + if ($self->load_optional_class("DBIx::Class::Storage::DBI::${driver}")) { bless $self, "DBIx::Class::Storage::DBI::${driver}"; $self->_rebless(); } } - - my $connection_do = $self->on_connect_do; - $self->_do_connection_actions($connection_do) if ref($connection_do); - - $self->_conn_pid($$); - $self->_conn_tid(threads->tid) if $INC{'threads.pm'}; } sub _do_connection_actions { - my $self = shift; - my $connection_do = shift; - - if (ref $connection_do eq 'ARRAY') { - $self->_do_query($_) foreach @$connection_do; - } - elsif (ref $connection_do eq 'CODE') { - $connection_do->(); + my $self = shift; + my $method_prefix = shift; + my $call = shift; + + if (not ref($call)) { + my $method = $method_prefix . $call; + $self->$method(@_); + } elsif (ref($call) eq 'CODE') { + $self->$call(@_); + } elsif (ref($call) eq 'ARRAY') { + if (ref($call->[0]) ne 'ARRAY') { + $self->_do_connection_actions($method_prefix, $_) for @$call; + } else { + $self->_do_connection_actions($method_prefix, @$_) for @$call; + } + } else { + $self->throw_exception (sprintf ("Don't know how to process conection actions of type '%s'", ref($call)) ); } return $self; } +sub connect_call_do_sql { + my $self = shift; + $self->_do_query(@_); +} + +sub disconnect_call_do_sql { + my $self = shift; + $self->_do_query(@_); +} + +# override in db-specific backend when necessary +sub connect_call_datetime_setup { 1 } + sub _do_query { my ($self, $action) = @_; @@ -913,10 +842,18 @@ sub _do_query { $self->_do_query($_) foreach @$action; } else { - my @to_run = (ref $action eq 'ARRAY') ? (@$action) : ($action); - $self->_query_start(@to_run); - $self->_dbh->do(@to_run); - $self->_query_end(@to_run); + # Most debuggers expect ($sql, @bind), so we need to exclude + # the attribute hash which is the second argument to $dbh->do + # furthermore the bind values are usually to be presented + # as named arrayref pairs, so wrap those here too + my @do_args = (ref $action eq 'ARRAY') ? 
(@$action) : ($action); + my $sql = shift @do_args; + my $attrs = shift @do_args; + my @bind = map { [ undef, $_ ] } @do_args; + + $self->_query_start($sql, @bind); + $self->_dbh->do($sql, $attrs, @do_args); + $self->_query_end($sql, @bind); } return $self; @@ -945,7 +882,7 @@ sub _connect { if($dbh && !$self->unsafe) { my $weak_self = $self; - weaken($weak_self); + Scalar::Util::weaken($weak_self); $dbh->{HandleError} = sub { if ($weak_self) { $weak_self->throw_exception("DBI Exception: $_[0]"); @@ -1126,14 +1063,19 @@ sub txn_rollback { sub _prep_for_execute { my ($self, $op, $extra_bind, $ident, $args) = @_; + if( Scalar::Util::blessed($ident) && $ident->isa("DBIx::Class::ResultSource") ) { + $ident = $ident->from(); + } + my ($sql, @bind) = $self->sql_maker->$op($ident, @$args); + unshift(@bind, map { ref $_ eq 'ARRAY' ? $_ : [ '!!dummy', $_ ] } @$extra_bind) if $extra_bind; - return ($sql, \@bind); } + sub _fix_bind_params { my ($self, @bind) = @_; @@ -1155,7 +1097,7 @@ sub _query_start { if ( $self->debug ) { @bind = $self->_fix_bind_params(@bind); - + $self->debugobj->query_start( $sql, @bind ); } } @@ -1171,10 +1113,6 @@ sub _query_end { sub _dbh_execute { my ($self, $dbh, $op, $extra_bind, $ident, $bind_attributes, @args) = @_; - - if( blessed($ident) && $ident->isa("DBIx::Class::ResultSource") ) { - $ident = $ident->from(); - } my ($sql, $bind) = $self->_prep_for_execute($op, $extra_bind, $ident, \@args); @@ -1194,7 +1132,8 @@ sub _dbh_execute { } foreach my $data (@data) { - $data = ref $data ? ''.$data : $data; # stringify args + my $ref = ref $data; + $data = $ref && $ref ne 'ARRAY' ? ''.$data : $data; # stringify args (except arrayrefs) $sth->bind_param($placeholder_index, $data, $attributes); $placeholder_index++; @@ -1217,24 +1156,26 @@ sub _execute { sub insert { my ($self, $source, $to_insert) = @_; - - my $ident = $source->from; + + my $ident = $source->from; my $bind_attributes = $self->source_bind_attributes($source); + my $updated_cols = {}; + $self->ensure_connected; foreach my $col ( $source->columns ) { if ( !defined $to_insert->{$col} ) { my $col_info = $source->column_info($col); if ( $col_info->{auto_nextval} ) { - $to_insert->{$col} = $self->_sequence_fetch( 'nextval', $col_info->{sequence} || $self->_dbh_get_autoinc_seq($self->dbh, $source) ); + $updated_cols->{$col} = $to_insert->{$col} = $self->_sequence_fetch( 'nextval', $col_info->{sequence} || $self->_dbh_get_autoinc_seq($self->dbh, $source) ); } } } $self->_execute('insert' => [], $source, $bind_attributes, $to_insert); - return $to_insert; + return $updated_cols; } ## Still not quite perfect, and EXPERIMENTAL @@ -1254,13 +1195,7 @@ sub insert_bulk { # @bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args ## This must be an arrayref, else nothing works! 
-
   my $tuple_status = [];
-
-  ##use Data::Dumper;
-  ##print STDERR Dumper( $data, $sql, [@bind] );
-
-  my $time = time();
 
   ## Get the bind_attributes, if any exist
   my $bind_attributes = $self->source_bind_attributes($source);
@@ -1283,7 +1218,27 @@ sub insert_bulk {
     $sth->bind_param_array( $placeholder_index, [@data], $attributes );
     $placeholder_index++;
   }
-  my $rv = $sth->execute_array({ArrayTupleStatus => $tuple_status});
+  my $rv = eval { $sth->execute_array({ArrayTupleStatus => $tuple_status}) };
+  if (my $err = $@) {
+    my $i = 0;
+    ++$i while $i <= $#$tuple_status && !ref $tuple_status->[$i];
+
+    $self->throw_exception($sth->errstr || "Unexpected populate error: $err")
+      if ($i > $#$tuple_status);
+
+    require Data::Dumper;
+    local $Data::Dumper::Terse = 1;
+    local $Data::Dumper::Indent = 1;
+    local $Data::Dumper::Useqq = 1;
+    local $Data::Dumper::Quotekeys = 0;
+
+    $self->throw_exception(sprintf "%s for populate slice:\n%s",
+      $tuple_status->[$i][1],
+      Data::Dumper::Dumper(
+        { map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols) }
+      ),
+    );
+  }
   $self->throw_exception($sth->errstr) if !$rv;
 
   $self->_query_end( $sql, @bind );
@@ -1303,38 +1258,161 @@ sub delete {
   my $self = shift @_;
   my $source = shift @_;
 
-  my $bind_attrs = {}; ## If ever it's needed...
+  my $bind_attrs = $self->source_bind_attributes($source);
 
   return $self->_execute('delete' => [], $source, $bind_attrs, @_);
 }
 
-sub _select {
-  my ($self, $ident, $select, $condition, $attrs) = @_;
-  my $order = $attrs->{order_by};
-
-  if (ref $condition eq 'SCALAR') {
-    my $unwrap = ${$condition};
-    if ($unwrap =~ s/ORDER BY (.*)$//i) {
-      $order = $1;
-      $condition = \$unwrap;
+# We were sent here because the $rs contains a complex search
+# which will require a subquery to select the correct rows
+# (i.e. joined or limited resultsets)
+#
+# Generating a single PK column subquery is trivial and supported
+# by all RDBMS. However if we have a multicolumn PK, things get ugly.
+# Look at _multipk_update_delete()
+sub _subq_update_delete {
+  my $self = shift;
+  my ($rs, $op, $values) = @_;
+
+  my $rsrc = $rs->result_source;
+
+  # we already check this, but double check naively just in case. Should be removed soon
+  my $sel = $rs->_resolved_attrs->{select};
+  $sel = [ $sel ] unless ref $sel eq 'ARRAY';
+  my @pcols = $rsrc->primary_columns;
+  if (@$sel != @pcols) {
+    $self->throw_exception (
+      'Subquery update/delete can not be called on resultsets selecting a'
+     .' number of columns different than the number of primary keys'
+    );
+  }
+
+  if (@pcols == 1) {
+    return $self->$op (
+      $rsrc,
+      $op eq 'update' ? $values : (),
+      { $pcols[0] => { -in => $rs->as_query } },
+    );
+  }
+
+  else {
+    return $self->_multipk_update_delete (@_);
+  }
+}
+
+# ANSI SQL does not provide a reliable way to perform a multicol-PK
+# resultset update/delete involving subqueries. So by default resort
+# to simple (and inefficient) delete_all style per-row operations,
+# while allowing specific storages to override this with a faster
+# implementation.
+#
+sub _multipk_update_delete {
+  return shift->_per_row_update_delete (@_);
+}
+
+# This is the default loop used to delete/update rows for multi PK
+# resultsets, and used by mysql exclusively (because it can't do anything
+# else).
+#
+# We do not use $row->$op style queries, because resultset update/delete
+# is not expected to cascade (this is what delete_all/update_all is for).
+#
+# There should be no race conditions as the entire operation is rolled
+# in a transaction.
+#
+sub _per_row_update_delete {
+  my $self = shift;
+  my ($rs, $op, $values) = @_;
+
+  my $rsrc = $rs->result_source;
+  my @pcols = $rsrc->primary_columns;
+
+  my $guard = $self->txn_scope_guard;
+
+  # emulate the return value of $sth->execute for non-selects
+  my $row_cnt = '0E0';
+
+  my $subrs_cur = $rs->cursor;
+  while (my @pks = $subrs_cur->next) {
+
+    my $cond;
+    for my $i (0.. $#pcols) {
+      $cond->{$pcols[$i]} = $pks[$i];
     }
+
+    $self->$op (
+      $rsrc,
+      $op eq 'update' ? $values : (),
+      $cond,
+    );
+
+    $row_cnt++;
   }
 
-  my $for = delete $attrs->{for};
+  $guard->commit;
+
+  return $row_cnt;
+}
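# (Illustration, hypothetical schema: the loop above is what ultimately
# services a resultset-level update/delete on a source with a multi-column
# primary key whose search requires a join or limit, e.g.
#
#   $schema->resultset('LinerNote')
#          ->search({ 'cd.year' => 1999 }, { join => 'cd' })
#          ->delete;
#
# Each row is then deleted by its primary key values, inside one transaction.)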
+# +sub _per_row_update_delete { + my $self = shift; + my ($rs, $op, $values) = @_; + + my $rsrc = $rs->result_source; + my @pcols = $rsrc->primary_columns; + + my $guard = $self->txn_scope_guard; + + # emulate the return value of $sth->execute for non-selects + my $row_cnt = '0E0'; + + my $subrs_cur = $rs->cursor; + while (my @pks = $subrs_cur->next) { + + my $cond; + for my $i (0.. $#pcols) { + $cond->{$pcols[$i]} = $pks[$i]; } + + $self->$op ( + $rsrc, + $op eq 'update' ? $values : (), + $cond, + ); + + $row_cnt++; } - my $for = delete $attrs->{for}; + $guard->commit; + + return $row_cnt; +} + +sub _select { + my $self = shift; + + # localization is neccessary as + # 1) there is no infrastructure to pass this around (easy to do, but will wait) + # 2) _select_args sets it and _prep_for_execute consumes it my $sql_maker = $self->sql_maker; - local $sql_maker->{for} = $for; + local $sql_maker->{for}; - if (exists $attrs->{group_by} || $attrs->{having}) { - $order = { - group_by => $attrs->{group_by}, - having => $attrs->{having}, - ($order ? (order_by => $order) : ()) - }; + return $self->_execute($self->_select_args(@_)); +} + +sub _select_args_to_query { + my $self = shift; + + # localization is neccessary as + # 1) there is no infrastructure to pass this around (easy to do, but will wait) + # 2) _select_args sets it and _prep_for_execute consumes it + my $sql_maker = $self->sql_maker; + local $sql_maker->{for}; + + # my ($op, $bind, $ident, $bind_attrs, $select, $cond, $order, $rows, $offset) + # = $self->_select_args($ident, $select, $cond, $attrs); + my ($op, $bind, $ident, $bind_attrs, @args) = + $self->_select_args(@_); + + # my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, [ $select, $cond, $order, $rows, $offset ]); + my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, \@args); + $prepared_bind ||= []; + + return wantarray + ? ($sql, $prepared_bind, $bind_attrs) + : \[ "($sql)", @$prepared_bind ] + ; +} + +sub _select_args { + my ($self, $ident, $select, $where, $attrs) = @_; + + my $sql_maker = $self->sql_maker; + my $alias2source = $self->_resolve_ident_sources ($ident); + + # calculate bind_attrs before possible $ident mangling + my $bind_attrs = {}; + for my $alias (keys %$alias2source) { + my $bindtypes = $self->source_bind_attributes ($alias2source->{$alias}) || {}; + for my $col (keys %$bindtypes) { + + my $fqcn = join ('.', $alias, $col); + $bind_attrs->{$fqcn} = $bindtypes->{$col} if $bindtypes->{$col}; + + # so that unqualified searches can be bound too + $bind_attrs->{$col} = $bind_attrs->{$fqcn} if $alias eq 'me'; + } } - my $bind_attrs = {}; ## Future support - my @args = ('select', $attrs->{bind}, $ident, $bind_attrs, $select, $condition, $order); + + my @limit; if ($attrs->{software_limit} || - $self->sql_maker->_default_limit_syntax eq "GenericSubQ") { + $sql_maker->_default_limit_syntax eq "GenericSubQ") { $attrs->{software_limit} = 1; } else { $self->throw_exception("rows attribute must be positive if present") @@ -1342,18 +1420,246 @@ sub _select { # MySQL actually recommends this approach. I cringe. 
+
+sub _select_args {
+  my ($self, $ident, $select, $where, $attrs) = @_;
+
+  my $sql_maker = $self->sql_maker;
+  my $alias2source = $self->_resolve_ident_sources ($ident);
+
+  # calculate bind_attrs before possible $ident mangling
+  my $bind_attrs = {};
+  for my $alias (keys %$alias2source) {
+    my $bindtypes = $self->source_bind_attributes ($alias2source->{$alias}) || {};
+    for my $col (keys %$bindtypes) {
+
+      my $fqcn = join ('.', $alias, $col);
+      $bind_attrs->{$fqcn} = $bindtypes->{$col} if $bindtypes->{$col};
+
+      # so that unqualified searches can be bound too
+      $bind_attrs->{$col} = $bind_attrs->{$fqcn} if $alias eq 'me';
+    }
   }
 
-  my $bind_attrs = {}; ## Future support
-  my @args = ('select', $attrs->{bind}, $ident, $bind_attrs, $select, $condition, $order);
+
+  my @limit;
   if ($attrs->{software_limit} ||
-      $self->sql_maker->_default_limit_syntax eq "GenericSubQ") {
+      $sql_maker->_default_limit_syntax eq "GenericSubQ") {
         $attrs->{software_limit} = 1;
   } else {
     $self->throw_exception("rows attribute must be positive if present")
@@ -1342,18 +1420,246 @@ sub _select {
     # MySQL actually recommends this approach.  I cringe.
     $attrs->{rows} = 2**48 if not defined $attrs->{rows} and defined $attrs->{offset};
-    push @args, $attrs->{rows}, $attrs->{offset};
+
+    if ($attrs->{rows} && keys %{$attrs->{collapse}}) {
+      ($ident, $select, $where, $attrs)
+        = $self->_adjust_select_args_for_limited_prefetch ($ident, $select, $where, $attrs);
+    }
+    else {
+      push @limit, $attrs->{rows}, $attrs->{offset};
+    }
   }
 
-  return $self->_execute(@args);
+###
+  # This would be the point to deflate anything found in $where
+  # (and leave $attrs->{bind} intact). Problem is - inflators historically
+  # expect a row object. And all we have is a resultsource (it is trivial
+  # to extract deflator coderefs via $alias2source above).
+  #
+  # I don't see a way forward other than changing the way deflators are
+  # invoked, and that's just bad...
+###
+
+  my $order = { map
+    { $attrs->{$_} ? ( $_ => $attrs->{$_} ) : ()  }
+    (qw/order_by group_by having _virtual_order_by/ )
+  };
+
+
+  $sql_maker->{for} = delete $attrs->{for};
+
+  return ('select', $attrs->{bind}, $ident, $bind_attrs, $select, $where, $order, @limit);
 }
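# (Illustration: the rows+collapse branch above rewrites, for example, a
# hypothetical
#
#   $schema->resultset('CD')->search({}, { prefetch => 'tracks', rows => 10 })
#
# so that the limit applies to the 10 CDs themselves rather than to the
# multiplied CD+track rows produced by the join.)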
+ # + # + # since we do not have introspectable SQLA, we fall back to ugly + # scanning of raw SQL for WHERE, and for pieces of ORDER BY + # in order to determine what goes into %inner_joins + # It may not be very efficient, but it's a reasonable stop-gap + { + # produce stuff unquoted, so it can be scanned + my $sql_maker = $self->sql_maker; + local $sql_maker->{quote_char}; + + my @order_by = (map + { ref $_ ? $_->[0] : $_ } + $sql_maker->_order_by_chunks ($sub_attrs->{order_by}) + ); + + my $where_sql = $sql_maker->where ($where); + + # sort needed joins + for my $alias (keys %join_info) { + + # any table alias found on a column name in where or order_by + # gets included in %inner_joins + # Also any parent joins that are needed to reach this particular alias + for my $piece ($where_sql, @order_by ) { + if ($piece =~ /\b$alias\./) { + $inner_joins{$alias} = 1; + } + } + } + } + + # scan for non-leaf/non-left joins and mark as needed + # also mark all ancestor joins that are needed to reach this particular alias + # (e.g. join => { cds => 'tracks' } - tracks will bring cds too ) + # + # traverse by the size of the -join_path i.e. reverse depth first + for my $alias (sort { @{$join_info{$b}{-join_path}} <=> @{$join_info{$a}{-join_path}} } (keys %join_info) ) { + + my $j = $join_info{$alias}; + $inner_joins{$alias} = 1 if (! $j->{-join_type} || ($j->{-join_type} !~ /^left$/i) ); + + if ($inner_joins{$alias}) { + $inner_joins{$_} = 1 for (@{$j->{-join_path}}); + } + } + + # construct the inner $from for the subquery + my $inner_from = [ $select_root ]; + for my $j (@$from) { + push @$inner_from, $j if $inner_joins{$j->[0]{-alias}}; + } + + # if a multi-type join was needed in the subquery ("multi" is indicated by + # presence in {collapse}) - add a group_by to simulate the collapse in the subq + + for my $alias (keys %inner_joins) { + + # the dot comes from some weirdness in collapse + # remove after the rewrite + if ($attrs->{collapse}{".$alias"}) { + $sub_attrs->{group_by} = $sub_select; + last; + } + } + + # generate the subquery + my $subq = $self->_select_args_to_query ( + $inner_from, + $sub_select, + $where, + $sub_attrs + ); + + # put it in the new {from} + unshift @outer_from, { $alias => $subq }; + + # This is totally horrific - the $where ends up in both the inner and outer query + # Unfortunately not much can be done until SQLA2 introspection arrives + # + # OTOH it can be seen as a plus: (notes that this query would make a DBA cry ;) + return (\@outer_from, $select, $where, $attrs); +} + +sub _resolve_ident_sources { + my ($self, $ident) = @_; + + my $alias2source = {}; + + # the reason this is so contrived is that $ident may be a {from} + # structure, specifying multiple tables to join + if ( Scalar::Util::blessed($ident) && $ident->isa("DBIx::Class::ResultSource") ) { + # this is compat mode for insert/update/delete which do not deal with aliases + $alias2source->{me} = $ident; + } + elsif (ref $ident eq 'ARRAY') { + + for (@$ident) { + my $tabinfo; + if (ref $_ eq 'HASH') { + $tabinfo = $_; + } + if (ref $_ eq 'ARRAY' and ref $_->[0] eq 'HASH') { + $tabinfo = $_->[0]; + } + + $alias2source->{$tabinfo->{-alias}} = $tabinfo->{-source_handle}->resolve + if ($tabinfo->{-source_handle}); + } + } + + return $alias2source; +} + +# Returns a counting SELECT for a simple count +# query. 
+
+# Returns a counting SELECT for a simple count
+# query. Abstracted so that a storage could override
+# this to { count => 'firstcol' } or whatever makes
+# sense as a performance optimization
+sub _count_select {
+  #my ($self, $source, $rs_attrs) = @_;
+  return { count => '*' };
+}
+
+# Returns a SELECT which will end up in the subselect
+# There may or may not be a group_by, as the subquery
+# might have been called to accommodate a limit
+#
+# Most databases would be happy with whatever ends up
+# here, but some choke in various ways.
+#
+sub _subq_count_select {
+  my ($self, $source, $rs_attrs) = @_;
+  return $rs_attrs->{group_by} if $rs_attrs->{group_by};
+
+  my @pcols = map { join '.', $rs_attrs->{alias}, $_ } ($source->primary_columns);
+  return @pcols ? \@pcols : [ 1 ];
+}
+
+
 sub source_bind_attributes {
   my ($self, $source) = @_;
-
+
   my $bind_attributes;
   foreach my $column ($source->columns) {
-
+
     my $data_type = $source->column_info($column)->{data_type} || '';
     $bind_attributes->{$column} = $self->bind_attribute_by_data_type($data_type)
      if $data_type;
@@ -1384,7 +1690,8 @@ sub select_single {
   my $self = shift;
   my ($rv, $sth, @bind) = $self->_select(@_);
   my @row = $sth->fetchrow_array;
-  if(@row && $sth->fetchrow_array) {
+  my @nextrow = $sth->fetchrow_array if @row;
+  if(@row && @nextrow) {
     carp "Query returned more than one row. SQL that returns multiple rows is DEPRECATED for ->find and ->single";
   }
   # Need to call finish() to work round broken DBDs
@@ -1493,9 +1800,18 @@ Return the row id of the last insert.
 =cut
 
 sub _dbh_last_insert_id {
-    my ($self, $dbh, $source, $col) = @_;
-    # XXX This is a SQLite-ism as a default... is there a DBI-generic way?
-    $dbh->func('last_insert_rowid');
+    # All Storage's need to register their own _dbh_last_insert_id
+    # the old SQLite-based method was highly inappropriate
+
+    my $self = shift;
+    my $class = ref $self;
+    $self->throw_exception (<<EOE);
+
+No _dbh_last_insert_id() method found in $class.
+Since the method of obtaining the autoincrement id of the last insert
+operation varies greatly between different databases, this method must be
+individually implemented for every storage class.
+EOE
 }
 
 =head2 sqlt_type
 
 Returns the database driver name.
 
 =cut
 
 sub sqlt_type { shift->dbh->{Driver}->{Name} }
 
 =head2 bind_attribute_by_data_type
 
-Given a datatype from column info, returns a database specific bind attribute for
-$dbh->bind_param($val,$attribute) or nothing if we will let the database planner
-just handle it.
+Given a datatype from column info, returns a database specific bind
+attribute for C<< $dbh->bind_param($val,$attribute) >> or nothing if we will
+let the database planner just handle it.
 
 Generally only needed for special case column types, like bytea in postgres.
 
@@ -1525,7 +1841,28 @@ sub bind_attribute_by_data_type {
     return;
 }
 
-=head2 create_ddl_dir
+=head2 is_datatype_numeric
+
+Given a datatype from column_info, returns a boolean value indicating if
+the current RDBMS considers it a numeric value. This controls how
+L<DBIx::Class::Row/set_column> decides whether to mark the column as
+dirty - when the datatype is deemed numeric a C<< != >> comparison will
+be performed instead of the usual C<eq>.
+
+=cut
+
+sub is_datatype_numeric {
+  my ($self, $dt) = @_;
+
+  return 0 unless $dt;
+
+  return $dt =~ /^ (?:
+    numeric | int(?:eger)? | (?:tiny|small|medium|big)int | dec(?:imal)? | real | float | double (?: \s+ precision)? | (?:big)?serial
+  ) $/ix;
+}
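# For illustration only (hypothetical storage handle), the check above
# classifies datatypes like so:
#
#   $storage->is_datatype_numeric('integer');           # true
#   $storage->is_datatype_numeric('double precision');  # true
#   $storage->is_datatype_numeric('varchar(32)');       # false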
+
+
+=head2 create_ddl_dir (EXPERIMENTAL)
 
 =over 4
 
 =back
 
 Creates a SQL file based on the Schema, for each of the specified
-database types, in the given directory.
+database engines in C<\@databases> in the given directory.
+(note: specify L<SQL::Translator> names, not L<DBI> driver names).
+
+Given a previous version number, this will also create a file containing
+the ALTER TABLE statements to transform the previous schema into the
+current one. Note that these statements may contain C<DROP TABLE> or
+C<ALTER TABLE> statements that can potentially destroy data.
+
+The file names are created using the C<ddl_filename> method below, please
+override this method in your schema if you would like a different file
+name format. For the ALTER file, the same format is used, replacing
+$version in the name with "$preversion-$version".
+
+See L<SQL::Translator/METHODS> for a list of values for C<\%sqlt_args>.
+The most common value for this would be C<< { add_drop_table => 1 } >>
+to have the SQL produced include a C<DROP TABLE> statement for each table
+created. For quoting purposes supply C<quote_table_names> and
+C<quote_field_names>.
+
+If no arguments are passed, then the following default values are assumed:
+
+=over 4
+
+=item databases  - ['MySQL', 'SQLite', 'PostgreSQL']
+
+=item version    - $schema->schema_version
+
+=item directory  - './'
+
+=item preversion -
+
+=back
 
 By default, C<\%sqlt_args> will have
 
@@ -1545,18 +1913,27 @@ hashref like the following
 
  { ignore_constraint_names => 0, # ... other options }
 
+
+Note that this feature is currently EXPERIMENTAL and may not work correctly
+across all databases, or fully handle complex relationships.
+
+WARNING: Please check all SQL files created before applying them.
+
 =cut
 
 sub create_ddl_dir {
   my ($self, $schema, $databases, $version, $dir, $preversion, $sqltargs) = @_;
 
   if(!$dir || !-d $dir) {
-    warn "No directory given, using ./\n";
+    carp "No directory given, using ./\n";
     $dir = "./";
   }
   $databases ||= ['MySQL', 'SQLite', 'PostgreSQL'];
   $databases = [ $databases ] if(ref($databases) ne 'ARRAY');
-  $version ||= $schema->VERSION || '1.x';
+
+  my $schema_version = $schema->schema_version || '1.x';
+  $version ||= $schema_version;
+
   $sqltargs = {
     add_drop_table => 1,
     ignore_constraint_names => 1,
@@ -1564,32 +1941,32 @@ sub create_ddl_dir {
     %{$sqltargs || {}}
   };
 
-  $self->throw_exception(q{Can't create a ddl file without SQL::Translator 0.09: '}
+  $self->throw_exception(q{Can't create a ddl file without SQL::Translator 0.09003: '}
      . $self->_check_sqlt_message . q{'})
    if !$self->_check_sqlt_version;
 
  my $sqlt = SQL::Translator->new( $sqltargs );
 
  $sqlt->parser('SQL::Translator::Parser::DBIx::Class');
-  my $sqlt_schema = $sqlt->translate({ data => $schema }) or die $sqlt->error;
+  my $sqlt_schema = $sqlt->translate({ data => $schema })
+    or $self->throw_exception ($sqlt->error);
 
  foreach my $db (@$databases) {
    $sqlt->reset();
-    $sqlt = $self->configure_sqlt($sqlt, $db);
    $sqlt->{schema} = $sqlt_schema;
    $sqlt->producer($db);
 
    my $file;
    my $filename = $schema->ddl_filename($db, $version, $dir);
-    if (-e $filename && (!$version || ($version == $schema->schema_version()))) {
+    if (-e $filename && ($version eq $schema_version )) {
      # if we are dumping the current version, overwrite the DDL
-      warn "Overwriting existing DDL file - $filename";
+      carp "Overwriting existing DDL file - $filename";
      unlink($filename);
    }
 
    my $output = $sqlt->translate;
    if(!$output) {
-      warn("Failed to translate to $db, skipping. (" . $sqlt->error . ")");
+      carp("Failed to translate to $db, skipping. (" . $sqlt->error .
")"); next; } if(!open($file, ">$filename")) { @@ -1605,13 +1982,13 @@ sub create_ddl_dir { my $prefilename = $schema->ddl_filename($db, $preversion, $dir); if(!-e $prefilename) { - warn("No previous schema file found ($prefilename)"); + carp("No previous schema file found ($prefilename)"); next; } my $difffile = $schema->ddl_filename($db, $version, $dir, $preversion); if(-e $difffile) { - warn("Overwriting existing diff file - $difffile"); + carp("Overwriting existing diff file - $difffile"); unlink($difffile); } @@ -1620,28 +1997,37 @@ sub create_ddl_dir { my $t = SQL::Translator->new($sqltargs); $t->debug( 0 ); $t->trace( 0 ); - $t->parser( $db ) or die $t->error; - $t = $self->configure_sqlt($t, $db); - my $out = $t->translate( $prefilename ) or die $t->error; + + $t->parser( $db ) + or $self->throw_exception ($t->error); + + my $out = $t->translate( $prefilename ) + or $self->throw_exception ($t->error); + $source_schema = $t->schema; - unless ( $source_schema->name ) { - $source_schema->name( $prefilename ); - } + + $source_schema->name( $prefilename ) + unless ( $source_schema->name ); } # The "new" style of producers have sane normalization and can support # diffing a SQL file against a DBIC->SQLT schema. Old style ones don't # And we have to diff parsed SQL against parsed SQL. my $dest_schema = $sqlt_schema; - + unless ( "SQL::Translator::Producer::$db"->can('preprocess_schema') ) { my $t = SQL::Translator->new($sqltargs); $t->debug( 0 ); $t->trace( 0 ); - $t->parser( $db ) or die $t->error; - $t = $self->configure_sqlt($t, $db); - my $out = $t->translate( $filename ) or die $t->error; + + $t->parser( $db ) + or $self->throw_exception ($t->error); + + my $out = $t->translate( $filename ) + or $self->throw_exception ($t->error); + $dest_schema = $t->schema; + $dest_schema->name( $filename ) unless $dest_schema->name; } @@ -1659,17 +2045,6 @@ sub create_ddl_dir { } } -sub configure_sqlt() { - my $self = shift; - my $tr = shift; - my $db = shift || $self->sqlt_type; - if ($db eq 'PostgreSQL') { - $tr->quote_table_names(0); - $tr->quote_field_names(0); - } - return $tr; -} - =head2 deployment_statements =over 4 @@ -1679,8 +2054,9 @@ sub configure_sqlt() { =back Returns the statements used by L and L. -The database driver name is given by C<$type>, though the value from -L is used if it is not specified. + +The L (not L) database driver name can be explicitly +provided in C<$type>, otherwise the result of L is used as default. C<$directory> is used to return statements from files in a previously created L directory and is optional. The filenames are constructed @@ -1698,9 +2074,9 @@ sub deployment_statements { # Need to be connected to get the correct sqlt_type $self->ensure_connected() unless $type; $type ||= $self->sqlt_type; - $version ||= $schema->VERSION || '1.x'; + $version ||= $schema->schema_version || '1.x'; $dir ||= './'; - my $filename = $schema->ddl_filename($type, $dir, $version); + my $filename = $schema->ddl_filename($type, $version, $dir); if(-f $filename) { my $file; @@ -1711,7 +2087,7 @@ sub deployment_statements { return join('', @rows); } - $self->throw_exception(q{Can't deploy without SQL::Translator 0.09: '} + $self->throw_exception(q{Can't deploy without SQL::Translator 0.09003: '} . $self->_check_sqlt_message . 
q{'}) if !$self->_check_sqlt_version; @@ -1731,22 +2107,32 @@ sub deployment_statements { sub deploy { my ($self, $schema, $type, $sqltargs, $dir) = @_; - foreach my $statement ( $self->deployment_statements($schema, $type, undef, $dir, { no_comments => 1, %{ $sqltargs || {} } } ) ) { - foreach my $line ( split(";\n", $statement)) { - next if($line =~ /^--/); - next if(!$line); -# next if($line =~ /^DROP/m); - next if($line =~ /^BEGIN TRANSACTION/m); - next if($line =~ /^COMMIT/m); - next if $line =~ /^\s+$/; # skip whitespace only - $self->_query_start($line); - eval { - $self->dbh->do($line); # shouldn't be using ->dbh ? - }; - if ($@) { - warn qq{$@ (running "${line}")}; - } - $self->_query_end($line); + my $deploy = sub { + my $line = shift; + return if($line =~ /^--/); + return if(!$line); + # next if($line =~ /^DROP/m); + return if($line =~ /^BEGIN TRANSACTION/m); + return if($line =~ /^COMMIT/m); + return if $line =~ /^\s+$/; # skip whitespace only + $self->_query_start($line); + eval { + $self->dbh->do($line); # shouldn't be using ->dbh ? + }; + if ($@) { + carp qq{$@ (running "${line}")}; + } + $self->_query_end($line); + }; + my @statements = $self->deployment_statements($schema, $type, undef, $dir, { %{ $sqltargs || {} }, no_comments => 1 } ); + if (@statements > 1) { + foreach my $statement (@statements) { + $deploy->( $statement ); + } + } + elsif (@statements == 1) { + foreach my $line ( split(";\n", $statements[0])) { + $deploy->( $line ); } } } @@ -1793,7 +2179,7 @@ sub build_datetime_parser { my $_check_sqlt_message; # private sub _check_sqlt_version { return $_check_sqlt_version if defined $_check_sqlt_version; - eval 'use SQL::Translator "0.09"'; + eval 'use SQL::Translator "0.09003"'; $_check_sqlt_message = $@ || ''; $_check_sqlt_version = !$@; } @@ -1856,40 +2242,6 @@ cases if you choose the C<< AutoCommit => 0 >> path, just as you would be with raw DBI. -=head1 SQL METHODS - -The module defines a set of methods within the DBIC::SQL::Abstract -namespace. These build on L to provide the -SQL query functions. - -The following methods are extended:- - -=over 4 - -=item delete - -=item insert - -=item select - -=item update - -=item limit_dialect - -See L for details. -For setting, this method is deprecated in favor of L. - -=item quote_char - -See L for details. -For setting, this method is deprecated in favor of L. - -=item name_sep - -See L for details. -For setting, this method is deprecated in favor of L. - -=back =head1 AUTHORS