Revision history for DBIx::Class
+ - Add a warning to load_namespaces if a class in ResultSet/
+ is not a subclass of DBIx::Class::ResultSet
+ - ::Storage::DBI now correctly preserves a parent $dbh from
+ terminating children, even during interpreter global
+ out-of-order destruction
+ - InflateColumn::DateTime support for MSSQL via DBD::Sybase
+ - Millisecond precision support for MSSQL datetimes for
+ InflateColumn::DateTime
+ - Support connecting using $ENV{DBI_DSN} and $ENV{DBI_DRIVER}
+ - current_source_alias method on ResultSet objects to
+ determine the alias to use in programmatically assembled
+ search()es (originally added in 0.08100 but unmentioned)
+ - Depend on optimized SQL::Abstract (faster SQL generation)
+
0.08121 2010-04-11 18:43:00 (UTC)
- Support for Firebird RDBMS with DBD::InterBase and ODBC
- Add core support for INSERT RETURNING (for storages that
### All of them should go to DBIx::Class::Optional::Dependencies
###
-
name 'DBIx-Class';
perl_version '5.008001';
all_from 'lib/DBIx/Class.pm';
my $runtime_requires = {
'Carp::Clan' => '6.0',
- 'Class::Accessor::Grouped' => '0.09002',
+ 'Class::Accessor::Grouped' => '0.09003',
'Class::C3::Componentised' => '1.0005',
'Class::Inspector' => '1.24',
'Data::Page' => '2.00',
'MRO::Compat' => '0.09',
'Module::Find' => '0.06',
'Path::Class' => '0.18',
- 'SQL::Abstract' => '1.64',
+ 'SQL::Abstract' => '1.66',
'SQL::Abstract::Limit' => '0.13',
'Sub::Name' => '0.04',
'Data::Dumper::Concise' => '1.000',
zamolxes: Bogdan Lucaciu <bogdan@wiz.ro>
+Possum: Daniel LeWarne <possum@cpan.org>
+
=head1 COPYRIGHT
Copyright (c) 2005 - 2010 the DBIx::Class L</AUTHOR> and L</CONTRIBUTORS>
sub has_many {
my ($class, $rel, $f_class, $f_key, @rest) = @_;
- return $class->next::method($rel, $f_class, ( ref($f_key) ?
- $f_key :
- lc($f_key) ), @rest);
+ return $class->next::method(
+ $rel,
+ $f_class,
+ (ref($f_key) ?
+ $f_key :
+ lc($f_key||'')
+ ),
+ @rest
+ );
}
sub get_inflated_column {
$info->{_ic_dt_method} ||= "timestamp_without_timezone";
} elsif ($type eq "smalldatetime") {
$type = "datetime";
- $info->{_ic_dt_method} ||= "datetime";
+ $info->{_ic_dt_method} ||= "smalldatetime";
}
}
my $count = $rs->count;
# Equivalent SQL:
- # SELECT COUNT( * ) FROM (SELECT me.name FROM artist me GROUP BY me.name) count_subq:
+ # SELECT COUNT( * ) FROM (SELECT me.name FROM artist me GROUP BY me.name) me:
=head2 Grouping results
statement and dig down to see if certain parameters cause aberrant behavior.
You might want to check out L<DBIx::Class::QueryLog> as well.
+=head1 IMPROVING PERFORMANCE
+
+=over
+
+=item *
+
+Install L<Class::XSAccessor> to speed up L<Class::Accessor::Grouped>.
+
+=item *
+
+On Perl 5.8 install L<Class::C3::XS>.
+
+=item *
+
+L<prefetch|DBIx::Class::ResultSet/prefetch> relationships, where possible. See
+L</Using joins and prefetch>.
+
+=item *
+
+Use L<populate|DBIx::Class::ResultSet/populate> in void context to insert data
+when you don't need the resulting L<DBIx::Class::Row> objects, if possible, but
+see the caveats.
+
+When inserting many rows, for best results, populate a large number of rows at a
+time, but not so large that the table is locked for an unacceptably long time.
+
+If using L<create|DBIx::Class::ResultSet/create> instead, use a transaction and
+commit every C<X> rows, where C<X> gives you the best performance without
+locking the table for too long.
+
+=item *
+
+When selecting many rows, if you don't need full-blown L<DBIx::Class::Row>
+objects, consider using L<DBIx::Class::ResultClass::HashRefInflator>.
+
+=item *
+
+See also L</STARTUP SPEED> and L</MEMORY USAGE> in this document.
+
+=back
+
=head1 STARTUP SPEED
L<DBIx::Class|DBIx::Class> programs can have a significant startup delay
my $rsrc = $self->result_source;
$attrs ||= $self->_resolved_attrs;
- my $tmp_attrs = { %$attrs };
-
- # take off any limits, record_filter is cdbi, and no point of ordering a count
- delete $tmp_attrs->{$_} for (qw/select as rows offset order_by record_filter/);
+ # only take pieces we need for a simple count
+ my $tmp_attrs = { map
+ { $_ => $attrs->{$_} }
+ qw/ alias from where bind join /
+ };
# overwrite the selector (supplied by the storage)
$tmp_attrs->{select} = $rsrc->storage->_count_select ($rsrc, $tmp_attrs);
my ($self, $attrs) = @_;
my $rsrc = $self->result_source;
- $attrs ||= $self->_resolved_attrs_copy;
-
- my $sub_attrs = { %$attrs };
+ $attrs ||= $self->_resolved_attrs;
- # extra selectors do not go in the subquery and there is no point of ordering it
- delete $sub_attrs->{$_} for qw/collapse select _prefetch_select as order_by/;
+ my $sub_attrs = { map
+ { $_ => $attrs->{$_} }
+ qw/ alias from where bind join group_by having rows offset /
+ };
# if we multi-prefetch we group_by primary keys only as this is what we would
# get out of the rs via ->next/->all. We *DO WANT* to clobber old group_by regardless
$sub_attrs->{group_by} = [ map { "$attrs->{alias}.$_" } ($rsrc->_pri_cols) ]
}
- $sub_attrs->{select} = $rsrc->storage->_subq_count_select ($rsrc, $attrs);
+ # Calculate subquery selector
+ if (my $g = $sub_attrs->{group_by}) {
- # this is so that the query can be simplified e.g.
- # * ordering can be thrown away in things like Top limit
- $sub_attrs->{-for_count_only} = 1;
+ # necessary as the group_by may refer to aliased functions
+ my $sel_index;
+ for my $sel (@{$attrs->{select}}) {
+ $sel_index->{$sel->{-as}} = $sel
+ if (ref $sel eq 'HASH' and $sel->{-as});
+ }
- my $sub_rs = $rsrc->resultset_class->new ($rsrc, $sub_attrs);
+ for my $g_part (@$g) {
+ push @{$sub_attrs->{select}}, $sel_index->{$g_part} || $g_part;
+ }
+ }
+ else {
+ my @pcols = map { "$attrs->{alias}.$_" } ($rsrc->primary_columns);
+ $sub_attrs->{select} = @pcols ? \@pcols : [ 1 ];
+ }
- $attrs->{from} = [{
- -alias => 'count_subq',
- -source_handle => $rsrc->handle,
- count_subq => $sub_rs->as_query,
- }];
- # the subquery replaces this
- delete $attrs->{$_} for qw/where bind collapse group_by having having_bind rows offset/;
+ # this is so that the query can be simplified e.g.
+ # * ordering can be thrown away in things like Top limit
+ $sub_attrs->{-for_count_only} = 1;
- return $self->_count_rs ($attrs);
+ return $rsrc->resultset_class
+ ->new ($rsrc, $sub_attrs)
+ ->as_subselect_rs
+ ->search ({}, { columns => { count => $rsrc->storage->_count_select ($rsrc, $attrs) } })
+ -> get_column ('count');
}
sub _bool {
producer => $producer,
name => 'harry',
}, {
- key => 'primary,
+ key => 'primary',
});
=cut
sub as_subselect_rs {
- my $self = shift;
-
- return $self->result_source->resultset->search( undef, {
- alias => $self->current_source_alias,
- from => [{
- $self->current_source_alias => $self->as_query,
- -alias => $self->current_source_alias,
- -source_handle => $self->result_source->handle,
- }]
+ my $self = shift;
+
+ my $attrs = $self->_resolved_attrs;
+
+ return $self->result_source->resultset->search( undef, {
+ from => [{
+ $attrs->{alias} => $self->as_query,
+ -alias => $attrs->{alias},
+ -source_handle => $self->result_source->handle,
+ }],
+ map { $_ => $attrs->{$_} } qw/select as alias/
+
});
}
# ->_resolve_join as otherwise they get lost - captainL
my $join = $self->_merge_attr( $attrs->{join}, $attrs->{prefetch} );
- delete @{$attrs}{qw/join prefetch collapse distinct select as columns +select +as +columns/};
+ delete @{$attrs}{qw/join prefetch collapse group_by distinct select as columns +select +as +columns/};
my $seen = { %{ (delete $attrs->{seen_join}) || {} } };
-alias => $attrs->{alias},
$attrs->{alias} => $rs_copy->as_query,
}];
- delete @{$attrs}{@force_subq_attrs, 'where'};
+ delete @{$attrs}{@force_subq_attrs, qw/where bind/};
$seen->{-relation_chain_depth} = 0;
}
elsif ($attrs->{from}) { #shallow copy suffices
sub __new_related_find_or_new_helper {
my ($self, $relname, $data) = @_;
+ my $rsrc = $self->result_source;
+
# create a mock-object so all new/set_column component overrides will run:
- my $rel_rs = $self->result_source
- ->related_source($relname)
- ->resultset;
+ my $rel_rs = $rsrc->related_source($relname)->resultset;
my $new_rel_obj = $rel_rs->new_result($data);
my $proc_data = { $new_rel_obj->get_columns };
MULTICREATE_DEBUG and warn "MC $self constructing $relname via new_result";
return $new_rel_obj;
}
- elsif ($self->result_source->_pk_depends_on($relname, $proc_data )) {
+ elsif ($rsrc->_pk_depends_on($relname, $proc_data )) {
if (! keys %$proc_data) {
# there is nothing to search for - blind create
MULTICREATE_DEBUG and warn "MC $self constructing default-insert $relname";
return $new_rel_obj;
}
else {
- my $us = $self->source_name;
+ my $us = $rsrc->source_name;
$self->throw_exception ("'$us' neither depends nor is depended on by '$relname', something is wrong...");
}
}
# ANSI standard Limit/Offset implementation. DB2 and MSSQL use this
sub _RowNumberOver {
- my ($self, $sql, $order, $rows, $offset ) = @_;
+ my ($self, $sql, $rs_attrs, $rows, $offset ) = @_;
# get the select to make the final amount of columns equal the original one
my ($select) = $sql =~ /^ \s* SELECT \s+ (.+?) \s+ FROM/ix
or croak "Unrecognizable SELECT: $sql";
- # get the order_by only (or make up an order if none exists)
+ # make up an order if none exists
my $order_by = $self->_order_by(
- (delete $order->{order_by}) || $self->_rno_default_order
+ (delete $rs_attrs->{order_by}) || $self->_rno_default_order
);
# whatever is left of the order_by
- my $group_having = $self->_order_by($order);
+ my $group_having = $self->_parse_rs_attrs($rs_attrs);
- my $qalias = $self->_quote ($self->{_dbic_rs_attrs}{alias});
+ my $qalias = $self->_quote ($rs_attrs->{alias});
$sql = sprintf (<<EOS, $offset + 1, $offset + $rows, );
# Informix specific limit, almost like LIMIT/OFFSET
sub _SkipFirst {
- my ($self, $sql, $order, $rows, $offset) = @_;
+ my ($self, $sql, $rs_attrs, $rows, $offset) = @_;
$sql =~ s/^ \s* SELECT \s+ //ix
or croak "Unrecognizable SELECT: $sql";
,
sprintf ('FIRST %d ', $rows),
$sql,
- $self->_order_by ($order),
+ $self->_parse_rs_attrs ($rs_attrs),
);
}
# Firebird specific limit, reverse of _SkipFirst for Informix
sub _FirstSkip {
- my ($self, $sql, $order, $rows, $offset) = @_;
+ my ($self, $sql, $rs_attrs, $rows, $offset) = @_;
$sql =~ s/^ \s* SELECT \s+ //ix
or croak "Unrecognizable SELECT: $sql";
: ''
,
$sql,
- $self->_order_by ($order),
+ $self->_parse_rs_attrs ($rs_attrs),
);
}
# Crappy Top based Limit/Offset support. Legacy from MSSQL.
sub _Top {
- my ( $self, $sql, $order, $rows, $offset ) = @_;
+ my ( $self, $sql, $rs_attrs, $rows, $offset ) = @_;
# mangle the input sql so it can be properly aliased in the outer queries
$sql =~ s/^ \s* SELECT \s+ (.+?) \s+ (?=FROM)//ix
my @sql_select = split (/\s*,\s*/, $sql_select);
# we can't support subqueries (in fact MSSQL can't) - croak
- if (@sql_select != @{$self->{_dbic_rs_attrs}{select}}) {
+ if (@sql_select != @{$rs_attrs->{select}}) {
croak (sprintf (
'SQL SELECT did not parse cleanly - retrieved %d comma separated elements, while '
. 'the resultset select attribure contains %d elements: %s',
scalar @sql_select,
- scalar @{$self->{_dbic_rs_attrs}{select}},
+ scalar @{$rs_attrs->{select}},
$sql_select,
));
}
my $esc_name_sep = "\Q$name_sep\E";
my $col_re = qr/ ^ (?: (.+) $esc_name_sep )? ([^$esc_name_sep]+) $ /x;
- my $rs_alias = $self->{_dbic_rs_attrs}{alias};
+ my $rs_alias = $rs_attrs->{alias};
my $quoted_rs_alias = $self->_quote ($rs_alias);
# construct the new select lists, rename(alias) some columns if necessary
my (@outer_select, @inner_select, %seen_names, %col_aliases, %outer_col_aliases);
- for (@{$self->{_dbic_rs_attrs}{select}}) {
+ for (@{$rs_attrs->{select}}) {
next if ref $_;
my ($table, $orig_colname) = ( $_ =~ $col_re );
next unless $table;
for my $i (0 .. $#sql_select) {
- my $colsel_arg = $self->{_dbic_rs_attrs}{select}[$i];
+ my $colsel_arg = $rs_attrs->{select}[$i];
my $colsel_sql = $sql_select[$i];
# this may or may not work (in case of a scalarref or something)
%outer_col_aliases = (%outer_col_aliases, %col_aliases);
# deal with order
- croak '$order supplied to SQLAHacks limit emulators must be a hash'
- if (ref $order ne 'HASH');
+ croak '$order/attr container supplied to SQLAHacks limit emulators must be a hash'
+ if (ref $rs_attrs ne 'HASH');
- $order = { %$order }; #copy
-
- my $req_order = $order->{order_by};
+ my $req_order = $rs_attrs->{order_by};
# examine normalized version, collapses nesting
- my $limit_order;
- if (scalar $self->_order_by_chunks ($req_order)) {
- $limit_order = $req_order;
- }
- else {
- $limit_order = [ map
+ my $limit_order = scalar $self->_order_by_chunks ($req_order)
+ ? $req_order
+ : [ map
{ join ('', $rs_alias, $name_sep, $_ ) }
- ( $self->{_dbic_rs_attrs}{_source_handle}->resolve->primary_columns )
- ];
- }
+ ( $rs_attrs->{_rsroot_source_handle}->resolve->primary_columns )
+ ]
+ ;
my ( $order_by_inner, $order_by_outer ) = $self->_order_directions($limit_order);
my $order_by_requested = $self->_order_by ($req_order);
# generate the rest
- delete $order->{order_by};
- my $grpby_having = $self->_order_by ($order);
+ delete $rs_attrs->{order_by};
+ my $grpby_having = $self->_parse_rs_attrs ($rs_attrs);
# short circuit for counts - the ordering complexity is needless
- if ($self->{_dbic_rs_attrs}{-for_count_only}) {
+ if ($rs_attrs->{-for_count_only}) {
return "SELECT TOP $rows $inner_select $sql $grpby_having $order_by_outer";
}
return $self->{_cached_syntax} ||= $self->SUPER::_find_syntax($syntax);
}
-my $for_syntax = {
- update => 'FOR UPDATE',
- shared => 'FOR SHARE',
-};
# Quotes table names, handles "limit" dialects (e.g. where rownum between x and
-# y), supports SELECT ... FOR UPDATE and SELECT ... FOR SHARE.
+# y)
sub select {
- my ($self, $table, $fields, $where, $order, @rest) = @_;
+ my ($self, $table, $fields, $where, $rs_attrs, @rest) = @_;
$self->{"${_}_bind"} = [] for (qw/having from order/);
@rest = (-1) unless defined $rest[0];
croak "LIMIT 0 Does Not Compute" if $rest[0] == 0;
# and anyway, SQL::Abstract::Limit will cause a barf if we don't first
+
my ($sql, @where_bind) = $self->SUPER::select(
- $table, $self->_recurse_fields($fields), $where, $order, @rest
+ $table, $self->_recurse_fields($fields), $where, $rs_attrs, @rest
);
- if (my $for = delete $self->{_dbic_rs_attrs}{for}) {
- $sql .= " $for_syntax->{$for}" if $for_syntax->{$for};
- }
-
return wantarray ? ($sql, @{$self->{from_bind}}, @where_bind, @{$self->{having_bind}}, @{$self->{order_bind}} ) : $sql;
}
sub _emulate_limit {
my $self = shift;
+ # my ( $syntax, $sql, $rs_attrs, $rows, $offset ) = @_;
+
if ($_[3] == -1) {
- return $_[1].$self->_order_by($_[2]);
+ return $_[1] . $self->_parse_rs_attrs($_[2]);
} else {
return $self->SUPER::_emulate_limit(@_);
}
}
}
-sub _order_by {
+my $for_syntax = {
+ update => 'FOR UPDATE',
+ shared => 'FOR SHARE',
+};
+
+# this used to be a part of _order_by but is broken out for clarity.
+# What we have been doing forever is hijacking the $order arg of
+# SQLA::select to pass in arbitrary pieces of data (first the group_by,
+# then pretty much the entire resultset attr-hash, as more and more
+# things in the SQLA space need to have more info about the $rs they
+# create SQL for. The alternative would be to keep expanding the
+# signature of _select with more and more positional parameters, which
+# is just gross. All hail SQLA2!
+sub _parse_rs_attrs {
my ($self, $arg) = @_;
- if (ref $arg eq 'HASH' and keys %$arg and not grep { $_ =~ /^-(?:desc|asc)/i } keys %$arg ) {
+ my $sql = '';
- my $ret = '';
+ if (my $g = $self->_recurse_fields($arg->{group_by}, { no_rownum_hack => 1 }) ) {
+ $sql .= $self->_sqlcase(' group by ') . $g;
+ }
- if (my $g = $self->_recurse_fields($arg->{group_by}, { no_rownum_hack => 1 }) ) {
- $ret = $self->_sqlcase(' group by ') . $g;
- }
+ if (defined $arg->{having}) {
+ my ($frag, @bind) = $self->_recurse_where($arg->{having});
+ push(@{$self->{having_bind}}, @bind);
+ $sql .= $self->_sqlcase(' having ') . $frag;
+ }
- if (defined $arg->{having}) {
- my ($frag, @bind) = $self->_recurse_where($arg->{having});
- push(@{$self->{having_bind}}, @bind);
- $ret .= $self->_sqlcase(' having ').$frag;
- }
+ if (defined $arg->{order_by}) {
+ $sql .= $self->_order_by ($arg->{order_by});
+ }
- if (defined $arg->{order_by}) {
- my ($frag, @bind) = $self->SUPER::_order_by($arg->{order_by});
- push(@{$self->{order_bind}}, @bind);
- $ret .= $frag;
- }
+ if (my $for = $arg->{for}) {
+ $sql .= " $for_syntax->{$for}" if $for_syntax->{$for};
+ }
+
+ return $sql;
+}
+
+sub _order_by {
+ my ($self, $arg) = @_;
- return $ret;
+ # check that we are not called in legacy mode (order_by as 4th argument)
+ if (ref $arg eq 'HASH' and not grep { $_ =~ /^-(?:desc|asc)/i } keys %$arg ) {
+ return $self->_parse_rs_attrs ($arg);
}
else {
my ($sql, @bind) = $self->SUPER::_order_by ($arg);
- push(@{$self->{order_bind}}, @bind);
+ push @{$self->{order_bind}}, @bind;
return $sql;
}
}
}
}
-sub _quote {
- my ($self, $label) = @_;
- return '' unless defined $label;
- return $$label if ref($label) eq 'SCALAR';
- return "*" if $label eq '*';
- return $label unless $self->{quote_char};
- if(ref $self->{quote_char} eq "ARRAY"){
- return $self->{quote_char}->[0] . $label . $self->{quote_char}->[1]
- if !defined $self->{name_sep};
- my $sep = $self->{name_sep};
- return join($self->{name_sep},
- map { $self->{quote_char}->[0] . $_ . $self->{quote_char}->[1] }
- split(/\Q$sep\E/,$label));
- }
- return $self->SUPER::_quote($label);
-}
-
sub limit_dialect {
my $self = shift;
- $self->{limit_dialect} = shift if @_;
+ if (@_) {
+ $self->{limit_dialect} = shift;
+ undef $self->{_cached_syntax};
+ }
return $self->{limit_dialect};
}
use Carp::Clan qw/^DBIx::Class|^SQL::Abstract/;
sub select {
- my ($self, $table, $fields, $where, $order, @rest) = @_;
+ my ($self, $table, $fields, $where, $rs_attrs, @rest) = @_;
if (ref($table) eq 'ARRAY') {
$where = $self->_oracle_joins($where, @{ $table });
}
- return $self->SUPER::select($table, $fields, $where, $order, @rest);
+ return $self->SUPER::select($table, $fields, $where, $rs_attrs, @rest);
}
sub _recurse_from {
#
# SQLite does not understand SELECT ... FOR UPDATE
-# Adjust SQL here instead
+# Disable it here
#
-sub select {
- my $self = shift;
- local $self->{_dbic_rs_attrs}{for} = undef;
- return $self->SUPER::select (@_);
+sub _parse_rs_attrs {
+ my ($self, $attrs) = @_;
+
+ return $self->SUPER::_parse_rs_attrs ($attrs)
+ if ref $attrs ne 'HASH';
+
+ local $attrs->{for};
+ return $self->SUPER::_parse_rs_attrs ($attrs);
}
1;
}
elsif($rs_class ||= $default_resultset_class) {
$class->ensure_class_loaded($rs_class);
+ if(!$rs_class->isa("DBIx::Class::ResultSet")) {
+ carp "load_namespaces found ResultSet class $rs_class that does not subclass DBIx::Class::ResultSet";
+ }
+
$class->_ns_get_rsrc_instance ($result_class)->resultset_class($rs_class);
}
use Data::Dumper::Concise();
use Sub::Name ();
+use File::Path ();
+
__PACKAGE__->mk_group_accessors('simple' =>
qw/_connect_info _dbi_connect_info _dbh _sql_maker _sql_maker_opts _conn_pid
_conn_tid transaction_depth _dbh_autocommit _driver_determined savepoints
/);
__PACKAGE__->sql_maker_class('DBIx::Class::SQLAHacks');
-
# Each of these methods need _determine_driver called before itself
# in order to function reliably. This is a purely DRY optimization
my @rdbms_specific_methods = qw/
$new->{_in_dbh_do} = 0;
$new->{_dbh_gen} = 0;
+ # read below to see what this does
+ $new->_arm_global_destructor;
+
$new;
}
+# This is hack to work around perl shooting stuff in random
+# order on exit(). If we do not walk the remaining storage
+# objects in an END block, there is a *small but real* chance
+# of a fork()ed child to kill the parent's shared DBI handle,
+# *before perl reaches the DESTROY in this package*
+# Yes, it is ugly and effective.
+{
+ my %seek_and_destroy;
+
+ sub _arm_global_destructor {
+ my $self = shift;
+ my $key = Scalar::Util::refaddr ($self);
+ $seek_and_destroy{$key} = $self;
+ Scalar::Util::weaken ($seek_and_destroy{$key});
+ }
+
+ END {
+ local $?; # just in case the DBI destructor changes it somehow
+
+ # destroy just the object if not native to this process/thread
+ $_->_preserve_foreign_dbh for (grep
+ { defined $_ }
+ values %seek_and_destroy
+ );
+ }
+}
+
+sub DESTROY {
+ my $self = shift;
+
+ # destroy just the object if not native to this process/thread
+ $self->_preserve_foreign_dbh;
+
+ # some databases need this to stop spewing warnings
+ if (my $dbh = $self->_dbh) {
+ local $@;
+ eval {
+ %{ $dbh->{CachedKids} } = ();
+ $dbh->disconnect;
+ };
+ }
+
+ $self->_dbh(undef);
+}
+
+sub _preserve_foreign_dbh {
+ my $self = shift;
+
+ return unless $self->_dbh;
+
+ $self->_verify_tid;
+
+ return unless $self->_dbh;
+
+ $self->_verify_pid;
+
+}
+
+# handle pid changes correctly - do not destroy parent's connection
+sub _verify_pid {
+ my $self = shift;
+
+ return if ( defined $self->_conn_pid and $self->_conn_pid == $$ );
+
+ $self->_dbh->{InactiveDestroy} = 1;
+ $self->_dbh(undef);
+ $self->{_dbh_gen}++;
+
+ return;
+}
+
+# very similar to above, but seems to FAIL if I set InactiveDestroy
+sub _verify_tid {
+ my $self = shift;
+
+ if ( ! defined $self->_conn_tid ) {
+ return; # no threads
+ }
+ elsif ( $self->_conn_tid == threads->tid ) {
+ return; # same thread
+ }
+
+ #$self->_dbh->{InactiveDestroy} = 1; # why does t/51threads.t fail...?
+ $self->_dbh(undef);
+ $self->{_dbh_gen}++;
+
+ return;
+}
+
+
=head2 connect_info
This method is normally called by L<DBIx::Class::Schema/connection>, which
sub _seems_connected {
my $self = shift;
+ $self->_preserve_foreign_dbh;
+
my $dbh = $self->_dbh
or return 0;
- if(defined $self->_conn_tid && $self->_conn_tid != threads->tid) {
- $self->_dbh(undef);
- $self->{_dbh_gen}++;
- return 0;
- }
- else {
- $self->_verify_pid;
- return 0 if !$self->_dbh;
- }
-
return $dbh->FETCH('Active');
}
return $dbh->ping;
}
-# handle pid changes correctly
-# NOTE: assumes $self->_dbh is a valid $dbh
-sub _verify_pid {
- my ($self) = @_;
-
- return if defined $self->_conn_pid && $self->_conn_pid == $$;
-
- $self->_dbh->{InactiveDestroy} = 1;
- $self->_dbh(undef);
- $self->{_dbh_gen}++;
-
- return;
-}
-
sub ensure_connected {
my ($self) = @_;
# this is the internal "get dbh or connect (don't check)" method
sub _get_dbh {
my $self = shift;
- $self->_verify_pid if $self->_dbh;
+ $self->_preserve_foreign_dbh;
$self->_populate_dbh unless $self->_dbh;
return $self->_dbh;
}
# try to use dsn to not require being connected, the driver may still
# force a connection in _rebless to determine version
# (dsn may not be supplied at all if all we do is make a mock-schema)
- my $dsn = $self->_dbi_connect_info->[0] || '';
+ my $dsn = $self->_dbi_connect_info->[0] || $ENV{DBI_DSN} || '';
($driver) = $dsn =~ /dbi:([^:]+):/i;
+ $driver ||= $ENV{DBI_DRIVER};
}
}
sub _select {
my $self = shift;
-
- # localization is neccessary as
- # 1) there is no infrastructure to pass this around before SQLA2
- # 2) _select_args sets it and _prep_for_execute consumes it
- my $sql_maker = $self->sql_maker;
- local $sql_maker->{_dbic_rs_attrs};
-
- return $self->_execute($self->_select_args(@_));
+ $self->_execute($self->_select_args(@_));
}
sub _select_args_to_query {
my $self = shift;
- # localization is neccessary as
- # 1) there is no infrastructure to pass this around before SQLA2
- # 2) _select_args sets it and _prep_for_execute consumes it
- my $sql_maker = $self->sql_maker;
- local $sql_maker->{_dbic_rs_attrs};
-
- # my ($op, $bind, $ident, $bind_attrs, $select, $cond, $order, $rows, $offset)
+ # my ($op, $bind, $ident, $bind_attrs, $select, $cond, $rs_attrs, $rows, $offset)
# = $self->_select_args($ident, $select, $cond, $attrs);
my ($op, $bind, $ident, $bind_attrs, @args) =
$self->_select_args(@_);
- # my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, [ $select, $cond, $order, $rows, $offset ]);
+ # my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, [ $select, $cond, $rs_attrs, $rows, $offset ]);
my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, \@args);
$prepared_bind ||= [];
sub _select_args {
my ($self, $ident, $select, $where, $attrs) = @_;
+ my $sql_maker = $self->sql_maker;
my ($alias2source, $rs_alias) = $self->_resolve_ident_sources ($ident);
- my $sql_maker = $self->sql_maker;
- $sql_maker->{_dbic_rs_attrs} = {
+ $attrs = {
%$attrs,
select => $select,
from => $ident,
where => $where,
$rs_alias && $alias2source->{$rs_alias}
- ? ( _source_handle => $alias2source->{$rs_alias}->handle )
+ ? ( _rsroot_source_handle => $alias2source->{$rs_alias}->handle )
: ()
,
};
#limited has_many
( $attrs->{rows} && keys %{$attrs->{collapse}} )
||
- # limited prefetch with RNO subqueries
+ # limited prefetch with RNO subqueries (otherwise a risk of column name clashes)
(
$attrs->{rows}
&&
@{$attrs->{_prefetch_select}}
)
||
- # grouped prefetch
+ # grouped prefetch (to satisfy group_by == select)
( $attrs->{group_by}
&&
@{$attrs->{group_by}}
}
elsif (
+ # the RNO limit dialect mangles the SQL such that the join gets lost
+ # wrap a subquery here
($attrs->{rows} || $attrs->{offset})
&&
$sql_maker->limit_dialect eq 'RowNumberOver'
&&
scalar $self->_parse_order_by ($attrs->{order_by})
) {
- # the RNO limit dialect above mangles the SQL such that the join gets lost
- # wrap a subquery here
push @limit, delete @{$attrs}{qw/rows offset/};
# invoked, and that's just bad...
###
- my $order = { map
- { $attrs->{$_} ? ( $_ => $attrs->{$_} ) : () }
- (qw/order_by group_by having/ )
- };
-
- return ('select', $attrs->{bind}, $ident, $bind_attrs, $select, $where, $order, @limit);
+ return ('select', $attrs->{bind}, $ident, $bind_attrs, $select, $where, $attrs, @limit);
}
# Returns a counting SELECT for a simple count
return { count => '*' };
}
-# Returns a SELECT which will end up in the subselect
-# There may or may not be a group_by, as the subquery
-# might have been called to accomodate a limit
-#
-# Most databases would be happy with whatever ends up
-# here, but some choke in various ways.
-#
-sub _subq_count_select {
- my ($self, $source, $rs_attrs) = @_;
-
- if (my $groupby = $rs_attrs->{group_by}) {
-
- my $avail_columns = $self->_resolve_column_info ($rs_attrs->{from});
-
- my $sel_index;
- for my $sel (@{$rs_attrs->{select}}) {
- if (ref $sel eq 'HASH' and $sel->{-as}) {
- $sel_index->{$sel->{-as}} = $sel;
- }
- }
-
- my @selection;
- for my $g_part (@$groupby) {
- if (ref $g_part or $avail_columns->{$g_part}) {
- push @selection, $g_part;
- }
- elsif ($sel_index->{$g_part}) {
- push @selection, $sel_index->{$g_part};
- }
- else {
- $self->throw_exception ("group_by criteria '$g_part' not contained within current resultset source(s)");
- }
- }
-
- return \@selection;
- }
-
- my @pcols = map { join '.', $rs_attrs->{alias}, $_ } ($source->primary_columns);
- return @pcols ? \@pcols : [ 1 ];
-}
sub source_bind_attributes {
my ($self, $source) = @_;
unless ($dir) {
carp "No directory given, using ./\n";
$dir = './';
+ } else {
+ -d $dir or File::Path::mkpath($dir)
+ or $self->throw_exception("create_ddl_dir: $! creating dir '$dir'");
}
$self->throw_exception ("Directory '$dir' does not exist\n") unless(-d $dir);
return $alias;
}
-sub DESTROY {
- my $self = shift;
-
- $self->_verify_pid if $self->_dbh;
-
- # some databases need this to stop spewing warnings
- if (my $dbh = $self->_dbh) {
- local $@;
- eval {
- %{ $dbh->{CachedKids} } = ();
- $dbh->disconnect;
- };
- }
-
- $self->_dbh(undef);
-}
-
1;
=head1 USAGE NOTES
$self->_get_dbh->do("ROLLBACK TRANSACTION $name");
}
-sub build_datetime_parser {
- my $self = shift;
- my $type = "DateTime::Format::Strptime";
- eval "use ${type}";
- $self->throw_exception("Couldn't load ${type}: $@") if $@;
- return $type->new( pattern => '%Y-%m-%d %H:%M:%S' ); # %F %T
-}
+sub datetime_parser_type {
+ 'DBIx::Class::Storage::DBI::MSSQL::DateTime::Format'
+}
sub sqlt_type { 'SQLServer' }
return $@ ? 0 : 1;
}
+package # hide from PAUSE
+ DBIx::Class::Storage::DBI::MSSQL::DateTime::Format;
+
+my $datetime_format = '%Y-%m-%d %H:%M:%S.%3N'; # %F %T
+my $smalldatetime_format = '%Y-%m-%d %H:%M:%S';
+
+my ($datetime_parser, $smalldatetime_parser);
+
+sub parse_datetime {
+ shift;
+ require DateTime::Format::Strptime;
+ $datetime_parser ||= DateTime::Format::Strptime->new(
+ pattern => $datetime_format,
+ on_error => 'croak',
+ );
+ return $datetime_parser->parse_datetime(shift);
+}
+
+sub format_datetime {
+ shift;
+ require DateTime::Format::Strptime;
+ $datetime_parser ||= DateTime::Format::Strptime->new(
+ pattern => $datetime_format,
+ on_error => 'croak',
+ );
+ return $datetime_parser->format_datetime(shift);
+}
+
+sub parse_smalldatetime {
+ shift;
+ require DateTime::Format::Strptime;
+ $smalldatetime_parser ||= DateTime::Format::Strptime->new(
+ pattern => $smalldatetime_format,
+ on_error => 'croak',
+ );
+ return $smalldatetime_parser->parse_datetime(shift);
+}
+
+sub format_smalldatetime {
+ shift;
+ require DateTime::Format::Strptime;
+ $smalldatetime_parser ||= DateTime::Format::Strptime->new(
+ pattern => $smalldatetime_format,
+ on_error => 'croak',
+ );
+ return $smalldatetime_parser->format_datetime(shift);
+}
+
1;
=head1 NAME
use MooseX::Types::Moose qw/ClassName HashRef Object/;
use Scalar::Util 'reftype';
use Hash::Merge;
-use List::Util qw/min max/;
+use List::Util qw/min max reduce/;
use namespace::clean -except => 'meta';
is_datatype_numeric
_supports_insert_returning
_count_select
- _subq_count_select
_subq_update_delete
svp_rollback
svp_begin
_dbh_commit
_execute_array
_placeholders_supported
- _verify_pid
savepoints
_sqlt_minimum_version
_sql_maker_opts
/],
);
+my @unimplemented = qw(
+ _arm_global_destructor
+ _preserve_foreign_dbh
+ _verify_pid
+ _verify_tid
+);
+
+for my $method (@unimplemented) {
+ __PACKAGE__->meta->add_method($method, sub {
+ croak "$method must not be called on ".(blessed shift).' objects';
+ });
+}
has _master_connect_info_opts =>
(is => 'rw', isa => HashRef, default => sub { {} });
return min map $_->_ping, $self->all_storages;
}
+my $numify_ver = sub {
+ my $ver = shift;
+ my @numparts = split /\D+/, $ver;
+ my $format = '%d.' . (join '', ('%05d') x (@numparts - 1));
+
+ return sprintf $format, @numparts;
+};
+
sub _server_info {
my $self = shift;
if (not $self->_server_info_hash) {
- no warnings 'numeric'; # in case dbms_version doesn't normalize
-
- my @infos =
- map $_->[1],
- sort { $a->[0] <=> $b->[0] }
- map [ (defined $_->{normalized_dbms_version} ? $_->{normalized_dbms_version}
- : $_->{dbms_version}), $_ ],
- map $_->_server_info, $self->all_storages;
-
- my $min_version_info = $infos[0];
+ my $min_version_info = (
+ reduce { $a->[0] < $b->[0] ? $a : $b }
+ map [ $numify_ver->($_->{dbms_version}), $_ ],
+ map $_->_server_info, $self->all_storages
+ )->[1];
$self->_server_info_hash($min_version_info); # on master
}
my $data_type = $col_info->{$selected}{data_type};
- if ($data_type && $data_type =~ /^uniqueidentifier\z/i) {
+ if ($data_type && lc($data_type) eq 'uniqueidentifier') {
$select->[$select_idx] = { UUIDTOSTR => $selected };
}
}
DBIx::Class::Storage::DBI::MSSQL
/;
use mro 'c3';
+use Carp::Clan qw/^DBIx::Class/;
sub _rebless {
my $self = shift;
}
}
+=head2 connect_call_datetime_setup
+
+Used as:
+
+ on_connect_call => 'datetime_setup'
+
+In L<connect_info|DBIx::Class::Storage::DBI/connect_info> to set:
+
+ $dbh->syb_date_fmt('ISO_strict'); # output fmt: 2004-08-21T14:36:48.080Z
+
+On connection for use with L<DBIx::Class::InflateColumn::DateTime>
+
+This works for both C<DATETIME> and C<SMALLDATETIME> columns, although
+C<SMALLDATETIME> columns only have minute precision.
+
+=cut
+
+{
+ my $old_dbd_warned = 0;
+
+ sub connect_call_datetime_setup {
+ my $self = shift;
+ my $dbh = $self->_get_dbh;
+
+ if ($dbh->can('syb_date_fmt')) {
+ # amazingly, this works with FreeTDS
+ $dbh->syb_date_fmt('ISO_strict');
+ } elsif (not $old_dbd_warned) {
+ carp "Your DBD::Sybase is too old to support ".
+ "DBIx::Class::InflateColumn::DateTime, please upgrade!";
+ $old_dbd_warned = 1;
+ }
+ }
+}
+
+sub datetime_parser_type {
+ 'DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server::DateTime::Format'
+}
+
+package # hide from PAUSE
+ DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server::DateTime::Format;
+
+my $datetime_parse_format = '%Y-%m-%dT%H:%M:%S.%3NZ';
+my $datetime_format_format = '%Y-%m-%d %H:%M:%S.%3N'; # %F %T
+
+my ($datetime_parser, $datetime_formatter);
+
+sub parse_datetime {
+ shift;
+ require DateTime::Format::Strptime;
+ $datetime_parser ||= DateTime::Format::Strptime->new(
+ pattern => $datetime_parse_format,
+ on_error => 'croak',
+ );
+ return $datetime_parser->parse_datetime(shift);
+}
+
+sub format_datetime {
+ shift;
+ require DateTime::Format::Strptime;
+ $datetime_formatter ||= DateTime::Format::Strptime->new(
+ pattern => $datetime_format_format,
+ on_error => 'croak',
+ );
+ return $datetime_formatter->format_datetime(shift);
+}
+
1;
=head1 NAME
use lib qw(t/lib);
use DBICTest; # do not remove even though it is not used
-plan tests => 8;
-
my $warnings;
eval {
local $SIG{__WARN__} = sub { $warnings .= shift };
use base qw/DBIx::Class::Schema/;
__PACKAGE__->load_namespaces;
};
-ok(!$@) or diag $@;
-like($warnings, qr/load_namespaces found ResultSet class C with no corresponding Result class/);
+ok(!$@, 'load_namespaces doesnt die') or diag $@;
+like($warnings, qr/load_namespaces found ResultSet class C with no corresponding Result class/, 'Found warning about extra ResultSet classes');
+
+like($warnings, qr/load_namespaces found ResultSet class DBICNSTest::ResultSet::D that does not subclass DBIx::Class::ResultSet/, 'Found warning about ResultSets with incorrect subclass');
my $source_a = DBICNSTest->source('A');
isa_ok($source_a, 'DBIx::Class::ResultSource::Table');
for my $moniker (qw/A B/) {
my $class = "DBICNSTest::Result::$moniker";
- ok(!defined($class->result_source_instance->source_name));
+ ok(!defined($class->result_source_instance->source_name), "Source name of $moniker not defined");
}
+
+done_testing;
$schema2->source("Artist")->name("dbic_t_schema.artist");
$schema->txn_do( sub {
- my $artist = $schema->resultset('Artist')->search(
+ my $rs = $schema->resultset('Artist')->search(
{
artistid => 1
},
$t->{update_lock} ? { for => 'update' } : {}
- )->first;
+ );
+ ok ($rs->count, 'Count works');
+
+ my $artist = $rs->next;
is($artist->artistid, 1, "select returns artistid = 1");
$timed_out = 0;
JOIN cd cd ON cd.cdid = me.cd_id
JOIN artist artist_2 ON artist_2.artistid = cd.artist
GROUP BY me.cd_id
- ) count_subq
+ ) me
)',
[],
);
my $rating = $waves->{rating};
$waves->Rating("PG");
is $rating, "R", 'evaluation of column value is not deferred';
- } qr{^Column 'rating' of 'Film/$waves' was fetched as a hash at \Q$0};
+ } qr{^Column 'rating' of 'Film/$waves' was fetched as a hash at\b};
warnings_like {
is $waves->{title}, $waves->Title, "columns can be accessed as hashes";
JOIN cd disc ON disc.cdid = tracks.cd
WHERE ( ( position = ? OR position = ? ) )
LIMIT 3 OFFSET 8
- ) count_subq
+ ) tracks
)',
[ [ position => 1 ], [ position => 2 ] ],
'count_rs db-side limit applied',
JOIN artist artist ON artist.artistid = cds.artist
WHERE tracks.position = ? OR tracks.position = ?
GROUP BY cds.cdid
- ) count_subq
+ ) cds
',
[ qw/'1' '2'/ ],
'count softlimit applied',
WHERE tracks.position = ? OR tracks.position = ?
GROUP BY cds.cdid
LIMIT 3 OFFSET 4
- ) count_subq
+ ) cds
)',
[ [ 'tracks.position' => 1 ], [ 'tracks.position' => 2 ] ],
'count_rs db-side limit applied',
JOIN artist artist ON artist.artistid = cds.artist
WHERE tracks.position = ? OR tracks.position = ?
GROUP BY cds.cdid
- ) count_subq
+ ) cds
)',
[ map { [ 'tracks.position' => $_ ] } (1, 2) ],
);
WHERE ( genre.name = ? )
GROUP BY genre.genreid
)
- count_subq
+ genre
)',
[ [ 'genre.name' => 'emo' ] ],
);
use Test::More;
use Test::Exception;
+use Scope::Guard ();
use lib qw(t/lib);
use DBICTest;
-my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_MSSQL_ODBC_${_}" } qw/DSN USER PASS/};
+# use this if you keep a copy of DBD::Sybase linked to FreeTDS somewhere else
+BEGIN {
+ if (my $lib_dirs = $ENV{DBICTEST_MSSQL_PERL5LIB}) {
+ unshift @INC, $_ for split /:/, $lib_dirs;
+ }
+}
-if (not ($dsn && $user)) {
+my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_MSSQL_ODBC_${_}" } qw/DSN USER PASS/};
+my ($dsn2, $user2, $pass2) = @ENV{map { "DBICTEST_MSSQL_${_}" } qw/DSN USER PASS/};
+
+if (not ($dsn || $dsn2)) {
plan skip_all =>
- 'Set $ENV{DBICTEST_MSSQL_ODBC_DSN}, _USER and _PASS to run this test' .
+ 'Set $ENV{DBICTEST_MSSQL_ODBC_DSN} and/or $ENV{DBICTEST_MSSQL_DSN} _USER '
+ .'and _PASS to run this test' .
"\nWarning: This test drops and creates a table called 'track'";
} else {
eval "use DateTime; use DateTime::Format::Strptime;";
if ($@) {
plan skip_all => 'needs DateTime and DateTime::Format::Strptime for testing';
}
- else {
- plan tests => 4 * 2; # (tests * dt_types)
- }
}
-my $schema = DBICTest::Schema->clone;
+my @connect_info = (
+ [ $dsn, $user, $pass ],
+ [ $dsn2, $user2, $pass2 ],
+);
+
+my $schema;
+
+for my $connect_info (@connect_info) {
+ my ($dsn, $user, $pass) = @$connect_info;
+
+ next unless $dsn;
-$schema->connection($dsn, $user, $pass);
-$schema->storage->ensure_connected;
+ $schema = DBICTest::Schema->connect($dsn, $user, $pass, {
+ on_connect_call => 'datetime_setup'
+ });
+
+ my $guard = Scope::Guard->new(\&cleanup);
# coltype, column, datehash
-my @dt_types = (
- ['DATETIME',
- 'last_updated_at',
- {
- year => 2004,
- month => 8,
- day => 21,
- hour => 14,
- minute => 36,
- second => 48,
- nanosecond => 500000000,
- }],
- ['SMALLDATETIME', # minute precision
- 'small_dt',
- {
- year => 2004,
- month => 8,
- day => 21,
- hour => 14,
- minute => 36,
- }],
-);
+ my @dt_types = (
+ ['DATETIME',
+ 'last_updated_at',
+ {
+ year => 2004,
+ month => 8,
+ day => 21,
+ hour => 14,
+ minute => 36,
+ second => 48,
+ nanosecond => 500000000,
+ }],
+ ['SMALLDATETIME', # minute precision
+ 'small_dt',
+ {
+ year => 2004,
+ month => 8,
+ day => 21,
+ hour => 14,
+ minute => 36,
+ }],
+ );
-for my $dt_type (@dt_types) {
- my ($type, $col, $sample_dt) = @$dt_type;
+ for my $dt_type (@dt_types) {
+ my ($type, $col, $sample_dt) = @$dt_type;
- eval { $schema->storage->dbh->do("DROP TABLE track") };
- $schema->storage->dbh->do(<<"SQL");
+ eval { $schema->storage->dbh->do("DROP TABLE track") };
+ $schema->storage->dbh->do(<<"SQL");
CREATE TABLE track (
trackid INT IDENTITY PRIMARY KEY,
cd INT,
$col $type,
)
SQL
- ok(my $dt = DateTime->new($sample_dt));
-
- my $row;
- ok( $row = $schema->resultset('Track')->create({
- $col => $dt,
- cd => 1,
- }));
- ok( $row = $schema->resultset('Track')
- ->search({ trackid => $row->trackid }, { select => [$col] })
- ->first
- );
- is( $row->$col, $dt, 'DateTime roundtrip' );
+ ok(my $dt = DateTime->new($sample_dt));
+
+ my $row;
+ ok( $row = $schema->resultset('Track')->create({
+ $col => $dt,
+ cd => 1,
+ }));
+ ok( $row = $schema->resultset('Track')
+ ->search({ trackid => $row->trackid }, { select => [$col] })
+ ->first
+ );
+ is( $row->$col, $dt, "$type roundtrip" );
+
+ is( $row->$col->nanosecond, $sample_dt->{nanosecond},
+ 'DateTime fractional portion roundtrip' )
+ if exists $sample_dt->{nanosecond};
+ }
}
+done_testing;
+
# clean up our mess
-END {
- if (my $dbh = eval { $schema->storage->_dbh }) {
+sub cleanup {
+  # Invoked via the Scope::Guard created right after connecting, so the
+  # test table is dropped even if an assertion above dies mid-loop.
+  # eval{}: the storage may never have connected successfully.
+  if (my $dbh = eval { $schema->storage->dbh }) {
    $dbh->do('DROP TABLE track');
  }
}
->search({ trackid => $row->trackid }, { select => [$col] })
->first
);
- is( $row->$col, $dt, 'DateTime roundtrip' );
+ is( $row->$col, $dt, "$type roundtrip" );
+
+ is( $row->$col->nanosecond, $dt->nanosecond,
+ 'fractional DateTime portion roundtrip' )
+ if $dt->nanosecond > 0;
}
# test a computed datetime column
--- /dev/null
+# Minimal Result fixture paired with DBICNSTest::ResultSet::D, used by the
+# load_namespaces warning tests.
+package DBICNSTest::Result::D;
+use base qw/DBIx::Class::Core/;
+__PACKAGE__->table('d');
+__PACKAGE__->add_columns('d');
+1;
--- /dev/null
+# Deliberately does NOT subclass DBIx::Class::ResultSet: load_namespaces
+# should warn about it, which is what the test asserts.
+package DBICNSTest::ResultSet::D;
+1;
WHERE ( me.cd IN ( ?, ?, ?, ?, ? ) )
GROUP BY me.cd
)
- count_subq
+ me
)',
[ map { [ 'me.cd' => $_] } ($cd_rs->get_column ('cdid')->all) ],
'count() query generated expected SQL',
WHERE ( me.cdid IS NOT NULL )
GROUP BY me.cdid
LIMIT 2
- ) count_subq
+ ) me
)',
[],
'count() query generated expected SQL',
WHERE ( me.cd IN ( ?, ?, ?, ?, ? ) )
GROUP BY SUBSTR(me.cd, 1, 1)
)
- count_subq
+ me
)',
[ map { [ 'me.cd' => $_] } ($cd_rs->get_column ('cdid')->all) ],
'count() query generated expected SQL',
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBIC::SqlMakerTest;
+
+my $schema = DBICTest->init_schema;
+
+$schema->storage->_sql_maker->limit_dialect ('RowNumberOver');
+
+my $rs_selectas_col = $schema->resultset ('BooksInLibrary')->search ({}, {
+ '+select' => ['owner.name'],
+ '+as' => ['owner.name'],
+ join => 'owner',
+ rows => 1,
+});
+
+is_same_sql_bind(
+ $rs_selectas_col->as_query,
+ '(
+ SELECT id, source, owner, title, price,
+ owner__name
+ FROM (
+ SELECT id, source, owner, title, price,
+ owner__name,
+ ROW_NUMBER() OVER( ) AS rno__row__index
+ FROM (
+ SELECT me.id, me.source, me.owner, me.title, me.price,
+ owner.name AS owner__name
+ FROM books me
+ JOIN owners owner ON owner.id = me.owner
+ WHERE ( source = ? )
+ ) me
+ ) me
+ WHERE rno__row__index BETWEEN 1 AND 1
+ )',
+ [ [ 'source', 'Library' ] ],
+);
+
+$schema->storage->_sql_maker->quote_char ([qw/ [ ] /]);
+$schema->storage->_sql_maker->name_sep ('.');
+
+my $rs_selectas_rel = $schema->resultset ('BooksInLibrary')->search ({}, {
+ '+select' => ['owner.name'],
+ '+as' => ['owner_name'],
+ join => 'owner',
+ rows => 1,
+});
+
+is_same_sql_bind(
+ $rs_selectas_rel->as_query,
+ '(
+ SELECT [id], [source], [owner], [title], [price],
+ [owner_name]
+ FROM (
+ SELECT [id], [source], [owner], [title], [price],
+ [owner_name],
+ ROW_NUMBER() OVER( ) AS [rno__row__index]
+ FROM (
+ SELECT [me].[id], [me].[source], [me].[owner], [me].[title], [me].[price],
+ [owner].[name] AS [owner_name]
+ FROM [books] [me]
+ JOIN [owners] [owner] ON [owner].[id] = [me].[owner]
+ WHERE ( [source] = ? )
+ ) [me]
+ ) [me]
+ WHERE [rno__row__index] BETWEEN 1 AND 1
+ )',
+ [ [ 'source', 'Library' ] ],
+);
+
+done_testing;
# Trick the sqlite DB to use Top limit emulation
# We could test all of this via $sq->$op directly,
# but some conditions need a $rsrc
-delete $schema->storage->_sql_maker->{_cached_syntax};
$schema->storage->_sql_maker->limit_dialect ('Top');
my $rs = $schema->resultset ('BooksInLibrary')->search ({}, { prefetch => 'owner', rows => 1, offset => 3 });
my @default_tests = ( undef, '', {}, [] );
-plan (tests => scalar @tests + scalar @default_tests + 1);
-
test_order ($_) for @tests;
default_test_order ($_) for @default_tests;
ORDER BY title)' ,
[ [ source => 'Library' ], [ source => 'Library' ] ],
);
+
+my $rs_selectas_top = $schema->resultset ('BooksInLibrary')->search ({}, {
+ '+select' => ['owner.name'],
+ '+as' => ['owner_name'],
+ join => 'owner',
+ rows => 1
+});
+
+is_same_sql_bind( $rs_selectas_top->search({})->as_query,
+ '(SELECT
+ TOP 1 me.id, me.source, me.owner, me.title, me.price,
+ owner.name
+ FROM books me
+ JOIN owners owner ON owner.id = me.owner
+ WHERE ( source = ? )
+ ORDER BY me.id ASC
+ )',
+ [ [ 'source', 'Library' ] ],
+ );
+
+done_testing;
'artist.name' => 'Caterwauler McCrae',
'me.year' => 2001
},
- [],
+ {},
undef,
undef
);
'me.year'
],
undef,
- 'year DESC',
+ { order_by => 'year DESC' },
undef,
undef
);
'me.year'
],
undef,
- [
+ { order_by => [
'year DESC',
'title ASC'
- ],
+ ]},
undef,
undef
);
'me.year'
],
undef,
- { -desc => 'year' },
+ { order_by => { -desc => 'year' } },
undef,
undef
);
'me.year'
],
undef,
- [
+ { order_by => [
{ -desc => 'year' },
- { -asc => 'title' }
- ],
+ { -asc => 'title' },
+ ]},
undef,
undef
);
'me.year'
],
undef,
- \'year DESC',
+ { order_by => \'year DESC' },
undef,
undef
);
'me.year'
],
undef,
- [
+ { order_by => [
\'year DESC',
\'title ASC'
- ],
+ ]},
undef,
undef
);
'me.*'
],
undef,
- [],
undef,
- undef
+ undef,
+ undef,
);
is_same_sql_bind(
'artist.name' => 'Caterwauler McCrae',
'me.year' => 2001
},
- [],
undef,
- undef
+ undef,
+ undef,
);
is_same_sql_bind(
--- /dev/null
+#!/usr/bin/perl
+use strict;
+use warnings;
+use lib qw(t/lib);
+use DBICTest;
+use Test::More;
+use Test::Exception;
+
+# Scrub any ambient DBI env vars at compile time so they cannot leak into
+# the driver-resolution logic this test exercises.
+BEGIN { delete @ENV{qw(DBI_DSN DBI_DRIVER)} }
+
+my $schema;
+
+DBICTest->init_schema(sqlite_use_file => 1);
+
+my $dbname = DBICTest->_sqlite_dbname(sqlite_use_file => 1);
+
+# Run a real query against the given schema; forces an actual connect so
+# driver resolution (from DSN / DBI_DSN / DBI_DRIVER) happens, which is
+# what the throws_ok/lives_ok checks below observe.
+sub count_sheep {
+  my $schema = shift;
+  scalar $schema->resultset('Artist')->search( { name => "Exploding Sheep" } )
+    ->all;
+}
+
+$schema = DBICTest::Schema->connect("dbi::$dbname");
+throws_ok { count_sheep($schema) } qr{I can't work out what driver to use},
+ 'Driver in DSN empty';
+isa_ok $schema->storage, 'DBIx::Class::Storage::DBI';
+
+$schema = DBICTest::Schema->connect("dbi:Test_NonExistant_DBD:$dbname");
+throws_ok { count_sheep($schema) }
+ qr{Can't locate DBD/Test_NonExistant_DBD\.pm in \@INC},
+ "Driver class doesn't exist";
+isa_ok $schema->storage, 'DBIx::Class::Storage::DBI';
+
+$ENV{DBI_DSN} = "dbi::$dbname";
+$schema = DBICTest::Schema->connect;
+throws_ok { count_sheep($schema) } qr{I can't work out what driver to use},
+ "Driver class not defined in DBI_DSN either.";
+isa_ok $schema->storage, 'DBIx::Class::Storage::DBI';
+
+$ENV{DBI_DSN} = "dbi:Test_NonExistant_DBD2:$dbname";
+$schema = DBICTest::Schema->connect;
+throws_ok { count_sheep($schema) }
+ qr{Can't locate DBD/Test_NonExistant_DBD2\.pm in \@INC},
+ "Driver class defined in DBI_DSN doesn't exist";
+isa_ok $schema->storage, 'DBIx::Class::Storage::DBI';
+
+$ENV{DBI_DSN} = "dbi::$dbname";
+$ENV{DBI_DRIVER} = 'Test_NonExistant_DBD3';
+$schema = DBICTest::Schema->connect;
+throws_ok { count_sheep($schema) }
+ qr{Can't locate DBD/Test_NonExistant_DBD3\.pm in \@INC},
+ "Driver class defined in DBI_DRIVER doesn't exist";
+isa_ok $schema->storage, 'DBIx::Class::Storage::DBI';
+
+# Same check as the DBI_DRIVER case above, but with the bogus driver
+# supplied via DBI_DSN instead.
+$ENV{DBI_DSN} = "dbi:Test_NonExistant_DBD4:$dbname";
+$schema = DBICTest::Schema->connect;
+throws_ok { count_sheep($schema) }
+  qr{Can't locate DBD/Test_NonExistant_DBD4\.pm in \@INC},
+  "Driver class defined in DBI_DSN doesn't exist";
+isa_ok $schema->storage, 'DBIx::Class::Storage::DBI';
+
+# Positive cases: a real driver (SQLite) supplied in each of the three
+# possible places, alone and in combination.
+delete @ENV{qw(DBI_DSN DBI_DRIVER)};
+
+$schema = DBICTest::Schema->connect("dbi:SQLite:$dbname");
+lives_ok { count_sheep($schema) } 'SQLite passed to connect_info';
+isa_ok $schema->storage, 'DBIx::Class::Storage::DBI::SQLite';
+
+$ENV{DBI_DRIVER} = 'SQLite';
+$schema = DBICTest::Schema->connect("dbi::$dbname");
+lives_ok { count_sheep($schema) } 'SQLite in DBI_DRIVER';
+isa_ok $schema->storage, 'DBIx::Class::Storage::DBI::SQLite';
+
+# delete, not "undef $ENV{...}": assigning undef to a %ENV slot warns
+# under "use warnings" and exports an empty variable to child processes.
+delete $ENV{DBI_DRIVER};
+$ENV{DBI_DSN} = "dbi:SQLite:$dbname";
+$schema = DBICTest::Schema->connect;
+lives_ok { count_sheep($schema) } 'SQLite in DBI_DSN';
+isa_ok $schema->storage, 'DBIx::Class::Storage::DBI::SQLite';
+
+$ENV{DBI_DRIVER} = 'SQLite';
+$schema = DBICTest::Schema->connect;
+lives_ok { count_sheep($schema) } 'SQLite in DBI_DSN (and DBI_DRIVER)';
+isa_ok $schema->storage, 'DBIx::Class::Storage::DBI::SQLite';
+
+$ENV{DBI_DSN} = "dbi::$dbname";
+$ENV{DBI_DRIVER} = 'SQLite';
+$schema = DBICTest::Schema->connect;
+lives_ok { count_sheep($schema) } 'SQLite in DBI_DRIVER (not DBI_DSN)';
+isa_ok $schema->storage, 'DBIx::Class::Storage::DBI::SQLite';
+done_testing;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+
+use lib qw(t/lib);
+use DBICTest;
+
+use File::Spec;
+use File::Path qw/ mkpath rmtree /;
+
+
+my $schema = DBICTest->init_schema();
+
+# catdir, not catfile: $var names a directory (mkpath'd below), matching
+# the catdir usage for $test_dir_1.
+my $var = File::Spec->catdir(qw| t var create_ddl_dir |);
+-d $var
+  or mkpath($var)
+  or die "can't create $var";
+
+# Target dir is deliberately several levels deep and absent, to prove
+# create_ddl_dir mkpath's the whole chain.
+my $test_dir_1 = File::Spec->catdir( $var, 'test1', 'foo', 'bar' );
+rmtree( $test_dir_1 ) if -d $test_dir_1;
+$schema->create_ddl_dir( undef, undef, $test_dir_1 );
+
+ok( -d $test_dir_1, 'create_ddl_dir did a mkpath on its target dir' );
+ok( scalar( glob $test_dir_1.'/*.sql' ), 'there are sql files in there' );
+
+TODO: {
+  local $TODO = 'we should probably add some tests here for actual deployability of the DDL?';
+  ok( 0 );
+}
+
+done_testing;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+
+use lib qw(t/lib);
+use DBICTest;
+
+# For each RDBMS with a configured test DSN: verify that a forked child
+# exiting (and an out-of-order schema destruction) does not tear down the
+# parent's $dbh, even in the middle of a transaction.
+for my $type (qw/PG MYSQL/) {
+
+  SKIP: {
+    skip "Skipping $type tests without DBICTEST_${type}_DSN", 1
+      unless $ENV{"DBICTEST_${type}_DSN"};
+
+    my $schema = DBICTest::Schema->connect (@ENV{map { "DBICTEST_${type}_${_}" } qw/DSN USER PASS/});
+
+    # emulate a singleton-factory, just cache the object *somewhere in a different package*
+    # to induce out-of-order destruction
+    $DBICTest::FakeSchemaFactory::schema = $schema;
+
+    # so we can see the retry exceptions (if any)
+    $ENV{DBIC_DBIRETRY_DEBUG} = 1;
+
+    ok (!$schema->storage->connected, "$type: start disconnected");
+
+    lives_ok (sub {
+      $schema->txn_do (sub {
+
+        ok ($schema->storage->connected, "$type: transaction starts connected");
+
+        # The child inherits the parent's $dbh; its exit must not kill the
+        # parent's connection.
+        my $pid = fork();
+        SKIP: {
+          skip "Fork failed: $!", 1 if (! defined $pid);
+
+          if ($pid) {
+            note "Parent $$ sleeping...";
+            wait();
+            note "Parent $$ woken up after child $pid exit";
+          }
+          else {
+            note "Child $$ terminating";
+            exit 0;
+          }
+
+          # Only the parent reaches this point (child exited above).
+          ok ($schema->storage->connected, "$type: parent still connected (in txn_do)");
+        }
+      });
+    });
+
+    ok ($schema->storage->connected, "$type: parent still connected (outside of txn_do)");
+
+    undef $DBICTest::FakeSchemaFactory::schema;
+  }
+}
+
+done_testing;