- use inc::Module::Install 0.89;
+ use inc::Module::Install 0.93;
use strict;
use warnings;
use POSIX ();
use 5.008001;
- # ****** DO NOT ADD OPTIONAL DEPENDENCIES. EVER. --mst ******
+ use FindBin;
+ use lib "$FindBin::Bin/lib";
+
+ ###
+ ### DO NOT ADD OPTIONAL DEPENDENCIES HERE, EVEN AS recommends()
+ ### All of them should go to DBIx::Class::Optional::Dependencies
+ ###
+
name 'DBIx-Class';
perl_version '5.008001';
all_from 'lib/DBIx/Class.pm';
+ my $build_requires = {
+ 'DBD::SQLite' => '1.25',
+ };
+
+ my $test_requires = {
+ 'File::Temp' => '0.22',
+ 'Test::Builder' => '0.33',
+ 'Test::Deep' => '0',
+ 'Test::Exception' => '0',
+ 'Test::More' => '0.92',
+ 'Test::Warn' => '0.21',
+ };
+
+ my $runtime_requires = {
+ # Core
+ 'List::Util' => '0',
+ 'Scalar::Util' => '0',
+ 'Storable' => '0',
+
+ # Dependencies
+ 'Carp::Clan' => '6.0',
+ 'Class::Accessor::Grouped' => '0.09002',
+ 'Class::C3::Componentised' => '1.0005',
+ 'Class::Inspector' => '1.24',
+ 'Data::Page' => '2.00',
+ 'DBI' => '1.609',
+ 'MRO::Compat' => '0.09',
+ 'Module::Find' => '0.06',
+ 'Path::Class' => '0.18',
+ 'Scope::Guard' => '0.03',
+ 'SQL::Abstract' => '1.61',
+ 'SQL::Abstract::Limit' => '0.13',
+ 'Sub::Name' => '0.04',
+ 'Data::Dumper::Concise' => '1.000',
+ };
+
+ # this is so we can order requires alphabetically
+ # copies are needed for author requires injection
+ my $reqs = {
+ build_requires => { %$build_requires },
+ requires => { %$runtime_requires },
+ test_requires => { %$test_requires },
+ };
+
+ # re-build README and require extra modules for testing if we're in a checkout
+ if ($Module::Install::AUTHOR) {
+
+ print "Regenerating README\n";
+ system('pod2text lib/DBIx/Class.pm > README');
+
+ if (-f 'MANIFEST') {
+ print "Removing MANIFEST\n";
+ unlink 'MANIFEST';
+ }
+
+ print "Regenerating Optional/Dependencies.pod\n";
+ require DBIx::Class::Optional::Dependencies;
+ DBIx::Class::Optional::Dependencies->_gen_pod;
+
+ # FIXME Disabled due to unsolved issues, ask theorbtwo
+ # require Module::Install::Pod::Inherit;
+ # PodInherit();
+
+ warn <<'EOW';
+ ******************************************************************************
+ ******************************************************************************
+ *** ***
+ *** AUTHOR MODE: all optional test dependencies converted to hard requires ***
+ *** ***
+ ******************************************************************************
+ ******************************************************************************
+
+ EOW
+
+ $reqs->{test_requires} = {
+ %{$reqs->{test_requires}},
+ %{DBIx::Class::Optional::Dependencies->_all_optional_requirements},
+ };
+ }
+
+ # compose final req list, for alphabetical ordering
+ my %final_req;
+ for my $rtype (keys %$reqs) {
+ for my $mod (keys %{$reqs->{$rtype}} ) {
+
+ # sanity check req duplications
+ if ($final_req{$mod}) {
+ die "$mod specified as both a '$rtype' and a '$final_req{$mod}[0]'\n";
+ }
- test_requires 'Test::Builder' => '0.33';
- test_requires 'Test::Deep' => '0';
- test_requires 'Test::Exception' => '0';
- test_requires 'Test::More' => '0.92';
- test_requires 'Test::Warn' => '0.21';
-
- test_requires 'File::Temp' => '0.22';
-
-
- # Core
- requires 'List::Util' => '0';
- requires 'Scalar::Util' => '0';
- requires 'Storable' => '0';
-
- # Dependencies (keep in alphabetical order)
- requires 'Carp::Clan' => '6.0';
- requires 'Class::Accessor::Grouped' => '0.09002';
- requires 'Class::C3::Componentised' => '1.0005';
- requires 'Class::Inspector' => '1.24';
- requires 'Data::Page' => '2.00';
- requires 'DBD::SQLite' => '1.25';
- requires 'DBI' => '1.609';
- requires 'JSON::Any' => '1.18';
- requires 'MRO::Compat' => '0.09';
- requires 'Module::Find' => '0.06';
- requires 'Path::Class' => '0.16';
- requires 'Scope::Guard' => '0.03';
- requires 'SQL::Abstract' => '1.60';
- requires 'SQL::Abstract::Limit' => '0.13';
- requires 'Sub::Name' => '0.04';
- requires 'Data::Dumper::Concise' => '1.000';
-
- my %replication_requires = (
- 'Moose', => '0.90',
- 'MooseX::Types', => '0.21',
- 'namespace::clean' => '0.11',
- 'Hash::Merge', => '0.11',
- );
-
- #************************************************************************#
- # Make *ABSOLUTELY SURE* that nothing on this list is a real require, #
- # since every module listed in %force_requires_if_author is deleted #
- # from the final META.yml (thus will never make it as a CPAN dependency) #
- #************************************************************************#
- my %force_requires_if_author = (
- %replication_requires,
-
- # when changing also adjust $DBIx::Class::Storage::DBI::minimum_sqlt_version
- 'SQL::Translator' => '0.11002',
-
- # 'Module::Install::Pod::Inherit' => '0.01',
-
- # when changing also adjust version in t/02pod.t
- 'Test::Pod' => '1.26',
-
- # when changing also adjust version in t/06notabs.t
- # 'Test::NoTabs' => '0.9',
-
- # when changing also adjust version in t/07eol.t
- # 'Test::EOL' => '0.6',
-
- # when changing also adjust version in t/03podcoverage.t
- 'Test::Pod::Coverage' => '1.08',
- 'Pod::Coverage' => '0.20',
-
- # CDBI-compat related
- 'DBIx::ContextualFetch' => '0',
- 'Class::DBI::Plugin::DeepAbstractSearch' => '0',
- 'Class::Trigger' => '0',
- 'Time::Piece::MySQL' => '0',
- 'Clone' => '0',
- 'Date::Simple' => '3.03',
-
- # t/52cycle.t
- 'Test::Memory::Cycle' => '0',
- 'Devel::Cycle' => '1.10',
-
- # t/36datetime.t
- # t/60core.t
- 'DateTime::Format::SQLite' => '0',
-
- # t/96_is_deteministic_value.t
- 'DateTime::Format::Strptime'=> '0',
-
- # database-dependent reqs
- #
- $ENV{DBICTEST_PG_DSN}
- ? (
- 'Sys::SigAction' => '0',
- 'DBD::Pg' => '2.009002',
- 'DateTime::Format::Pg' => '0',
- ) : ()
- ,
-
- $ENV{DBICTEST_MYSQL_DSN}
- ? (
- 'DateTime::Format::MySQL' => '0',
- ) : ()
- ,
-
- $ENV{DBICTEST_ORA_DSN}
- ? (
- 'DateTime::Format::Oracle' => '0',
- ) : ()
- ,
-
- $ENV{DBICTEST_SYBASE_DSN}
- ? (
- 'DateTime::Format::Sybase' => 0,
- ) : ()
- ,
- );
- #************************************************************************#
- # Make ABSOLUTELY SURE that nothing on the list above is a real require, #
- # since every module listed in %force_requires_if_author is deleted #
- # from the final META.yml (thus will never make it as a CPAN dependency) #
- #************************************************************************#
+ $final_req{$mod} = [ $rtype, $reqs->{$rtype}{$mod}||0 ],
+ }
+ }
+ # actual require
+ for my $mod (sort keys %final_req) {
+ my ($rtype, $ver) = @{$final_req{$mod}};
+ no strict 'refs';
+ $rtype->($mod, $ver);
+ }
install_script (qw|
script/dbicadmin
lib/DBIx/Class/PK/Auto
|);
no_index package => $_ for (qw/
DBIx::Class::SQLAHacks DBIx::Class::Storage::DBIHacks
/);
- # re-build README and require extra modules for testing if we're in a checkout
-
- if ($Module::Install::AUTHOR) {
- warn <<'EOW';
- ******************************************************************************
- ******************************************************************************
- *** ***
- *** AUTHOR MODE: all optional test dependencies converted to hard requires ***
- *** ***
- ******************************************************************************
- ******************************************************************************
-
- EOW
-
- foreach my $module (sort keys %force_requires_if_author) {
- build_requires ($module => $force_requires_if_author{$module});
- }
-
- print "Regenerating README\n";
- system('pod2text lib/DBIx/Class.pm > README');
-
- if (-f 'MANIFEST') {
- print "Removing MANIFEST\n";
- unlink 'MANIFEST';
- }
-
- # require Module::Install::Pod::Inherit;
- # PodInherit();
- }
auto_install();
WriteAll();
+
# Re-write META.yml to _exclude_ all forced requires (we do not want to ship this)
if ($Module::Install::AUTHOR) {
+ # FIXME test_requires is not yet part of META
+ my %original_build_requires = ( %$build_requires, %$test_requires );
+
+ print "Regenerating META with author requires excluded\n";
Meta->{values}{build_requires} = [ grep
- { not exists $force_requires_if_author{$_->[0]} }
- ( @{Meta->{values}{build_requires}} )
+ { exists $original_build_requires{$_->[0]} }
+ ( @{Meta->{values}{build_requires}} )
];
Meta->write;
$rows = $self->get_cache;
}
+ # reset the selector list
+ if (List::Util::first { exists $attrs->{$_} } qw{columns select as}) {
+ delete @{$our_attrs}{qw{select as columns +select +as +columns include_columns}};
+ }
+
my $new_attrs = { %{$our_attrs}, %{$attrs} };
# merge new attrs into inherited
- foreach my $key (qw/join prefetch +select +as bind/) {
+ foreach my $key (qw/join prefetch +select +as +columns include_columns bind/) {
next unless exists $attrs->{$key};
$new_attrs->{$key} = $self->_merge_attr($our_attrs->{$key}, $attrs->{$key});
}
# if we multi-prefetch we group_by primary keys only as this is what we would
# get out of the rs via ->next/->all. We *DO WANT* to clobber old group_by regardless
if ( keys %{$attrs->{collapse}} ) {
- $sub_attrs->{group_by} = [ map { "$attrs->{alias}.$_" } ($rsrc->primary_columns) ]
+ $sub_attrs->{group_by} = [ map { "$attrs->{alias}.$_" } ($rsrc->_pri_cols) ]
}
$sub_attrs->{select} = $rsrc->storage->_subq_count_select ($rsrc, $sub_attrs);
my $attrs = $self->_resolved_attrs_copy;
delete $attrs->{$_} for qw/collapse select as/;
- $attrs->{columns} = [ map { "$attrs->{alias}.$_" } ($self->result_source->primary_columns) ];
+ $attrs->{columns} = [ map { "$attrs->{alias}.$_" } ($self->result_source->_pri_cols) ];
if ($needs_group_by_subq) {
# make sure no group_by was supplied, or if there is one - make sure it matches
return !!$self->{attrs}{page};
}
+ =head2 is_ordered
+
+ =over 4
+
+ =item Arguments: none
+
+ =item Return Value: true, if the resultset has been ordered with C<order_by>.
+
+ =back
+
+ =cut
+
+ sub is_ordered {
+ my ($self) = @_;
+ return scalar $self->result_source->storage->_parse_order_by($self->{attrs}{order_by});
+ }
+
=head2 related_resultset
=over 4
->relname_to_table_alias($rel, $join_count);
# since this is search_related, and we already slid the select window inwards
- # (the select/as attrs were deleted in the beginning), we need to flip all
+ # (the select/as attrs were deleted in the beginning), we need to flip all
# left joins to inner, so we get the expected results
# read the comment on top of the actual function to see what this does
$attrs->{from} = $rsrc->schema->storage->_straight_join_to_node ($attrs->{from}, $alias);
return ($self->{attrs} || {})->{alias} || 'me';
}
+ =head2 as_subselect_rs
+
+ =over 4
+
+ =item Arguments: none
+
+ =item Return Value: $resultset
+
+ =back
+
+ Act as a barrier to SQL symbols. The resultset provided will be made into a
+ "virtual view" by including it as a subquery within the from clause. From this
+ point on, any joined tables are inaccessible to ->search on the resultset (as if
+ it were simply where-filtered without joins). For example:
+
+ my $rs = $schema->resultset('Bar')->search({'x.name' => 'abc'},{ join => 'x' });
+
+ # 'x' now pollutes the query namespace
+
+ # So the following works as expected
+ my $ok_rs = $rs->search({'x.other' => 1});
+
+ # But this doesn't: instead of finding a 'Bar' related to two x rows (abc and
+ # def) we look for one row with contradictory terms and join in another table
+ # (aliased 'x_2') which we never use
+ my $broken_rs = $rs->search({'x.name' => 'def'});
+
+ my $rs2 = $rs->as_subselect_rs;
+
+ # doesn't work - 'x' is no longer accessible in $rs2, having been sealed away
+ my $not_joined_rs = $rs2->search({'x.other' => 1});
+
+   # works as expected: finds a 'Bar' row related to two x rows (abc and def)
+ my $correctly_joined_rs = $rs2->search({'x.name' => 'def'});
+
+ Another example of when one might use this would be to select a subset of
+ columns in a group by clause:
+
+ my $rs = $schema->resultset('Bar')->search(undef, {
+ group_by => [qw{ id foo_id baz_id }],
+ })->as_subselect_rs->search(undef, {
+ columns => [qw{ id foo_id }]
+ });
+
+ In the above example normally columns would have to be equal to the group by,
+ but because we isolated the group by into a subselect the above works.
+
+ =cut
+
+ sub as_subselect_rs {
+ my $self = shift;
+
+ return $self->result_source->resultset->search( undef, {
+ alias => $self->current_source_alias,
+ from => [{
+ $self->current_source_alias => $self->as_query,
+ -alias => $self->current_source_alias,
+ -source_handle => $self->result_source->handle,
+ }]
+ });
+ }
+
# This code is called by search_related, and makes sure there
# is clear separation between the joins before, during, and
# after the relationship. This information is needed later
# build columns (as long as select isn't set) into a set of as/select hashes
unless ( $attrs->{select} ) {
- my @cols = ( ref($attrs->{columns}) eq 'ARRAY' )
- ? @{ delete $attrs->{columns}}
- : (
- ( delete $attrs->{columns} )
- ||
- $source->columns
- )
- ;
-
- @colbits = map {
- ( ref($_) eq 'HASH' )
- ? $_
- : {
- (
- /^\Q${alias}.\E(.+)$/
- ? "$1"
- : "$_"
- )
- =>
- (
- /\./
- ? "$_"
- : "${alias}.$_"
- )
- }
- } @cols;
+ my @cols;
+ if ( ref $attrs->{columns} eq 'ARRAY' ) {
+ @cols = @{ delete $attrs->{columns}}
+ } elsif ( defined $attrs->{columns} ) {
+ @cols = delete $attrs->{columns}
+ } else {
+ @cols = $source->columns
+ }
+
+ for (@cols) {
+ if ( ref $_ eq 'HASH' ) {
+ push @colbits, $_
+ } else {
+ my $key = /^\Q${alias}.\E(.+)$/
+ ? "$1"
+ : "$_";
+ my $value = /\./
+ ? "$_"
+ : "${alias}.$_";
+ push @colbits, { $key => $value };
+ }
+ }
}
# add the additional columns on
- foreach ( 'include_columns', '+columns' ) {
- push @colbits, map {
- ( ref($_) eq 'HASH' )
- ? $_
- : { ( split( /\./, $_ ) )[-1] => ( /\./ ? $_ : "${alias}.$_" ) }
- } ( ref($attrs->{$_}) eq 'ARRAY' ) ? @{ delete $attrs->{$_} } : delete $attrs->{$_} if ( $attrs->{$_} );
+ foreach (qw{include_columns +columns}) {
+ if ( $attrs->{$_} ) {
+ my @list = ( ref($attrs->{$_}) eq 'ARRAY' )
+ ? @{ delete $attrs->{$_} }
+ : delete $attrs->{$_};
+ for (@list) {
+ if ( ref($_) eq 'HASH' ) {
+ push @colbits, $_
+ } else {
+ my $key = ( split /\./, $_ )[-1];
+ my $value = ( /\./ ? $_ : "$alias.$_" );
+ push @colbits, { $key => $value };
+ }
+ }
+ }
}
# start with initial select items
( ref $attrs->{select} eq 'ARRAY' )
? [ @{ $attrs->{select} } ]
: [ $attrs->{select} ];
- $attrs->{as} = (
- $attrs->{as}
- ? (
- ref $attrs->{as} eq 'ARRAY'
- ? [ @{ $attrs->{as} } ]
- : [ $attrs->{as} ]
+
+ if ( $attrs->{as} ) {
+ $attrs->{as} =
+ (
+ ref $attrs->{as} eq 'ARRAY'
+ ? [ @{ $attrs->{as} } ]
+ : [ $attrs->{as} ]
)
- : [ map { m/^\Q${alias}.\E(.+)$/ ? $1 : $_ } @{ $attrs->{select} } ]
- );
+ } else {
+ $attrs->{as} = [ map {
+ m/^\Q${alias}.\E(.+)$/
+ ? $1
+ : $_
+ } @{ $attrs->{select} }
+ ]
+ }
}
else {
}
# now add colbits to select/as
- push( @{ $attrs->{select} }, map { values( %{$_} ) } @colbits );
- push( @{ $attrs->{as} }, map { keys( %{$_} ) } @colbits );
+ push @{ $attrs->{select} }, map values %{$_}, @colbits;
+ push @{ $attrs->{as} }, map keys %{$_}, @colbits;
- my $adds;
- if ( $adds = delete $attrs->{'+select'} ) {
+ if ( my $adds = delete $attrs->{'+select'} ) {
$adds = [$adds] unless ref $adds eq 'ARRAY';
- push(
- @{ $attrs->{select} },
- map { /\./ || ref $_ ? $_ : "${alias}.$_" } @$adds
- );
+ push @{ $attrs->{select} },
+ map { /\./ || ref $_ ? $_ : "$alias.$_" } @$adds;
}
- if ( $adds = delete $attrs->{'+as'} ) {
+ if ( my $adds = delete $attrs->{'+as'} ) {
$adds = [$adds] unless ref $adds eq 'ARRAY';
- push( @{ $attrs->{as} }, @$adds );
+ push @{ $attrs->{as} }, @$adds;
}
- $attrs->{from} ||= [ {
+ $attrs->{from} ||= [{
-source_handle => $source->handle,
-alias => $self->{attrs}{alias},
$self->{attrs}{alias} => $source->from,
- } ];
+ }];
if ( $attrs->{join} || $attrs->{prefetch} ) {
$join,
$alias,
{ %{ $attrs->{seen_join} || {} } },
- ($attrs->{seen_join} && keys %{$attrs->{seen_join}})
+ ( $attrs->{seen_join} && keys %{$attrs->{seen_join}})
? $attrs->{from}[-1][0]{-join_path}
: []
,
my %already_grouped = map { $_ => 1 } (@{$attrs->{group_by}});
my $storage = $self->result_source->schema->storage;
- my $sql_maker = $storage->sql_maker;
- local $sql_maker->{quote_char}; #disable quoting
my $rs_column_list = $storage->_resolve_column_info ($attrs->{from});
- my @chunks = $sql_maker->_order_by_chunks ($attrs->{order_by});
- for my $chunk (map { ref $_ ? @$_ : $_ } (@chunks) ) {
- $chunk =~ s/\s+ (?: ASC|DESC ) \s* $//ix;
+ for my $chunk ($storage->_parse_order_by($attrs->{order_by})) {
if ($rs_column_list->{$chunk} && not $already_grouped{$chunk}++) {
push @{$attrs->{group_by}}, $chunk;
}
return @{shift->_primaries||[]};
}
+sub _pri_cols {
+ my $self = shift;
+ my @pcols = $self->primary_columns
+ or $self->throw_exception (sprintf(
+ 'Operation requires a primary key to be declared on %s via set_primary_key',
+ ref $self,
+ ));
+ return @pcols;
+}
+
=head2 add_unique_constraint
=over 4
return $found;
}
- sub resolve_join {
- carp 'resolve_join is a private method, stop calling it';
- my $self = shift;
- $self->_resolve_join (@_);
- }
-
# Returns the {from} structure used to express JOIN conditions
sub _resolve_join {
my ($self, $join, $alias, $seen, $jpath, $parent_force_left) = @_;
: $rel_info->{attrs}{join_type}
,
-join_path => [@$jpath, { $join => $as } ],
- -is_single => (List::Util::first { $rel_info->{attrs}{accessor} eq $_ } (qw/single filter/) ),
+ -is_single => (
+ $rel_info->{attrs}{accessor}
+ &&
+ List::Util::first { $rel_info->{attrs}{accessor} eq $_ } (qw/single filter/)
+ ),
-alias => $as,
-relation_chain_depth => $seen->{-relation_chain_depth} || 0,
},
}
}
- # Legacy code, needs to go entirely away (fully replaced by _resolve_prefetch)
- sub resolve_prefetch {
- carp 'resolve_prefetch is a private method, stop calling it';
-
- my ($self, $pre, $alias, $seen, $order, $collapse) = @_;
- $seen ||= {};
- if( ref $pre eq 'ARRAY' ) {
- return
- map { $self->resolve_prefetch( $_, $alias, $seen, $order, $collapse ) }
- @$pre;
- }
- elsif( ref $pre eq 'HASH' ) {
- my @ret =
- map {
- $self->resolve_prefetch($_, $alias, $seen, $order, $collapse),
- $self->related_source($_)->resolve_prefetch(
- $pre->{$_}, "${alias}.$_", $seen, $order, $collapse)
- } keys %$pre;
- return @ret;
- }
- elsif( ref $pre ) {
- $self->throw_exception(
- "don't know how to resolve prefetch reftype ".ref($pre));
- }
- else {
- my $count = ++$seen->{$pre};
- my $as = ($count > 1 ? "${pre}_${count}" : $pre);
- my $rel_info = $self->relationship_info( $pre );
- $self->throw_exception( $self->name . " has no such relationship '$pre'" )
- unless $rel_info;
- my $as_prefix = ($alias =~ /^.*?\.(.+)$/ ? $1.'.' : '');
- my $rel_source = $self->related_source($pre);
-
- if (exists $rel_info->{attrs}{accessor}
- && $rel_info->{attrs}{accessor} eq 'multi') {
- $self->throw_exception(
- "Can't prefetch has_many ${pre} (join cond too complex)")
- unless ref($rel_info->{cond}) eq 'HASH';
- my $dots = @{[$as_prefix =~ m/\./g]} + 1; # +1 to match the ".${as_prefix}"
- if (my ($fail) = grep { @{[$_ =~ m/\./g]} == $dots }
- keys %{$collapse}) {
- my ($last) = ($fail =~ /([^\.]+)$/);
- carp (
- "Prefetching multiple has_many rels ${last} and ${pre} "
- .(length($as_prefix)
- ? "at the same level (${as_prefix}) "
- : "at top level "
- )
- . 'will explode the number of row objects retrievable via ->next or ->all. '
- . 'Use at your own risk.'
- );
- }
- #my @col = map { (/^self\.(.+)$/ ? ("${as_prefix}.$1") : ()); }
- # values %{$rel_info->{cond}};
- $collapse->{".${as_prefix}${pre}"} = [ $rel_source->primary_columns ];
- # action at a distance. prepending the '.' allows simpler code
- # in ResultSet->_collapse_result
- my @key = map { (/^foreign\.(.+)$/ ? ($1) : ()); }
- keys %{$rel_info->{cond}};
- my @ord = (ref($rel_info->{attrs}{order_by}) eq 'ARRAY'
- ? @{$rel_info->{attrs}{order_by}}
- : (defined $rel_info->{attrs}{order_by}
- ? ($rel_info->{attrs}{order_by})
- : ()));
- push(@$order, map { "${as}.$_" } (@key, @ord));
- }
-
- return map { [ "${as}.$_", "${as_prefix}${pre}.$_", ] }
- $rel_source->columns;
- }
- }
# Accepts one or more relationships for the current source and returns an
# array of column names for each of those relationships. Column names are
my $as_prefix = ($alias =~ /^.*?\.(.+)$/ ? $1.'.' : '');
my $rel_source = $self->related_source($pre);
- if (exists $rel_info->{attrs}{accessor}
- && $rel_info->{attrs}{accessor} eq 'multi') {
+ if ($rel_info->{attrs}{accessor} && $rel_info->{attrs}{accessor} eq 'multi') {
$self->throw_exception(
"Can't prefetch has_many ${pre} (join cond too complex)")
unless ref($rel_info->{cond}) eq 'HASH';
keys %{$rel_info->{cond}};
my @ord = (ref($rel_info->{attrs}{order_by}) eq 'ARRAY'
? @{$rel_info->{attrs}{order_by}}
- : (defined $rel_info->{attrs}{order_by}
+
+ : (defined $rel_info->{attrs}{order_by}
? ($rel_info->{attrs}{order_by})
: ()));
push(@$order, map { "${as}.$_" } (@key, @ord));
use Data::Dumper::Concise();
use Sub::Name ();
- # what version of sqlt do we require if deploy() without a ddl_dir is invoked
- # when changing also adjust the corresponding author_require in Makefile.PL
- my $minimum_sqlt_version = '0.11002';
-
-
__PACKAGE__->mk_group_accessors('simple' =>
qw/_connect_info _dbi_connect_info _dbh _sql_maker _sql_maker_opts _conn_pid
_conn_tid transaction_depth _dbh_autocommit _driver_determined savepoints/
my $rsrc = $rs->result_source;
# quick check if we got a sane rs on our hands
- my @pcols = $rsrc->primary_columns;
- unless (@pcols) {
- $self->throw_exception (
- sprintf (
- "You must declare primary key(s) on source '%s' (via set_primary_key) in order to update or delete complex resultsets",
- $rsrc->source_name || $rsrc->from
- )
- );
- }
+ my @pcols = $rsrc->_pri_cols;
my $sel = $rs->_resolved_attrs->{select};
$sel = [ $sel ] unless ref $sel eq 'ARRAY';
my ($rs, $op, $values) = @_;
my $rsrc = $rs->result_source;
- my @pcols = $rsrc->primary_columns;
+ my @pcols = $rsrc->_pri_cols;
my $guard = $self->txn_scope_guard;
&&
(ref $ident eq 'ARRAY' && @$ident > 1) # indicates a join
&&
- scalar $sql_maker->_order_by_chunks ($attrs->{order_by})
+ scalar $self->_parse_order_by ($attrs->{order_by})
) {
# the RNO limit dialect above mangles the SQL such that the join gets lost
# wrap a subquery here
=cut
sub _dbh_last_insert_id {
- # All Storage's need to register their own _dbh_last_insert_id
- # the old SQLite-based method was highly inappropriate
+ my ($self, $dbh, $source, $col) = @_;
- my $self = shift;
- my $class = ref $self;
- $self->throw_exception (<<EOE);
+ my $id = eval { $dbh->last_insert_id (undef, undef, $source->name, $col) };
+
+ return $id if defined $id;
- No _dbh_last_insert_id() method found in $class.
- Since the method of obtaining the autoincrement id of the last insert
- operation varies greatly between different databases, this method must be
- individually implemented for every storage class.
- EOE
+ my $class = ref $self;
+ $self->throw_exception ("No storage specific _dbh_last_insert_id() method implemented in $class, and the generic DBI::last_insert_id() failed");
}
sub last_insert_id {
%{$sqltargs || {}}
};
- $self->throw_exception("Can't create a ddl file without SQL::Translator: " . $self->_sqlt_version_error)
- if !$self->_sqlt_version_ok;
+ unless (DBIx::Class::Optional::Dependencies->req_ok_for ('deploy')) {
+ $self->throw_exception("Can't create a ddl file without " . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy') );
+ }
my $sqlt = SQL::Translator->new( $sqltargs );
return join('', @rows);
}
- $self->throw_exception("Can't deploy without either SQL::Translator or a ddl_dir: " . $self->_sqlt_version_error )
- if !$self->_sqlt_version_ok;
+ unless (DBIx::Class::Optional::Dependencies->req_ok_for ('deploy') ) {
+ $self->throw_exception("Can't deploy without a ddl_dir or " . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy') );
+ }
# sources needs to be a parser arg, but for simplicty allow at top level
# coming in
return;
}
- # SQLT version handling
- {
- my $_sqlt_version_ok; # private
- my $_sqlt_version_error; # private
-
- sub _sqlt_version_ok {
- if (!defined $_sqlt_version_ok) {
- eval "use SQL::Translator $minimum_sqlt_version";
- if ($@) {
- $_sqlt_version_ok = 0;
- $_sqlt_version_error = $@;
- }
- else {
- $_sqlt_version_ok = 1;
- }
- }
- return $_sqlt_version_ok;
- }
-
- sub _sqlt_version_error {
- shift->_sqlt_version_ok unless defined $_sqlt_version_ok;
- return $_sqlt_version_error;
- }
-
- sub _sqlt_minimum_version { $minimum_sqlt_version };
- }
-
=head2 relname_to_table_alias
=over 4
# some databases need this to stop spewing warnings
if (my $dbh = $self->_dbh) {
local $@;
- eval { $dbh->disconnect };
+ eval {
+ %{ $dbh->{CachedKids} } = ();
+ $dbh->disconnect;
+ };
}
$self->_dbh(undef);
use strict;
use warnings;
-use base qw/DBIx::Class::Storage::DBI::AmbiguousGlob DBIx::Class::Storage::DBI/;
+use base qw/DBIx::Class::Storage::DBI/;
use mro 'c3';
use List::Util();
# see if this is an ordered subquery
my $attrs = $_[3];
- if ( scalar $self->sql_maker->_order_by_chunks ($attrs->{order_by}) ) {
+ if ( scalar $self->_parse_order_by ($attrs->{order_by}) ) {
$self->throw_exception(
'An ordered subselect encountered - this is not safe! Please see "Ordered Subselects" in DBIx::Class::Storage::DBI::MSSQL
') unless $attrs->{unsafe_subselect_ok};
# check for empty insert
# INSERT INTO foo DEFAULT VALUES -- does not work with Sybase
- # try to insert explicit 'DEFAULT's instead (except for identity)
+ # try to insert explicit 'DEFAULT's instead (except for identity, timestamp
+ # and computed columns)
if (not %$to_insert) {
for my $col ($source->columns) {
next if $col eq $identity_col;
+
+ my $info = $source->column_info($col);
+
+ next if ref $info->{default_value} eq 'SCALAR'
+ || (exists $info->{data_type} && (not defined $info->{data_type}));
+
+ next if $info->{data_type} && $info->{data_type} =~ /^timestamp\z/i;
+
$to_insert->{$col} = \'DEFAULT';
}
}
sub _update_blobs {
my ($self, $source, $blob_cols, $where) = @_;
- my (@primary_cols) = $source->primary_columns;
-
- $self->throw_exception('Cannot update TEXT/IMAGE column(s) without a primary key')
- unless @primary_cols;
+ my @primary_cols = eval { $source->_pri_cols };
+ $self->throw_exception("Cannot update TEXT/IMAGE column(s): $@")
+ if $@;
# check if we're updating a single row by PK
my $pk_cols_in_where = 0;
my $table = $source->name;
my %row = %$row;
- my (@primary_cols) = $source->primary_columns;
-
- $self->throw_exception('Cannot update TEXT/IMAGE column(s) without a primary key')
- unless @primary_cols;
+ my @primary_cols = eval { $source->_pri_cols} ;
+ $self->throw_exception("Cannot update TEXT/IMAGE column(s): $@")
+ if $@;
$self->throw_exception('Cannot update TEXT/IMAGE column(s) without primary key values')
if ((grep { defined $row{$_} } @primary_cols) != @primary_cols);
=head1 Schema::Loader Support
- There is an experimental branch of L<DBIx::Class::Schema::Loader> that will
- allow you to dump a schema from most (if not all) versions of Sybase.
-
- It is available via subversion from:
-
- http://dev.catalyst.perl.org/repos/bast/branches/DBIx-Class-Schema-Loader/current/
+ As of version C<0.05000>, L<DBIx::Class::Schema::Loader> should work well with
+ most (if not all) versions of Sybase ASE.
=head1 FreeTDS
When inserting IMAGE columns using this method, you'll need to use
L</connect_call_blob_setup> as well.
+ =head1 COMPUTED COLUMNS
+
+ If you have columns such as:
+
+ created_dtm AS getdate()
+
+ represent them in your Result classes as:
+
+ created_dtm => {
+ data_type => undef,
+ default_value => \'getdate()',
+ is_nullable => 0,
+ }
+
+ The C<data_type> must exist and must be C<undef>. Then empty inserts will work
+ on tables with such columns.
+
+ =head1 TIMESTAMP COLUMNS
+
+ C<timestamp> columns in Sybase ASE are not really timestamps, see:
+ L<http://dba.fyicenter.com/Interview-Questions/SYBASE/The_timestamp_datatype_in_Sybase_.html>.
+
+ They should be defined in your Result classes as:
+
+ ts => {
+ data_type => 'timestamp',
+ is_nullable => 0,
+ inflate_datetime => 0,
+ }
+
+ The C<< inflate_datetime => 0 >> is necessary if you use
+ L<DBIx::Class::InflateColumn::DateTime>, and most people do, and still want to
+ be able to read these values.
+
+ The values will come back as hexadecimal.
+
=head1 TODO
=over