Revision history for DBIx::Class
+ - Perl 5.8.1 is now the minimum supported version
+ - Subqueries no longer marked experimental
        - might_have/has_one now warn if the calling class's column they are
          applied to has is_nullable set to true.
+ - Fixed regression in deploy() with a {sources} table limit applied
+ (RT#52812)
- Cookbook POD fix for add_drop_table instead of add_drop_tables
- Views without a view_definition will throw an exception when
parsed by SQL::Translator::Parser::DBIx::Class
+ - Schema POD improvement for dclone
+ - Fix regression in context sensitiveness of deployment_statements
+ - Fix regression resulting in overcomplicated query on
+ search_related from prefetching resultsets
+ - Better isolation of RNO-limited queries from the rest of a
+ prefetching resultset
+ - New MSSQL specific resultset attribute to allow hacky ordered
+ subquery support
+ - Fix nasty schema/dbhandle leak due to SQL::Translator
0.08115 2009-12-10 09:02:00 (CST)
- Real limit/offset support for MSSQL server (via Row_Number)
use warnings;
use POSIX ();
-use 5.006001; # delete this line if you want to send patches for earlier.
+use 5.008001;
# ****** DO NOT ADD OPTIONAL DEPENDENCIES. EVER. --mst ******
name 'DBIx-Class';
-perl_version '5.006001';
+perl_version '5.008001';
all_from 'lib/DBIx/Class.pm';
requires 'Scalar::Util' => '0';
requires 'Storable' => '0';
-# Perl 5.8.0 doesn't have utf8::is_utf8()
-requires 'Encode' => '0' if ($] <= 5.008000);
-
# Dependencies (keep in alphabetical order)
requires 'Carp::Clan' => '6.0';
-requires 'Class::Accessor::Grouped' => '0.09000';
+requires 'Class::Accessor::Grouped' => '0.09002';
requires 'Class::C3::Componentised' => '1.0005';
requires 'Class::Inspector' => '1.24';
requires 'Data::Page' => '2.00';
requires 'DBD::SQLite' => '1.25';
-requires 'DBI' => '1.605';
+requires 'DBI' => '1.609';
requires 'JSON::Any' => '1.18';
requires 'MRO::Compat' => '0.09';
requires 'Module::Find' => '0.06';
requires 'Data::Dumper::Concise' => '1.000';
my %replication_requires = (
- 'Moose', => '0.87',
- 'MooseX::AttributeHelpers' => '0.21',
+ 'Moose', => '0.90',
'MooseX::Types', => '0.16',
'namespace::clean' => '0.11',
'Hash::Merge', => '0.11',
use warnings;
use MRO::Compat;
+use mro 'c3';
use vars qw($VERSION);
-use base qw/Class::C3::Componentised Class::Accessor::Grouped/;
+use base qw/DBIx::Class::Componentised Class::Accessor::Grouped/;
use DBIx::Class::StartupCheck;
sub mk_classdata {
# Output all artists names
# $artist here is a DBIx::Class::Row, which has accessors
# for all its columns. Rows are also subclasses of your Result class.
- foreach $artist (@artists) {
+  foreach my $artist (@all_artists) {
print $artist->name, "\n";
}
use strict;
use warnings;
-###
-# Keep this class for backwards compatibility
-###
-
use base 'Class::C3::Componentised';
+use Carp::Clan qw/^DBIx::Class|^Class::C3::Componentised/;
+use mro 'c3';
+
+# this warns of subtle bugs introduced by UTF8Columns' hacky handling of store_column
+sub inject_base {
+ my $class = shift;
+ my $target = shift;
+
+ my @present_components = (@{mro::get_linear_isa ($target)||[]});
+
+ no strict 'refs';
+ for my $comp (reverse @_) {
+ if (
+ $comp->isa ('DBIx::Class::UTF8Columns')
+ and
+ my @broken = grep { $_ ne 'DBIx::Class::Row' and defined ${"${_}::"}{store_column} } (@present_components)
+ ) {
+ carp "Incorrect loading order of $comp by ${target} will affect other components overriding store_column ("
+ . join (', ', @broken)
+ .'). Refer to the documentation of DBIx::Class::UTF8Columns for more info';
+ }
+ else {
+ unshift @present_components, $comp;
+ }
+ }
+
+ $class->next::method($target, @_);
+}
1;
are in any way unsure about the use of the attributes above (C< join
>, C< select >, C< as > and C< group_by >).
-=head2 Subqueries (EXPERIMENTAL)
+=head2 Subqueries
You can write subqueries relatively easily in DBIC.
WHERE artist_id = me.artist_id
)
-=head3 EXPERIMENTAL
-
-Please note that subqueries are considered an experimental feature.
-
=head2 Predefined searches
You can write your own L<DBIx::Class::ResultSet> class by inheriting from it
1;
-To use your resultset, first tell DBIx::Class to create an instance of it
-for you, in your My::DBIC::Schema::CD class:
+If you're using L<DBIx::Class::Schema/load_namespaces>, simply place the file
+into the C<ResultSet> directory next to your C<Result> directory, and it will
+be automatically loaded.
+
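+For example, with a schema class named C<My::DBIC::Schema> that calls
+C<load_namespaces> with the default namespace settings, the class above would
+be named C<My::DBIC::Schema::ResultSet::CD> and live in
+F<lib/My/DBIC/Schema/ResultSet/CD.pm>.
+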
+If however you are still using L<DBIx::Class::Schema/load_classes>, first tell
+DBIx::Class to create an instance of the ResultSet class for you, in your
+My::DBIC::Schema::CD class:
# class definition as normal
use base 'DBIx::Class::Core';
my $key = $1;
my $column_info = $class->column_info($key);
if ( $column_info->{is_nullable} ) {
- carp(qq'"might_have/has_one" must not be on columns with is_nullable set to true ($class/$key) ');
+ carp(qq'"might_have/has_one" must not be on columns with is_nullable set to true ($class/$key). This might indicate an incorrect use of those relationship helpers instead of belongs_to.');
}
}
}
my $cond = $rsrc->schema->storage->_strip_cond_qualifiers ($self->{cond});
my $needs_group_by_subq = $self->_has_resolved_attr (qw/collapse group_by -join/);
- my $needs_subq = (not defined $cond) || $self->_has_resolved_attr(qw/row offset/);
+ my $needs_subq = $needs_group_by_subq || (not defined $cond) || $self->_has_resolved_attr(qw/row offset/);
if ($needs_group_by_subq or $needs_subq) {
return \%unaliased;
}
-=head2 as_query (EXPERIMENTAL)
+=head2 as_query
=over 4
This is generally used as the RHS for a subquery.
-B<NOTE>: This feature is still experimental.
-
=cut
sub as_query {
my $attrs = $self->_chain_relationship($rel);
my $join_count = $attrs->{seen_join}{$rel};
- my $alias = ($join_count > 1 ? join('_', $rel, $join_count) : $rel);
+
+ my $alias = $self->result_source->storage
+ ->relname_to_table_alias($rel, $join_count);
# since this is search_related, and we already slid the select window inwards
# (the select/as attrs were deleted in the beginning), we need to flip all
# read the comment on top of the actual function to see what this does
$attrs->{from} = $rsrc->schema->storage->_straight_join_to_node ($attrs->{from}, $alias);
+
#XXX - temp fix for result_class bug. There likely is a more elegant fix -groditi
delete @{$attrs}{qw(result_class alias)};
||
$self->_has_resolved_attr (@force_subq_attrs)
) {
+ # Nuke the prefetch (if any) before the new $rs attrs
+ # are resolved (prefetch is useless - we are wrapping
+ # a subquery anyway).
+ my $rs_copy = $self->search;
+ $rs_copy->{attrs}{join} = $self->_merge_attr (
+ $rs_copy->{attrs}{join},
+ delete $rs_copy->{attrs}{prefetch},
+ );
+
$from = [{
-source_handle => $source->handle,
-alias => $attrs->{alias},
- $attrs->{alias} => $self->as_query,
+ $attrs->{alias} => $rs_copy->as_query,
}];
delete @{$attrs}{@force_subq_attrs, 'where'};
$seen->{-relation_chain_depth} = 0;
return $new;
}
-=head2 as_query (EXPERIMENTAL)
+=head2 as_query
=over 4
This is generally used as the RHS for a subquery.
-B<NOTE>: This feature is still experimental.
-
=cut
sub as_query { return shift->_resultset->as_query(@_) }
$force_left ||= lc($rel_info->{attrs}{join_type}||'') eq 'left';
# the actual seen value will be incremented by the recursion
- my $as = ($seen->{$rel} ? join ('_', $rel, $seen->{$rel} + 1) : $rel);
+ my $as = $self->storage->relname_to_table_alias(
+ $rel, ($seen->{$rel} && $seen->{$rel} + 1)
+ );
push @ret, (
$self->_resolve_join($rel, $alias, $seen, [@$jpath], $force_left),
}
else {
my $count = ++$seen->{$join};
- my $as = ($count > 1 ? "${join}_${count}" : $join);
+ my $as = $self->storage->relname_to_table_alias(
+ $join, ($count > 1 && $count)
+ );
my $rel_info = $self->relationship_info($join)
or $self->throw_exception("No such relationship ${join}");
=cut
sub handle {
- return new DBIx::Class::ResultSourceHandle({
+ return DBIx::Class::ResultSourceHandle->new({
schema => $_[0]->schema,
source_moniker => $_[0]->source_name
});
$self->{_orig_ident} ||= $self->ident_condition;
my $old_value = $self->get_column($column);
- $self->store_column($column, $new_value);
+ $new_value = $self->store_column($column, $new_value);
my $dirty;
if (!$self->in_storage) { # no point tracking dirtyness on uninserted data
sub _RowNumberOver {
my ($self, $sql, $order, $rows, $offset ) = @_;
+ # get the select to make the final amount of columns equal the original one
+ my ($select) = $sql =~ /^ \s* SELECT \s+ (.+?) \s+ FROM/ix
+ or croak "Unrecognizable SELECT: $sql";
+
# get the order_by only (or make up an order if none exists)
my $order_by = $self->_order_by(
(delete $order->{order_by}) || $self->_rno_default_order
);
- # whatever is left
+ # whatever is left of the order_by
my $group_having = $self->_order_by($order);
- $sql = sprintf (<<'EOS', $order_by, $sql, $group_having, $offset + 1, $offset + $rows, );
+ my $qalias = $self->_quote ($self->{_dbic_rs_attrs}{alias});
+
+ $sql = sprintf (<<EOS, $offset + 1, $offset + $rows, );
-SELECT * FROM (
- SELECT orig_query.*, ROW_NUMBER() OVER(%s ) AS rno__row__index FROM (%s%s) orig_query
-) rno_subq WHERE rno__row__index BETWEEN %d AND %d
+SELECT $select FROM (
+ SELECT $qalias.*, ROW_NUMBER() OVER($order_by ) AS rno__row__index FROM (
+ ${sql}${group_having}
+ ) $qalias
+) $qalias WHERE rno__row__index BETWEEN %d AND %d
EOS
use DBIx::Class::Exception;
use Carp::Clan qw/^DBIx::Class/;
-use Scalar::Util qw/weaken/;
+use Scalar::Util ();
use File::Spec;
use Sub::Name ();
use Module::Find();
$self->storage->deployment_statements($self, @_);
}
-=head2 create_ddl_dir (EXPERIMENTAL)
+=head2 create_ddl_dir
=over 4
=head2 dclone
-Recommeneded way of dcloning objects. This is needed to properly maintain
-references to the schema object (which itself is B<not> cloned.)
+=over 4
+
+=item Arguments: $object
+
+=item Return Value: dcloned $object
+
+=back
+
+Recommended way of dcloning L<DBIx::Class::Row> and L<DBIx::Class::ResultSet>
+objects so their references to the schema object
+(which itself is B<not> cloned) are properly maintained.
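+
+For example, with a row object retrieved earlier:
+
+  my $copy = $schema->dclone($cd);
+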
=cut
$self->_register_source(@_);
}
+=head2 unregister_source
+
+=over 4
+
+=item Arguments: $moniker
+
+=back
+
+Removes the L<DBIx::Class::ResultSource> from the schema for the given moniker.
+
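+For example (the moniker is purely illustrative):
+
+  $schema->unregister_source('CD');
+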
+=cut
+
+sub unregister_source {
+ my $self = shift;
+
+ $self->_unregister_source(@_);
+}
+
=head2 register_extra_source
=over 4
$source = $source->new({ %$source, source_name => $moniker });
$source->schema($self);
- weaken($source->{schema}) if ref($self);
+ Scalar::Util::weaken($source->{schema}) if ref($self);
my $rs_class = $source->result_class;
=cut
sub connect_info {
- my ($self, $info_arg) = @_;
+ my ($self, $info) = @_;
- return $self->_connect_info if !$info_arg;
+ return $self->_connect_info if !$info;
- my @args = @$info_arg; # take a shallow copy for further mutilation
- $self->_connect_info([@args]); # copy for _connect_info
+  $self->_connect_info($info); # store the raw values for the connect_info accessor
+
+ $info = $self->_normalize_connect_info($info)
+ if ref $info eq 'ARRAY';
+
+ for my $storage_opt (keys %{ $info->{storage_options} }) {
+ my $value = $info->{storage_options}{$storage_opt};
+
+ $self->$storage_opt($value);
+ }
+
+ # Kill sql_maker/_sql_maker_opts, so we get a fresh one with only
+ # the new set of options
+ $self->_sql_maker(undef);
+ $self->_sql_maker_opts({});
+
+ for my $sql_maker_opt (keys %{ $info->{sql_maker_options} }) {
+ my $value = $info->{sql_maker_options}{$sql_maker_opt};
+
+ $self->_sql_maker_opts->{$sql_maker_opt} = $value;
+ }
+
+ my %attrs = (
+ %{ $self->_default_dbi_connect_attributes || {} },
+ %{ $info->{attributes} || {} },
+ );
+
+ my @args = @{ $info->{arguments} };
+ $self->_dbi_connect_info([@args,
+ %attrs && !(ref $args[0] eq 'CODE') ? \%attrs : ()]);
+
+ return $self->_connect_info;
+}
+
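+# Break the traditional connect_info arrayref up into the hashref consumed by
+# connect_info() above:
+#   arguments         - the plain arguments handed on to DBI->connect
+#   storage_options   - options applied to the storage object itself
+#   sql_maker_options - limit_dialect / quote_char / name_sep
+#   attributes        - any remaining DBI connection attributes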
+sub _normalize_connect_info {
+ my ($self, $info_arg) = @_;
+ my %info;
+
+ my @args = @$info_arg; # take a shallow copy for further mutilation
# combine/pre-parse arguments depending on invocation style
@args = @args[0,1,2];
}
- # Kill sql_maker/_sql_maker_opts, so we get a fresh one with only
- # the new set of options
- $self->_sql_maker(undef);
- $self->_sql_maker_opts({});
+ $info{arguments} = \@args;
- if(keys %attrs) {
- for my $storage_opt (@storage_options, 'cursor_class') { # @storage_options is declared at the top of the module
- if(my $value = delete $attrs{$storage_opt}) {
- $self->$storage_opt($value);
- }
- }
- for my $sql_maker_opt (qw/limit_dialect quote_char name_sep/) {
- if(my $opt_val = delete $attrs{$sql_maker_opt}) {
- $self->_sql_maker_opts->{$sql_maker_opt} = $opt_val;
- }
- }
- }
+ my @storage_opts = grep exists $attrs{$_},
+ @storage_options, 'cursor_class';
- if (ref $args[0] eq 'CODE') {
- # _connect() never looks past $args[0] in this case
- %attrs = ()
- } else {
- %attrs = (
- %{ $self->_default_dbi_connect_attributes || {} },
- %attrs,
- );
- }
+ @{ $info{storage_options} }{@storage_opts} =
+ delete @attrs{@storage_opts} if @storage_opts;
+
+ my @sql_maker_opts = grep exists $attrs{$_},
+ qw/limit_dialect quote_char name_sep/;
+
+ @{ $info{sql_maker_options} }{@sql_maker_opts} =
+ delete @attrs{@sql_maker_opts} if @sql_maker_opts;
+
+ $info{attributes} = \%attrs if %attrs;
- $self->_dbi_connect_info([@args, keys %attrs ? \%attrs : ()]);
- $self->_connect_info;
+ return \%info;
}
sub _default_dbi_connect_attributes {
return $updated_cols;
}
-## Still not quite perfect, and EXPERIMENTAL
## Currently it is assumed that all values passed will be "normal", i.e. not
## scalar refs, or at least, all the same type as the first set, the statement is
## only prepped once.
my @limit;
- # see if we need to tear the prefetch apart (either limited has_many or grouped prefetch)
- # otherwise delegate the limiting to the storage, unless software limit was requested
+ # see if we need to tear the prefetch apart otherwise delegate the limiting to the
+ # storage, unless software limit was requested
if (
+ #limited has_many
( $attrs->{rows} && keys %{$attrs->{collapse}} )
||
+ # limited prefetch with RNO subqueries
+ (
+ $attrs->{rows}
+ &&
+ $sql_maker->limit_dialect eq 'RowNumberOver'
+ &&
+ $attrs->{_prefetch_select}
+ &&
+ @{$attrs->{_prefetch_select}}
+ )
+ ||
+ # grouped prefetch
( $attrs->{group_by}
&&
@{$attrs->{group_by}}
@{$attrs->{_prefetch_select}}
)
) {
-
($ident, $select, $where, $attrs)
= $self->_adjust_select_args_for_complex_prefetch ($ident, $select, $where, $attrs);
}
}
-=head2 create_ddl_dir (EXPERIMENTAL)
+=head2 create_ddl_dir
=over 4
{ ignore_constraint_names => 0, # ... other options }
-Note that this feature is currently EXPERIMENTAL and may not work correctly
-across all databases, or fully handle complex relationships.
-
-WARNING: Please check all SQL files created, before applying them.
+WARNING: You are strongly advised to check all SQL files created, before applying
+them.
=cut
data => $schema,
);
- my $ret = $tr->translate
- or $self->throw_exception( 'Unable to produce deployment statements: ' . $tr->error);
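+  # SQL::Translator's translate() is context sensitive (a list of statements in
+  # list context, a single string in scalar context) - call it in whichever
+  # context our own caller used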
+ my @ret;
+ my $wa = wantarray;
+ if ($wa) {
+ @ret = $tr->translate;
+ }
+ else {
+ $ret[0] = $tr->translate;
+ }
- return $ret;
+ $self->throw_exception( 'Unable to produce deployment statements: ' . $tr->error)
+ unless (@ret && defined $ret[0]);
+
+ return $wa ? @ret : $ret[0];
}
sub deploy {
sub _sqlt_minimum_version { $minimum_sqlt_version };
}
+=head2 relname_to_table_alias
+
+=over 4
+
+=item Arguments: $relname, $join_count
+
+=back
+
+L<DBIx::Class> uses L<DBIx::Class::Relationship> names as table aliases in
+queries.
+
+This hook is to allow specific L<DBIx::Class::Storage> drivers to change the
+way these aliases are named.
+
+The default behavior is C<< "${relname}_${join_count}" if $join_count > 1 >>,
+otherwise C<"$relname">.
+
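+For example, the second time a relationship named C<cds> is joined within the
+same query it receives the alias C<cds_2>, while the first join keeps the
+plain C<cds> alias.
+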
+=cut
+
+sub relname_to_table_alias {
+ my ($self, $relname, $join_count) = @_;
+
+ my $alias = ($join_count && $join_count > 1 ?
+ join('_', $relname, $join_count) : $relname);
+
+ return $alias;
+}
+
sub DESTROY {
my $self = shift;
=head3 truncation bug
There is a bug with MSSQL ADO providers where data gets truncated based on the
-size on the bind sizes in the first prepare:
+bind sizes used in the first prepare call:
L<https://rt.cpan.org/Ticket/Display.html?id=52048>
=head1 NAME
-DBIx::Class::Storage::DBI::AmbiguousGlob - Storage component for RDBMS supporting multicolumn in clauses
+DBIx::Class::Storage::DBI::AmbiguousGlob - Storage component for RDBMS choking on count(*)
=head1 DESCRIPTION
sub last_insert_id { shift->_identity }
#
-# MSSQL is retarded wrt ordered subselects. One needs to add a TOP 100%
-# to *all* subqueries, do it here.
+# MSSQL is retarded wrt ordered subselects. One needs to add a TOP
+# to *all* subqueries, but one also can't use TOP 100 PERCENT
+# http://sqladvice.com/forums/permalink/18496/22931/ShowThread.aspx#22931
#
sub _select_args_to_query {
my $self = shift;
# see if this is an ordered subquery
my $attrs = $_[3];
if ( scalar $self->sql_maker->_order_by_chunks ($attrs->{order_by}) ) {
- $sql =~ s/^ \s* SELECT \s/SELECT TOP 100 PERCENT /xi;
+ $self->throw_exception(
+ 'An ordered subselect encountered - this is not safe! Please see "Ordered Subselects" in DBIx::Class::Storage::DBI::MSSQL
+ ') unless $attrs->{unsafe_subselect_ok};
+ my $max = 2 ** 32;
+ $sql =~ s/^ \s* SELECT \s/SELECT TOP $max /xi;
}
return wantarray
C<db_ddladmin> privilege, which is normally not included in the standard
write-permissions.
+=head2 Ordered Subselects
+
+If you attempted the following query (among many others) in Microsoft SQL
+Server
+
+ $rs->search ({}, {
+ prefetch => 'relation',
+ rows => 2,
+ offset => 3,
+ });
+
+You may be surprised to receive an exception. The reason for this is a quirk
+in the MSSQL engine itself, and sadly doesn't have a sensible workaround due
+to the way DBIC is built. DBIC can do truly wonderful things with the aid of
+subselects, and does so automatically when necessary. The list of situations
+when a subselect is necessary is long and still changes often, so it can not
+be exhaustively enumerated here. The general rule of thumb is a joined
+L<has_many|DBIx::Class::Relationship/has_many> relationship with limit/group
+applied to the left part of the join.
+
+In its "pursuit of standards" Microsft SQL Server goes to great lengths to
+forbid the use of ordered subselects. This breaks a very useful group of
+searches like "Give me things number 4 to 6 (ordered by name), and prefetch
+all their relations, no matter how many". While there is a hack which fools
+the syntax checker, the optimizer may B<still elect to break the subselect>.
+Testing has determined that while such breakage does occur (the test suite
+contains an explicit test which demonstrates the problem), it is relatively
+rare. The benefits of ordered subselects are, on the other hand, too great to be
+outright disabled for MSSQL.
+
+Thus the compromise between usability and perfection is the MSSQL-specific
+L<resultset attribute|DBIx::Class::ResultSet/ATTRIBUTES> C<unsafe_subselect_ok>.
+It is deliberately not possible to set this on the Storage level, as the user
+should inspect (and preferably regression-test) the return of every such
+ResultSet individually. The example above would work if written like:
+
+ $rs->search ({}, {
+ unsafe_subselect_ok => 1,
+ prefetch => 'relation',
+ rows => 2,
+ offset => 3,
+ });
+
+If it is possible to rewrite the search() in a way that will avoid the need
+for this flag - you are urged to do so. If DBIC internals insist that an
+ordered subselect is necessary for an operation, and you believe there is a
+different/better way to get the same result - please file a bug report.
+
=head1 AUTHOR
See L<DBIx::Class/CONTRIBUTORS>.
=head1 DESCRIPTION
-This class implements autoincrements for Oracle.
+This class implements base Oracle support. The subclass
+L<DBIx::Class::Storage::DBI::Oracle::WhereJoins> is for C<(+)> joins in Oracle
+versions before 9.
=head1 METHODS
$self->_get_dbh->do("ROLLBACK TO SAVEPOINT $name")
}
+=head2 relname_to_table_alias
+
+L<DBIx::Class> uses L<DBIx::Class::Relationship> names as table aliases in
+queries.
+
+Unfortunately, Oracle doesn't support identifiers over 30 chars in length, so
+the L<DBIx::Class::Relationship> name is shortened and appended with half of an
+MD5 hash.
+
+See L<DBIx::Class::Storage/"relname_to_table_alias">.
+
+=cut
+
+sub relname_to_table_alias {
+ my $self = shift;
+ my ($relname, $join_count) = @_;
+
+ my $alias = $self->next::method(@_);
+
+ return $alias if length($alias) <= 30;
+
+ # get a base64 md5 of the alias with join_count
+ require Digest::MD5;
+ my $ctx = Digest::MD5->new;
+ $ctx->add($alias);
+ my $md5 = $ctx->b64digest;
+
+  # remove base64 padding characters, just in case
+ $md5 =~ s/=*\z//;
+
+  # strip vowels from the relname, truncate it, then append a slice of the md5
+ (my $devoweled = $relname) =~ s/[aeiou]//g;
+ my $shortened = substr($devoweled, 0, 18);
+
+ my $new_alias =
+ $shortened . '_' . substr($md5, 0, 30 - length($shortened) - 1);
+
+ return $new_alias;
+}
+
=head1 AUTHOR
See L<DBIx::Class/CONTRIBUTORS>.
## use, so we explicitly test for these.
my %replication_required = (
- 'Moose' => '0.87',
- 'MooseX::AttributeHelpers' => '0.21',
+ 'Moose' => '0.90',
'MooseX::Types' => '0.16',
'namespace::clean' => '0.11',
'Hash::Merge' => '0.11'
also define your arguments, such as which balancer you want and any arguments
that the Pool object should get.
+ my $schema = Schema::Class->clone;
$schema->storage_type( ['::DBI::Replicated', {balancer=>'::Random'}] );
+ $schema->connection(...);
Next, you need to add in the Replicants. Basically this is an array of
arrayrefs, where each arrayref is database connect information. Think of these
Replicated Storage has additional requirements not currently part of L<DBIx::Class>
- Moose => '0.87',
- MooseX::AttributeHelpers => '0.20',
+ Moose => '0.90',
MooseX::Types => '0.16',
namespace::clean => '0.11',
Hash::Merge => '0.11'
you use (or upgrade to) the latest L<Catalyst::Model::DBIC::Schema>, which makes
this job even easier.
-First, you need to connect your L<DBIx::Class::Schema>. Let's assume you have
-such a schema called, "MyApp::Schema".
-
- use MyApp::Schema;
- my $schema = MyApp::Schema->connect($dsn, $user, $pass);
-
-Next, you need to set the storage_type.
-
- $schema->storage_type(
- ::DBI::Replicated' => {
- balancer_type => '::Random',
- balancer_args => {
- auto_validate_every => 5,
- master_read_weight => 1
- },
- pool_args => {
- maximum_lag =>2,
- },
- }
- );
+First, you need to get a C<$schema> object and set the storage_type:
+
+ my $schema = MyApp::Schema->clone;
+ $schema->storage_type([
+ '::DBI::Replicated' => {
+ balancer_type => '::Random',
+ balancer_args => {
+ auto_validate_every => 5,
+ master_read_weight => 1
+ },
+ pool_args => {
+ maximum_lag =>2,
+ },
+ }
+ ]);
+
+Then, you need to connect your L<DBIx::Class::Schema>.
+
+ $schema->connection($dsn, $user, $pass);
Let's break down the settings. The method L<DBIx::Class::Schema/storage_type>
takes one mandatory parameter, a scalar value, and an option second value which
After you've configured the replicated storage, you need to add the connection
information for the replicants:
- $schema->storage->connect_replicants(
- [$dsn1, $user, $pass, \%opts],
- [$dsn2, $user, $pass, \%opts],
- [$dsn3, $user, $pass, \%opts],
- );
+ $schema->storage->connect_replicants(
+ [$dsn1, $user, $pass, \%opts],
+ [$dsn2, $user, $pass, \%opts],
+ [$dsn3, $user, $pass, \%opts],
+ );
These replicants should be configured as slaves to the master using the
instructions for MySQL native replication, or if you are just learning, you
package DBIx::Class::Storage::DBI::Replicated::Pool;
use Moose;
-use MooseX::AttributeHelpers;
use DBIx::Class::Storage::DBI::Replicated::Replicant;
use List::Util 'sum';
use Scalar::Util 'reftype';
has 'replicants' => (
is=>'rw',
- metaclass => 'Collection::Hash',
+ traits => ['Hash'],
isa=>HashRef['Object'],
default=>sub {{}},
- provides => {
- 'set' => 'set_replicant',
- 'get' => 'get_replicant',
- 'empty' => 'has_replicants',
- 'count' => 'num_replicants',
- 'delete' => 'delete_replicant',
- 'values' => 'all_replicant_storages',
+ handles => {
+ 'set_replicant' => 'set',
+ 'get_replicant' => 'get',
+ 'has_replicants' => 'is_empty',
+ 'num_replicants' => 'count',
+ 'delete_replicant' => 'delete',
+ 'all_replicant_storages' => 'values',
},
);
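+# the native Hash trait's 'is_empty' is the inverse of the old
+# MooseX::AttributeHelpers 'empty' provider, so flip it to keep the
+# has_replicants() semantics (true when replicants are present)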
+around has_replicants => sub {
+ my ($orig, $self) = @_;
+ return !$self->$orig;
+};
+
has next_unknown_replicant_id => (
is => 'rw',
- metaclass => 'Counter',
+ traits => ['Counter'],
isa => Int,
default => 1,
- provides => {
- inc => 'inc_unknown_replicant_id'
+ handles => {
+ 'inc_unknown_replicant_id' => 'inc',
},
);
my $self = shift;
my $dbh = $self->_get_dbh;
+ return if ref $self ne __PACKAGE__;
+
if (not $self->_typeless_placeholders_supported) {
+ require
+ DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server::NoBindVars;
bless $self,
'DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server::NoBindVars';
$self->_rebless;
use strict;
use warnings;
use base qw/DBIx::Class/;
-
-BEGIN {
-
- # Perl 5.8.0 doesn't have utf8::is_utf8()
- # Yes, 5.8.0 support for Unicode is suboptimal, but things like RHEL3 ship with it.
- if ($] <= 5.008000) {
- require Encode;
- } else {
- require utf8;
- }
-}
+use utf8;
__PACKAGE__->mk_classdata( '_utf8_columns' );
This module allows you to get columns data that have utf8 (Unicode) flag.
+=head2 Warning
+
+Note that this module overrides L<DBIx::Class::Row/store_column> in a way
+that may prevent other components overriding the same method from working
+correctly. This component must be the last one before L<DBIx::Class::Row>
+(which is provided by L<DBIx::Class::Core>). DBIx::Class will detect such
+incorrect component order and issue an appropriate warning, advising which
+components need to be loaded differently.
+
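+For example, if a component of your own also overrides C<store_column> (the
+component name below is purely illustrative), list UTF8Columns after it so
+that it ends up closest to L<DBIx::Class::Row> in the inheritance chain:
+
+  __PACKAGE__->load_components(qw( +My::App::TrimColumns UTF8Columns ));
+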
=head1 SEE ALSO
L<Template::Stash::ForceUTF8>, L<DBIx::Class::UUIDColumns>.
foreach my $col (@_) {
$self->throw_exception("column $col doesn't exist")
unless $self->has_column($col);
- }
+ }
return $self->_utf8_columns({ map { $_ => 1 } @_ });
} else {
return $self->_utf8_columns;
my ( $self, $column ) = @_;
my $value = $self->next::method($column);
- my $cols = $self->_utf8_columns;
- if ( $cols and defined $value and $cols->{$column} ) {
+ utf8::decode($value) if (
+ defined $value and $self->_is_utf8_column($column) and ! utf8::is_utf8($value)
+ );
- if ($] <= 5.008000) {
- Encode::_utf8_on($value) unless Encode::is_utf8($value);
- } else {
- utf8::decode($value) unless utf8::is_utf8($value);
- }
- }
-
- $value;
+ return $value;
}
=head2 get_columns
my $self = shift;
my %data = $self->next::method(@_);
- foreach my $col (grep { defined $data{$_} } keys %{ $self->_utf8_columns || {} }) {
-
- if ($] <= 5.008000) {
- Encode::_utf8_on($data{$col}) unless Encode::is_utf8($data{$col});
- } else {
- utf8::decode($data{$col}) unless utf8::is_utf8($data{$col});
- }
+ foreach my $col (keys %data) {
+ utf8::decode($data{$col}) if (
+ exists $data{$col} and defined $data{$col} and $self->_is_utf8_column($col) and ! utf8::is_utf8($data{$col})
+ );
}
- %data;
+ return %data;
}
=head2 store_column
sub store_column {
my ( $self, $column, $value ) = @_;
- my $cols = $self->_utf8_columns;
- if ( $cols and defined $value and $cols->{$column} ) {
+  # the dirtiness comparison must happen on the non-encoded value
+ my $copy;
- if ($] <= 5.008000) {
- Encode::_utf8_off($value) if Encode::is_utf8($value);
- } else {
- utf8::encode($value) if utf8::is_utf8($value);
- }
+ if ( defined $value and $self->_is_utf8_column($column) and utf8::is_utf8($value) ) {
+ $copy = $value;
+ utf8::encode($value);
}
$self->next::method( $column, $value );
+
+ return $copy || $value;
}
-=head1 AUTHOR
+# override this if you want to force everything to be encoded/decoded
+sub _is_utf8_column {
+  my ($self, $col) = @_;
+  return ($self->utf8_columns || {})->{$col};
+}
-Daisuke Murase <typester@cpan.org>
+=head1 AUTHORS
-=head1 COPYRIGHT
+See L<DBIx::Class/CONTRIBUTORS>.
-This program is free software; you can redistribute
-it and/or modify it under the same terms as Perl itself.
+=head1 LICENSE
-The full text of the license can be found in the
-LICENSE file included with this module.
+You may distribute this code under the same terms as Perl itself.
=cut
1;
-
use Exporter;
use SQL::Translator::Utils qw(debug normalize_name);
use Carp::Clan qw/^SQL::Translator|^DBIx::Class/;
+use Scalar::Util ();
use base qw(Exporter);
# We're working with DBIx::Class Schemas, not data streams.
# -------------------------------------------------------------------
sub parse {
+ # this is a hack to prevent schema leaks due to a retarded SQLT implementation
+  # DO NOT REMOVE (until SQLT2 is out, all of this will be rewritten anyway)
+ Scalar::Util::weaken ($_[1]);
+
my ($tr, $data) = @_;
my $args = $tr->parser_args;
my $dbicschema = $args->{'DBIx::Class::Schema'} || $args->{"DBIx::Schema"} ||$data;
}
- my(@table_monikers, @view_monikers);
+ my(%table_monikers, %view_monikers);
for my $moniker (@monikers){
my $source = $dbicschema->source($moniker);
if ( $source->isa('DBIx::Class::ResultSource::Table') ) {
- push(@table_monikers, $moniker);
+ $table_monikers{$moniker}++;
} elsif( $source->isa('DBIx::Class::ResultSource::View') ){
next if $source->is_virtual;
- push(@view_monikers, $moniker);
+ $view_monikers{$moniker}++;
}
}
my %tables;
- foreach my $moniker (sort @table_monikers)
+ foreach my $moniker (sort keys %table_monikers)
{
my $source = $dbicschema->source($moniker);
my $table_name = $source->name;
my $f = $table->add_field(%colinfo)
|| $dbicschema->throw_exception ($table->error);
}
- $table->primary_key($source->primary_columns);
my @primary = $source->primary_columns;
+
+ $table->primary_key(@primary) if @primary;
+
my %unique_constraints = $source->unique_constraints;
foreach my $uniq (sort keys %unique_constraints) {
if (!$source->_compare_relationship_keys($unique_constraints{$uniq}, \@primary)) {
my %created_FK_rels;
# global add_fk_index set in parser_args
- my $add_fk_index = (exists $args->{add_fk_index} && ($args->{add_fk_index} == 0)) ? 0 : 1;
+ my $add_fk_index = (exists $args->{add_fk_index} && ! $args->{add_fk_index}) ? 0 : 1;
foreach my $rel (sort @rels)
{
+
my $rel_info = $source->relationship_info($rel);
# Ignore any rel cond that isn't a straight hash
next unless ref $rel_info->{cond} eq 'HASH';
- my $othertable = $source->related_source($rel);
- next if $othertable->isa('DBIx::Class::ResultSource::View'); # can't define constraints referencing a view
- my $rel_table = $othertable->name;
+ my $relsource = $source->related_source($rel);
+
+ # related sources might be excluded via a {sources} filter or might be views
+ next unless exists $table_monikers{$relsource->source_name};
+
+ my $rel_table = $relsource->name;
# FIXME - this isn't the right way to do it, but sqlt does not
# support quoting properly to be signaled about this
# Force the order of @cond to match the order of ->add_columns
my $idx;
- my %other_columns_idx = map {'foreign.'.$_ => ++$idx } $othertable->columns;
+ my %other_columns_idx = map {'foreign.'.$_ => ++$idx } $relsource->columns;
my @cond = sort { $other_columns_idx{$a} cmp $other_columns_idx{$b} } keys(%{$rel_info->{cond}});
# Get the key information, mapping off the foreign/self markers
my $is_deferrable = $rel_info->{attrs}{is_deferrable};
- # do not consider deferrable constraints and self-references
- # for dependency calculations
+ # calculate dependencies: do not consider deferrable constraints and
+ # self-references for dependency calculations
if (! $is_deferrable and $rel_table ne $table_name) {
$tables{$table_name}{foreign_table_deps}{$rel_table}++;
}
+
$table->add_constraint(
type => 'foreign_key',
name => join('_', $table_name, 'fk', @keys),
}
my %views;
- foreach my $moniker (sort @view_monikers)
+ foreach my $moniker (sort keys %view_monikers)
{
my $source = $dbicschema->source($moniker);
my $view_name = $source->name;
my $schema = MyApp::Schema->connect;
my $trans = SQL::Translator->new (
parser => 'SQL::Translator::Parser::DBIx::Class',
- parser_args => { package => $schema },
+ parser_args => {
+ package => $schema,
+ # to explicitly specify which ResultSources are to be parsed
+ sources => [qw/
+ Artist
+ CD
+ /],
+ },
producer => 'SQLite',
) or die SQL::Translator->error;
my $out = $trans->translate() or die $trans->error;
BEGIN {
plan skip_all => 'Your perl does not support ithreads'
- if !$Config{useithreads} || $] < 5.008;
+ if !$Config{useithreads};
}
use threads;
BEGIN {
plan skip_all => 'Your perl does not support ithreads'
- if !$Config{useithreads} || $] < 5.008;
+ if !$Config{useithreads};
}
use threads;
eval { require Test::Memory::Cycle; require Devel::Cycle };
if ($@ or Devel::Cycle->VERSION < 1.10) {
plan skip_all => "leak test needs Test::Memory::Cycle and Devel::Cycle >= 1.10";
- } else {
- plan tests => 1;
- }
+ };
}
use DBICTest;
use DBICTest::Schema;
+use Scalar::Util ();
import Test::Memory::Cycle;
-my $s = DBICTest::Schema->clone;
+my $weak;
-memory_cycle_ok($s, 'No cycles in schema');
+{
+ my $s = $weak->{schema} = DBICTest->init_schema;
+ memory_cycle_ok($s, 'No cycles in schema');
+
+ my $rs = $weak->{resultset} = $s->resultset ('Artist');
+ memory_cycle_ok($rs, 'No cycles in resultset');
+
+ my $rsrc = $weak->{resultsource} = $rs->result_source;
+ memory_cycle_ok($rsrc, 'No cycles in resultsource');
+
+ my $row = $weak->{row} = $rs->first;
+ memory_cycle_ok($row, 'No cycles in row');
+
+ Scalar::Util::weaken ($_) for values %$weak;
+ memory_cycle_ok($weak, 'No cycles in weak object collection');
+}
+
+for (keys %$weak) {
+ ok (! $weak->{$_}, "No $_ leaks");
+}
+
+done_testing;
{
ok(my $artist = $schema->resultset('Artist')->create({name => 'store_column test'}));
is($artist->name, 'X store_column test'); # used to be 'X X store...'
-
+
# call store_column even though the column doesn't seem to be dirty
- ok($artist->update({name => 'X store_column test'}));
+ $artist->name($artist->name);
is($artist->name, 'X X store_column test');
+ ok($artist->is_column_changed('name'), 'changed column marked as dirty');
+
$artist->delete;
}
is ($rs->count, 6, 'CDs created successfully');
$rs = $rs->search ({}, {
- select => [ {year => 'year'} ], as => ['y'], distinct => 1, order_by => 'year',
+ select => [ \ 'YEAR(year)' ], as => ['y'], distinct => 1,
});
is_deeply (
- [ $rs->get_column ('y')->all ],
+ [ sort ($rs->get_column ('y')->all) ],
[ sort keys %$cds_per_year ],
'Years group successfully',
);
$rs->create ({ artist => 1, year => '0-1-1', title => 'Jesus Rap' });
is_deeply (
- [ $rs->get_column ('y')->all ],
+ [ sort $rs->get_column ('y')->all ],
[ 0, sort keys %$cds_per_year ],
'Zero-year groups successfully',
);
' as well as following sequences: \'pkid1_seq\', \'pkid2_seq\' and \'nonpkid_seq\''
unless ($dsn && $user && $pass);
-plan tests => 36;
-
DBICTest::Schema->load_classes('ArtistFQN');
my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
$dbh->do("CREATE SEQUENCE nonpkid_seq START WITH 20 MAXVALUE 999999 MINVALUE 0");
$dbh->do("CREATE TABLE artist (artistid NUMBER(12), name VARCHAR(255), rank NUMBER(38), charfield VARCHAR2(10))");
$dbh->do("CREATE TABLE sequence_test (pkid1 NUMBER(12), pkid2 NUMBER(12), nonpkid NUMBER(12), name VARCHAR(255))");
-$dbh->do("CREATE TABLE cd (cdid NUMBER(12), artist NUMBER(12), title VARCHAR(255), year VARCHAR(4))");
+$dbh->do("CREATE TABLE cd (cdid NUMBER(12), artist NUMBER(12), title VARCHAR(255), year VARCHAR(4), genreid NUMBER(12), single_track NUMBER(12))");
$dbh->do("CREATE TABLE track (trackid NUMBER(12), cd NUMBER(12), position NUMBER(12), title VARCHAR(255), last_updated_on DATE, last_updated_at DATE, small_dt DATE)");
$dbh->do("ALTER TABLE artist ADD (CONSTRAINT artist_pk PRIMARY KEY (artistid))");
is($new->artistid, 1, "Oracle Auto-PK worked");
my $cd = $schema->resultset('CD')->create({ artist => 1, title => 'EP C', year => '2003' });
-is($new->artistid, 1, "Oracle Auto-PK worked - using scalar ref as table name");
+is($cd->cdid, 1, "Oracle Auto-PK worked - using scalar ref as table name");
# test again with fully-qualified table name
$new = $schema->resultset('ArtistFQN')->create( { name => 'bar' } );
is( $new->artistid, 2, "Oracle Auto-PK worked with fully-qualified tablename" );
+# test rel names over the 30 char limit
+my $query = $schema->resultset('Artist')->search({
+ artistid => 1
+}, {
+ prefetch => 'cds_very_very_very_long_relationship_name'
+});
+
+lives_and {
+ is $query->first->cds_very_very_very_long_relationship_name->first->cdid, 1
+} 'query with rel name over 30 chars survived and worked';
+
+# rel name over 30 char limit with user condition
+# This requires walking the SQLA data structure.
+{
+ local $TODO = 'user condition on rel longer than 30 chars';
+
+ $query = $schema->resultset('Artist')->search({
+ 'cds_very_very_very_long_relationship_name.title' => 'EP C'
+ }, {
+ prefetch => 'cds_very_very_very_long_relationship_name'
+ });
+
+ lives_and {
+ is $query->first->cds_very_very_very_long_relationship_name->first->cdid, 1
+ } 'query with rel name over 30 chars and user condition survived and worked';
+}
+
# test join with row count ambiguity
my $track = $schema->resultset('Track')->create({ trackid => 1, cd => 1,
}
}
+done_testing;
+
# clean up our mess
END {
if($schema && ($dbh = $schema->storage->dbh)) {
$schema->storage->dbh_do (sub {
my ($storage, $dbh) = @_;
- eval { $dbh->do("DROP TABLE Owners") };
- eval { $dbh->do("DROP TABLE Books") };
+ eval { $dbh->do("DROP TABLE owners") };
+ eval { $dbh->do("DROP TABLE books") };
$dbh->do(<<'SQL');
-CREATE TABLE Books (
+CREATE TABLE books (
id INT IDENTITY (1, 1) NOT NULL,
source VARCHAR(100),
owner INT,
price INT NULL
)
-CREATE TABLE Owners (
+CREATE TABLE owners (
id INT IDENTITY (1, 1) NOT NULL,
name VARCHAR(100),
)
[qw/1 wiggle/],
[qw/2 woggle/],
[qw/3 boggle/],
- [qw/4 fREW/],
- [qw/5 fRIOUX/],
- [qw/6 fROOH/],
- [qw/7 fRUE/],
+ [qw/4 fRIOUX/],
+ [qw/5 fRUE/],
+ [qw/6 fREW/],
+ [qw/7 fROOH/],
[qw/8 fISMBoC/],
[qw/9 station/],
[qw/10 mirror/],
]);
}, 'populate with PKs supplied ok' );
+
lives_ok (sub {
# start a new connection, make sure rebless works
# test an insert with a supplied identity, followed by one without
my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
- for (1..2) {
+ for (2, 1) {
my $id = $_ * 20 ;
$schema->resultset ('Owners')->create ({ id => $id, name => "troglodoogle $id" });
$schema->resultset ('Owners')->create ({ name => "troglodoogle " . ($id + 1) });
]);
}, 'populate without PKs supplied ok' );
-# make sure ordered subselects work
+# plain ordered subqueries throw
+throws_ok (sub {
+ $schema->resultset('Owners')->search ({}, { order_by => 'name' })->as_query
+}, qr/ordered subselect encountered/, 'Ordered Subselect detection throws ok');
+
+# make sure ordered subselects *somewhat* work
{
+ my $owners = $schema->resultset ('Owners')->search ({}, { order_by => 'name', offset => 2, rows => 3, unsafe_subselect_ok => 1 });
+
+ my $al = $owners->current_source_alias;
+ my $sealed_owners = $owners->result_source->resultset->search (
+ {},
+ {
+ alias => $al,
+ from => [{
+ -alias => $al,
+ -source_handle => $owners->result_source->handle,
+ $al => $owners->as_query,
+ }],
+ },
+ );
+
+ is_deeply (
+ [ map { $_->name } ($sealed_owners->all) ],
+ [ map { $_->name } ($owners->all) ],
+ 'Sort preserved from within a subquery',
+ );
+}
+
+TODO: {
+ local $TODO = "This porbably will never work, but it isn't critical either afaik";
+
my $book_owner_ids = $schema->resultset ('BooksInLibrary')
- ->search ({}, { join => 'owner', distinct => 1, order_by => { -desc => 'owner'} })
+ ->search ({}, { join => 'owner', distinct => 1, order_by => 'owner.name', unsafe_subselect_ok => 1 })
->get_column ('owner');
- my $owners = $schema->resultset ('Owners')->search ({
+ my $book_owners = $schema->resultset ('Owners')->search ({
id => { -in => $book_owner_ids->as_query }
});
- is ($owners->count, 8, 'Correct amount of book owners');
- is ($owners->all, 8, 'Correct amount of book owner objects');
+ is_deeply (
+ [ map { $_->id } ($book_owners->all) ],
+ [ $book_owner_ids->all ],
+ 'Sort is preserved across IN subqueries',
+ );
}
+# This is known not to work - thus the negative test
+{
+ my $owners = $schema->resultset ('Owners')->search ({}, { order_by => 'name', offset => 2, rows => 3, unsafe_subselect_ok => 1 });
+ my $corelated_owners = $owners->result_source->resultset->search (
+ {
+ id => { -in => $owners->get_column('id')->as_query },
+ },
+ {
+ order_by => 'name' #reorder because of what is shown above
+ },
+ );
+
+ cmp_ok (
+ join ("\x00", map { $_->name } ($corelated_owners->all) ),
+ 'ne',
+ join ("\x00", map { $_->name } ($owners->all) ),
+    'Sadly, sort is not preserved from within a correlated subquery',
+ );
+
+ cmp_ok (
+ join ("\x00", sort map { $_->name } ($corelated_owners->all) ),
+ 'ne',
+ join ("\x00", sort map { $_->name } ($owners->all) ),
+ 'Which in fact gives a completely wrong dataset',
+ );
+}
+
+
+# make sure right-join-side single-prefetch ordering limit works
+{
+ my $rs = $schema->resultset ('BooksInLibrary')->search (
+ {
+ 'owner.name' => { '!=', 'woggle' },
+ },
+ {
+ prefetch => 'owner',
+ order_by => 'owner.name',
+ }
+ );
+ # this is the order in which they should come from the above query
+ my @owner_names = qw/boggle fISMBoC fREW fRIOUX fROOH fRUE wiggle wiggle/;
+
+ is ($rs->all, 8, 'Correct amount of objects from right-sorted joined resultset');
+ is_deeply (
+ [map { $_->owner->name } ($rs->all) ],
+ \@owner_names,
+ 'Rows were properly ordered'
+ );
+
+ my $limited_rs = $rs->search ({}, {rows => 7, offset => 2, unsafe_subselect_ok => 1});
+ is ($limited_rs->count, 6, 'Correct count of limited right-sorted joined resultset');
+ is ($limited_rs->count_rs->next, 6, 'Correct count_rs of limited right-sorted joined resultset');
+
+ my $queries;
+ $schema->storage->debugcb(sub { $queries++; });
+ $schema->storage->debug(1);
+
+ is_deeply (
+ [map { $_->owner->name } ($limited_rs->all) ],
+ [@owner_names[2 .. 7]],
+ 'Limited rows were properly ordered'
+ );
+ is ($queries, 1, 'Only one query with prefetch');
+
+ $schema->storage->debugcb(undef);
+ $schema->storage->debug(0);
+
+
+ is_deeply (
+ [map { $_->name } ($limited_rs->search_related ('owner')->all) ],
+ [@owner_names[2 .. 7]],
+ 'Rows are still properly ordered after search_related'
+ );
+}
+
+
#
# try a prefetch on tables with identically named columns
#
prefetch => 'books',
order_by => { -asc => \['name + ?', [ test => 'xxx' ]] }, # test bindvar propagation
rows => 3, # 8 results total
+ unsafe_subselect_ok => 1,
},
);
prefetch => 'owner',
rows => 2, # 3 results total
order_by => { -desc => 'owner' },
+ unsafe_subselect_ok => 1,
},
);
is ($books->page(2)->count_rs->next, 1, 'Prefetched grouped search returns correct count_rs');
}
-# make sure right-join-side ordering limit works
-{
- my $rs = $schema->resultset ('BooksInLibrary')->search (
- {
- 'owner.name' => [qw/wiggle woggle/],
- },
- {
- join => 'owner',
- order_by => { -desc => 'owner.name' },
- }
- );
-
- is ($rs->all, 3, 'Correct amount of objects from right-sorted joined resultset');
- my $limited_rs = $rs->search ({}, {rows => 3, offset => 1});
- is ($limited_rs->count, 2, 'Correct count of limited right-sorted joined resultset');
- is ($limited_rs->count_rs->next, 2, 'Correct count_rs of limited right-sorted joined resultset');
- is ($limited_rs->all, 2, 'Correct amount of objects from limited right-sorted joined resultset');
-
- is_deeply (
- [map { $_->name } ($limited_rs->search_related ('owner')->all) ],
- [qw/woggle wiggle/], # there is 1 woggle library book and 2 wiggle books, the limit gets us one of each
- 'Rows were properly ordered'
- );
-}
-
done_testing;
# clean up our mess
END {
if (my $dbh = eval { $schema->storage->_dbh }) {
eval { $dbh->do("DROP TABLE $_") }
- for qw/artist money_test Books Owners/;
+ for qw/artist money_test books owners/;
}
}
# vim:sw=2 sts=2
$schema = DBICTest::Schema->clone;
- if ($storage_idx != 0) { # autodetect
- $schema->storage_type("::$storage_type");
- }
-
$schema->connection($dsn, $user, $pass);
- $schema->storage->ensure_connected;
+ if ($storage_idx != 0) { # autodetect
+ no warnings 'redefine';
+ local *DBIx::Class::Storage::DBI::_typeless_placeholders_supported =
+ sub { 0 };
+# $schema->storage_type("::$storage_type");
+ $schema->storage->ensure_connected;
+ }
+ else {
+ $schema->storage->ensure_connected;
+ }
if ($storage_idx == 0 && ref($schema->storage) =~ /NoBindVars\z/) {
my $tb = Test::More->builder;
use strict;
-use warnings;
+use warnings;
use Test::More;
+use Test::Warn;
use lib qw(t/lib);
use DBICTest;
+use utf8;
-my $schema = DBICTest->init_schema();
-
-if ($] <= 5.008000) {
+warning_like (sub {
- eval 'use Encode; 1' or plan skip_all => 'Need Encode run this test';
+ package A::Comp;
+ use base 'DBIx::Class';
+ sub store_column { shift->next::method (@_) };
+ 1;
-} else {
+ package A::Test;
+ use base 'DBIx::Class::Core';
+ __PACKAGE__->load_components(qw(UTF8Columns +A::Comp));
+ 1;
+}, qr/Incorrect loading order of DBIx::Class::UTF8Columns/ );
- eval 'use utf8; 1' or plan skip_all => 'Need utf8 run this test';
-}
-plan tests => 6;
+my $schema = DBICTest->init_schema();
DBICTest::Schema::CD->load_components('UTF8Columns');
DBICTest::Schema::CD->utf8_columns('title');
my $utf8_char = 'uniuni';
-ok( _is_utf8( $cd->title ), 'got title with utf8 flag' );
-ok(! _is_utf8( $cd->year ), 'got year without utf8 flag' );
+ok( utf8::is_utf8( $cd->title ), 'got title with utf8 flag' );
+ok(! utf8::is_utf8( $cd->year ), 'got year without utf8 flag' );
-_force_utf8($utf8_char);
+utf8::decode($utf8_char);
$cd->title($utf8_char);
-ok(! _is_utf8( $cd->{_column_data}{title} ), 'store utf8-less chars' );
+ok(! utf8::is_utf8( $cd->{_column_data}{title} ), 'store utf8-less chars' );
my $v_utf8 = "\x{219}";
TODO: {
local $TODO = 'There is currently no way to propagate aliases to inflate_result()';
$cd = $schema->resultset('CD')->find ({ title => $v_utf8 }, { select => 'title', as => 'name' });
- ok (_is_utf8( $cd->get_column ('name') ), 'utf8 flag propagates via as');
+ ok (utf8::is_utf8( $cd->get_column ('name') ), 'utf8 flag propagates via as');
}
-
-sub _force_utf8 {
- if ($] <= 5.008000) {
- Encode::_utf8_on ($_[0]);
- }
- else {
- utf8::decode ($_[0]);
- }
-}
-
-sub _is_utf8 {
- if ($] <= 5.008000) {
- return Encode::is_utf8 (shift);
- }
- else {
- return utf8::is_utf8 (shift);
- }
-}
+done_testing;
my $schema = DBICTest->init_schema (no_deploy => 1);
+
+# Check deployment statements ctx sensitivity
+{
+ my $not_first_table_creation_re = qr/CREATE TABLE fourkeys_to_twokeys/;
+
+
+ my $statements = $schema->deployment_statements;
+ like (
+ $statements,
+ $not_first_table_creation_re,
+ 'All create statements returned in 1 string in scalar ctx'
+ );
+
+ my @statements = $schema->deployment_statements;
+ cmp_ok (scalar @statements, '>', 1, 'Multiple statement lines in array ctx');
+
+ my $i = 0;
+ while ($i <= $#statements) {
+ last if $statements[$i] =~ $not_first_table_creation_re;
+ $i++;
+ }
+
+ ok (
+ ($i > 0) && ($i <= $#statements),
+ "Creation statement was found somewherere within array ($i)"
+ );
+}
+
+
+
# replace the sqlt callback with a custom version adding an index
$schema->source('Track')->sqlt_deploy_callback(sub {
my ($self, $sqlt_table) = @_;
my $schema = DBICTest->init_schema();
-my $rs = $schema->resultset("CD")->search({}, { order_by => 'cdid' });
+my $rs = $schema->resultset("CD");
+
+cmp_ok (
+ $rs->count,
+ '!=',
+ $rs->search ({}, {columns => ['year'], distinct => 1})->count,
+ 'At least one year is the same in rs'
+);
my $rs_title = $rs->get_column('title');
my $rs_year = $rs->get_column('year');
is($rs_year->single, 1999, "single okay");
}, qr/Query returned more than one row/, 'single warned');
+
+# test distinct propagation
+is_deeply (
+ [$rs->search ({}, { distinct => 1 })->get_column ('year')->all],
+ [$rs_year->func('distinct')],
+ 'distinct => 1 is passed through properly',
+);
+
# test +select/+as for single column
my $psrs = $schema->resultset('CD')->search({},
{
-#!/usr/bin/perl
use strict;
use warnings;
+
use Test::More;
use Test::Exception;
use lib qw(t/lib);
use DBICTest;
+use DBICTest::Schema;
+use Scalar::Util ();
BEGIN {
require DBIx::Class::Storage::DBI;
if not DBIx::Class::Storage::DBI->_sqlt_version_ok;
}
+# Test for SQLT-related leaks
+{
+ my $s = DBICTest::Schema->clone;
+ create_schema ({ schema => $s });
+ Scalar::Util::weaken ($s);
+
+ ok (!$s, 'Schema not leaked');
+}
+
+
my $schema = DBICTest->init_schema();
# Dummy was yanked out by the sqlt hook test
# CustomSql tests the horrific/deprecated ->name(\$sql) hack
;
{
- my $sqlt_schema = create_schema({ schema => $schema, args => { parser_args => { } } });
+ my $sqlt_schema = create_schema({ schema => $schema, args => { parser_args => { } } });
- foreach my $source (@sources) {
- my $table = get_table($sqlt_schema, $schema, $source);
+ foreach my $source (@sources) {
+ my $table = get_table($sqlt_schema, $schema, $source);
- my $fk_count = scalar(grep { $_->type eq 'FOREIGN KEY' } $table->get_constraints);
- my @indices = $table->get_indices;
- my $index_count = scalar(@indices);
+ my $fk_count = scalar(grep { $_->type eq 'FOREIGN KEY' } $table->get_constraints);
+ my @indices = $table->get_indices;
+ my $index_count = scalar(@indices);
$index_count++ if ($source eq 'TwoKeys'); # TwoKeys has the index turned off on the rel def
- is($index_count, $fk_count, "correct number of indices for $source with no args");
- }
+ is($index_count, $fk_count, "correct number of indices for $source with no args");
+ }
}
{
- my $sqlt_schema = create_schema({ schema => $schema, args => { parser_args => { add_fk_index => 1 } } });
+ my $sqlt_schema = create_schema({ schema => $schema, args => { parser_args => { add_fk_index => 1 } } });
- foreach my $source (@sources) {
- my $table = get_table($sqlt_schema, $schema, $source);
+ foreach my $source (@sources) {
+ my $table = get_table($sqlt_schema, $schema, $source);
- my $fk_count = scalar(grep { $_->type eq 'FOREIGN KEY' } $table->get_constraints);
- my @indices = $table->get_indices;
- my $index_count = scalar(@indices);
+ my $fk_count = scalar(grep { $_->type eq 'FOREIGN KEY' } $table->get_constraints);
+ my @indices = $table->get_indices;
+ my $index_count = scalar(@indices);
$index_count++ if ($source eq 'TwoKeys'); # TwoKeys has the index turned off on the rel def
- is($index_count, $fk_count, "correct number of indices for $source with add_fk_index => 1");
- }
+ is($index_count, $fk_count, "correct number of indices for $source with add_fk_index => 1");
+ }
}
{
- my $sqlt_schema = create_schema({ schema => $schema, args => { parser_args => { add_fk_index => 0 } } });
+ my $sqlt_schema = create_schema({ schema => $schema, args => { parser_args => { add_fk_index => 0 } } });
- foreach my $source (@sources) {
- my $table = get_table($sqlt_schema, $schema, $source);
+ foreach my $source (@sources) {
+ my $table = get_table($sqlt_schema, $schema, $source);
- my @indices = $table->get_indices;
- my $index_count = scalar(@indices);
- is($index_count, 0, "correct number of indices for $source with add_fk_index => 0");
- }
+ my @indices = $table->get_indices;
+ my $index_count = scalar(@indices);
+ is($index_count, 0, "correct number of indices for $source with add_fk_index => 0");
+ }
}
{
'parser detects views with a view_definition';
}
+lives_ok (sub {
+ my $sqlt_schema = create_schema ({
+ schema => $schema,
+ args => {
+ parser_args => {
+ sources => ['CD']
+ },
+ },
+ });
+
+ is_deeply (
+ [$sqlt_schema->get_tables ],
+ ['cd'],
+    'sources limiting with relationships works',
+ );
+
+});
+
done_testing;
sub create_schema {
- my $args = shift;
+ my $args = shift;
- my $schema = $args->{schema};
- my $additional_sqltargs = $args->{args} || {};
+ my $schema = $args->{schema};
+ my $additional_sqltargs = $args->{args} || {};
- my $sqltargs = {
- add_drop_table => 1,
- ignore_constraint_names => 1,
- ignore_index_names => 1,
- %{$additional_sqltargs}
- };
+ my $sqltargs = {
+ add_drop_table => 1,
+ ignore_constraint_names => 1,
+ ignore_index_names => 1,
+ %{$additional_sqltargs}
+ };
- my $sqlt = SQL::Translator->new( $sqltargs );
+ my $sqlt = SQL::Translator->new( $sqltargs );
- $sqlt->parser('SQL::Translator::Parser::DBIx::Class');
- return $sqlt->translate({ data => $schema }) || die $sqlt->error;
+ $sqlt->parser('SQL::Translator::Parser::DBIx::Class');
+ return $sqlt->translate({ data => $schema }) || die $sqlt->error;
}
sub get_table {
my $args = shift || {};
if ($ENV{"DBICTEST_SQLT_DEPLOY"}) {
- $schema->deploy($args);
+ $schema->deploy($args);
} else {
open IN, "t/lib/sqlite.sql";
my $sql;
__PACKAGE__->has_many(
cds_unordered => 'DBICTest::Schema::CD'
);
+__PACKAGE__->has_many(
+ cds_very_very_very_long_relationship_name => 'DBICTest::Schema::CD'
+);
__PACKAGE__->has_many( twokeys => 'DBICTest::Schema::TwoKeys' );
__PACKAGE__->has_many( onekeys => 'DBICTest::Schema::OneKey' );
--- /dev/null
+use strict;
+use warnings;
+use Test::More;
+use Test::Exception;
+
+use lib qw(t/lib);
+use DBICTest;
+
+lives_ok (sub {
+ DBICTest->init_schema()->resultset('Artist')->find({artistid => 1 })->update({name => 'anon test'});
+}, 'Schema object not lost in chaining');
+
+done_testing;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+
+use lib qw(t/lib);
+use DBIC::SqlMakerTest;
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+my $rs = $schema->resultset('CD')->search (
+ { 'tracks.id' => { '!=', 666 }},
+ { join => 'artist', prefetch => 'tracks', rows => 2 }
+);
+
+my $rel_rs = $rs->search_related ('tags', { 'tags.tag' => { '!=', undef }}, { distinct => 1});
+
+is_same_sql_bind (
+ $rel_rs->as_query,
+ '(
+ SELECT tags.tagid, tags.cd, tags.tag
+ FROM (
+ SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
+ FROM cd me
+ JOIN artist artist ON artist.artistid = me.artist
+ LEFT JOIN track tracks ON tracks.cd = me.cdid
+ WHERE ( tracks.id != ? )
+ LIMIT 2
+ ) me
+ JOIN artist artist ON artist.artistid = me.artist
+ LEFT JOIN track tracks ON tracks.cd = me.cdid
+ LEFT JOIN tags tags ON tags.cd = me.cdid
+ WHERE ( tags.tag IS NOT NULL )
+ GROUP BY tags.tagid, tags.cd, tags.tag
+ )',
+
+ [ [ 'tracks.id' => 666 ] ],
+ 'Prefetch spec successfully stripped on search_related'
+);
+
+done_testing;