use MooseX::Types::Moose qw/ClassName HashRef Object/;
use Scalar::Util 'reftype';
use Hash::Merge;
-use List::Util qw/min max/;
+use List::Util qw/min max reduce/;
+use Try::Tiny;
+use namespace::clean;
use namespace::clean -except => 'meta';
$schema->storage_type( ['::DBI::Replicated', {balancer=>'::Random'}] );
$schema->connection(...);
-Next, you need to add in the Replicants. Basically this is an array of
+Next, you need to add in the Replicants. Basically this is an array of
arrayrefs, where each arrayref is database connect information. Think of these
arguments as what you'd pass to the 'normal' $schema->connect method.
my $RS = $schema->resultset('Source')->search(undef, {force_pool=>'master'});
Now $RS will force everything (both reads and writes) to use whatever was setup
-as the master storage. 'master' is hardcoded to always point to the Master,
+as the master storage. 'master' is hardcoded to always point to the Master,
but you can also use any Replicant name. Please see:
L<DBIx::Class::Storage::DBI::Replicated::Pool> and the replicants attribute for more.
=head2 pool_type
-Contains the classname which will instantiate the L</pool> object. Defaults
+Contains the classname which will instantiate the L</pool> object. Defaults
to: L<DBIx::Class::Storage::DBI::Replicated::Pool>.
=cut
=head2 balancer
-Is a <DBIx::Class::Storage::DBI::Replicated::Balancer> or derived class. This
+Is a L<DBIx::Class::Storage::DBI::Replicated::Balancer> or derived class. This
is a class that takes a pool (L<DBIx::Class::Storage::DBI::Replicated::Pool>)
=cut
=head1 ATTRIBUTES IMPLEMENTING THE DBIx::Class::Storage::DBI INTERFACE
-The following methods are delegated all the methods required for the
+The following attributes are delegated all the methods required for the
L<DBIx::Class::Storage::DBI> interface.
=head2 read_handler
select
select_single
columns_info_for
- _dbh_columns_info_for
+ _dbh_columns_info_for
_select
/],
);
backup
is_datatype_numeric
+ _supports_insert_returning
_count_select
- _subq_count_select
_subq_update_delete
svp_rollback
svp_begin
_fix_bind_params
_default_dbi_connect_attributes
_dbi_connect_info
+ _dbic_connect_attributes
auto_savepoint
_sqlt_version_ok
_query_end
_dbh_commit
_execute_array
_placeholders_supported
- _verify_pid
savepoints
_sqlt_minimum_version
_sql_maker_opts
_do_query
_dbh_sth
_dbh_execute
+ _prefetch_insert_auto_nextvals
+ _server_info_hash
/],
);
+# Forbid these inherited storage methods on the replicated wrapper: each is
+# replaced with a stub that croaks, so a stray call fails loudly instead of
+# silently doing the wrong thing. (The names suggest per-process/per-thread
+# $dbh bookkeeping that only applies to a single real connection — TODO
+# confirm against DBIx::Class::Storage::DBI.)
+# NOTE(review): relies on croak (Carp) and blessed (Scalar::Util) being in
+# scope; only reftype is visible in the imports above — verify elsewhere.
+my @unimplemented = qw(
+  _arm_global_destructor
+  _preserve_foreign_dbh
+  _verify_pid
+  _verify_tid
+);
+
+# Install a croaking stub for each forbidden name at class-build time.
+for my $method (@unimplemented) {
+  __PACKAGE__->meta->add_method($method, sub {
+    croak "$method must not be called on ".(blessed shift).' objects';
+  });
+}
+
# Connect-time attribute options associated with the master connection —
# presumably captured while connect_info is processed (the Hash::Merge logic
# nearby merges such option hashes into replicant connect info); confirm
# against the full file. rw so the connect path can populate it; defaults
# to an empty hashref.
has _master_connect_info_opts =>
  (is => 'rw', isa => HashRef, default => sub { {} });
my $wantarray = wantarray;
- my $merge = Hash::Merge->new;
+ my $merge = Hash::Merge->new('LEFT_PRECEDENT');
my %opts;
for my $arg (@$info) {
=cut
sub BUILDARGS {
- my ($class, $schema, $storage_type_args, @args) = @_;
+ my ($class, $schema, $storage_type_args, @args) = @_;
return {
schema=>$schema,
$self->throw_exception('too many hashrefs in connect_info')
if @hashes > 2;
- my $merge = Hash::Merge->new;
+ my $merge = Hash::Merge->new('LEFT_PRECEDENT');
my %opts = %{ $merge->merge(reverse @hashes) };
# delete them
my $reliably = sub {
my $name = shift @_;
$schema->resultset('User')->create({name=>$name});
- my $user_rs = $schema->resultset('User')->find({name=>$name});
+ my $user_rs = $schema->resultset('User')->find({name=>$name});
return $user_rs;
};
my @result;
my $want_array = wantarray;
- eval {
+ try {
if($want_array) {
@result = $coderef->(@args);
} elsif(defined $want_array) {
} else {
$coderef->(@args);
}
+ } catch {
+ $self->throw_exception("coderef returned an error: $_");
+ } finally {
+ ##Reset to the original state
+ $self->read_handler($current);
};
- ##Reset to the original state
- $self->read_handler($current);
-
- ##Exception testing has to come last, otherwise you might leave the
- ##read_handler set to master.
-
- if($@) {
- $self->throw_exception("coderef returned an error: $@");
- } else {
- return $want_array ? @result : $result[0];
- }
+ return $want_array ? @result : $result[0];
}
=head2 set_reliable_storage
my $self = shift;
return max map $_->lag_behind_master, $self->replicants;
-}
+}
=head2 is_replicating
sub _driver_determined {
my $self = shift;
-
+
if (@_) {
$_->_driver_determined(@_) for $self->all_storages;
}
# Fan out _init to every underlying storage returned by all_storages, so
# each one performs its own driver-specific initialization.
sub _init {
  my $self = shift;
-
+
  $_->_init for $self->all_storages;
}
# Fan out connection-action processing to every underlying storage.
sub _run_connection_actions {
  my $self = shift;
-
+
  $_->_run_connection_actions for $self->all_storages;
}
sub _do_connection_actions {
my $self = shift;
-
+
if (@_) {
$_->_do_connection_actions(@_) for $self->all_storages;
}
return min map $_->_ping, $self->all_storages;
}
+# Turn a dotted version string into a single numerically comparable value:
+# the first part becomes the integer portion and every following part is
+# zero-padded to five digits after one decimal point, e.g.
+# '5.1.61' -> 5.0000100061. Used below to pick the lowest server version.
+# NOTE(review): assumes the string starts with a digit — a leading non-digit
+# (e.g. 'v5.8') would make split yield an empty first field and shift the
+# major version into a padded slot; confirm dbms_version never does this.
+my $numify_ver = sub {
+  my $ver = shift;
+  my @numparts = split /\D+/, $ver;
+  my $format = '%d.' . (join '', ('%05d') x (@numparts - 1));
+
+  return sprintf $format, @numparts;
+};
+
+# Return the _server_info hash of whichever underlying storage reports the
+# lowest (numified) dbms_version — presumably so version-gated behavior is
+# safe on every node in the pool. Computed once, then cached via the
+# _server_info_hash accessor.
+sub _server_info {
+  my $self = shift;
+
+  if (not $self->_server_info_hash) {
+    # Pair each storage's info hash with its numified version, reduce to
+    # the pair with the smallest version, then unwrap the info hash.
+    my $min_version_info = (
+      reduce { $a->[0] < $b->[0] ? $a : $b }
+      map [ $numify_ver->($_->{dbms_version}), $_ ],
+      map $_->_server_info, $self->all_storages
+    )->[1];
+
+    $self->_server_info_hash($min_version_info); # on master
+  }
+
+  return $self->_server_info_hash;
+}
+
+# dbms_version string from the cached minimum-version info chosen by
+# _server_info, i.e. the oldest server among all underlying storages.
+sub _get_server_version {
+  my $self = shift;
+
+  return $self->_server_info->{dbms_version};
+}
+
=head1 GOTCHAS
Due to the fact that replicants can lag behind a master, you must take care to