BEGIN {
use Carp::Clan qw/^DBIx::Class/;
-
+
## Modules required for Replication support not required for general DBIC
## use, so we explicitly test for these.
-
+
my %replication_required = (
- Moose => '0.77',
- MooseX::AttributeHelpers => '0.12',
- MooseX::Types => '0.10',
- namespace::clean => '0.11',
+ 'Moose' => '0.87',
+ 'MooseX::AttributeHelpers' => '0.21',
+ 'MooseX::Types' => '0.16',
+ 'namespace::clean' => '0.11',
+ 'Hash::Merge' => '0.11'
);
-
+
my @didnt_load;
-
+
for my $module (keys %replication_required) {
- eval "use $module $replication_required{$module}";
- push @didnt_load, "$module $replication_required{$module}"
- if $@;
+ eval "use $module $replication_required{$module}";
+ push @didnt_load, "$module $replication_required{$module}"
+ if $@;
}
-
+
croak("@{[ join ', ', @didnt_load ]} are missing and are required for Replication")
- if @didnt_load;
+ if @didnt_load;
}
use Moose;
use DBIx::Class::Storage::DBI;
use DBIx::Class::Storage::DBI::Replicated::Pool;
use DBIx::Class::Storage::DBI::Replicated::Balancer;
-use DBIx::Class::Storage::DBI::Replicated::Types 'BalancerClassNamePart';
+use DBIx::Class::Storage::DBI::Replicated::Types qw/BalancerClassNamePart DBICSchema DBICStorageDBI/;
use MooseX::Types::Moose qw/ClassName HashRef Object/;
use Scalar::Util 'reftype';
-use Carp::Clan qw/^DBIx::Class/;
+use Hash::Merge 'merge';
use namespace::clean -except => 'meta';
storage type, add some replicated (readonly) databases, and perform reporting
tasks.
- ## Change storage_type in your schema class
+You should set the storage_type attribute to a replicated type. You should
+also define your arguments, such as which balancer you want and any arguments
+that the Pool object should get.
+
$schema->storage_type( ['::DBI::Replicated', {balancer=>'::Random'}] );
-
- ## Add some slaves. Basically this is an array of arrayrefs, where each
- ## arrayref is database connect information
-
+
+Next, you need to add in the Replicants. Basically this is an array of
+arrayrefs, where each arrayref is database connect information. Think of these
+arguments as what you'd pass to the 'normal' $schema->connect method.
+
$schema->storage->connect_replicants(
[$dsn1, $user, $pass, \%opts],
[$dsn2, $user, $pass, \%opts],
[$dsn3, $user, $pass, \%opts],
);
-
- ## Now, just use the $schema as normal
+
+Now, just use the $schema as you normally would. All reads will automatically
+be delegated to the replicants, while writes go to the master.
+
$schema->resultset('Source')->search({name=>'etc'});
-
- ## You can force a given query to use a particular storage using the search
- ### attribute 'force_pool'. For example:
-
+
+You can force a given query to use a particular storage using the search
+attribute 'force_pool'. For example:
+
my $RS = $schema->resultset('Source')->search(undef, {force_pool=>'master'});
-
- ## Now $RS will force everything (both reads and writes) to use whatever was
- ## setup as the master storage. 'master' is hardcoded to always point to the
- ## Master, but you can also use any Replicant name. Please see:
- ## L<DBIx::Class::Storage::Replicated::Pool> and the replicants attribute for
- ## More. Also see transactions and L</execute_reliably> for alternative ways
- ## to force read traffic to the master.
-
+
+Now $RS will force everything (both reads and writes) to use whatever was set
+up as the master storage. 'master' is hardcoded to always point to the Master,
+but you can also use any Replicant name. Please see:
+L<DBIx::Class::Storage::DBI::Replicated::Pool> and the replicants attribute for more.
+
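+For example, to pin a resultset to one particular replicant (a sketch; this
+assumes the replicant's name is its dsn, as registered in the Pool):
+
+  my $rs = $schema->resultset('Source')->search(
+    undef,
+    { force_pool => $dsn1 },
+  );
+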
+Also see transactions and L</execute_reliably> for alternative ways to
+force read traffic to the master. In general, you should wrap your statements
+in a transaction when you are reading and writing to the same tables at the
+same time, since your replicants will often lag a bit behind the master.
+
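+For instance (a minimal sketch; 'Artist' is a hypothetical result class), a
+read-modify-write against the same table could be wrapped like this:
+
+  $schema->txn_do(sub {
+    my $artist = $schema->resultset('Artist')->find(666);
+    $artist->update({ name => 'New Name' });
+  });
+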
+See L<DBIx::Class::Storage::DBI::Replicated::Instructions> for more help and
+walkthroughs.
+
=head1 DESCRIPTION
Warning: This class is marked BETA. This has been running a production
=head1 NOTES
The consistency between master and replicants is database specific. The Pool
-gives you a method to validate it's replicants, removing and replacing them
+gives you a method to validate its replicants, removing and replacing them
when they fail/pass predefined criteria. Please make careful use of the ways
to force a query to run against Master when needed.
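+
+For example, you can trigger a validation pass by hand (a sketch; the
+validation attributes are documented in
+L<DBIx::Class::Storage::DBI::Replicated::Pool>):
+
+  $schema->storage->pool->validate_replicants;
+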
Replicated Storage has additional requirements not currently part of L<DBIx::Class>
- Moose => 0.77
- MooseX::AttributeHelpers => 0.12
- MooseX::Types => 0.10
- namespace::clean => 0.11
-
+ Moose => '0.87',
+ MooseX::AttributeHelpers => '0.21',
+ MooseX::Types => '0.16',
+ namespace::clean => '0.11',
+ Hash::Merge => '0.11'
+
You will need to install these modules manually via CPAN or make them part of the
Makefile for your distribution.
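+
+For example, with an L<ExtUtils::MakeMaker> based Makefile.PL you could list
+them in PREREQ_PM (a sketch only; adapt it to your own build tool):
+
+  PREREQ_PM => {
+    'Moose'                    => '0.87',
+    'MooseX::AttributeHelpers' => '0.21',
+    'MooseX::Types'            => '0.16',
+    'namespace::clean'         => '0.11',
+    'Hash::Merge'              => '0.11',
+  },
+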
has 'schema' => (
is=>'rw',
- isa=>'DBIx::Class::Schema',
+ isa=>DBICSchema,
weak_ref=>1,
required=>1,
);
=head2 pool_args
Contains a hashref of initialized information to pass to the Pool object.
-See L<DBIx::Class::Storage::Replicated::Pool> for available arguments.
+See L<DBIx::Class::Storage::DBI::Replicated::Pool> for available arguments.
=cut
=head2 balancer_args
Contains a hashref of initialized information to pass to the Balancer object.
-See L<DBIx::Class::Storage::Replicated::Balancer> for available arguments.
+See L<DBIx::Class::Storage::DBI::Replicated::Balancer> for available arguments.
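+
+For example, both pool_args and balancer_args can be supplied through
+storage_type (a sketch; C<maximum_lag> and C<auto_validate_every> are
+attributes documented on the Pool and Balancer classes respectively):
+
+  $schema->storage_type([
+    '::DBI::Replicated', {
+      balancer_type => '::Random',
+      balancer_args => { auto_validate_every => 5 },
+      pool_args     => { maximum_lag => 2 },
+    },
+  ]);
+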
=cut
isa=>'DBIx::Class::Storage::DBI::Replicated::Pool',
lazy_build=>1,
handles=>[qw/
- connect_replicants
+ connect_replicants
replicants
has_replicants
/],
has 'master' => (
is=> 'ro',
- isa=>'DBIx::Class::Storage::DBI',
+ isa=>DBICStorageDBI,
lazy_build=>1,
);
select
select_single
columns_info_for
- /],
+ /],
);
=head2 write_handler
is=>'ro',
isa=>Object,
lazy_build=>1,
- lazy_build=>1,
- handles=>[qw/
+ handles=>[qw/
on_connect_do
- on_disconnect_do
+ on_disconnect_do
connect_info
throw_exception
sql_maker
create_ddl_dir
deployment_statements
datetime_parser
- datetime_parser_type
+ datetime_parser_type
+ build_datetime_parser
last_insert_id
insert
insert_bulk
sth
deploy
with_deferred_fk_checks
-
+ dbh_do
reload_row
+ with_deferred_fk_checks
_prep_for_execute
-
+
+ backup
+ is_datatype_numeric
+ _count_select
+ _subq_count_select
+ _subq_update_delete
+ svp_rollback
+ svp_begin
+ svp_release
/],
);
around connect_info => sub {
my ($next, $self, $info, @extra) = @_;
+ my $wantarray = wantarray;
+
my %opts;
for my $arg (@$info) {
next unless (reftype($arg)||'') eq 'HASH';
- %opts = (%opts, %$arg);
+ %opts = %{ merge($arg, \%opts) };
}
delete $opts{dsn};
$self->pool_type(delete $opts{pool_type})
if $opts{pool_type};
- $self->pool_args({
- %{ $self->pool_args },
- %{ delete $opts{pool_args} || {} }
- });
+ $self->pool_args(
+ merge((delete $opts{pool_args} || {}), $self->pool_args)
+ );
- $self->pool($self->_build_pool);
+ $self->pool($self->_build_pool)
+ if $self->pool;
}
if (@opts{qw/balancer_type balancer_args/}) {
$self->balancer_type(delete $opts{balancer_type})
if $opts{balancer_type};
- $self->balancer_args({
- %{ $self->balancer_args },
- %{ delete $opts{balancer_args} || {} }
- });
+ $self->balancer_args(
+ merge((delete $opts{balancer_args} || {}), $self->balancer_args)
+ );
- $self->balancer($self->_build_balancer);
+ $self->balancer($self->_build_balancer)
+ if $self->balancer;
}
$self->_master_connect_info_opts(\%opts);
- $self->$next($info, @extra);
+ my (@res, $res);
+ if ($wantarray) {
+ @res = $self->$next($info, @extra);
+ } else {
+ $res = $self->$next($info, @extra);
+ }
+
+ # Make sure master is blessed into the correct class and apply role to it.
+ my $master = $self->master;
+ $master->_determine_driver;
+ Moose::Meta::Class->initialize(ref $master);
+ DBIx::Class::Storage::DBI::Replicated::WithDSN->meta->apply($master);
+
+ $wantarray ? @res : $res;
};
=head1 METHODS
=head2 BUILDARGS
-L<DBIx::Class::Schema> when instantiating it's storage passed itself as the
+L<DBIx::Class::Schema> when instantiating its storage passes itself as the
first argument. So we need to massage the arguments a bit so that all the
bits get put into the correct places.
sub BUILDARGS {
my ($class, $schema, $storage_type_args, @args) = @_;
-
+
return {
- schema=>$schema,
- %$storage_type_args,
- @args
+ schema=>$schema,
+ %$storage_type_args,
+ @args
}
}
sub _build_master {
my $self = shift @_;
- DBIx::Class::Storage::DBI->new($self->schema);
+ my $master = DBIx::Class::Storage::DBI->new($self->schema);
+ $master
}
=head2 _build_pool
sub _build_balancer {
my $self = shift @_;
$self->create_balancer(
- pool=>$self->pool,
+ pool=>$self->pool,
master=>$self->master,
%{$self->balancer_args},
);
for my $r (@args) {
$r = [ $r ] unless reftype $r eq 'ARRAY';
- croak "coderef replicant connect_info not supported"
+ $self->throw_exception('coderef replicant connect_info not supported')
if ref $r->[0] && reftype $r->[0] eq 'CODE';
# any connect_info options?
my $i = 0;
$i++ while $i < @$r && (reftype($r->[$i])||'') ne 'HASH';
-# make one if none
+# make one if none
$r->[$i] = {} unless $r->[$i];
# merge if two hashes
- my %opts = map %$_, @$r[$i .. $#{$r}];
+ my @hashes = @$r[$i .. $#{$r}];
+
+ $self->throw_exception('invalid connect_info options')
+ if (grep { reftype($_) eq 'HASH' } @hashes) != @hashes;
+
+ $self->throw_exception('too many hashrefs in connect_info')
+ if @hashes > 2;
+
+ my %opts = %{ merge(reverse @hashes) };
+
+# delete them
splice @$r, $i+1, ($#{$r} - $i), ();
+# make sure master/replicants opts don't clash
+ my %master_opts = %{ $self->_master_connect_info_opts };
+ if (exists $opts{dbh_maker}) {
+ delete @master_opts{qw/dsn user password/};
+ }
+ delete $master_opts{dbh_maker};
+
# merge with master
- %opts = (%{ $self->_master_connect_info_opts }, %opts);
+ %opts = %{ merge(\%opts, \%master_opts) };
# update
$r->[$i] = \%opts;
sub execute_reliably {
my ($self, $coderef, @args) = @_;
-
+
unless( ref $coderef eq 'CODE') {
$self->throw_exception('Second argument must be a coderef');
}
-
+
##Get copy of master storage
my $master = $self->master;
-
+
##Get whatever the current read handler is
my $current = $self->read_handler;
-
+
##Set the read handler to master
$self->read_handler($master);
-
+
## do whatever the caller needs
my @result;
my $want_array = wantarray;
-
+
eval {
if($want_array) {
@result = $coderef->(@args);
($result[0]) = ($coderef->(@args));
} else {
$coderef->(@args);
- }
+ }
};
-
+
##Reset to the original state
- $self->read_handler($current);
-
+ $self->read_handler($current);
+
##Exception testing has to come last, otherwise you might leave the
##read_handler set to master.
-
+
if($@) {
$self->throw_exception("coderef returned an error: $@");
} else {
Sets the current $schema to be 'reliable', that is, all queries, both read and
write, are sent to the master.
-
+
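+For example (a sketch), to get up-to-the-second reads for a block of code and
+then return to balanced reads afterwards:
+
+  $schema->storage->set_reliable_storage;
+  my $count = $schema->resultset('Source')->count; # read from the master
+  $schema->storage->set_balanced_storage;          # back to the replicants
+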
=cut
sub set_reliable_storage {
my $self = shift @_;
my $schema = $self->schema;
my $write_handler = $self->schema->storage->write_handler;
-
+
$schema->storage->read_handler($write_handler);
}
Sets the current $schema to use the L</balancer> for all reads, while all
writes are sent to the master only.
-
+
=cut
sub set_balanced_storage {
my $self = shift @_;
my $schema = $self->schema;
- my $write_handler = $self->schema->storage->balancer;
-
- $schema->storage->read_handler($write_handler);
-}
-
-=head2 around: txn_do ($coderef)
+ my $balanced_handler = $self->schema->storage->balancer;
-Overload to the txn_do method, which is delegated to whatever the
-L<write_handler> is set to. We overload this in order to wrap in inside a
-L</execute_reliably> method.
-
-=cut
-
-around 'txn_do' => sub {
- my($txn_do, $self, $coderef, @args) = @_;
- $self->execute_reliably(sub {$self->$txn_do($coderef, @args)});
-};
+ $schema->storage->read_handler($balanced_handler);
+}
=head2 connected
if(@_) {
foreach my $source ($self->all_storages) {
$source->debug(@_);
- }
+ }
}
return $self->master->debug;
}
if(@_) {
foreach my $source ($self->all_storages) {
$source->debugobj(@_);
- }
+ }
}
return $self->master->debugobj;
}
if(@_) {
foreach my $source ($self->all_storages) {
$source->debugfh(@_);
- }
+ }
}
return $self->master->debugfh;
}
if(@_) {
foreach my $source ($self->all_storages) {
$source->debugcb(@_);
- }
+ }
}
return $self->master->debugcb;
}
}
$self->master->cursor_class;
}
-
+
=head1 GOTCHAS
Because replicants can lag behind a master, you must take care to
my $new_schema = $schema->clone;
$new_schema->set_reliable_storage;
-
+
## $new_schema will use only the Master storage for all reads/writes while
## the $schema object will use replicated storage.