package DBIx::Class::Storage::DBI::Replicated;
-use Moose;
-use Class::MOP;
-use Moose::Util::TypeConstraints;
+BEGIN {
+ use Carp::Clan qw/^DBIx::Class/;
+
+ ## Modules required for Replication support not required for general DBIC
+ ## use, so we explicitly test for these.
+
+ my %replication_required = (
+ Moose => '0.54',
+ MooseX::AttributeHelpers => '0.12',
+ Moose::Util::TypeConstraints => '0.54',
+ Class::MOP => '0.63',
+ );
+
+ my @didnt_load;
+
+ for my $module (keys %replication_required) {
+ eval "use $module $replication_required{$module}";
+ push @didnt_load, "$module $replication_required{$module}"
+ if $@;
+ }
+
+ croak("@{[ join ', ', @didnt_load ]} are missing and are required for Replication")
+ if @didnt_load;
+}
+
use DBIx::Class::Storage::DBI;
use DBIx::Class::Storage::DBI::Replicated::Pool;
use DBIx::Class::Storage::DBI::Replicated::Balancer;
=head1 NAME
-DBIx::Class::Storage::DBI::Replicated - ALPHA Replicated database support
+DBIx::Class::Storage::DBI::Replicated - BETA Replicated database support
=head1 SYNOPSIS
[$dsn3, $user, $pass, \%opts],
);
+ ## Now, just use the $schema as normal
+ $schema->resultset('Source')->search({name=>'etc'});
+
+ ## You can force a given query to use a particular storage using the search
+ ## attribute 'force_pool'. For example:
+
+ my $RS = $schema->resultset('Source')->search(undef, {force_pool=>'master'});
+
+ ## Now $RS will force everything (both reads and writes) to use whatever was
+ ## setup as the master storage. 'master' is hardcoded to always point to the
+ ## Master, but you can also use any Replicant name. Please see:
+ ## L<DBIx::Class::Storage::DBI::Replicated::Pool> and the replicants attribute
+ ## for more. Also see transactions and L</execute_reliably> for alternative
+ ## ways to force read traffic to the master.
+
=head1 DESCRIPTION
-Warning: This class is marked ALPHA. We are using this in development and have
-some basic test coverage but the code hasn't yet been stressed by a variety
-of databases. Individual DB's may have quirks we are not aware of. Please
-use this in development and pass along your experiences/bug fixes.
+Warning: This class is marked BETA. It has been running a production
+website using MySQL native replication as its backend and has decent test
+coverage, but the code hasn't yet been stressed by a variety of databases.
+Individual DBs may have quirks we are not aware of. Please use this first
+in development and pass along your experiences/bug fixes.
This class implements a replicated data store for DBI. Currently you can define
one master and numerous slave database connections. All write-type queries
The consistency between master and replicants is database specific. The Pool
gives you a method to validate its replicants, removing and replacing them
-when they fail/pass predefined criteria. It is recommened that your application
-define two schemas, one using the replicated storage and another that just
-connects to the master.
+when they fail/pass predefined criteria. Please make careful use of the ways
+to force a query to run against Master when needed.
+
+=head1 REQUIREMENTS
+
+Replicated Storage has additional requirements not currently part of L<DBIx::Class>
+
+ Moose => 0.54
+ MooseX::AttributeHelpers => 0.12
+ Moose::Util::TypeConstraints => 0.54
+ Class::MOP => 0.63
+
+You will need to install these modules manually via CPAN or make them part of the
+Makefile for your distribution.
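+
+For example, if your distribution uses L<Module::Install>, the requirements
+could be declared in your Makefile.PL like this (a sketch only; adapt it to
+whatever build tool you actually use):
+
+  requires 'Moose' => '0.54';
+  requires 'MooseX::AttributeHelpers' => '0.12';
+  requires 'Moose::Util::TypeConstraints' => '0.54';
+  requires 'Class::MOP' => '0.63';
+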
=head1 ATTRIBUTES
txn_scope_guard
sth
deploy
+ with_deferred_fk_checks
reload_row
_prep_for_execute
This class defines the following methods.
-=head2 new
+=head2 BUILDARGS
L<DBIx::Class::Schema> when instantiating its storage passes itself as the
first argument. So we need to massage the arguments a bit so that all the
=cut
-around 'new' => sub {
- my ($new, $self, $schema, $storage_type_args, @args) = @_;
- return $self->$new(schema=>$schema, %$storage_type_args, @args);
-};
+sub BUILDARGS {
+ my ($class, $schema, $storage_type_args, @args) = @_;
+
+ return {
+ schema=>$schema,
+ %$storage_type_args,
+ @args
+ }
+}
=head2 _build_master
$self->execute_reliably(sub {$self->$txn_do($coderef, @args)});
};
-=head2 reload_row ($row)
-
-Overload to the reload_row method so that the reloading is always directed to
-the master storage.
-
-=cut
-
-around 'reload_row' => sub {
- my ($reload_row, $self, $row) = @_;
- return $self->execute_reliably(sub {
- return $self->$reload_row(shift);
- }, $row);
-};
-
=head2 connected
Check that the master and at least one of the replicants is connected.
foreach my $source ($self->all_storages) {
$source->limit_dialect(@_);
}
+ return $self->master->limit_dialect;
}
=head2 quote_char
foreach my $source ($self->all_storages) {
$source->quote_char(@_);
}
+ return $self->master->quote_char;
}
=head2 name_sep
foreach my $source ($self->all_storages) {
$source->name_sep(@_);
}
+ return $self->master->name_sep;
}
=head2 set_schema
sub debug {
my $self = shift @_;
- foreach my $source ($self->all_storages) {
- $source->debug(@_);
+ if(@_) {
+ foreach my $source ($self->all_storages) {
+ $source->debug(@_);
+ }
}
+ return $self->master->debug;
}
=head2 debugobj
sub debugobj {
my $self = shift @_;
- foreach my $source ($self->all_storages) {
- $source->debugobj(@_);
+ if(@_) {
+ foreach my $source ($self->all_storages) {
+ $source->debugobj(@_);
+ }
}
+ return $self->master->debugobj;
}
=head2 debugfh
sub debugfh {
my $self = shift @_;
- foreach my $source ($self->all_storages) {
- $source->debugfh(@_);
+ if(@_) {
+ foreach my $source ($self->all_storages) {
+ $source->debugfh(@_);
+ }
}
+ return $self->master->debugfh;
}
=head2 debugcb
sub debugcb {
my $self = shift @_;
- foreach my $source ($self->all_storages) {
- $source->debugcb(@_);
+ if(@_) {
+ foreach my $source ($self->all_storages) {
+ $source->debugcb(@_);
+ }
}
+ return $self->master->debugcb;
}
=head2 disconnect
}
}
+=head1 GOTCHAS
+
+Because replicants can lag behind the master, you must take care to use one of
+the methods that force read queries to the master whenever you need realtime
+data integrity. For example, if you insert a row and then immediately re-read
+it from the database (say, by doing $row->discard_changes), or you insert a row
+and then immediately build a query that expects that row to exist, you should
+force the master to handle the reads. Otherwise, due to the lag, there is no
+certainty your data will be in the expected state.
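+
+A sketch of the risky pattern described above (the 'Artist' source and its
+columns are made up for illustration):
+
+  ## Risky on replicated storage: the re-read may be served by a replicant
+  ## that has not yet caught up with the master.
+  my $artist = $schema->resultset('Artist')->create({ name => 'Ted' });
+  $artist->discard_changes;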
+
+For data integrity, all transactions automatically use the master storage for
+all read and write queries. Using a transaction is the preferred and recommended
+method to force the master to handle all read queries.
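+
+For example, wrapping an insert and the follow-up read in a transaction keeps
+both on the master (again a sketch, with a made-up 'Artist' source):
+
+  my $artist = $schema->txn_do(sub {
+    my $new = $schema->resultset('Artist')->create({ name => 'Ted' });
+    ## Reads inside the transaction go to the master, so this find is
+    ## guaranteed to see the row we just created.
+    return $schema->resultset('Artist')->find($new->id);
+  });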
+
+Otherwise, you can force a single query to use the master with the 'force_pool'
+attribute:
+
+ my $row = $resultset->search(undef, {force_pool=>'master'})->find($pk);
+
+This attribute will safely be ignored by non-replicated storages, so you can
+use the same code for both types of systems.
+
+Lastly, you can use the L</execute_reliably> method, which works very much like
+a transaction.
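+
+For example (another sketch with a made-up 'Artist' source):
+
+  $schema->storage->execute_reliably(sub {
+    ## Every query inside this coderef, reads included, is sent to the master.
+    my $count = $schema->resultset('Artist')->count;
+  });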
+
+For debugging, you can turn replication on/off with the methods L</set_reliable_storage>
+and L</set_balanced_storage>; however, this operates at a global level and is not
+suitable if you have a shared Schema object being used by multiple processes,
+such as on a web application server. You can get around this limitation by
+using the Schema clone method.
+
+ my $new_schema = $schema->clone;
+ $new_schema->set_reliable_storage;
+
+ ## $new_schema will use only the Master storage for all reads/writes while
+ ## the $schema object will use replicated storage.
+
=head1 AUTHOR
John Napiorkowski <john.napiorkowski@takkle.com>
=cut
+__PACKAGE__->meta->make_immutable;
+
1;