package DBIx::Class::Storage::DBI::Replicated;
use Moose;
+use Class::MOP;
+use Moose::Util::TypeConstraints;
use DBIx::Class::Storage::DBI;
use DBIx::Class::Storage::DBI::Replicated::Pool;
use DBIx::Class::Storage::DBI::Replicated::Balancer;
-use Scalar::Util qw(blessed);
-
-extends 'DBIx::Class::Storage::DBI', 'Moose::Object';
=head1 NAME
[$dsn3, $user, $pass, \%opts],
);
+ ## Now, just use the $schema as normal
+ $schema->resultset('Source')->search({name=>'etc'});
+
+ ## You can force a given query to use a particular storage using the search
+ ## attribute 'force_pool'. For example:
+
+ my $RS = $schema->resultset('Source')->search(undef, {force_pool=>'master'});
+
+ ## Now $RS will force everything (both reads and writes) to use whatever was
+ ## set up as the master storage. 'master' is hardcoded to always point to the
+ ## Master, but you can also use any Replicant name. Please see:
+ ## L<DBIx::Class::Storage::DBI::Replicated::Pool> and the replicants attribute for
+ ## more. Also see transactions and L</execute_reliably> for alternative ways
+ ## to force read traffic to the master.
+
=head1 DESCRIPTION
-Warning: This class is marked ALPHA. We are using this in development and have
-some basic test coverage but the code hasn't yet been stressed by a variety
-of databases. Individual DB's may have quirks we are not aware of. Please
-use this in development and pass along your experiences/bug fixes.
+Warning: This class is marked BETA. This has been running a production
+website using MySQL native replication as its backend and we have some decent
+test coverage, but the code hasn't yet been stressed by a variety of databases.
+Individual DBs may have quirks we are not aware of. Please first use this in
+development and pass along your experiences/bug fixes.
This class implements a replicated data store for DBI. Currently you can define
one master and numerous slave database connections. All write-type queries
The consistency between master and replicants is database specific. The Pool
gives you a method to validate its replicants, removing and replacing them
-when they fail/pass predefined criteria. It is recommened that your application
-define two schemas, one using the replicated storage and another that just
-connects to the master.
+when they fail/pass predefined criteria. Please make careful use of the ways
+to force a query to run against the Master when needed.
=head1 ATTRIBUTES
This class defines the following attributes.
+=head2 schema
+
+The underlying L<DBIx::Class::Schema> object this storage is attached to.
+
+=cut
+
+has 'schema' => (
+ is=>'rw',
+ isa=>'DBIx::Class::Schema',
+ weak_ref=>1,
+ required=>1,
+);
+
=head2 pool_type
Contains the classname which will instantiate the L</pool> object. Defaults
has 'pool_type' => (
is=>'ro',
isa=>'ClassName',
- lazy_build=>1,
+ required=>1,
+ default=>'DBIx::Class::Storage::DBI::Replicated::Pool',
handles=>{
'create_pool' => 'new',
},
=cut
+subtype 'DBIx::Class::Storage::DBI::Replicated::BalancerClassNamePart',
+ as 'ClassName';
+
+coerce 'DBIx::Class::Storage::DBI::Replicated::BalancerClassNamePart',
+ from 'Str',
+ via {
+ my $type = $_;
+ if($type=~m/^::/) {
+ $type = 'DBIx::Class::Storage::DBI::Replicated::Balancer'.$type;
+ }
+ Class::MOP::load_class($type);
+ $type;
+ };
+
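+## For example (illustrative only): the coercion above lets you pass a short
+## balancer name in the storage arguments; it is expanded and loaded as the
+## full class name, assuming such a balancer subclass exists:
+##
+##   balancer_type => '::Random',
+##   ## becomes 'DBIx::Class::Storage::DBI::Replicated::Balancer::Random'
+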
has 'balancer_type' => (
is=>'ro',
- isa=>'ClassName',
- lazy_build=>1,
+ isa=>'DBIx::Class::Storage::DBI::Replicated::BalancerClassNamePart',
+ coerce=>1,
+ required=>1,
+ default=> 'DBIx::Class::Storage::DBI::Replicated::Balancer::First',
handles=>{
'create_balancer' => 'new',
},
update
delete
dbh
+ txn_begin
txn_do
txn_commit
txn_rollback
+ txn_scope_guard
sth
deploy
- schema
+
reload_row
+    _prep_for_execute
+    configure_sqlt
+
/],
);
This class defines the following methods.
-=head2 new
+=head2 BUILDARGS
L<DBIx::Class::Schema> when instantiating its storage passes itself as the
-first argument. We need to invoke L</new> on the underlying parent class, make
-sure we properly give it a L<Moose> meta class, and then correctly instantiate
-our attributes. Basically we pass on whatever the schema has in it's class
-data for 'storage_type_args' to our replicated storage type.
+first argument. So we need to massage the arguments a bit so that all the
+bits get put into the correct places.
=cut
-sub new {
- my $class = shift @_;
- my $schema = shift @_;
- my $storage_type_args = shift @_;
- my $obj = $class->SUPER::new($schema, $storage_type_args, @_);
+sub BUILDARGS {
+ my ($class, $schema, $storage_type_args, @args) = @_;
- ## Hate to do it this way, but can't seem to get advice on the attribute working right
- ## maybe we can do a type and coercion for it.
- if( $storage_type_args->{balancer_type} && $storage_type_args->{balancer_type}=~m/^::/) {
- $storage_type_args->{balancer_type} = 'DBIx::Class::Storage::DBI::Replicated::Balancer'.$storage_type_args->{balancer_type};
- eval "require $storage_type_args->{balancer_type}";
+ return {
+ schema=>$schema,
+ %$storage_type_args,
+ @args
}
-
- return $class->meta->new_object(
- __INSTANCE__ => $obj,
- %$storage_type_args,
- @_,
- );
}
=head2 _build_master
=cut
sub _build_master {
- DBIx::Class::Storage::DBI->new;
-}
-
-=head2 _build_pool_type
-
-Lazy builder for the L</pool_type> attribute.
-
-=cut
-
-sub _build_pool_type {
- return 'DBIx::Class::Storage::DBI::Replicated::Pool';
+ my $self = shift @_;
+ DBIx::Class::Storage::DBI->new($self->schema);
}
=head2 _build_pool
$self->create_pool(%{$self->pool_args});
}
-=head2 _build_balancer_type
-
-Lazy builder for the L</balancer_type> attribute.
-
-=cut
-
-sub _build_balancer_type {
- return 'DBIx::Class::Storage::DBI::Replicated::Balancer::First';
-}
-
=head2 _build_balancer
Lazy builder for the L</balancer> attribute. This takes a Pool object so that
$self->execute_reliably(sub {$self->$txn_do($coderef, @args)});
};
-=head2 reload_row ($row)
-
-Overload to the reload_row method so that the reloading is always directed to
-the master storage.
-
-=cut
-
-around 'reload_row' => sub {
- my ($reload_row, $self, $row) = @_;
- return $self->execute_reliably(sub {
- return $self->$reload_row(shift);
- }, $row);
-};
-
=head2 connected
Check that the master and at least one of the replicants are connected.
}
}
+=head1 GOTCHAS
+
+Because replicants can lag behind the master, you must take care to use one of
+the methods that force read queries to the master when you need realtime data
+integrity. For example, if you insert a row and then
+immediately re-read it from the database (say, by doing $row->discard_changes)
+or you insert a row and then immediately build a query that expects that row
+to be among its results, you should force the master to handle reads. Otherwise,
+due to the lag, there is no certainty your data will be in the expected state.
+
+For data integrity, all transactions automatically use the master storage for
+all read and write queries. Using a transaction is the recommended way to force
+the master to handle all read queries.
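+
+As a minimal illustrative sketch (reusing the 'Source' resultset from the
+synopsis above):
+
+  $schema->txn_do(sub {
+    $schema->resultset('Source')->create({name=>'etc'});
+    ## This read also runs against the master, so it sees the row just created
+    return $schema->resultset('Source')->search({name=>'etc'})->first;
+  });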
+
+Otherwise, you can force a single query to use the master with the 'force_pool'
+attribute:
+
+ my $row = $resultset->search(undef, {force_pool=>'master'})->find($pk);
+
+This attribute will safely be ignored by non-replicated storages, so you can use
+the same code for both types of systems.
+
+Lastly, you can use the L</execute_reliably> method, which works very much like
+a transaction.
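+
+For example (an illustrative sketch, again using the 'Source' resultset from
+the synopsis):
+
+  my $name = 'etc';
+  my $row  = $schema->storage->execute_reliably(sub {
+    ## Everything in this coderef, reads included, runs against the master
+    return $schema->resultset('Source')->search({name=>$name})->first;
+  });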
+
+For debugging, you can turn replication on/off with the methods L</set_reliable_storage>
+and L</set_balanced_storage>; however, this operates at a global level and is not
+suitable if you have a shared Schema object being used by multiple processes,
+such as on a web application server. You can get around this limitation by
+using the Schema clone method.
+
+ my $new_schema = $schema->clone;
+ $new_schema->set_reliable_storage;
+
+ ## $new_schema will use only the Master storage for all reads/writes while
+ ## the $schema object will use replicated storage.
+
=head1 AUTHOR
John Napiorkowski <john.napiorkowski@takkle.com>
=cut
+__PACKAGE__->meta->make_immutable;
+
1;