From: John Napiorkowski Date: Wed, 30 Apr 2008 15:51:48 +0000 (+0000) Subject: new config option to DBICTest to let you set an alternative storage type, start on... X-Git-Tag: v0.08240~402^2~68 X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=commitdiff_plain;h=2bf79155a8be1532cce2538e967f32c4ff22a87b;p=dbsrgits%2FDBIx-Class.git new config option to DBICTest to let you set an alternative storage type, start on creating a DBIC based load balancer --- diff --git a/lib/DBIx/Class/Storage/DBI/Replicated.pm b/lib/DBIx/Class/Storage/DBI/Replicated.pm index d736c41..69997d4 100644 --- a/lib/DBIx/Class/Storage/DBI/Replicated.pm +++ b/lib/DBIx/Class/Storage/DBI/Replicated.pm @@ -1,5 +1,503 @@ package DBIx::Class::Storage::DBI::Replicated; +use Moose; +use DBIx::Class::Storage::DBI::Replicated::Pool; + +#extends 'DBIx::Class::Storage::DBI', 'Moose::Object'; + +=head1 NAME + +DBIx::Class::Storage::DBI::Replicated - ALPHA Replicated database support + +=head1 SYNOPSIS + +The Following example shows how to change an existing $schema to a replicated +storage type, add some replicated (readonly) databases, and perform reporting +tasks + + ## Change storage_type in your schema class + $schema->storage_type( '::DBI::Replicated' ); + + ## Add some slaves. Basically this is an array of arrayrefs, where each + ## arrayref is database connect information + + $schema->storage->create_replicants( + [$dsn1, $user, $pass, \%opts], + [$dsn1, $user, $pass, \%opts], + [$dsn1, $user, $pass, \%opts], + ## This is just going to use the standard DBIC connect method, so it + ## supports everything that method supports, such as connecting to an + ## existing database handle. + [$dbh], + \%global_opts + ); + + ## a hash of replicants, keyed by their DSN + my %replicants = $schema->storage->replicants; + my $replicant = $schema->storage->get_replicant($dsn); + $replicant->status; + $replicant->is_active; + $replicant->active; + +=head1 DESCRIPTION + +Warning: This class is marked ALPHA. 
We are using this in development and have +some basic test coverage but the code hasn't yet been stressed by a variety +of databases. Individual DB's may have quirks we are not aware of. Please +use this in development and pass along your experiences/bug fixes. + +This class implements a replicated data store for DBI. Currently you can define +one master and numerous slave database connections. All write-type queries +(INSERT, UPDATE, DELETE and even LAST_INSERT_ID) are routed to the master +database, all read-type queries (SELECTs) go to the slave database. + +Basically, any method request that L would normally +handle gets delegated to one of the two attributes: L or to +L. Additionally, some methods need to be distributed +to all existing storages. This way our storage class is a drop-in replacement +for L. + +Read traffic is spread across the replicants (slaves) according to a user +selected algorithm. The default algorithm is random weighted. + +TODO more details about the algorithm. + +=head1 ATTRIBUTES + +This class defines the following attributes. + +=head2 master + +The master defines the canonical state for a pool of connected databases. All +the replicants are expected to match this database's state. Thus, in a classic +Master / Slaves distributed system, all the slaves are expected to replicate +the Master's state as quickly as possible. This is the only database in the +pool of databases that is allowed to handle write traffic. + +=cut + +has 'master' => ( + is=> 'ro', + isa=>'DBIx::Class::Storage::DBI', + lazy_build=>1, + handles=>[qw/ + on_connect_do + on_disconnect_do + columns_info_for + connect_info + throw_exception + sql_maker + sqlt_type + create_ddl_dir + deployment_statements + datetime_parser + datetime_parser_type + last_insert_id + insert + insert_bulk + update + delete + dbh + txn_do + txn_commit + txn_rollback + sth + deploy + /], +); + + +=head2 current_replicant + +Replicant storages (slaves) handle all read only traffic. 
The assumption is +that your database will become read-bound well before it becomes write-bound +and that being able to spread your read only traffic around to multiple +databases is going to help you to scale traffic. + +This attribute returns the next slave to handle a read request. Your L +attribute has methods to help you shuffle through all the available replicants +via its balancer object. + +This attribute defines the following reader/writer methods + +=over 4 + +=item get_current_replicant + +Returns the contained L replicant + +=item set_current_replicant + +Set the attribute to a given L (or subclass) object. + +=back + +We split the reader/writer to make it easier to selectively override how the +replicant is altered. + +=cut + +has 'current_replicant' => ( + is=> 'rw', + reader=>'get_current_replicant', + writer=>'set_current_replicant', + isa=>'DBIx::Class::Storage::DBI', + lazy_build=>1, + handles=>[qw/ + select + select_single + columns_info_for + /], +); + + +=head2 replicant_storage_pool_type + +Contains the classname which will instantiate the L +object. Defaults to: L. + +=cut + +has 'replicant_storage_pool_type' => ( + is=>'ro', + isa=>'ClassName', + required=>1, + default=>'DBIx::Class::Storage::DBI::Replicated::Pool', + handles=> { + 'create_replicant_storage_pool' => 'new', + }, +); + + +=head2 pool_balancer_type + +The replication pool requires a balancer class to provide the methods for +choosing how to spread the query load across each replicant in the pool. + +=cut + +has 'pool_balancer_type' => ( + is=>'ro', + isa=>'ClassName', + required=>1, + default=>'DBIx::Class::Storage::DBI::Replicated::Pool::Balancer', + handles=> { + 'create_replicant_storage_pool' => 'new', + }, +); + + +=head2 replicant_storage_pool + +Holds the list of connected replicants, their status and other housekeeping or +reporting methods. 
+ +=cut + +has 'replicant_storage_pool' => ( + is=>'ro', + isa=>'DBIx::Class::Storage::DBI::Replicated::Pool', + lazy_build=>1, + handles=>[qw/replicant_storages/], +); + + + +=head1 METHODS + +This class defines the following methods. + +=head2 new + +Make sure we properly inherit from L. + +=cut + +sub new { + my $class = shift @_; + my $obj = $class->SUPER::new(@_); + + return $class->meta->new_object( + __INSTANCE__ => $obj, @_ + ); +} + +=head2 _build_master_storage + +Lazy builder for the L attribute. + +=cut + +sub _build_next_replicant_storage { + DBIx::Class::Storage::DBI->new; +} + + +=head2 _build_current_replicant_storage + +Lazy builder for the L attribute. + +=cut + +sub _build_current_replicant_storage { + shift->replicant_storage_pool->first; +} + + +=head2 _build_replicant_storage_pool + +Lazy builder for the L attribute. + +=cut + +sub _build_replicant_storage_pool { + my $self = shift @_; + $self->create_replicant_storage_pool; +} + + +=head2 around: create_replicant_storage_pool + +Make sure all calls to the method set a default balancer type to our current +balancer type. + +=cut + +around 'create_replicant_storage_pool' => sub { + my ($method, $self, @args) = @_; + return $self->$method(balancer_type=>$self->pool_balancer_type, @args); +} + + +=head2 after: get_current_replicant_storage + +Advice on the current_replicant_storage attribute. Each time we use a replicant +we need to change it via the storage pool algorithm. That way we are spreading +the load evenly (hopefully) across existing capacity. + +=cut + +after 'get_current_replicant_storage' => sub { + my $self = shift @_; + my $next_replicant = $self->replicant_storage_pool->next; + $self->next_replicant_storage($next_replicant); +}; + + +=head2 find_or_create + +First do a find on the replicant. If no rows are found, pass it on to the +L + +=cut + +sub find_or_create { + my $self = shift @_; +} + +=head2 all_storages + +Returns an array of all the connected storage backends. 
The first element +in the returned array is the master, and the remaining ones are the +replicants. + +=cut + +sub all_storages { + my $self = shift @_; + + return ( + $self->master_storage, + $self->replicant_storages, + ); +} + + +=head2 connected + +Check that the master and at least one of the replicants is connected. + +=cut + +sub connected { + my $self = shift @_; + + return + $self->master_storage->connected && + $self->replicant_storage_pool->has_connected_slaves; +} + + +=head2 ensure_connected + +Make sure all the storages are connected. + +=cut + +sub ensure_connected { + my $self = shift @_; + foreach $source (shift->all_sources) { + $source->ensure_connected(@_); + } +} + + +=head2 limit_dialect + +Set the limit_dialect for all existing storages + +=cut + +sub limit_dialect { + my $self = shift @_; + foreach $source (shift->all_sources) { + $source->name_sep(@_); + } +} + + +=head2 quote_char + +Set the quote_char for all existing storages + +=cut + +sub quote_char { + my $self = shift @_; + foreach $source (shift->all_sources) { + $source->name_sep(@_); + } +} + + +=head2 name_sep + +Set the name_sep for all existing storages + +=cut + +sub name_sep { + my $self = shift @_; + foreach $source (shift->all_sources) { + $source->name_sep(@_); + } +} + + +=head2 set_schema + +Set the schema object for all existing storages + +=cut + +sub set_schema { + my $self = shift @_; + foreach $source (shift->all_sources) { + $source->set_schema(@_); + } +} + + +=head2 debug + +Set a debug flag across all storages + +=cut + +sub debug { + my $self = shift @_; + foreach $source (shift->all_sources) { + $source->debug(@_); + } +} + + +=head2 debugobj + +Set a debug object across all storages + +=cut + +sub debugobj { + my $self = shift @_; + foreach $source (shift->all_sources) { + $source->debugobj(@_); + } +} + + +=head2 debugfh + +Set a debugfh object across all storages + +=cut + +sub debugfh { + my $self = shift @_; + foreach $source (shift->all_sources) { + 
$source->debugfh(@_); + } +} + + +=head2 debugcb + +set a debug callback across all storages + +=cut + +sub debugcb { + my $self = shift @_; + foreach $source (shift->all_sources) { + $source->debugcb(@_); + } +} + + +=head2 disconnect + +disconnect everything + +=cut + +sub disconnect { + my $self = shift @_; + foreach $source (shift->all_sources) { + $source->disconnect(@_); + } +} + + +=head2 DESTROY + +Make sure we pass destroy events down to the storage handlers + +=cut + +sub DESTROY { + my $self = shift; + ## TODO, maybe we can just leave this alone ??? +} + + +=head1 AUTHOR + +Norbert Csongrádi + +Peter Siklósi + +John Napiorkowski + +=head1 LICENSE + +You may distribute this code under the same terms as Perl itself. + +=cut + +1; + +__END__ + use strict; use warnings; diff --git a/t/93storage_replication.t b/t/93storage_replication.t index 62a4d15..43d3e77 100644 --- a/t/93storage_replication.t +++ b/t/93storage_replication.t @@ -2,12 +2,13 @@ use strict; use warnings; use lib qw(t/lib); use Test::More; +use Data::Dump qw/dump/; BEGIN { - eval "use DBD::Multi"; + eval "use Moose"; plan $@ - ? ( skip_all => 'needs DBD::Multi for testing' ) - : ( tests => 20 ); + ? 
( skip_all => 'needs Moose for testing' ) + : ( tests => 2 ); } ## ---------------------------------------------------------------------------- @@ -15,6 +16,68 @@ BEGIN { ## ---------------------------------------------------------------------------- TESTSCHEMACLASS: { + + package DBIx::Class::DBI::Replicated::TestReplication; + + use DBICTest; + use base qw/Class::Accessor::Fast/; + + __PACKAGE__->mk_accessors( qw/schema/ ); + + ## Initialize the object + + sub new { + my $proto = shift; + my $class = ref( $proto ) || $proto; + my $self = {}; + + bless( $self, $class ); + + $self->schema( $self->init_schema ); + + return $self; + } + + ## get the Schema and set the replication storage type + + sub init_schema { + my $class = shift @_; + my $schema = DBICTest->init_schema(storage_type=>'::DBI::Replicated'); + return $schema; + } +} + +## ---------------------------------------------------------------------------- +## Create an object and run some tests +## ---------------------------------------------------------------------------- + +my %params = ( + db_paths => [ + "t/var/DBIxClass.db", + "t/var/DBIxClass_slave1.db", + "t/var/DBIxClass_slave2.db", + ], +); + +ok my $replicate = DBIx::Class::DBI::Replicated::TestReplication->new() + => 'Created a replication object'; + +isa_ok $replicate->schema + => 'DBIx::Class::Schema'; + + + warn dump $replicate->schema->storage->meta; + + warn dump $replicate->schema->storage->master; + + +__END__ + +## ---------------------------------------------------------------------------- +## Build a class to hold all our required testing data and methods. +## ---------------------------------------------------------------------------- + +TESTSCHEMACLASS: { package DBIx::Class::DBI::Replicated::TestReplication; diff --git a/t/lib/DBICTest.pm b/t/lib/DBICTest.pm index 5c76153..2077af3 100755 --- a/t/lib/DBICTest.pm +++ b/t/lib/DBICTest.pm @@ -29,6 +29,7 @@ DBIx::Class. 
my $schema = DBICTest->init_schema( no_deploy=>1, no_populate=>1, + storage_type=>'::DBI::Replicated', ); This method removes the test SQLite database in t/var/DBIxClass.db @@ -72,6 +73,9 @@ sub init_schema { } else { $schema = DBICTest::Schema->compose_namespace('DBICTest'); } + if( $args{storage_type}) { + $schema->storage_type($args{storage_type}); + } if ( !$args{no_connect} ) { $schema = $schema->connect($self->_database); $schema->storage->on_connect_do(['PRAGMA synchronous = OFF']);