From: John Napiorkowski Date: Mon, 7 Jul 2008 21:38:49 +0000 (+0000) Subject: updated documentation, adding some hints and details, changed the way we can use... X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=commitdiff_plain;h=7e38d85069fbeeff050dc736b756b60c01f85fc6;p=dbsrgits%2FDBIx-Class-Historic.git updated documentation, adding some hints and details, changed the way we can use the resultset attribute to force a particular storage backend. --- diff --git a/lib/DBIx/Class/PK.pm b/lib/DBIx/Class/PK.pm index 32efba6..f282924 100644 --- a/lib/DBIx/Class/PK.pm +++ b/lib/DBIx/Class/PK.pm @@ -25,7 +25,7 @@ sub _ident_values { return (map { $self->{_column_data}{$_} } $self->primary_columns); } -=head2 discard_changes +=head2 discard_changes ($attrs) Re-selects the row from the database, losing any changes that had been made. @@ -33,14 +33,17 @@ been made. This method can also be used to refresh from storage, retrieving any changes made since the row was last read from storage. +$attrs is expected to be a hashref of attributes suitable for passing as the +second argument to $resultset->search($cond, $attrs); + =cut sub discard_changes { - my ($self) = @_; + my ($self, $attrs) = @_; delete $self->{_dirty_columns}; return unless $self->in_storage; # Don't reload if we aren't real! - if( my $current_storage = $self->get_from_storage) { + if( my $current_storage = $self->get_from_storage($attrs)) { # Set $self to the current. %$self = %$current_storage; diff --git a/lib/DBIx/Class/Row.pm b/lib/DBIx/Class/Row.pm index 798f4fa..bf2c408 100644 --- a/lib/DBIx/Class/Row.pm +++ b/lib/DBIx/Class/Row.pm @@ -799,18 +799,28 @@ sub register_column { $class->mk_group_accessors('column' => $acc); } -=head2 get_from_storage +=head2 get_from_storage ($attrs) Returns a new Row which is whatever the Storage has for the currently created Row object. You can use this to see if the storage has become inconsistent with whatever your Row object is. 
+$attrs is expected to be a hashref of attributes suitable for passing as the +second argument to $resultset->search($cond, $attrs); + =cut sub get_from_storage { my $self = shift @_; + my $attrs = shift @_; my @primary_columns = map { $self->$_ } $self->primary_columns; - return $self->result_source->resultset->search(undef, {execute_reliably=>1})->find(@primary_columns); + my $resultset = $self->result_source->resultset; + + if(defined $attrs) { + $resultset = $resultset->search(undef, $attrs); + } + + return $resultset->find(@primary_columns); } =head2 throw_exception diff --git a/lib/DBIx/Class/Storage/DBI/Replicated.pm b/lib/DBIx/Class/Storage/DBI/Replicated.pm index 0987ee7..d711f84 100644 --- a/lib/DBIx/Class/Storage/DBI/Replicated.pm +++ b/lib/DBIx/Class/Storage/DBI/Replicated.pm @@ -29,12 +29,28 @@ tasks. [$dsn3, $user, $pass, \%opts], ); + ## Now, just use the $schema as normal + $schema->resultset('Source')->search({name=>'etc'}); + + ## You can force a given query to use a particular storage using the search + ### attribute 'force_pool'. For example: + + my $RS = $schema->resultset('Source')->search(undef, {force_pool=>'master'}); + + ## Now $RS will force everything (both reads and writes) to use whatever was + ## setup as the master storage. 'master' is hardcoded to always point to the + ## Master, but you can also use any Replicant name. Please see: + ## L and the replicants attribute for + ## More. Also see transactions and L for alternative ways + ## to force read traffic to the master. + =head1 DESCRIPTION -Warning: This class is marked ALPHA. We are using this in development and have -some basic test coverage but the code hasn't yet been stressed by a variety -of databases. Individual DB's may have quirks we are not aware of. Please -use this in development and pass along your experiences/bug fixes. +Warning: This class is marked BETA. 
This has been running a production
+website using MySQL native replication as its backend and we have some decent
+test coverage but the code hasn't yet been stressed by a variety of databases.
+Individual DB's may have quirks we are not aware of. Please use this first in
+development and pass along your experiences/bug fixes.
+ +Otherwise, you can force a single query to use the master with the 'force_pool' +attribute: + + my $row = $resultset->search(undef, {force_pool=>'master'})->find($pk); + +This attribute will safely be ignore by non replicated storages, so you can use +the same code for both types of systems. + +Lastly, you can use the L method, which works very much like +a transaction. + +For debugging, you can turn replication on/off with the methods L +and L, however this operates at a global level and is not +suitable if you have a shared Schema object being used by multiple processes, +such as on a web application server. You can get around this limitation by +using the Schema clone method. + + my $new_schema = $schema->clone; + $new_schema->set_reliable_storage; + + ## $new_schema will use only the Master storage for all reads/writes while + ## the $schema object will use replicated storage. + =head1 AUTHOR John Napiorkowski diff --git a/lib/DBIx/Class/Storage/DBI/Replicated/Balancer.pm b/lib/DBIx/Class/Storage/DBI/Replicated/Balancer.pm index 186e483..316653a 100644 --- a/lib/DBIx/Class/Storage/DBI/Replicated/Balancer.pm +++ b/lib/DBIx/Class/Storage/DBI/Replicated/Balancer.pm @@ -163,8 +163,9 @@ the load evenly (hopefully) across existing capacity. around 'select' => sub { my ($select, $self, @args) = @_; - if ($args[-1]->{execute_reliably}) { - return $self->master->select(@args); + if (my $forced_pool = $args[-1]->{force_pool}) { + delete $args[-1]->{force_pool}; + return $self->_get_forced_pool($forced_pool)->select(@args); } else { $self->increment_storage; return $self->$select(@args); @@ -182,8 +183,9 @@ the load evenly (hopefully) across existing capacity. 
around 'select_single' => sub { my ($select_single, $self, @args) = @_; - if ($args[-1]->{execute_reliably}) { - return $self->master->select_single(@args); + if (my $forced_pool = $args[-1]->{force_pool}) { + delete $args[-1]->{force_pool}; + return $self->_get_forced_pool($forced_pool)->select_single(@args); } else { $self->increment_storage; return $self->$select_single(@args); @@ -203,6 +205,25 @@ before 'columns_info_for' => sub { $self->increment_storage; }; +=head2 _get_forced_pool ($name) + +Given an identifier, find the most correct storage object to handle the query. + +=cut + +sub _get_forced_pool { + my ($self, $forced_pool) = @_; + if(blessed $forced_pool) { + return $forced_pool; + } elsif($forced_pool eq 'master') { + return $self->master; + } elsif(my $replicant = $self->pool->replicants($forced_pool)) { + return $replicant; + } else { + $self->master->throw_exception("$forced_pool is not a named replicant."); + } +} + =head1 AUTHOR John Napiorkowski diff --git a/t/93storage_replication.t b/t/93storage_replication.t index 3de346a..46c2295 100644 --- a/t/93storage_replication.t +++ b/t/93storage_replication.t @@ -569,18 +569,18 @@ ok $replicated->schema->resultset('Artist')->find(1) => 'Got expected single result from transaction'; } -## Test the reliable_storage resultset attribute. +## Test the force_pool resultset attribute. { ok my $artist_rs = $replicated->schema->resultset('Artist') => 'got artist resultset'; - ## Turn on Reliable Storage - ok my $reliable_artist_rs = $artist_rs->search(undef, {execute_reliably=>1}) - => 'Created a resultset using reliable storage'; + ## Turn on Forced Pool Storage + ok my $reliable_artist_rs = $artist_rs->search(undef, {force_pool=>'master'}) + => 'Created a resultset using force_pool storage'; ok my $artist = $reliable_artist_rs->find(2) - => 'got an artist result via reliable storage'; + => 'got an artist result via force_pool storage'; } ## Delete the old database files