diff --git a/lib/DBIx/Class/Storage/DBI/Replicated.pm b/lib/DBIx/Class/Storage/DBI/Replicated.pm
index d736c41..0dc50d7 100644
--- a/lib/DBIx/Class/Storage/DBI/Replicated.pm
+++ b/lib/DBIx/Class/Storage/DBI/Replicated.pm
@@ -1,274 +1,839 @@
 package DBIx::Class::Storage::DBI::Replicated;
 
-use strict;
-use warnings;
+BEGIN {
+  use Carp::Clan qw/^DBIx::Class/;
+
+  ## Modules required for Replication support are not required for general
+  ## DBIC use, so we explicitly test for them.
+
+  my %replication_required = (
+    Moose => '0.77',
+    MooseX::AttributeHelpers => '0.12',
+    MooseX::Types => '0.10',
+    namespace::clean => '0.11',
+    Hash::Merge => '0.11'
+  );
+
+  my @didnt_load;
+
+  for my $module (keys %replication_required) {
+    eval "use $module $replication_required{$module}";
+    push @didnt_load, "$module $replication_required{$module}"
+      if $@;
+  }
+
+  croak("@{[ join ', ', @didnt_load ]} are missing and are required for Replication")
+    if @didnt_load;
+}
 
+use Moose;
 use DBIx::Class::Storage::DBI;
-use DBD::Multi;
+use DBIx::Class::Storage::DBI::Replicated::Pool;
+use DBIx::Class::Storage::DBI::Replicated::Balancer;
+use DBIx::Class::Storage::DBI::Replicated::Types 'BalancerClassNamePart';
+use MooseX::Types::Moose qw/ClassName HashRef Object/;
+use Scalar::Util qw/reftype blessed/;
+use Carp::Clan qw/^DBIx::Class/;
+use Hash::Merge 'merge';
 
-use base qw/Class::Accessor::Fast/;
-
-__PACKAGE__->mk_accessors( qw/read_source write_source/ );
+use namespace::clean -except => 'meta';
 
 =head1 NAME
 
-DBIx::Class::Storage::DBI::Replicated - ALPHA Replicated database support
+DBIx::Class::Storage::DBI::Replicated - BETA Replicated database support
 
 =head1 SYNOPSIS
 
 The following example shows how to change an existing $schema to a replicated
-storage type and update it's connection information to contain a master DSN and
-an array of slaves.
-
-  ## Change storage_type in your schema class
-  $schema->storage_type( '::DBI::Replicated' );
-
-  ## Set your connection.
-  $schema->connect(
-    $dsn, $user, $password, {
-      AutoCommit => 1,
-      ## Other standard DBI connection or DBD custom attributes added as
-      ## usual.  Additionally, we have two custom attributes for defining
-      ## slave information and controlling how the underlying DBD::Multi
-      slaves_connect_info => [
-        ## Define each slave like a 'normal' DBI connection, but you add
-        ## in a DBD::Multi custom attribute to define how the slave is
-        ## prioritized.  Please see DBD::Multi for more.
-        [$slave1dsn, $user, $password, {%slave1opts, priority=>10}],
-        [$slave2dsn, $user, $password, {%slave2opts, priority=>10}],
-        [$slave3dsn, $user, $password, {%slave3opts, priority=>20}],
-        ## add in a preexisting database handle
-        [$dbh, '','', {priority=>30}],
-        ## DBD::Multi will call this coderef for connects
-        [sub { DBI->connect(< DSN info >) }, '', '', {priority=>40}],
-        ## If the last item is hashref, we use that for DBD::Multi's
-        ## configuration information.  Again, see DBD::Multi for more.
-        {timeout=>25, failed_max=>2},
-      ],
-    },
-  );
-
-  ## Now, just use the schema as normal
-  $schema->resultset('Table')->find(< unique >); ## Reads will use slaves
-  $schema->resultset('Table')->create(\%info); ## Writes will use master
+storage type, add some replicated (read-only) databases, and perform reporting
+tasks.
 
+  ## Change storage_type in your schema class
+  $schema->storage_type( ['::DBI::Replicated', {balancer=>'::Random'}] );
+
+  ## Add some slaves.  Basically this is an array of arrayrefs, where each
+  ## arrayref is database connect information.
+
+  $schema->storage->connect_replicants(
+    [$dsn1, $user, $pass, \%opts],
+    [$dsn2, $user, $pass, \%opts],
+    [$dsn3, $user, $pass, \%opts],
+  );
+
+  ## Now, just use the $schema as normal
+  $schema->resultset('Source')->search({name=>'etc'});
+
+  ## You can force a given query to use a particular storage using the search
+  ## attribute 'force_pool'.  For example:
+
+  my $RS = $schema->resultset('Source')->search(undef, {force_pool=>'master'});
+
+  ## Now $RS will force everything (both reads and writes) to use whatever was
+  ## setup as the master storage.  'master' is hardcoded to always point to the
+  ## Master, but you can also use any Replicant name.  Please see
+  ## L<DBIx::Class::Storage::DBI::Replicated::Pool> and the replicants attribute
+  ## for more.  Also see transactions and L</execute_reliably> for alternative
+  ## ways to force read traffic to the master.
+
 =head1 DESCRIPTION
 
-Warning: This class is marked ALPHA.  We are using this in development and have
-some basic test coverage but the code hasn't yet been stressed by a variety
-of databases.  Individual DB's may have quirks we are not aware of.  Please
-use this in development and pass along your experiences/bug fixes.
+Warning: This class is marked BETA.  It has been running a production website
+using MySQL native replication as its backend and we have some decent test
+coverage, but the code hasn't yet been stressed by a variety of databases.
+Individual DBs may have quirks we are not aware of.  Please use this in
+development first and pass along your experiences/bug fixes.
 
 This class implements a replicated data store for DBI. Currently you can define
 one master and numerous slave database connections. All write-type queries
 (INSERT, UPDATE, DELETE and even LAST_INSERT_ID) are routed to the master
 database, all read-type queries (SELECTs) go to the slave databases.
 
-For every slave database you can define a priority value, which controls data
-source usage pattern. It uses L<DBD::Multi>, so first the lower priority data
-sources used (if they have the same priority, the are used randomized), than
-if all low priority data sources fail, higher ones tried in order.
+Basically, any method request that L<DBIx::Class::Storage::DBI> would normally
+handle gets delegated to one of the two attributes: L</read_handler> or
+L</write_handler>.  Additionally, some methods need to be distributed to all
+existing storages.  This way our storage class is a drop-in replacement for
+L<DBIx::Class::Storage::DBI>.
+
+Read traffic is spread across the replicants (slaves) according to a user
+selected algorithm.  The default algorithm is random weighted.
+
+=head1 NOTES
 
-=head1 CONFIGURATION
+The consistency between master and replicants is database specific.  The Pool
+gives you a method to validate its replicants, removing and replacing them
+when they fail/pass predefined criteria.  Please make careful use of the ways
+to force a query to run against the master when needed.
 
-Please see L<DBIx::Class::Storage::DBI> for most configuration information.
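+
+For example (a sketch; the interval and resultset here are illustrative, using
+the C<balancer_type>/C<balancer_args> attributes documented below), you can
+have the balancer revalidate the pool's replicants on a timer, and pin any
+read that must be current to the master with C<force_pool>:
+
+  ## Replicants failing validation are removed from rotation until they pass.
+  $schema->storage_type( ['::DBI::Replicated', {
+    balancer_type=>'::Random',
+    balancer_args=>{ auto_validate_every=>30 },  ## seconds between validations
+  }] );
+
+  ## A read that must see the very latest writes goes to the master.
+  my $row = $schema->resultset('Source')
+    ->search(undef, {force_pool=>'master'})
+    ->find($id);
+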
+=head1 REQUIREMENTS
+
+Replicated Storage has additional requirements not currently part of
+L<DBIx::Class>:
+
+  Moose => 0.77
+  MooseX::AttributeHelpers => 0.12
+  MooseX::Types => 0.10
+  namespace::clean => 0.11
+  Hash::Merge => 0.11
+
+You will need to install these modules manually via CPAN or make them part of
+the Makefile for your distribution.
+
+=head1 ATTRIBUTES
+
+This class defines the following attributes.
+
+=head2 schema
+
+The underlying L<DBIx::Class::Schema> object this storage is attaching to.
 
 =cut
 
-sub new {
-  my $proto = shift;
-  my $class = ref( $proto ) || $proto;
-  my $self = {};
+has 'schema' => (
+  is=>'rw',
+  isa=>'DBIx::Class::Schema',
+  weak_ref=>1,
+  required=>1,
+);
 
-  bless( $self, $class );
+=head2 pool_type
 
-  $self->write_source( DBIx::Class::Storage::DBI->new );
-  $self->read_source( DBIx::Class::Storage::DBI->new );
+Contains the classname which will instantiate the L</pool> object.  Defaults
+to: L<DBIx::Class::Storage::DBI::Replicated::Pool>.
 
-  return $self;
-}
+=cut
 
-sub all_sources {
-  my $self = shift;
+has 'pool_type' => (
+  is=>'rw',
+  isa=>ClassName,
+  default=>'DBIx::Class::Storage::DBI::Replicated::Pool',
+  handles=>{
+    'create_pool' => 'new',
+  },
+);
 
-  my @sources = ($self->read_source, $self->write_source);
+=head2 pool_args
 
-  return wantarray ? @sources : \@sources;
-}
+Contains a hashref of initialization information to pass to the Pool object.
+See L<DBIx::Class::Storage::DBI::Replicated::Pool> for available arguments.
 
-sub _connect_info {
-  my $self = shift;
-  my $master = $self->write_source->_connect_info;
-  $master->[-1]->{slave_connect_info} = $self->read_source->_connect_info;
-  return $master;
-}
+=cut
+
+has 'pool_args' => (
+  is=>'rw',
+  isa=>HashRef,
+  lazy=>1,
+  default=>sub { {} },
+);
+
+=head2 balancer_type
+
+The replication pool requires a balancer class to provide the methods for
+choosing how to spread the query load across each replicant in the pool.
+
+=cut
+
+has 'balancer_type' => (
+  is=>'rw',
+  isa=>BalancerClassNamePart,
+  coerce=>1,
+  required=>1,
+  default=>'DBIx::Class::Storage::DBI::Replicated::Balancer::First',
+  handles=>{
+    'create_balancer' => 'new',
+  },
+);
+
+=head2 balancer_args
+
+Contains a hashref of initialization information to pass to the Balancer
+object.  See L<DBIx::Class::Storage::DBI::Replicated::Balancer> for available
+arguments.
+
+=cut
+
+has 'balancer_args' => (
+  is=>'rw',
+  isa=>HashRef,
+  lazy=>1,
+  required=>1,
+  default=>sub { {} },
+);
+
+=head2 pool
+
+Is a L<DBIx::Class::Storage::DBI::Replicated::Pool> or derived class.  This
+is a container class for one or more replicated databases.
+
+=cut
+
+has 'pool' => (
+  is=>'rw',
+  isa=>'DBIx::Class::Storage::DBI::Replicated::Pool',
+  lazy_build=>1,
+  handles=>[qw/
+    connect_replicants
+    replicants
+    has_replicants
+  /],
+);
+
+=head2 balancer
+
+Is a L<DBIx::Class::Storage::DBI::Replicated::Balancer> or derived class.
+This is a class that takes a pool (L</pool>) and decides which replicant
+services each read.
+
+=cut
+
+has 'balancer' => (
+  is=>'rw',
+  isa=>'DBIx::Class::Storage::DBI::Replicated::Balancer',
+  lazy_build=>1,
+  handles=>[qw/auto_validate_every/],
+);
+
+=head2 master
+
+The master defines the canonical state for a pool of connected databases.  All
+the replicants are expected to match this database's state.  Thus, in a
+classic Master / Slaves distributed system, all the slaves are expected to
+replicate the Master's state as quickly as possible.  This is the only
+database in the pool of databases that is allowed to handle write traffic.
+
+=cut
+
+has 'master' => (
+  is=>'ro',
+  isa=>'DBIx::Class::Storage::DBI',
+  lazy_build=>1,
+);
+
+=head1 ATTRIBUTES IMPLEMENTING THE DBIx::Class::Storage::DBI INTERFACE
+
+The following attributes are delegated all the methods required for the
+L<DBIx::Class::Storage::DBI> interface.
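+
+For example (an illustrative sketch; the resultset name is a placeholder),
+the delegations below are what route these two calls to different storages:
+
+  $schema->resultset('Source')->find(1);              ## select: read_handler
+  $schema->resultset('Source')->create({name=>'x'});  ## insert: write_handler
+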
+=head2 read_handler
+
+Defines an object that implements the read side of
+L<DBIx::Class::Storage::DBI>.
+
+=cut
+
+has 'read_handler' => (
+  is=>'rw',
+  isa=>Object,
+  lazy_build=>1,
+  handles=>[qw/
+    select
+    select_single
+    columns_info_for
+  /],
+);
+
+=head2 write_handler
 
-sub connect_info {
-  my ($self, $source_info) = @_;
+Defines an object that implements the write side of
+L<DBIx::Class::Storage::DBI>.
 
-  ## if there is no $source_info, treat this sub like an accessor
-  return $self->_connect_info
-    if !$source_info;
+=cut
+
+has 'write_handler' => (
+  is=>'ro',
+  isa=>Object,
+  lazy_build=>1,
+  handles=>[qw/
+    on_connect_do
+    on_disconnect_do
+    connect_info
+    throw_exception
+    sql_maker
+    sqlt_type
+    create_ddl_dir
+    deployment_statements
+    datetime_parser
+    datetime_parser_type
+    last_insert_id
+    insert
+    insert_bulk
+    update
+    delete
+    dbh
+    txn_begin
+    txn_do
+    txn_commit
+    txn_rollback
+    txn_scope_guard
+    sth
+    deploy
+    with_deferred_fk_checks
+
+    reload_row
+    _prep_for_execute
 
-  ## Alright, let's conect the master
-  $self->write_source->connect_info($source_info);
+  /],
+);
+
+has _master_connect_info_opts =>
+  (is => 'rw', isa => HashRef, default => sub { {} });
+
+=head2 around: connect_info
+
+Preserve the master's C<connect_info> options (for merging with replicants.)
+Also set any Replicated-related options from connect_info, such as
+C<pool_type>, C<pool_args>, C<balancer_type> and C<balancer_args>.
+
+=cut
+
+around connect_info => sub {
+  my ($next, $self, $info, @extra) = @_;
+
+  my %opts;
+  for my $arg (@$info) {
+    next unless (reftype($arg)||'') eq 'HASH';
+    %opts = %{ merge($arg, \%opts) };
+  }
+  delete $opts{dsn};
+
+  if ($opts{pool_type} || $opts{pool_args}) {
+    $self->pool_type(delete $opts{pool_type})
+      if $opts{pool_type};
+
+    $self->pool_args(
+      merge((delete $opts{pool_args} || {}), $self->pool_args)
+    );
+
+    $self->pool($self->_build_pool)
+      if $self->has_pool;
+  }
+
+  if ($opts{balancer_type} || $opts{balancer_args}) {
+    $self->balancer_type(delete $opts{balancer_type})
+      if $opts{balancer_type};
+
+    $self->balancer_args(
+      merge((delete $opts{balancer_args} || {}), $self->balancer_args)
+    );
+
+    $self->balancer($self->_build_balancer)
+      if $self->has_balancer;
+  }
+
+  $self->_master_connect_info_opts(\%opts);
+
+  $self->$next($info, @extra);
+};
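+
+For example (a sketch; the DSN is a placeholder and C<maximum_lag> is assumed
+to be an attribute of the default Pool class), replication options can ride
+along in the ordinary connect_info attribute hashref:
+
+  $schema->connect($dsn, $user, $pass, {
+    balancer_type=>'::Random',
+    balancer_args=>{ auto_validate_every=>30 },
+    pool_args=>{ maximum_lag=>2 },  ## seconds of replication lag tolerated
+  });
+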
+=head1 METHODS
+
+This class defines the following methods.
+
+=head2 BUILDARGS
+
+L<DBIx::Class::Schema> when instantiating its storage passes itself as the
+first argument.  So we need to massage the arguments a bit so that all the
+bits get put into the correct places.
+
+=cut
+
+sub BUILDARGS {
+  my ($class, $schema, $storage_type_args, @args) = @_;
 
-  ## Now, build and then connect the Slaves
-  my @slaves_connect_info = @{$source_info->[-1]->{slaves_connect_info}};
-  my $dbd_multi_config = ref $slaves_connect_info[-1] eq 'HASH'
-    ? pop @slaves_connect_info : {};
-
-  ## We need to do this since SQL::Abstract::Limit can't guess what DBD::Multi is
-  $dbd_multi_config->{limit_dialect} = $self->write_source->sql_maker->limit_dialect
-    unless defined $dbd_multi_config->{limit_dialect};
-
-  @slaves_connect_info = map {
-    ## if the first element in the arrayhash is a ref, make that the value
-    my $db = ref $_->[0] ? $_->[0] : $_;
-    my $priority = $_->[-1]->{priority} || 10; ## default priority is 10
-    $priority => $db;
-  } @slaves_connect_info;
-
-  $self->read_source->connect_info([
-    'dbi:Multi:', undef, undef, {
-      dsns => [@slaves_connect_info],
-      %$dbd_multi_config,
-    },
-  ]);
-
-  ## Return the formated connection information
-  return $self->_connect_info;
+  return {
+    schema=>$schema,
+    %$storage_type_args,
+    @args
+  };
 }
 
-sub select {
-  shift->read_source->select( @_ );
-}
-sub select_single {
-  shift->read_source->select_single( @_ );
-}
-sub throw_exception {
-  shift->read_source->throw_exception( @_ );
-}
-sub sql_maker {
-  shift->read_source->sql_maker( @_ );
-}
-sub columns_info_for {
-  shift->read_source->columns_info_for( @_ );
-}
-sub sqlt_type {
-  shift->read_source->sqlt_type( @_ );
-}
-sub create_ddl_dir {
-  shift->read_source->create_ddl_dir( @_ );
-}
-sub deployment_statements {
-  shift->read_source->deployment_statements( @_ );
-}
-sub datetime_parser {
-  shift->read_source->datetime_parser( @_ );
-}
-sub datetime_parser_type {
-  shift->read_source->datetime_parser_type( @_ );
-}
-sub build_datetime_parser {
-  shift->read_source->build_datetime_parser( @_ );
-}
+=head2 _build_master
 
-sub limit_dialect { $_->limit_dialect( @_ ) for( shift->all_sources ) }
-sub quote_char { $_->quote_char( @_ ) for( shift->all_sources ) }
-sub name_sep { $_->quote_char( @_ ) for( shift->all_sources ) }
-sub disconnect { $_->disconnect( @_ ) for( shift->all_sources ) }
-sub set_schema { $_->set_schema( @_ ) for( shift->all_sources ) }
+Lazy builder for the L</master> attribute.
 
-sub DESTROY {
-  my $self = shift;
+=cut
 
-  undef $self->{write_source};
-  undef $self->{read_sources};
+sub _build_master {
+  my $self = shift @_;
+  my $master = DBIx::Class::Storage::DBI->new($self->schema);
+  DBIx::Class::Storage::DBI::Replicated::WithDSN->meta->apply($master);
+  $master;
 }
 
-sub last_insert_id {
-  shift->write_source->last_insert_id( @_ );
-}
-sub insert {
-  shift->write_source->insert( @_ );
+=head2 _build_pool
+
+Lazy builder for the L</pool> attribute.
+
+=cut
+
+sub _build_pool {
+  my $self = shift @_;
+  $self->create_pool(%{$self->pool_args});
 }
 
-sub update {
-  shift->write_source->update( @_ );
+=head2 _build_balancer
+
+Lazy builder for the L</balancer> attribute.  This takes a Pool object so that
+the balancer knows which pool it's balancing.
+
+=cut
+
+sub _build_balancer {
+  my $self = shift @_;
+  $self->create_balancer(
+    pool=>$self->pool,
+    master=>$self->master,
+    %{$self->balancer_args},
+  );
 }
 
-sub update_all {
-  shift->write_source->update_all( @_ );
+=head2 _build_write_handler
+
+Lazy builder for the L</write_handler> attribute.  The default is to set this
+to the L</master>.
+
+=cut
+
+sub _build_write_handler {
+  return shift->master;
 }
 
-sub delete {
-  shift->write_source->delete( @_ );
+=head2 _build_read_handler
+
+Lazy builder for the L</read_handler> attribute.  The default is to set this
+to the L</balancer>.
+
+=cut
+
+sub _build_read_handler {
+  return shift->balancer;
 }
 
-sub delete_all {
-  shift->write_source->delete_all( @_ );
+=head2 around: connect_replicants
+
+All calls to connect_replicants need to have an existing $schema tacked onto
+the top of the args, since L<DBIx::Class::Storage::DBI> needs it, and any
+C<connect_info> options merged with the master, with replicant opts having
+higher priority.
+
+=cut
+
+around connect_replicants => sub {
+  my ($next, $self, @args) = @_;
+
+  for my $r (@args) {
+    $r = [ $r ] unless (reftype($r)||'') eq 'ARRAY';
+
+    croak "coderef replicant connect_info not supported"
+      if ref $r->[0] && reftype $r->[0] eq 'CODE';
+
+    ## any connect_info options?
+    my $i = 0;
+    $i++ while $i < @$r && (reftype($r->[$i])||'') ne 'HASH';
+
+    ## make one if none
+    $r->[$i] = {} unless $r->[$i];
+
+    ## merge if two hashes
+    my @hashes = @$r[$i .. $#{$r}];
+
+    croak "invalid connect_info options"
+      if (grep { (reftype($_)||'') eq 'HASH' } @hashes) != @hashes;
+
+    croak "too many hashrefs in connect_info"
+      if @hashes > 2;
+
+    my %opts = %{ merge(reverse @hashes) };
+
+    ## delete them
+    splice @$r, $i+1, ($#{$r} - $i), ();
+
+    ## merge with master
+    %opts = %{ merge(\%opts, $self->_master_connect_info_opts) };
+
+    ## update
+    $r->[$i] = \%opts;
+  }
+
+  $self->$next($self->schema, @args);
+};
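+
+For example (a sketch; the DSNs and C<quote_char> values are placeholders),
+options given to the master via connect_info are inherited by each replicant
+unless the replicant overrides them:
+
+  $schema->storage->connect_info([$dsn, $user, $pass, {quote_char=>'`'}]);
+
+  $schema->storage->connect_replicants(
+    [$dsn1, $user, $pass],                    ## inherits quote_char => '`'
+    [$dsn2, $user, $pass, {quote_char=>'"'}], ## replicant opts win on conflict
+  );
+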
+=head2 all_storages
+
+Returns an array of all the connected storage backends.  The first element
+in the returned array is the master, and the rest are each of the
+replicants.
+
+=cut
+
+sub all_storages {
+  my $self = shift @_;
+  return grep {defined $_ && blessed $_} (
+    $self->master,
+    values %{ $self->replicants },
+  );
 }
-sub create {
-  shift->write_source->create( @_ );
+
+=head2 execute_reliably ($coderef, ?@args)
+
+Given a coderef, saves the current state of the L</read_handler>, forces it to
+use reliable storage (i.e. sets it to the master), executes the coderef and
+then restores the original state.
+
+Example:
+
+  my $reliably = sub {
+    my $name = shift @_;
+    $schema->resultset('User')->create({name=>$name});
+    my $user_rs = $schema->resultset('User')->find({name=>$name});
+    return $user_rs;
+  };
+
+  my $user_rs = $schema->storage->execute_reliably($reliably, 'John');
+
+Use this when you must be certain of your database state, such as when you
+just inserted something and need to get a resultset including it, etc.
+
+=cut
+
+sub execute_reliably {
+  my ($self, $coderef, @args) = @_;
+
+  unless( ref $coderef eq 'CODE') {
+    $self->throw_exception('First argument must be a coderef');
+  }
+
+  ## Get a copy of the master storage
+  my $master = $self->master;
+
+  ## Get whatever the current read handler is
+  my $current = $self->read_handler;
+
+  ## Set the read handler to master
+  $self->read_handler($master);
+
+  ## do whatever the caller needs
+  my @result;
+  my $want_array = wantarray;
+
+  eval {
+    if($want_array) {
+      @result = $coderef->(@args);
+    } elsif(defined $want_array) {
+      ($result[0]) = ($coderef->(@args));
+    } else {
+      $coderef->(@args);
+    }
+  };
+
+  ## Reset to the original state
+  $self->read_handler($current);
+
+  ## Exception testing has to come last, otherwise you might leave the
+  ## read_handler set to master.
+
+  if($@) {
+    $self->throw_exception("coderef returned an error: $@");
+  } else {
+    return $want_array ? @result : $result[0];
+  }
 }
-sub find_or_create {
-  shift->write_source->find_or_create( @_ );
+
+=head2 set_reliable_storage
+
+Sets the current $schema to be 'reliable', that is, all queries (both read
+and write) are sent to the master.
+
+=cut
+
+sub set_reliable_storage {
+  my $self = shift @_;
+  my $schema = $self->schema;
+  my $write_handler = $self->schema->storage->write_handler;
+
+  $schema->storage->read_handler($write_handler);
 }
-sub update_or_create {
-  shift->write_source->update_or_create( @_ );
+
+=head2 set_balanced_storage
+
+Sets the current $schema to use the L</balancer> for all reads, while all
+writes are sent to the master only.
+
+=cut
+
+sub set_balanced_storage {
+  my $self = shift @_;
+  my $schema = $self->schema;
+  my $balanced_handler = $self->schema->storage->balancer;
+
+  $schema->storage->read_handler($balanced_handler);
 }
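+
+A typical toggle looks like this (illustrative; the resultset and C<$id> are
+placeholders):
+
+  $schema->storage->set_reliable_storage;  ## reads now hit the master
+  my $fresh = $schema->resultset('Source')->find($id);
+  $schema->storage->set_balanced_storage;  ## reads return to the replicants
+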
+
+=head2 around: txn_do ($coderef)
+
+Overload to the txn_do method, which is delegated to whatever the
+L</write_handler> is set to.  We overload this in order to wrap it inside a
+L</execute_reliably> call.
+
+=cut
+
+around 'txn_do' => sub {
+  my($txn_do, $self, $coderef, @args) = @_;
+  $self->execute_reliably(sub {$self->$txn_do($coderef, @args)});
+};
+
+=head2 connected
+
+Check that the master and at least one of the replicants is connected.
+
+=cut
+
 sub connected {
-  shift->write_source->connected( @_ );
+  my $self = shift @_;
+  return
+    $self->master->connected &&
+    $self->pool->connected_replicants;
 }
+
+=head2 ensure_connected
+
+Make sure all the storages are connected.
+
+=cut
+
 sub ensure_connected {
-  shift->write_source->ensure_connected( @_ );
+  my $self = shift @_;
+  foreach my $source ($self->all_storages) {
+    $source->ensure_connected(@_);
+  }
 }
-sub dbh {
-  shift->write_source->dbh( @_ );
+
+=head2 limit_dialect
+
+Set the limit_dialect for all existing storages.
+
+=cut
+
+sub limit_dialect {
+  my $self = shift @_;
+  foreach my $source ($self->all_storages) {
+    $source->limit_dialect(@_);
+  }
+  return $self->master->limit_dialect;
 }
-sub txn_do {
-  shift->write_source->txn_do( @_ );
+
+=head2 quote_char
+
+Set the quote_char for all existing storages.
+
+=cut
+
+sub quote_char {
+  my $self = shift @_;
+  foreach my $source ($self->all_storages) {
+    $source->quote_char(@_);
+  }
+  return $self->master->quote_char;
 }
-sub txn_commit {
-  shift->write_source->txn_commit( @_ );
+
+=head2 name_sep
+
+Set the name_sep for all existing storages.
+
+=cut
+
+sub name_sep {
+  my $self = shift @_;
+  foreach my $source ($self->all_storages) {
+    $source->name_sep(@_);
+  }
+  return $self->master->name_sep;
 }
-sub txn_rollback {
-  shift->write_source->txn_rollback( @_ );
+
+=head2 set_schema
+
+Set the schema object for all existing storages.
+
+=cut
+
+sub set_schema {
+  my $self = shift @_;
+  foreach my $source ($self->all_storages) {
+    $source->set_schema(@_);
+  }
 }
-sub sth {
-  shift->write_source->sth( @_ );
+
+=head2 debug
+
+Set a debug flag across all storages.
+
+=cut
+
+sub debug {
+  my $self = shift @_;
+  if(@_) {
+    foreach my $source ($self->all_storages) {
+      $source->debug(@_);
+    }
+  }
+  return $self->master->debug;
 }
-sub deploy {
-  shift->write_source->deploy( @_ );
+
+=head2 debugobj
+
+Set a debug object across all storages.
+
+=cut
+
+sub debugobj {
+  my $self = shift @_;
+  if(@_) {
+    foreach my $source ($self->all_storages) {
+      $source->debugobj(@_);
+    }
+  }
+  return $self->master->debugobj;
 }
-sub _prep_for_execute {
-  shift->write_source->_prep_for_execute(@_);
+
+=head2 debugfh
+
+Set a debugfh object across all storages.
+
+=cut
+
+sub debugfh {
+  my $self = shift @_;
+  if(@_) {
+    foreach my $source ($self->all_storages) {
+      $source->debugfh(@_);
+    }
+  }
+  return $self->master->debugfh;
 }
-sub debugobj {
-  shift->write_source->debugobj(@_);
+=head2 debugcb
+
+Set a debug callback across all storages.
+
+=cut
+
+sub debugcb {
+  my $self = shift @_;
+  if(@_) {
+    foreach my $source ($self->all_storages) {
+      $source->debugcb(@_);
+    }
+  }
+  return $self->master->debugcb;
 }
-sub debug {
-  shift->write_source->debug(@_);
+
+=head2 disconnect
+
+Disconnect everything.
+
+=cut
+
+sub disconnect {
+  my $self = shift @_;
+  foreach my $source ($self->all_storages) {
+    $source->disconnect(@_);
+  }
 }
-sub debugfh { shift->_not_supported( 'debugfh' ) };
-sub debugcb { shift->_not_supported( 'debugcb' ) };
+=head2 cursor_class
+
+Set the cursor class on all storages, or return the master's.
-sub _not_supported {
-  my( $self, $method ) = @_;
+
+=cut
+
+sub cursor_class {
+  my ($self, $cursor_class) = @_;
-  die "This Storage does not support $method method.";
+
+  if ($cursor_class) {
+    $_->cursor_class($cursor_class) for $self->all_storages;
+  }
+  $self->master->cursor_class;
 }
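+
+For example (an illustrative sketch; C<$log_fh> is a filehandle you opened
+yourself), you can flip SQL tracing on for the whole cluster at once:
+
+  $schema->storage->debug(1);          ## trace SQL on master and replicants
+  $schema->storage->debugfh($log_fh);  ## send all trace output to one place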
method."; + if ($cursor_class) { + $_->cursor_class($cursor_class) for $self->all_storages; + } + $self->master->cursor_class; } + +=head1 GOTCHAS + +Due to the fact that replicants can lag behind a master, you must take care to +make sure you use one of the methods to force read queries to a master should +you need realtime data integrity. For example, if you insert a row, and then +immediately re-read it from the database (say, by doing $row->discard_changes) +or you insert a row and then immediately build a query that expects that row +to be an item, you should force the master to handle reads. Otherwise, due to +the lag, there is no certainty your data will be in the expected state. -=head1 SEE ALSO +For data integrity, all transactions automatically use the master storage for +all read and write queries. Using a transaction is the preferred and recommended +method to force the master to handle all read queries. -L, L, L +Otherwise, you can force a single query to use the master with the 'force_pool' +attribute: + + my $row = $resultset->search(undef, {force_pool=>'master'})->find($pk); + +This attribute will safely be ignore by non replicated storages, so you can use +the same code for both types of systems. + +Lastly, you can use the L method, which works very much like +a transaction. + +For debugging, you can turn replication on/off with the methods L +and L, however this operates at a global level and is not +suitable if you have a shared Schema object being used by multiple processes, +such as on a web application server. You can get around this limitation by +using the Schema clone method. + + my $new_schema = $schema->clone; + $new_schema->set_reliable_storage; + + ## $new_schema will use only the Master storage for all reads/writes while + ## the $schema object will use replicated storage. =head1 AUTHOR -Norbert Csongrádi + John Napiorkowski -Peter Siklósi +Based on code originated by: -John Napiorkowski + Norbert Csongrádi + Peter Siklósi =head1 LICENSE @@ -276,4 +841,6 @@ You may distribute this code under the same terms as Perl itself. =cut +__PACKAGE__->meta->make_immutable; + 1;