X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=blobdiff_plain;f=lib%2FDBIx%2FClass%2FStorage%2FDBI%2FReplicated.pm;h=1024d47c8bfbdfaa97bb74c93565e4189986718b;hb=ed7ab0f4ce1a9118ea6285ee562ef003085a6b64;hp=d64e9dbdc00b2a759dc13db809a2b8dee0c917fa;hpb=c13002976e32b818eabc3a8eaf6fa2e23ebed7e9;p=dbsrgits%2FDBIx-Class.git

diff --git a/lib/DBIx/Class/Storage/DBI/Replicated.pm b/lib/DBIx/Class/Storage/DBI/Replicated.pm
index d64e9db..1024d47 100644
--- a/lib/DBIx/Class/Storage/DBI/Replicated.pm
+++ b/lib/DBIx/Class/Storage/DBI/Replicated.pm
@@ -14,8 +14,9 @@ use DBIx::Class::Storage::DBI::Replicated::Balancer;
 use DBIx::Class::Storage::DBI::Replicated::Types qw/BalancerClassNamePart DBICSchema DBICStorageDBI/;
 use MooseX::Types::Moose qw/ClassName HashRef Object/;
 use Scalar::Util 'reftype';
-use Hash::Merge 'merge';
-use List::Util qw/min max/;
+use Hash::Merge;
+use List::Util qw/min max reduce/;
+use Try::Tiny;
 
 use namespace::clean -except => 'meta';
 
@@ -26,7 +27,7 @@ DBIx::Class::Storage::DBI::Replicated - BETA Replicated database support
 =head1 SYNOPSIS
 
 The Following example shows how to change an existing $schema to a replicated
-storage type, add some replicated (readonly) databases, and perform reporting
+storage type, add some replicated (read-only) databases, and perform reporting
 tasks.
 
 You should set the 'storage_type attribute to a replicated type. You should
@@ -75,7 +76,7 @@ walkthroughs.
 Warning: This class is marked BETA. This has been running a production website
 using MySQL native replication as its backend and we have some decent test
 coverage but the code hasn't yet been stressed by a variety of databases.
-Individual DB's may have quirks we are not aware of. Please use this in first
+Individual DBs may have quirks we are not aware of. Please use this in first
 development and pass along your experiences/bug fixes.
 
 This class implements replicated data store for DBI. Currently you can define
@@ -89,12 +90,12 @@ L.
 Additionally, some methods need to be distributed
 to all existing storages. This way our storage class is a drop in replacement
 for L.
 
-Read traffic is spread across the replicants (slaves) occuring to a user
+Read traffic is spread across the replicants (slaves) occurring to a user
 selected algorithm. The default algorithm is random weighted.
 
 =head1 NOTES
 
-The consistency betweeen master and replicants is database specific. The Pool
+The consistency between master and replicants is database specific. The Pool
 gives you a method to validate its replicants, removing and replacing them
 when they fail/pass predefined criteria. Please make careful use of the ways
 to force a query to run against Master when needed.
 
@@ -306,8 +307,8 @@ has 'write_handler' => (
     backup
     is_datatype_numeric
+    _supports_insert_returning
     _count_select
-    _subq_count_select
     _subq_update_delete
     svp_rollback
     svp_begin
@@ -342,7 +343,6 @@ has 'write_handler' => (
     _dbh_commit
     _execute_array
     _placeholders_supported
-    _verify_pid
     savepoints
     _sqlt_minimum_version
     _sql_maker_opts
@@ -365,16 +365,31 @@ has 'write_handler' => (
     _do_query
     _dbh_sth
     _dbh_execute
+    _prefetch_insert_auto_nextvals
+    _server_info_hash
   /],
 );
 
+my @unimplemented = qw(
+  _arm_global_destructor
+  _preserve_foreign_dbh
+  _verify_pid
+  _verify_tid
+);
+
+for my $method (@unimplemented) {
+  __PACKAGE__->meta->add_method($method, sub {
+    croak "$method must not be called on ".(blessed shift).' objects';
+  });
+}
+
 has _master_connect_info_opts => (is => 'rw', isa => HashRef, default => sub { {} });
 
 =head2 around: connect_info
 
-Preserve master's C options (for merging with replicants.)
-Also set any Replicated related options from connect_info, such as
+Preserves master's C options (for merging with replicants.)
+Also sets any Replicated-related options from connect_info, such as
 C, C, C and C.
 
 =cut
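For context, here is a minimal sketch of how the options documented above might be passed in from application code. The schema class, DSNs, credentials, balancer choice and the auto_validate_every/maximum_lag values are illustrative assumptions; only the pool_args, balancer_type and balancer_args option names come from the hunk above and the connect_info handling below.

  # Hypothetical wiring of a replicated schema; names and values are examples only.
  my $schema = My::Schema->clone;
  $schema->storage_type('::DBI::Replicated');
  $schema->connection(
    'dbi:mysql:database=myapp;host=master.db', 'dbuser', 'dbpass',
    {
      balancer_type => '::Random',                   # which Balancer subclass to build
      balancer_args => { auto_validate_every => 5 }, # passed through to the Balancer
      pool_args     => { maximum_lag => 2 },         # passed through to the Pool
    },
  );

  # Replicants later receive the master's connect_info attributes merged in.
  $schema->storage->connect_replicants(
    [ 'dbi:mysql:database=myapp;host=replicant1.db', 'dbuser', 'dbpass' ],
    [ 'dbi:mysql:database=myapp;host=replicant2.db', 'dbuser', 'dbpass' ],
  );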
@@ -384,10 +399,12 @@ around connect_info => sub {
 
   my $wantarray = wantarray;
 
+  my $merge = Hash::Merge->new('LEFT_PRECEDENT');
+
   my %opts;
   for my $arg (@$info) {
     next unless (reftype($arg)||'') eq 'HASH';
-    %opts = %{ merge($arg, \%opts) };
+    %opts = %{ $merge->merge($arg, \%opts) };
   }
   delete $opts{dsn};
 
@@ -396,7 +413,7 @@ around connect_info => sub {
     if $opts{pool_type};
 
   $self->pool_args(
-    merge((delete $opts{pool_args} || {}), $self->pool_args)
+    $merge->merge((delete $opts{pool_args} || {}), $self->pool_args)
   );
 
   $self->pool($self->_build_pool)
@@ -408,7 +425,7 @@ around connect_info => sub {
     if $opts{balancer_type};
 
   $self->balancer_args(
-    merge((delete $opts{balancer_args} || {}), $self->balancer_args)
+    $merge->merge((delete $opts{balancer_args} || {}), $self->balancer_args)
   );
 
   $self->balancer($self->_build_balancer)
@@ -553,7 +570,8 @@ around connect_replicants => sub {
     $self->throw_exception('too many hashrefs in connect_info')
       if @hashes > 2;
 
-    my %opts = %{ merge(reverse @hashes) };
+    my $merge = Hash::Merge->new('LEFT_PRECEDENT');
+    my %opts = %{ $merge->merge(reverse @hashes) };
 
     # delete them
     splice @$r, $i+1, ($#{$r} - $i), ();
@@ -566,7 +584,7 @@ around connect_replicants => sub {
     delete $master_opts{dbh_maker};
 
     # merge with master
-    %opts = %{ merge(\%opts, \%master_opts) };
+    %opts = %{ $merge->merge(\%opts, \%master_opts) };
 
     # update
     $r->[$i] = \%opts;
@@ -594,7 +612,7 @@ sub all_storages {
 
 =head2 execute_reliably ($coderef, ?@args)
 
 Given a coderef, saves the current state of the L, forces it to
-use reliable storage (ie sets it to the master), executes a coderef and then
+use reliable storage (e.g. sets it to the master), executes a coderef and then
 restores the original state.
 
 Example:
@@ -633,7 +651,8 @@ sub execute_reliably {
   my @result;
   my $want_array = wantarray;
 
-  eval {
+  my $exception;
+  try {
     if($want_array) {
       @result = $coderef->(@args);
     } elsif(defined $want_array) {
@@ -641,19 +660,14 @@ sub execute_reliably {
     } else {
       $coderef->(@args);
     }
+  } catch {
+    $self->throw_exception("coderef returned an error: $_");
+  } finally {
+    ##Reset to the original state
+    $self->read_handler($current);
   };
 
-  ##Reset to the original state
-  $self->read_handler($current);
-
-  ##Exception testing has to come last, otherwise you might leave the
-  ##read_handler set to master.
-
-  if($@) {
-    $self->throw_exception("coderef returned an error: $@");
-  } else {
-    return $want_array ? @result : $result[0];
-  }
+  return $want_array ? @result : $result[0];
 }
 
 =head2 set_reliable_storage
@@ -674,7 +688,7 @@ sub set_reliable_storage {
 
 =head2 set_balanced_storage
 
 Sets the current $schema to be use the for all reads, while all
-writea are sent to the master only
+writes are sent to the master only
 
 =cut
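For context, a short usage sketch of the reliable-read interface reworked above; $schema and the 'User' result source are placeholder assumptions.

  # Run the write and the follow-up read against the master, then let
  # execute_reliably restore whichever read handler was active before.
  my $fresh = $schema->storage->execute_reliably(sub {
    my $row = $schema->resultset('User')->create({ name => 'alice' });
    return $schema->resultset('User')->find($row->id);
  });

  # Manual switches shown in the surrounding hunks, without the automatic restore:
  #   $schema->storage->set_reliable_storage;  # reads go to the master
  #   $schema->storage->set_balanced_storage;  # reads return to the replicants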
@@ -1003,6 +1017,36 @@ sub _ping {
   return min map $_->_ping, $self->all_storages;
 }
 
+my $numify_ver = sub {
+  my $ver = shift;
+  my @numparts = split /\D+/, $ver;
+  my $format = '%d.' . (join '', ('%05d') x (@numparts - 1));
+
+  return sprintf $format, @numparts;
+};
+
+sub _server_info {
+  my $self = shift;
+
+  if (not $self->_server_info_hash) {
+    my $min_version_info = (
+      reduce { $a->[0] < $b->[0] ? $a : $b }
+      map [ $numify_ver->($_->{dbms_version}), $_ ],
+      map $_->_server_info, $self->all_storages
+    )->[1];
+
+    $self->_server_info_hash($min_version_info); # on master
+  }
+
+  return $self->_server_info_hash;
+}
+
+sub _get_server_version {
+  my $self = shift;
+
+  return $self->_server_info->{dbms_version};
+}
+
 =head1 GOTCHAS
 
 Due to the fact that replicants can lag behind a master, you must take care to