diff --git a/lib/DBIx/Class/Storage/DBI/Replicated.pm b/lib/DBIx/Class/Storage/DBI/Replicated.pm
index 9eb92dd..4477475 100644
--- a/lib/DBIx/Class/Storage/DBI/Replicated.pm
+++ b/lib/DBIx/Class/Storage/DBI/Replicated.pm
@@ -2,27 +2,9 @@ package DBIx::Class::Storage::DBI::Replicated;

 BEGIN {
   use Carp::Clan qw/^DBIx::Class/;
-
-  ## Modules required for Replication support not required for general DBIC
-  ## use, so we explicitly test for these.
-
-  my %replication_required = (
-    'Moose' => '0.98',
-    'MooseX::Types' => '0.21',
-    'namespace::clean' => '0.11',
-    'Hash::Merge' => '0.11'
-  );
-
-  my @didnt_load;
-
-  for my $module (keys %replication_required) {
-    eval "use $module $replication_required{$module}";
-    push @didnt_load, "$module $replication_required{$module}"
-      if $@;
-  }
-
-  croak("@{[ join ', ', @didnt_load ]} are missing and are required for Replication")
-    if @didnt_load;
+  use DBIx::Class;
+  croak('The following modules are required for Replication ' . DBIx::Class::Optional::Dependencies->req_missing_for ('replicated') )
+    unless DBIx::Class::Optional::Dependencies->req_ok_for ('replicated');
 }

 use Moose;
@@ -32,7 +14,10 @@ use DBIx::Class::Storage::DBI::Replicated::Balancer;
 use DBIx::Class::Storage::DBI::Replicated::Types qw/BalancerClassNamePart DBICSchema DBICStorageDBI/;
 use MooseX::Types::Moose qw/ClassName HashRef Object/;
 use Scalar::Util 'reftype';
-use Hash::Merge 'merge';
+use Hash::Merge;
+use List::Util qw/min max reduce/;
+use Try::Tiny;
+use namespace::clean;

 use namespace::clean -except => 'meta';

@@ -43,7 +28,7 @@ DBIx::Class::Storage::DBI::Replicated - BETA Replicated database support

 =head1 SYNOPSIS

 The Following example shows how to change an existing $schema to a replicated
-storage type, add some replicated (readonly) databases, and perform reporting
+storage type, add some replicated (read-only) databases, and perform reporting
 tasks.

 You should set the 'storage_type attribute to a replicated type. You should
@@ -54,7 +39,7 @@ that the Pool object should get.

   $schema->storage_type( ['::DBI::Replicated', {balancer=>'::Random'}] );
   $schema->connection(...);

-Next, you need to add in the Replicants. Basically this is an array of
+Next, you need to add in the Replicants. Basically this is an array of
 arrayrefs, where each arrayref is database connect information. Think of these
 arguments as what you'd pass to the 'normal' $schema->connect method.
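In the module's wider SYNOPSIS (outside this hunk) that is done with C<connect_replicants>; a minimal sketch, where the DSNs, users, passwords and C<%opts> are placeholders:

  $schema->storage->connect_replicants(
    [$dsn1, $user, $pass, \%opts],
    [$dsn2, $user, $pass, \%opts],
    [$dsn3, $user, $pass, \%opts],
  );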
@@ -75,7 +60,7 @@ attribute 'force_pool'. For example:

   my $RS = $schema->resultset('Source')->search(undef, {force_pool=>'master'});

 Now $RS will force everything (both reads and writes) to use whatever was setup
-as the master storage. 'master' is hardcoded to always point to the Master,
+as the master storage. 'master' is hardcoded to always point to the Master,
 but you can also use any Replicant name. Please see:
 L<DBIx::Class::Storage::DBI::Replicated::Pool> and the replicants attribute for more.

@@ -92,7 +77,7 @@ walkthroughs.

 Warning: This class is marked BETA. This has been running a production
 website using MySQL native replication as its backend and we have some decent
 test coverage but the code hasn't yet been stressed by a variety of databases.
-Individual DB's may have quirks we are not aware of. Please use this in first
+Individual DBs may have quirks we are not aware of. Please use this in first
 development and pass along your experiences/bug fixes.

 This class implements replicated data store for DBI. Currently you can define
@@ -106,27 +91,20 @@ L</write_handler>. Additionally, some methods need to be distributed
 to all existing storages. This way our storage class is a drop in replacement
 for L<DBIx::Class::Storage::DBI>.

-Read traffic is spread across the replicants (slaves) occuring to a user
+Read traffic is spread across the replicants (slaves) according to a user
 selected algorithm. The default algorithm is random weighted.

 =head1 NOTES

-The consistancy betweeen master and replicants is database specific. The Pool
+The consistency between master and replicants is database specific. The Pool
 gives you a method to validate its replicants, removing and replacing them
 when they fail/pass predefined criteria. Please make careful use of the ways
 to force a query to run against Master when needed.

 =head1 REQUIREMENTS

-Replicated Storage has additional requirements not currently part of L<DBIx::Class>
-
-  Moose => '0.98',
-  MooseX::Types => '0.21',
-  namespace::clean => '0.11',
-  Hash::Merge => '0.11'
-
-You will need to install these modules manually via CPAN or make them part of the
-Makefile for your distribution.
+Replicated Storage has additional requirements not currently part of
+L<DBIx::Class>. See L<DBIx::Class::Optional::Dependencies> for more details.

 =head1 ATTRIBUTES

@@ -147,7 +125,7 @@ has 'schema' => (

 =head2 pool_type

-Contains the classname which will instantiate the L</pool> object. Defaults
+Contains the classname which will instantiate the L</pool> object. Defaults
 to: L<DBIx::Class::Storage::DBI::Replicated::Pool>.

 =cut

@@ -211,7 +189,7 @@ has 'balancer_args' => (

 =head2 pool

-Is a <DBIx::Class::Storage::DBI::Replicated::Pool> or derived class. This is a
+Is a L<DBIx::Class::Storage::DBI::Replicated::Pool> or derived class. This is a
 container class for one or more replicated databases.

 =cut

@@ -229,8 +207,8 @@ has 'pool' => (

 =head2 balancer

-Is a <DBIx::Class::Storage::DBI::Replicated::Balancer> or derived class. This
-is a class that takes a pool (<DBIx::Class::Storage::DBI::Replicated::Pool>)
+Is a L<DBIx::Class::Storage::DBI::Replicated::Balancer> or derived class. This
+is a class that takes a pool (L<DBIx::Class::Storage::DBI::Replicated::Pool>)

 =cut
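Both attributes are usually configured at connect time; the C<around: connect_info> hook further below extracts C<pool_type>, C<pool_args>, C<balancer_type> and C<balancer_args> from the connection attributes. A sketch, where C<maximum_lag> and C<auto_validate_every> are assumed Pool and Balancer attributes rather than anything shown in this diff:

  $schema->connection($dsn, $user, $pass, {
    balancer_type => '::Random',
    balancer_args => { auto_validate_every => 5 },  # assumed Balancer attribute
    pool_args     => { maximum_lag => 2 },          # assumed Pool attribute
  });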
@@ -259,7 +237,7 @@ has 'master' => (

 =head1 ATTRIBUTES IMPLEMENTING THE DBIx::Storage::DBI INTERFACE

-The following methods are delegated all the methods required for the
+The following methods are delegated all the methods required for the
 L<DBIx::Class::Storage::DBI> interface.

 =head2 read_handler

@@ -276,12 +254,17 @@ has 'read_handler' => (
     select
     select_single
     columns_info_for
+    _dbh_columns_info_for
+    _select
   /],
 );

 =head2 write_handler

-Defines an object that implements the write side of L<DBIx::Class::Storage::DBI>.
+Defines an object that implements the write side of L<DBIx::Class::Storage::DBI>,
+as well as methods that don't write or read that can be called on only one
+storage, methods that return a C<$dbh>, and any methods that don't make sense to
+run on a replicant.

 =cut

@@ -292,7 +275,10 @@ has 'write_handler' => (
   handles=>[qw/
     on_connect_do
     on_disconnect_do
+    on_connect_call
+    on_disconnect_call
     connect_info
+    _connect_info
     throw_exception
     sql_maker
     sqlt_type
@@ -323,23 +309,101 @@ has 'write_handler' => (
     backup
     is_datatype_numeric
     _count_select
-    _subq_count_select
     _subq_update_delete
     svp_rollback
     svp_begin
     svp_release
     relname_to_table_alias
-    _straight_join_to_node
+    _dbh_last_insert_id
+    _fix_bind_params
+    _default_dbi_connect_attributes
+    _dbi_connect_info
+    _dbic_connect_attributes
+    auto_savepoint
+    _sqlt_version_ok
+    _query_end
+    bind_attribute_by_data_type
+    transaction_depth
+    _dbh
+    _select_args
+    _dbh_execute_array
+    _sql_maker
+    _query_start
+    _sqlt_version_error
+    _per_row_update_delete
+    _dbh_begin_work
+    _dbh_execute_inserts_with_no_binds
+    _select_args_to_query
+    _svp_generate_name
+    _multipk_update_delete
+    source_bind_attributes
+    _normalize_connect_info
+    _parse_connect_do
+    _dbh_commit
+    _execute_array
+    savepoints
+    _sqlt_minimum_version
+    _sql_maker_opts
+    _conn_pid
+    _conn_tid
+    _dbh_autocommit
+    _native_data_type
+    _get_dbh
+    sql_maker_class
+    _dbh_rollback
+    _adjust_select_args_for_complex_prefetch
+    _resolve_ident_sources
+    _resolve_column_info
+    _prune_unused_joins
+    _strip_cond_qualifiers
+    _extract_order_columns
+    _resolve_aliastypes_from_select_args
+    _execute
+    _do_query
+    _dbh_sth
+    _dbh_execute
+    _prefetch_insert_auto_nextvals
   /],
 );

+my @unimplemented = qw(
+  _arm_global_destructor
+  _preserve_foreign_dbh
+  _verify_pid
+  _verify_tid
+
+  get_use_dbms_capability
+  set_use_dbms_capability
+  get_dbms_capability
+  set_dbms_capability
+  _dbh_details
+
+  sql_limit_dialect
+
+  _inner_join_to_node
+  _group_over_selection
+);
+
+# the capability framework
+# not sure if CMOP->initialize does evil things to DBIC::S::DBI, fix if a problem
+push @unimplemented, ( grep
+  { $_ =~ /^ _ (?: use | supports | determine_supports ) _ /x }
+  ( Class::MOP::Class->initialize('DBIx::Class::Storage::DBI')->get_all_method_names )
+);
+
+for my $method (@unimplemented) {
+  __PACKAGE__->meta->add_method($method, sub {
+    croak "$method must not be called on ".(blessed shift).' objects';
+  });
+}
+
 has _master_connect_info_opts =>
   (is => 'rw', isa => HashRef, default => sub { {} });

 =head2 around: connect_info

-Preserve master's C<connect_info> options (for merging with replicants.)
-Also set any Replicated related options from connect_info, such as
+Preserves master's C<connect_info> options (for merging with replicants).
+Also sets any Replicated-related options from connect_info, such as
 C<pool_type>, C<pool_args>, C<balancer_type> and C<balancer_args>.

 =cut

@@ -349,10 +413,12 @@ around connect_info => sub {

   my $wantarray = wantarray;

+  my $merge = Hash::Merge->new('LEFT_PRECEDENT');
+
   my %opts;
   for my $arg (@$info) {
     next unless (reftype($arg)||'') eq 'HASH';
-    %opts = %{ merge($arg, \%opts) };
+    %opts = %{ $merge->merge($arg, \%opts) };
   }
   delete $opts{dsn};
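For reference, the C<LEFT_PRECEDENT> behaviour means that on conflicting keys the left argument of C<merge> wins, so each successive hashref in C<$info> overrides whatever was accumulated before it. A self-contained illustration (the option values are made up):

  use Hash::Merge;

  my $merge = Hash::Merge->new('LEFT_PRECEDENT');
  my $opts  = $merge->merge(
    { PrintError => 0 },                  # left side, wins on conflict
    { PrintError => 1, RaiseError => 1 },
  );
  # $opts is now { PrintError => 0, RaiseError => 1 }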
@@ -361,11 +427,12 @@ around connect_info => sub {
       if $opts{pool_type};

     $self->pool_args(
-      merge((delete $opts{pool_args} || {}), $self->pool_args)
+      $merge->merge((delete $opts{pool_args} || {}), $self->pool_args)
     );

-    $self->pool($self->_build_pool)
-      if $self->pool;
+    ## Since we possibly changed the pool_args, we need to clear the current
+    ## pool object so that next time it is used it will be rebuilt.
+    $self->clear_pool;
   }

   if (@opts{qw/balancer_type balancer_args/}) {
@@ -373,7 +440,7 @@
       if $opts{balancer_type};

     $self->balancer_args(
-      merge((delete $opts{balancer_args} || {}), $self->balancer_args)
+      $merge->merge((delete $opts{balancer_args} || {}), $self->balancer_args)
     );

     $self->balancer($self->_build_balancer)
@@ -415,7 +482,7 @@ bits get put into the correct places.

 =cut

 sub BUILDARGS {
-  my ($class, $schema, $storage_type_args, @args) = @_;
+  my ($class, $schema, $storage_type_args, @args) = @_;

   return {
     schema=>$schema,
@@ -518,7 +585,8 @@ around connect_replicants => sub {
     $self->throw_exception('too many hashrefs in connect_info')
       if @hashes > 2;

-    my %opts = %{ merge(reverse @hashes) };
+    my $merge = Hash::Merge->new('LEFT_PRECEDENT');
+    my %opts = %{ $merge->merge(reverse @hashes) };

     # delete them
     splice @$r, $i+1, ($#{$r} - $i), ();
@@ -531,7 +599,7 @@ around connect_replicants => sub {
     delete $master_opts{dbh_maker};

     # merge with master
-    %opts = %{ merge(\%opts, \%master_opts) };
+    %opts = %{ $merge->merge(\%opts, \%master_opts) };

     # update
     $r->[$i] = \%opts;
@@ -559,7 +627,7 @@ sub all_storages {

 =head2 execute_reliably ($coderef, ?@args)

 Given a coderef, saves the current state of the L</read_handler>, forces it to
-use reliable storage (ie sets it to the master), executes a coderef and then
+use reliable storage (i.e. sets it to the master), executes a coderef and then
 restores the original state.

 Example:

@@ -567,7 +635,7 @@ Example:
   my $reliably = sub {
     my $name = shift @_;
     $schema->resultset('User')->create({name=>$name});
-    my $user_rs = $schema->resultset('User')->find({name=>$name});
+    my $user_rs = $schema->resultset('User')->find({name=>$name});
     return $user_rs;
   };
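The saved coderef is then invoked through the storage, with any extra arguments passed straight through; per the module's wider docs the call looks like this (the 'John' value is just an example):

  $schema->storage->execute_reliably($reliably, 'John');

In the implementation that follows, restoring the original read_handler happens in a Try::Tiny C<finally> block, so the storage is switched back even when the coderef throws.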
@@ -598,7 +666,7 @@ sub execute_reliably {
   my @result;
   my $want_array = wantarray;

-  eval {
+  try {
     if($want_array) {
       @result = $coderef->(@args);
     } elsif(defined $want_array) {
@@ -606,19 +674,14 @@ sub execute_reliably {
     } else {
       $coderef->(@args);
     }
+  } catch {
+    $self->throw_exception("coderef returned an error: $_");
+  } finally {
+    ##Reset to the original state
+    $self->read_handler($current);
   };

-  ##Reset to the original state
-  $self->read_handler($current);
-
-  ##Exception testing has to come last, otherwise you might leave the
-  ##read_handler set to master.
-
-  if($@) {
-    $self->throw_exception("coderef returned an error: $@");
-  } else {
-    return $want_array ? @result : $result[0];
-  }
+  return $want_array ? @result : $result[0];
 }

 =head2 set_reliable_storage

@@ -639,7 +702,7 @@ sub set_reliable_storage {

 =head2 set_balanced_storage

 Sets the current $schema to be use the </balancer> for all reads, while all
-writea are sent to the master only
+writes are sent to the master only

 =cut

@@ -688,7 +751,7 @@ sub limit_dialect {
   foreach my $source ($self->all_storages) {
     $source->limit_dialect(@_);
   }
-  return $self->master->quote_char;
+  return $self->master->limit_dialect;
 }

 =head2 quote_char

@@ -809,6 +872,194 @@ sub cursor_class {
   $self->master->cursor_class;
 }

+=head2 cursor
+
+set cursor class on all storages, or return master's, alias for L</cursor_class>
+above.
+
+=cut
+
+sub cursor {
+  my ($self, $cursor_class) = @_;
+
+  if ($cursor_class) {
+    $_->cursor($cursor_class) for $self->all_storages;
+  }
+  $self->master->cursor;
+}
+
+=head2 unsafe
+
+sets the L<DBIx::Class::Storage::DBI/unsafe> option on all storages or returns
+master's current setting
+
+=cut
+
+sub unsafe {
+  my $self = shift;
+
+  if (@_) {
+    $_->unsafe(@_) for $self->all_storages;
+  }
+
+  return $self->master->unsafe;
+}
+
+=head2 disable_sth_caching
+
+sets the L<DBIx::Class::Storage::DBI/disable_sth_caching> option on all storages
+or returns master's current setting
+
+=cut
+
+sub disable_sth_caching {
+  my $self = shift;
+
+  if (@_) {
+    $_->disable_sth_caching(@_) for $self->all_storages;
+  }
+
+  return $self->master->disable_sth_caching;
+}
+
+=head2 lag_behind_master
+
+returns the highest Replicant L<DBIx::Class::Storage::DBI/lag_behind_master>
+setting
+
+=cut
+
+sub lag_behind_master {
+  my $self = shift;
+
+  return max map $_->lag_behind_master, $self->replicants;
+}
+
+=head2 is_replicating
+
+returns true if all replicants return true for
+L<DBIx::Class::Storage::DBI/is_replicating>
+
+=cut
+
+sub is_replicating {
+  my $self = shift;
+
+  return (grep $_->is_replicating, $self->replicants) == ($self->replicants);
+}
+
+=head2 connect_call_datetime_setup
+
+calls L<DBIx::Class::Storage::DBI/connect_call_datetime_setup> for all storages
+
+=cut
+
+sub connect_call_datetime_setup {
+  my $self = shift;
+  $_->connect_call_datetime_setup for $self->all_storages;
+}
+
+sub _populate_dbh {
+  my $self = shift;
+  $_->_populate_dbh for $self->all_storages;
+}
+
+sub _connect {
+  my $self = shift;
+  $_->_connect for $self->all_storages;
+}
+
+sub _rebless {
+  my $self = shift;
+  $_->_rebless for $self->all_storages;
+}
+
+sub _determine_driver {
+  my $self = shift;
+  $_->_determine_driver for $self->all_storages;
+}
+
+sub _driver_determined {
+  my $self = shift;
+
+  if (@_) {
+    $_->_driver_determined(@_) for $self->all_storages;
+  }
+
+  return $self->master->_driver_determined;
+}
+
+sub _init {
+  my $self = shift;
+
+  $_->_init for $self->all_storages;
+}
+
+sub _run_connection_actions {
+  my $self = shift;
+
+  $_->_run_connection_actions for $self->all_storages;
+}
+
+sub _do_connection_actions {
+  my $self = shift;
+
+  if (@_) {
+    $_->_do_connection_actions(@_) for $self->all_storages;
+  }
+}
+
+sub connect_call_do_sql {
+  my $self = shift;
+  $_->connect_call_do_sql(@_) for $self->all_storages;
+}
+
+sub disconnect_call_do_sql {
+  my $self = shift;
+  $_->disconnect_call_do_sql(@_) for $self->all_storages;
+}
+
+sub _seems_connected {
+  my $self = shift;
+
+  return min map $_->_seems_connected, $self->all_storages;
+}
+
+sub _ping {
+  my $self = shift;
+
+  return min map $_->_ping, $self->all_storages;
+}
+
+# not using the normalized_version, because we want to preserve
+# version numbers much longer than the conventional xxx.yyyzzz
+my $numify_ver = sub {
+  my $ver = shift;
+  my @numparts = split /\D+/, $ver;
+  my $format = '%d.' . (join '', ('%06d') x (@numparts - 1));
+
+  return sprintf $format, @numparts;
+};
+sub _server_info {
+  my $self = shift;
+
+  if (not $self->_dbh_details->{info}) {
+    $self->_dbh_details->{info} = (
+      reduce { $a->[0] < $b->[0] ? $a : $b }
+      map [ $numify_ver->($_->{dbms_version}), $_ ],
+      map $_->_server_info, $self->all_storages
+    )->[1];
+  }
+
+  return $self->next::method;
+}
+
+sub _get_server_version {
+  my $self = shift;
+
+  return $self->_server_info->{dbms_version};
+}
+
 =head1 GOTCHAS

 Due to the fact that replicants can lag behind a master, you must take care to