X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=blobdiff_plain;f=lib%2FDBIx%2FClass%2FStorage%2FDBI%2FReplicated.pm;h=bf5bb47caf290a84e47d2d34a6bd680af17da6d8;hb=4b8a53eabdb1629bacdb95f04ca8fc3718ca7c58;hp=3162d8112550ba1e7f4ee3697ef5da1e52ba6605;hpb=4bea1fe7a2b4827947b3d0d64b16a0f2c5e594bd;p=dbsrgits%2FDBIx-Class.git

diff --git a/lib/DBIx/Class/Storage/DBI/Replicated.pm b/lib/DBIx/Class/Storage/DBI/Replicated.pm
index 3162d81..bf5bb47 100644
--- a/lib/DBIx/Class/Storage/DBI/Replicated.pm
+++ b/lib/DBIx/Class/Storage/DBI/Replicated.pm
@@ -1,9 +1,8 @@
 package DBIx::Class::Storage::DBI::Replicated;
 
 BEGIN {
-  use Carp::Clan qw/^DBIx::Class/;
   use DBIx::Class;
-  croak('The following modules are required for Replication ' . DBIx::Class::Optional::Dependencies->req_missing_for ('replicated') )
+  die('The following modules are required for Replication ' . DBIx::Class::Optional::Dependencies->req_missing_for ('replicated') . "\n" )
     unless DBIx::Class::Optional::Dependencies->req_ok_for ('replicated');
 }
 
@@ -16,11 +15,13 @@ use MooseX::Types::Moose qw/ClassName HashRef Object/;
 use Scalar::Util 'reftype';
 use Hash::Merge;
 use List::Util qw/min max reduce/;
+use Context::Preserve 'preserve_context';
 use Try::Tiny;
 
-use namespace::clean;
 use namespace::clean -except => 'meta';
 
+=encoding utf8
+
 =head1 NAME
 
 DBIx::Class::Storage::DBI::Replicated - BETA Replicated database support
@@ -57,9 +58,9 @@ be delegated to the replicants, while writes to the master.
 You can force a given query to use a particular storage using the search
 attribute 'force_pool'. For example:
 
-  my $RS = $schema->resultset('Source')->search(undef, {force_pool=>'master'});
+  my $rs = $schema->resultset('Source')->search(undef, {force_pool=>'master'});
 
-Now $RS will force everything (both reads and writes) to use whatever was setup
+Now $rs will force everything (both reads and writes) to use whatever was setup
 as the master storage. 'master' is hardcoded to always point to the Master,
 but you can also use any Replicant name. Please see:
 L and the replicants attribute for more.
@@ -69,8 +70,12 @@ force read traffic to the master. In general, you should wrap your statements
 in a transaction when you are reading and writing to the same tables at the
 same time, since your replicants will often lag a bit behind the master.
 
-See L for more help and
-walkthroughs.
+If you have a multi-statement read only transaction you can force it to select
+a random server in the pool by:
+
+  my $rs = $schema->resultset('Source')->search( undef,
+    { force_pool => $db->storage->read_handler->next_storage } );
 
 =head1 DESCRIPTION
 
@@ -269,43 +274,40 @@ my $method_dispatch = {
     txn_commit
     txn_rollback
     txn_scope_guard
-    sth
+    _exec_txn_rollback
+    _exec_txn_begin
+    _exec_txn_commit
     deploy
     with_deferred_fk_checks
     dbh_do
     _prep_for_execute
     is_datatype_numeric
     _count_select
-    _subq_update_delete
     svp_rollback
     svp_begin
     svp_release
     relname_to_table_alias
     _dbh_last_insert_id
-    _fix_bind_params
     _default_dbi_connect_attributes
     _dbi_connect_info
     _dbic_connect_attributes
     auto_savepoint
+    _query_start
     _query_end
+    _format_for_trace
+    _dbi_attrs_for_bind
     bind_attribute_by_data_type
     transaction_depth
     _dbh
     _select_args
-    _dbh_execute_array
+    _dbh_execute_for_fetch
     _sql_maker
-    _query_start
-    _per_row_update_delete
-    _dbh_begin_work
     _dbh_execute_inserts_with_no_binds
     _select_args_to_query
+    _gen_sql_bind
     _svp_generate_name
-    _multipk_update_delete
-    source_bind_attributes
     _normalize_connect_info
     _parse_connect_do
-    _dbh_commit
-    _execute_array
     savepoints
     _sql_maker_opts
     _conn_pid
@@ -313,19 +315,12 @@
     _native_data_type
     _get_dbh
     sql_maker_class
-    _dbh_rollback
-    _adjust_select_args_for_complex_prefetch
-    _resolve_ident_sources
-    _resolve_column_info
-    _prune_unused_joins
-    _strip_cond_qualifiers
-    _strip_cond_qualifiers_from_array
-    _resolve_aliastypes_from_select_args
     _execute
     _do_query
+    _sth
     _dbh_sth
     _dbh_execute
-  /],
+  /, Class::MOP::Class->initialize('DBIx::Class::Storage::DBIHacks')->get_method_list ],
   reader => [qw/
     select
     select_single
     columns_info_for
     _dbh_columns_info_for
     _select
   /],
@@ -344,20 +339,27 @@
 
     _dbh_details
     _dbh_get_info
 
+    _determine_connector_driver
+    _describe_connection
+    _warn_undetermined_driver
+
     sql_limit_dialect
     sql_quote_char
     sql_name_sep
 
-    _inner_join_to_node
-    _group_over_selection
-    _extract_order_criteria
-    _prefetch_autovalues
+    _perform_autoinc_retrieval
+    _autoinc_supplied_for_op
+
+    _resolve_bindattrs
 
     _max_column_bytesize
     _is_lob_type
     _is_binary_lob_type
+    _is_binary_type
     _is_text_lob_type
+
+    sth
   /,(
     # the capability framework
     # not sure if CMOP->initialize does evil things to DBIC::S::DBI, fix if a problem
@@ -395,7 +397,8 @@ if (DBIx::Class::_ENV_::DBICTEST) {
 
 for my $method (@{$method_dispatch->{unimplemented}}) {
   __PACKAGE__->meta->add_method($method, sub {
-    croak "$method must not be called on ".(blessed shift).' objects';
+    my $self = shift;
+    $self->throw_exception("$method must not be called on ".(blessed $self).' objects');
   });
 }
 
@@ -444,6 +447,11 @@ C, C, C and C.
 around connect_info => sub {
   my ($next, $self, $info, @extra) = @_;
 
+  $self->throw_exception(
+    'connect_info can not be retrieved from a replicated storage - '
+  . 'accessor must be called on a specific pool instance'
+  ) unless defined $info;
+
   my $merge = Hash::Merge->new('LEFT_PRECEDENT');
 
   my %opts;
@@ -480,24 +488,19 @@ around connect_info => sub {
 
   $self->_master_connect_info_opts(\%opts);
 
-  my @res;
-  if (wantarray) {
-    @res = $self->$next($info, @extra);
-  } else {
-    $res[0] = $self->$next($info, @extra);
-  }
-
-  # Make sure master is blessed into the correct class and apply role to it.
-  my $master = $self->master;
-  $master->_determine_driver;
-  Moose::Meta::Class->initialize(ref $master);
+  return preserve_context {
+    $self->$next($info, @extra);
+  } after => sub {
+    # Make sure master is blessed into the correct class and apply role to it.
+    my $master = $self->master;
+    $master->_determine_driver;
+    Moose::Meta::Class->initialize(ref $master);
 
-  DBIx::Class::Storage::DBI::Replicated::WithDSN->meta->apply($master);
+    DBIx::Class::Storage::DBI::Replicated::WithDSN->meta->apply($master);
 
-  # link pool back to master
-  $self->pool->master($master);
-
-  wantarray ? @res : $res[0];
+    # link pool back to master
+    $self->pool->master($master);
+  };
 };
 
 =head1 METHODS
 
@@ -678,41 +681,22 @@ inserted something and need to get a resultset including it, etc.
 =cut
 
 sub execute_reliably {
-  my ($self, $coderef, @args) = @_;
+  my $self = shift;
+  my $coderef = shift;
 
   unless( ref $coderef eq 'CODE') {
     $self->throw_exception('Second argument must be a coderef');
   }
 
-  ##Get copy of master storage
-  my $master = $self->master;
+  ## replace the current read handler for the remainder of the scope
+  local $self->{read_handler} = $self->master;
 
-  ##Get whatever the current read hander is
-  my $current = $self->read_handler;
-
-  ##Set the read handler to master
-  $self->read_handler($master);
-
-  ## do whatever the caller needs
-  my @result;
-  my $want_array = wantarray;
-
-  try {
-    if($want_array) {
-      @result = $coderef->(@args);
-    } elsif(defined $want_array) {
-      ($result[0]) = ($coderef->(@args));
-    } else {
-      $coderef->(@args);
-    }
+  my $args = \@_;
+
+  return try {
+    $coderef->(@$args);
   } catch {
     $self->throw_exception("coderef returned an error: $_");
-  } finally {
-    ##Reset to the original state
-    $self->read_handler($current);
   };
-
-  return wantarray ? @result : $result[0];
 }
 
 =head2 set_reliable_storage