X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=blobdiff_plain;f=lib%2FDBIx%2FClass%2FResultSet.pm;h=5c87ffa38900305e77f77fa3e8e68e57e77a2420;hb=307f12659a1a370e5e41a23ef35839e2807f1ca4;hp=5c862e3427b72ab645b33201e4350cf2f0faab90;hpb=48e4eac60b37de39a225264ab16f316c407f5740;p=dbsrgits%2FDBIx-Class.git

diff --git a/lib/DBIx/Class/ResultSet.pm b/lib/DBIx/Class/ResultSet.pm
index 5c862e3..5c87ffa 100644
--- a/lib/DBIx/Class/ResultSet.pm
+++ b/lib/DBIx/Class/ResultSet.pm
@@ -3,28 +3,23 @@ package DBIx::Class::ResultSet;
 use strict;
 use warnings;
 use base qw/DBIx::Class/;
-use Carp::Clan qw/^DBIx::Class/;
+use DBIx::Class::Carp;
 use DBIx::Class::Exception;
-use Data::Page;
 use DBIx::Class::ResultSetColumn;
-use DBIx::Class::ResultSourceHandle;
-use Hash::Merge ();
 use Scalar::Util qw/blessed weaken/;
 use Try::Tiny;
-use Storable qw/nfreeze thaw/;
 
 # not importing first() as it will clash with our own method
 use List::Util ();
 
-use namespace::clean;
-
-
 BEGIN {
   # De-duplication in _merge_attr() is disabled, but left in for reference
   # (the merger is used for other things that ought not to be de-duped)
   *__HM_DEDUP = sub () { 0 };
 }
 
+use namespace::clean;
+
 use overload
         '0+'     => "count",
         'bool'   => "_bool",
@@ -98,7 +93,7 @@ another.
       year => $request->param('year'),
     });
 
-  $self->apply_security_policy( $cd_rs );
+  $cd_rs = $self->apply_security_policy( $cd_rs );
 
   return $cd_rs->all();
 }
@@ -301,7 +296,6 @@ always return a resultset, even in list context.
 
 =cut
 
-my $callsites_warned;
 sub search_rs {
   my $self = shift;
 
@@ -410,15 +404,7 @@ sub search_rs {
   } if @_;
 
   if( @_ > 1 and ! $rsrc->result_class->isa('DBIx::Class::CDBICompat') ) {
-    # determine callsite obeying Carp::Clan rules (fucking ugly but don't have better ideas)
-    my $callsite = do {
-      my $w;
-      local $SIG{__WARN__} = sub { $w = shift };
-      carp;
-      $w
-    };
-    carp 'search( %condition ) is deprecated, use search( \%condition ) instead'
-      unless $callsites_warned->{$callsite}++;
+    carp_unique 'search( %condition ) is deprecated, use search( \%condition ) instead';
   }
 
   for ($old_where, $call_cond) {
@@ -697,7 +683,7 @@ sub find {
       next if $keyref eq 'ARRAY'; # has_many for multi_create
 
       my $rel_q = $rsrc->_resolve_condition(
-        $relinfo->{cond}, $val, $key
+        $relinfo->{cond}, $val, $key, $key
       );
       die "Can't handle complex relationship conditions in find" if ref($rel_q) ne 'HASH';
       @related{keys %$rel_q} = values %$rel_q;
@@ -739,7 +725,7 @@ sub find {
       }++;
 
      push @unique_queries, try {
-        $self->_build_unique_cond ($c_name, $call_cond)
+        $self->_build_unique_cond ($c_name, $call_cond, 'croak_on_nulls')
      } || ();
    }
 
@@ -798,7 +784,7 @@ sub _qualify_cond_columns {
 }
 
 sub _build_unique_cond {
-  my ($self, $constraint_name, $extra_cond) = @_;
+  my ($self, $constraint_name, $extra_cond, $croak_on_null) = @_;
 
   my @c_cols = $self->result_source->unique_constraint_columns($constraint_name);
 
@@ -810,15 +796,38 @@ sub _build_unique_cond {
   };
 
   # trim out everything not in $columns
-  $final_cond = { map { $_ => $final_cond->{$_} } @c_cols };
-
-  if (my @missing = grep { ! defined $final_cond->{$_} } (@c_cols) ) {
+  $final_cond = { map {
+    exists $final_cond->{$_}
+      ? ( $_ => $final_cond->{$_} )
+      : ()
+  } @c_cols };
+
+  if (my @missing = grep
+    { ! ($croak_on_null ? defined $final_cond->{$_} : exists $final_cond->{$_}) }
+    (@c_cols)
+  ) {
     $self->throw_exception( sprintf (
       "Unable to satisfy requested constraint '%s', no values for column(s): %s",
       $constraint_name,
       join (', ', map { "'$_'" } @missing),
     ) );
   }
 
+  if (
+    !$croak_on_null
+      and
+    !$ENV{DBIC_NULLABLE_KEY_NOWARN}
+      and
+    my @undefs = grep { ! defined $final_cond->{$_} } (keys %$final_cond)
+  ) {
+    carp_unique ( sprintf (
+      "NULL/undef values supplied for requested unique constraint '%s' (NULL "
+    . 'values in column(s): %s). This is almost certainly not what you wanted, '
+    . 'though you can set DBIC_NULLABLE_KEY_NOWARN to disable this warning.',
+      $constraint_name,
+      join (', ', map { "'$_'" } @undefs),
+    ));
+  }
+
   return $final_cond;
 }
 
@@ -1045,7 +1054,7 @@ instead. An example conversion is:
 
 sub search_like {
   my $class = shift;
-  carp (
+  carp_unique (
     'search_like() is deprecated and will be removed in DBIC version 0.09.'
    .' Instead use ->search({ x => { -like => "y%" } })'
    .' (note the outer pair of {}s - they are important!)'
@@ -1828,7 +1837,7 @@ sub delete_all {
 Accepts either an arrayref of hashrefs or alternatively an arrayref of arrayrefs.
 For the arrayref of hashrefs style each hashref should be a structure suitable
-forsubmitting to a $resultset->create(...) method.
+for submitting to a $resultset->create(...) method.
 
 In void context, C<insert_bulk> in L<DBIx::Class::Storage::DBI> is used
 to insert the data, as this is a faster method.
@@ -1943,6 +1952,7 @@ sub populate {
             $reverse_relinfo->{cond},
             $self,
             $result,
+            $rel,
           );
 
           delete $data->[$index]->{$rel};
@@ -1981,6 +1991,7 @@ sub populate {
           $rels->{$rel}{cond},
           $child,
           $main_row,
+          $rel,
         );
 
         my @rows_to_add = ref $item->{$rel} eq 'ARRAY' ? @{$item->{$rel}} : ($item->{$rel});
@@ -2163,6 +2174,7 @@ sub pager {
 ### necessary for future development of DBIx::DS. Do *NOT* change this code
 ### before talking to ribasushi/mst
 
+  require Data::Page;
   my $pager = Data::Page->new(
     0, #start with an empty set
     $attrs->{rows},
@@ -2312,7 +2324,13 @@ sub _merge_with_rscond {
 
   while ( my($col, $value) = each %implied ) {
     my $vref = ref $value;
-    if ($vref eq 'HASH' && keys(%$value) && (keys %$value)[0] eq '=') {
+    if (
+      $vref eq 'HASH'
+        and
+      keys(%$value) == 1
+        and
+      (keys %$value)[0] eq '='
+    ) {
       $new_data{$col} = $value->{'='};
     }
     elsif( !$vref or $vref eq 'SCALAR' or blessed($value) ) {
@@ -3335,7 +3353,7 @@ sub _resolved_attrs {
   # subquery (since a group_by is present)
   if (delete $attrs->{distinct}) {
     if ($attrs->{group_by}) {
-      carp ("Useless use of distinct on a grouped resultset ('distinct' is ignored when a 'group_by' is present)");
+      carp_unique ("Useless use of distinct on a grouped resultset ('distinct' is ignored when a 'group_by' is present)");
     }
     else {
       # distinct affects only the main selection part, not what prefetch may
@@ -3527,6 +3545,7 @@ sub _merge_joinpref_attr {
 sub _merge_attr {
 
   $hm ||= do {
+    require Hash::Merge;
     my $hm = Hash::Merge->new;
 
     $hm->specify_behavior({
@@ -3616,14 +3635,14 @@ sub STORABLE_freeze {
   # A cursor in progress can't be serialized (and would make little sense anyway)
   delete $to_serialize->{cursor};
 
-  nfreeze($to_serialize);
+  Storable::nfreeze($to_serialize);
 }
 
 # need this hook for symmetry
 sub STORABLE_thaw {
   my ($self, $cloning, $serialized) = @_;
 
-  %$self = %{ thaw($serialized) };
+  %$self = %{ Storable::thaw($serialized) };
 
   $self;
 }
 
@@ -3735,6 +3754,10 @@ passed to object inflation. Note that the 'artist' is the name of the column (or
 relationship) accessor, and 'name' is the name of the column accessor in the
 related table.
 
+B<NOTE:> You need to explicitly quote '+columns' when defining the attribute.
+Not doing so causes Perl to incorrectly interpret +columns as a bareword with a
+unary plus operator before it.
+
 =head2 include_columns
 
 =over 4
@@ -3775,6 +3798,10 @@ identifier aliasing. You can however alias a function, so you can use it in
 e.g. an C<ORDER BY> clause. This is done via the C<-as> B / L / L / L
+
+L</prefetch> implies a L</join>/L</select> with the fields of the
+prefetched relations. So given:
+
+  my $rs = $schema->resultset('CD')->search(
+    undef,
+    {
+      select => ['cd.title'],
+      as => ['cd_title'],
+      prefetch => 'artist',
+    }
+  );
+
+The L</select> becomes: C<'cd.title', 'artist.*'> and the L</as>
+becomes: C<'cd_title', 'artist.*'>.
 
-B If you specify a C attribute, the C and C
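For illustration only (not part of the patch), below is a short usage sketch of two of the behaviours this diff touches: the quoting requirement for '+columns', and the new carp_unique-based warning emitted during find() when undef values are supplied for the requested unique constraint. The C<$schema> handle, the C<CD> source and the C<cd_artist_title> constraint name are assumptions invented for the example.

  use strict;
  use warnings;

  # Assumed: $schema is an already-connected DBIx::Class::Schema instance whose
  # 'CD' source declares a unique constraint 'cd_artist_title' over (artist, title).
  my $rs = $schema->resultset('CD');

  # '+columns' must be quoted: an unquoted +columns is parsed by Perl as a
  # unary plus on the bareword columns, i.e. the same as plain 'columns'.
  my $with_artist_name = $rs->search(undef, {
    '+columns' => [ 'artist.name' ],
    join       => 'artist',
  });

  # Passing undef for a column of the requested unique constraint now emits a
  # de-duplicated warning via carp_unique; it can be silenced by setting the
  # DBIC_NULLABLE_KEY_NOWARN environment variable.
  my $maybe_cd = $rs->find(
    { artist => 1, title => undef },
    { key => 'cd_artist_title' },
  );

  # The list-style condition still works but is deprecated and now triggers a
  # de-duplicated warning; prefer the hashref form:
  my $hits = $rs->search({ title => 'Greatest Hits' });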