Merge from current; fixed issue when a column has multiple bind values.
Revision history for DBIx::Class
- - add support to Ordered for multiple ordering columns
- mark DB.pm and compose_connection as deprecated
- switch tests to compose_namespace
- - ResltClass::HashRefInflator added
0.07999_01 2006-10-05 21:00:00
- add connect_info option "disable_statement_caching"
LTJake: Brian Cassidy <bricas@cpan.org>
-ned: Neil de Carteret
-
nigel: Nigel Metheringham <nigelm@cpan.org>
ningu: David Kamholz <dkamholz@cpan.org>
=head1 NAME
-DBIx::Class::InflateColumn - Automatically create references from column data
+DBIx::Class::InflateColumn - Automatically create objects from column data
=head1 SYNOPSIS
=head1 DESCRIPTION
-This component translates column data into references, i.e. "inflating"
-the column data. It also "deflates" references into an appropriate format
+This component translates column data into objects, i.e. "inflating"
+the column data. It also "deflates" objects into an appropriate format
for the database.
It can be used, for example, to automatically convert to and from
L<DateTime> objects for your date and time fields.
-It will accept arrayrefs, hashrefs and blessed references (objects),
-but not scalarrefs. Scalar references are passed through to the
-database to deal with, to allow such settings as C< \'year + 1'> and
-C< \'DEFAULT' > to work.
-
=head1 METHODS
=head2 inflate_column
sub _deflated_column {
my ($self, $col, $value) = @_;
-# return $value unless ref $value && blessed($value); # If it's not an object, don't touch it
- ## Leave scalar refs (ala SQL::Abstract literal SQL), untouched, deflate all other refs
- return $value unless (ref $value && ref($value) ne 'SCALAR');
+ return $value unless ref $value && blessed($value); # If it's not an object, don't touch it
my $info = $self->column_info($col) or
$self->throw_exception("No column info for $col");
return $value unless exists $info->{_inflate_info};
=cut
sub set_inflated_column {
- my ($self, $col, $inflated) = @_;
- $self->set_column($col, $self->_deflated_column($col, $inflated));
-# if (blessed $inflated) {
- if (ref $inflated && ref($inflated) ne 'SCALAR') {
- $self->{_inflated_column}{$col} = $inflated;
+ my ($self, $col, $obj) = @_;
+ $self->set_column($col, $self->_deflated_column($col, $obj));
+ if (blessed $obj) {
+ $self->{_inflated_column}{$col} = $obj;
} else {
delete $self->{_inflated_column}{$col};
}
- return $inflated;
+ return $obj;
}
=head2 store_inflated_column
=cut
sub store_inflated_column {
- my ($self, $col, $inflated) = @_;
-# unless (blessed $inflated) {
- unless (ref $inflated && ref($inflated) ne 'SCALAR') {
+ my ($self, $col, $obj) = @_;
+ unless (blessed $obj) {
delete $self->{_inflated_column}{$col};
- $self->store_column($col => $inflated);
- return $inflated;
+ $self->store_column($col => $obj);
+ return $obj;
}
delete $self->{_column_data}{$col};
- return $self->{_inflated_column}{$col} = $inflated;
+ return $self->{_inflated_column}{$col} = $obj;
}
=head1 SEE ALSO
Daniel Westermann-Clark <danieltwc@cpan.org> (documentation)
-Jess Robinson <cpan@desert-island.demon.co.uk>
-
=head1 LICENSE
You may distribute this code under the same terms as Perl itself.
name TEXT NOT NULL,
position INTEGER NOT NULL
);
-
-Optionally, add one or more columns to specify groupings, allowing you
-to maintain independent ordered lists within one table:
-
- CREATE TABLE items (
- item_id INTEGER PRIMARY KEY AUTOINCREMENT,
- name TEXT NOT NULL,
- position INTEGER NOT NULL,
- group_id INTEGER NOT NULL
- );
-
-Or even
-
- CREATE TABLE items (
- item_id INTEGER PRIMARY KEY AUTOINCREMENT,
- name TEXT NOT NULL,
- position INTEGER NOT NULL,
- group_id INTEGER NOT NULL,
- other_group_id INTEGER NOT NULL
- );
+ # Optional: group_id INTEGER NOT NULL
In your Schema or DB class add Ordered to the top
of the component list.
package My::Item;
__PACKAGE__->position_column('position');
-
-If you are using one grouping column, specify it as follows:
-
- __PACKAGE__->grouping_column('group_id');
-
-Or if you have multiple grouping columns:
-
- __PACKAGE__->grouping_column(['group_id', 'other_group_id']);
+ __PACKAGE__->grouping_column('group_id'); # optional
Thats it, now you can change the position of your objects.
$item->move_first();
$item->move_last();
$item->move_to( $position );
- $item->move_to_group( 'groupname' );
- $item->move_to_group( 'groupname', $position );
- $item->move_to_group( {group_id=>'groupname', 'other_group_id=>'othergroupname'} );
- $item->move_to_group( {group_id=>'groupname', 'other_group_id=>'othergroupname'}, $position );
=head1 DESCRIPTION
sub first_sibling {
my( $self ) = @_;
return 0 if ($self->get_column($self->position_column())==1);
-
return ($self->result_source->resultset->search(
{
$self->position_column => 1,
$self->_grouping_clause(),
});
my $op = ($from_position>$to_position) ? '+' : '-';
- $rs->update({ $position_column => \"$position_column $op 1" }); #" Sorry, GEdit bug
- $self->{_ORDERED_INTERNAL_UPDATE} = 1;
+ $rs->update({ $position_column => \"$position_column $op 1" });
$self->update({ $position_column => $to_position });
return 1;
}
-
-
-=head2 move_to_group
-
- $item->move_to_group( $group, $position );
-
-Moves the object to the specified position of the specified
-group, or to the end of the group if $position is undef.
-1 is returned on success, and 0 is returned if the object is
-already at the specified position of the specified group.
-
-$group may be specified as a single scalar if only one
-grouping column is in use, or as a hashref of column => value pairs
-if multiple grouping columns are in use.
-
-=cut
-
-sub move_to_group {
- my( $self, $to_group, $to_position ) = @_;
-
- # if we're given a string, turn it into a hashref
- unless (ref $to_group eq 'HASH') {
- $to_group = {($self->_grouping_columns)[0] => $to_group};
- }
-
- my $position_column = $self->position_column;
- #my @grouping_columns = $self->_grouping_columns;
-
- return 0 if ( ! defined($to_group) );
- return 0 if ( defined($to_position) and $to_position < 1 );
- return 0 if ( $self->_is_in_group($to_group)
- and ((not defined($to_position))
- or (defined($to_position) and $self->$position_column==$to_position)
- )
- );
-
- # Move to end of current group and adjust siblings
- $self->move_last;
-
- $self->set_columns($to_group);
- my $new_group_count = $self->result_source->resultset->search({$self->_grouping_clause()})->count();
- if (!defined($to_position) or $to_position > $new_group_count) {
- $self->{_ORDERED_INTERNAL_UPDATE} = 1;
- $self->update({ $position_column => $new_group_count + 1 });
- }
- else {
- my @between = ($to_position, $new_group_count);
-
- my $rs = $self->result_source->resultset->search({
- $position_column => { -between => [ @between ] },
- $self->_grouping_clause(),
- });
- $rs->update({ $position_column => \"$position_column + 1" }); #"
- $self->{_ORDERED_INTERNAL_UPDATE} = 1;
- $self->update({ $position_column => $to_position });
- }
-
- return 1;
-}
-
=head2 insert
Overrides the DBIC insert() method by providing a default
return $self->next::method( @_ );
}
-=head2 update
-
-Overrides the DBIC update() method by checking for a change
-to the position and/or group columns. Movement within a
-group or to another group is handled by repositioning
-the appropriate siblings. Position defaults to the end
-of a new group if it has been changed to undef.
-
-=cut
-
-sub update {
- my $self = shift;
-
- if ($self->{_ORDERED_INTERNAL_UPDATE}) {
- delete $self->{_ORDERED_INTERNAL_UPDATE};
- return $self->next::method( @_ );
- }
-
- $self->set_columns($_[0]) if @_ > 0;
- my %changes = $self->get_dirty_columns;
- $self->discard_changes;
-
- my $pos_col = $self->position_column;
-
- # if any of our grouping columns have been changed
- if (grep {$_} map {exists $changes{$_}} $self->_grouping_columns ) {
-
- # create new_group by taking the current group and inserting changes
- my $new_group = {$self->_grouping_clause};
- foreach my $col (keys %$new_group) {
- if (exists $changes{$col}) {
- $new_group->{$col} = $changes{$col};
- delete $changes{$col}; # don't want to pass this on to next::method
- }
- }
-
- $self->move_to_group(
- $new_group,
- exists($changes{$pos_col}) ? delete($changes{$pos_col}) : $self->$pos_col
- );
- }
- elsif (exists $changes{$pos_col}) {
- $self->move_to(delete $changes{$pos_col});
- }
- return $self->next::method( \%changes );
-}
-
=head2 delete
Overrides the DBIC delete() method by first moving the object
=head2 _grouping_clause
-This method returns one or more name=>value pairs for limiting a search
-by the grouping column(s). If the grouping column is not
+This method returns a name=>value pair for limiting a search
+by the collection column. If the collection column is not
defined then this will return an empty list.
=cut
-sub _grouping_clause {
- my( $self ) = @_;
- return map { $_ => $self->get_column($_) } $self->_grouping_columns();
-}
-
-
-=head2 _get_grouping_columns
-
-Returns a list of the column names used for grouping, regardless of whether
-they were specified as an arrayref or a single string, and returns ()
-if there is no grouping.
-
-=cut
-sub _grouping_columns {
+sub _grouping_clause {
my( $self ) = @_;
my $col = $self->grouping_column();
- if (ref $col eq 'ARRAY') {
- return @$col;
- } elsif ($col) {
- return ( $col );
- } else {
- return ();
+ if ($col) {
+ return ( $col => $self->get_column($col) );
}
+ return ();
}
-
-
-=head2 _is_in_group($other)
-
- $item->_is_in_group( {user => 'fred', list => 'work'} )
-
-Returns true if the object is in the group represented by hashref $other
-=cut
-sub _is_in_group {
- my ($self, $other) = @_;
- my $current = {$self->_grouping_clause};
- return 0 unless (ref $other eq 'HASH') and (keys %$current == keys %$other);
- for my $key (keys %$current) {
- return 0 unless exists $other->{$key};
- return 0 if $current->{$key} ne $other->{$key};
- }
- return 1;
-}
-
-
1;
__END__
+++ /dev/null
-package DBIx::Class::ResultClass::HashRefInflator;
-
-# $me is the hashref of cols/data from the immediate resultsource
-# $rest is a deep hashref of all the data from the prefetched
-# related sources.
-
-sub mk_hash {
- my ($me, $rest) = @_;
-
- # to avoid emtpy has_many rels contain one empty hashref
- return if (not keys %$me);
-
- return { %$me,
- map { ($_ => ref($rest->{$_}[0]) eq 'ARRAY' ? [ map { mk_hash(@$_) } @{$rest->{$_}} ] : mk_hash(@{$rest->{$_}}) ) } keys %$rest
- };
-}
-
-sub inflate_result {
- my ($self, $source, $me, $prefetch) = @_;
-
- return mk_hash($me, $prefetch);
-}
-
-1;
unless ref $values eq 'HASH';
my $cond = $self->_cond_for_update_delete;
-
+
return $self->result_source->storage->update(
- $self->result_source->from, $values, $cond
+ $self->result_source, $values, $cond
);
}
my $cond = $self->_cond_for_update_delete;
- $self->result_source->storage->delete($self->result_source->from, $cond);
+ $self->result_source->storage->delete($self->result_source, $cond);
return 1;
}
if $self->can('result_source_instance');
$self->throw_exception("No result_source set on this object; can't insert")
unless $source;
- #use Data::Dumper; warn Dumper($self);
- $source->storage->insert($source->from, { $self->get_columns });
+
+ $source->storage->insert($source, { $self->get_columns });
$self->in_storage(1);
$self->{_dirty_columns} = {};
$self->{related_resultsets} = {};
my $ident_cond = $self->ident_condition;
$self->throw_exception("Cannot safely update a row in a PK-less table")
if ! keys %$ident_cond;
+
if ($upd) {
foreach my $key (keys %$upd) {
if (ref $upd->{$key}) {
my %to_update = $self->get_dirty_columns;
return $self unless keys %to_update;
my $rows = $self->result_source->storage->update(
- $self->result_source->from, \%to_update, $self->{_orig_ident} || $ident_cond);
+ $self->result_source, \%to_update,
+ $self->{_orig_ident} || $ident_cond
+ );
if ($rows == 0) {
$self->throw_exception( "Can't update ${self}: row not found" );
} elsif ($rows > 1) {
unless exists $self->{_column_data}{$column};
}
$self->result_source->storage->delete(
- $self->result_source->from, $ident_cond);
+ $self->result_source, $ident_cond);
$self->in_storage(undef);
} else {
$self->throw_exception("Can't do class delete without a ResultSource instance")
}
return @created;
}
- $self->storage->insert_bulk($self->source($name)->from, \@names, $data);
+ $self->storage->insert_bulk($self->source($name), \@names, $data);
}
=head2 exception_action
sub _sql_maker_args {
my ($self) = @_;
- return ( limit_dialect => $self->dbh, %{$self->_sql_maker_opts} );
+ return ( bindtype=>'columns', limit_dialect => $self->dbh, %{$self->_sql_maker_opts} );
}
sub sql_maker {
my ($self, $op, $extra_bind, $ident, @args) = @_;
my ($sql, @bind) = $self->sql_maker->$op($ident, @args);
- unshift(@bind, @$extra_bind) if $extra_bind;
+ unshift(@bind,
+ map { ref $_ eq 'ARRAY' ? $_ : [ '!!dummy', $_ ] } @$extra_bind)
+ if $extra_bind;
@bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args
return ($sql, @bind);
}
sub _execute {
- my $self = shift;
-
- my ($sql, @bind) = $self->_prep_for_execute(@_);
-
+ my ($self, $op, $extra_bind, $ident, $bind_attributes, @args) = @_;
+
+ my ($sql, @bind) = $self->sql_maker->$op($ident, @args);
+ unshift(@bind,
+ map { ref $_ eq 'ARRAY' ? $_ : [ '!!dummy', $_ ] } @$extra_bind)
+ if $extra_bind;
if ($self->debug) {
- my @debug_bind = map { defined $_ ? qq{'$_'} : q{'NULL'} } @bind;
+ my @debug_bind =
+ map { defined ($_ && $_->[1]) ? qq{'$_->[1]'} : q{'NULL'} } @bind;
$self->debugobj->query_start($sql, @debug_bind);
}
+ my $sth = eval { $self->sth($sql,$op) };
- my $sth = $self->sth($sql);
+ if (!$sth || $@) {
+ $self->throw_exception(
+ 'no sth generated via sql (' . ($@ || $self->_dbh->errstr) . "): $sql"
+ );
+ }
my $rv;
if ($sth) {
my $time = time();
- $rv = eval { $sth->execute(@bind) };
+
+ $rv = eval {
+
+ my $placeholder_index = 1;
+
+ foreach my $bound (@bind) {
+
+ my $attributes = {};
+ my($column_name, @data) = @$bound;
+ if( $bind_attributes ) {
+ $attributes = $bind_attributes->{$column_name}
+ if defined $bind_attributes->{$column_name};
+ }
+
+ foreach my $data (@data)
+ {
+ $data = ref $data ? ''.$data : $data; # stringify args
+
+ $sth->bind_param($placeholder_index, $data, $attributes);
+ $placeholder_index++;
+ }
+ }
+ $sth->execute();
+ };
+
if ($@ || !$rv) {
$self->throw_exception("Error executing '$sql': ".($@ || $sth->errstr));
}
$self->throw_exception("'$sql' did not generate a statement.");
}
if ($self->debug) {
- my @debug_bind = map { defined $_ ? qq{`$_'} : q{`NULL'} } @bind;
- $self->debugobj->query_end($sql, @debug_bind);
+ my @debug_bind =
+ map { defined ($_ && $_->[1]) ? qq{'$_->[1]'} : q{'NULL'} } @bind;
+ $self->debugobj->query_end($sql, @debug_bind);
}
return (wantarray ? ($rv, $sth, @bind) : $rv);
}
sub insert {
- my ($self, $ident, $to_insert) = @_;
+ my ($self, $source, $to_insert) = @_;
+
+ my $ident = $source->from;
+ my $bind_attributes;
+ foreach my $column ($source->columns) {
+
+ my $data_type = $source->column_info($column)->{data_type} || '';
+ $bind_attributes->{$column} = $self->bind_attribute_by_data_type($data_type)
+ if $data_type;
+ }
+
$self->throw_exception(
"Couldn't insert ".join(', ',
map "$_ => $to_insert->{$_}", keys %$to_insert
)." into ${ident}"
- ) unless ($self->_execute('insert' => [], $ident, $to_insert));
+ ) unless ($self->_execute('insert' => [], $ident, $bind_attributes, $to_insert));
return $to_insert;
}
## scalar refs, or at least, all the same type as the first set, the statement is
## only prepped once.
sub insert_bulk {
- my ($self, $table, $cols, $data) = @_;
+ my ($self, $source, $cols, $data) = @_;
my %colvalues;
+ my $table = $source->from;
@colvalues{@$cols} = (0..$#$cols);
my ($sql, @bind) = $self->sql_maker->insert($table, \%colvalues);
-# print STDERR "BIND".Dumper(\@bind);
-
+
if ($self->debug) {
- my @debug_bind = map { defined $_ ? qq{'$_'} : q{'NULL'} } @bind;
+ my @debug_bind = map { defined $_->[1] ? qq{$_->[1]} : q{'NULL'} } @bind;
$self->debugobj->query_start($sql, @debug_bind);
}
my $sth = $self->sth($sql);
# @bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args
my $rv;
+
## This must be an arrayref, else nothing works!
+
my $tuple_status = [];
-# use Data::Dumper;
-# print STDERR Dumper($data);
+
+ ##use Data::Dumper;
+ ##print STDERR Dumper( $data, $sql, [@bind] );
+
if ($sth) {
+
my $time = time();
- $rv = eval { $sth->execute_array({ ArrayTupleFetch => sub { my $values = shift @$data; return if !$values; return [ @{$values}[@bind] ]},
- ArrayTupleStatus => $tuple_status }) };
-# print STDERR Dumper($tuple_status);
-# print STDERR "RV: $rv\n";
+
+ #$rv = eval {
+ #
+ # $sth->execute_array({
+
+ # ArrayTupleFetch => sub {
+
+ # my $values = shift @$data;
+ # return if !$values;
+ # return [ @{$values}[@bind] ];
+ # },
+
+ # ArrayTupleStatus => $tuple_status,
+ # })
+ #};
+
+ ## Get the bind_attributes, if any exist
+
+ my $bind_attributes;
+ foreach my $column ($source->columns) {
+
+ my $data_type = $source->column_info($column)->{data_type} || '';
+ $bind_attributes->{$column} = $self->bind_attribute_by_data_type($data_type)
+ if $data_type;
+ }
+
+ ## Bind the values and execute
+
+ $rv = eval {
+
+ my $placeholder_index = 1;
+
+ foreach my $bound (@bind) {
+
+ my $attributes = {};
+ my ($column_name, $data_index) = @$bound;
+
+ if( $bind_attributes ) {
+ $attributes = $bind_attributes->{$column_name}
+ if defined $bind_attributes->{$column_name};
+ }
+
+ my @data = map { $_->[$data_index] } @$data;
+
+ $sth->bind_param_array( $placeholder_index, [@data], $attributes );
+ $placeholder_index++;
+ }
+ $sth->execute_array( {ArrayTupleStatus => $tuple_status} );
+
+ };
+
+#print STDERR Dumper($tuple_status);
+#print STDERR "RV: $rv\n";
+
if ($@ || !defined $rv) {
my $errors = '';
foreach my $tuple (@$tuple_status)
}
sub update {
- return shift->_execute('update' => [], @_);
+ my $self = shift @_;
+ my $source = shift @_;
+
+ my $bind_attributes;
+ foreach my $column ($source->columns) {
+
+ my $data_type = $source->column_info($column)->{data_type} || '';
+ $bind_attributes->{$column} = $self->bind_attribute_by_data_type($data_type)
+ if $data_type;
+ }
+
+ my $ident = $source->from;
+ return $self->_execute('update' => [], $ident, $bind_attributes, @_);
}
+
sub delete {
- return shift->_execute('delete' => [], @_);
+ my $self = shift @_;
+ my $source = shift @_;
+
+ my $bind_attrs = {}; ## If ever it's needed...
+ my $ident = $source->from;
+
+ return $self->_execute('delete' => [], $ident, $bind_attrs, @_);
}
sub _select {
($order ? (order_by => $order) : ())
};
}
- my @args = ('select', $attrs->{bind}, $ident, $select, $condition, $order);
+ my $bind_attrs = {}; ## Future support
+ my @args = ('select', $attrs->{bind}, $ident, $bind_attrs, $select, $condition, $order);
if ($attrs->{software_limit} ||
$self->sql_maker->_default_limit_syntax eq "GenericSubQ") {
$attrs->{software_limit} = 1;
sub sqlt_type { shift->dbh->{Driver}->{Name} }
+=head2 bind_attribute_by_data_type
+
+Given a datatype from column info, returns a database-specific bind
+attribute for $dbh->bind_param($val, $attribute), or nothing if we will
+let the database planner just handle it.
+
+Generally only needed for special case column types, like bytea in postgres.
+
+=cut
+
+sub bind_attribute_by_data_type {
+ return;
+}
+
=head2 create_ddl_dir (EXPERIMENTAL)
=over 4
use strict;
use warnings;
-use DBD::Pg;
+use DBD::Pg qw(:pg_types);
use base qw/DBIx::Class::Storage::DBI/;
sub datetime_parser_type { return "DateTime::Format::Pg"; }
+sub bind_attribute_by_data_type {
+ my ($self,$data_type) = @_;
+
+ my $bind_attributes = {
+ bytea => { pg_type => DBD::Pg::PG_BYTEA },
+ };
+
+ if( defined $bind_attributes->{$data_type} ) {
+ return $bind_attributes->{$data_type};
+ }
+ else {
+ return;
+ }
+}
+
1;
=head1 NAME
eval { require DateTime };
plan skip_all => "Need DateTime for inflation tests" if $@;
-plan tests => 20;
+plan tests => 4;
$schema->class('CD')
#DBICTest::Schema::CD
is( $cd->year->month, 1, 'inflated month ok' );
-eval { $cd->year(\'year +1'); };
-ok(!$@, 'updated year using a scalarref');
-$cd->update();
-$cd->discard_changes();
-
-is( ref($cd->year), 'DateTime', 'year is still a DateTime, ok' );
-
-is( $cd->year->year, 1998, 'updated year, bypassing inflation' );
-
-is( $cd->year->month, 1, 'month is still 1' );
-
-# get_inflated_column test
-
-is( ref($cd->get_inflated_column('year')), 'DateTime', 'get_inflated_column produces a DateTime');
-
# deflate test
my $now = DateTime->now;
$cd->year( $now );
$cd->update;
-$cd = $schema->resultset("CD")->find(3);
+($cd) = $schema->resultset("CD")->search( year => $now->year );
is( $cd->year->year, $now->year, 'deflate ok' );
-# set_inflated_column test
-eval { $cd->set_inflated_column('year', $now) };
-ok(!$@, 'set_inflated_column with DateTime object');
-$cd->update;
-
-$cd = $schema->resultset("CD")->find(3);
-is( $cd->year->year, $now->year, 'deflate ok' );
-
-$cd = $schema->resultset("CD")->find(3);
-my $before_year = $cd->year->year;
-eval { $cd->set_inflated_column('year', \'year + 1') };
-ok(!$@, 'set_inflated_column to "year + 1"');
-$cd->update;
-
-$cd = $schema->resultset("CD")->find(3);
-is( $cd->year->year, $before_year+1, 'deflate ok' );
-
-# store_inflated_column test
-$cd = $schema->resultset("CD")->find(3);
-eval { $cd->store_inflated_column('year', $now) };
-ok(!$@, 'store_inflated_column with DateTime object');
-$cd->update;
-
-is( $cd->year->year, $now->year, 'deflate ok' );
-
-# update tests
-$cd = $schema->resultset("CD")->find(3);
-eval { $cd->update({'year' => $now}) };
-ok(!$@, 'update using DateTime object ok');
-is($cd->year->year, $now->year, 'deflate ok');
-
-$cd = $schema->resultset("CD")->find(3);
-$before_year = $cd->year->year;
-eval { $cd->update({'year' => \'year + 1'}) };
-ok(!$@, 'update using scalarref ok');
-
-$cd = $schema->resultset("CD")->find(3);
-is($cd->year->year, $before_year + 1, 'deflate ok');
-
-# discard_changes test
-$cd = $schema->resultset("CD")->find(3);
-# inflate the year
-$before_year = $cd->year->year;
-$cd->update({ year => \'year + 1'});
-$cd->discard_changes;
-
-is($cd->year->year, $before_year + 1, 'discard_changes clears the inflated value');
-
-# eval { $cd->store_inflated_column('year', \'year + 1') };
-# print STDERR "ERROR: $@" if($@);
-# ok(!$@, 'store_inflated_column to "year + 1"');
-
-# is_deeply( $cd->year, \'year + 1', 'deflate ok' );
-
+++ /dev/null
-use strict;
-use warnings;
-
-use Test::More qw(no_plan);
-use lib qw(t/lib);
-use DBICTest;
-use DBIx::Class::ResultClass::HashRefInflator;
-my $schema = DBICTest->init_schema();
-
-
-# Under some versions of SQLite if the $rs is left hanging around it will lock
-# So we create a scope here cos I'm lazy
-{
- my $rs = $schema->resultset('CD');
-
- # get the defined columns
- my @dbic_cols = sort $rs->result_source->columns;
-
- # use the hashref inflator class as result class
- $rs->result_class('DBIx::Class::ResultClass::HashRefInflator');
-
- # fetch first record
- my $datahashref1 = $rs->first;
-
- my @hashref_cols = sort keys %$datahashref1;
-
- is_deeply( \@dbic_cols, \@hashref_cols, 'returned columns' );
-}
-
-
-sub check_cols_of {
- my ($dbic_obj, $datahashref) = @_;
-
- foreach my $col (keys %$datahashref) {
- # plain column
- if (not ref ($datahashref->{$col}) ) {
- is ($datahashref->{$col}, $dbic_obj->get_column($col), 'same value');
- }
- # related table entry (belongs_to)
- elsif (ref ($datahashref->{$col}) eq 'HASH') {
- check_cols_of($dbic_obj->$col, $datahashref->{$col});
- }
- # multiple related entries (has_many)
- elsif (ref ($datahashref->{$col}) eq 'ARRAY') {
- my @dbic_reltable = $dbic_obj->$col;
- my @hashref_reltable = @{$datahashref->{$col}};
-
- is (scalar @hashref_reltable, scalar @dbic_reltable, 'number of related entries');
-
- # for my $index (0..scalar @hashref_reltable) {
- for my $index (0..scalar @dbic_reltable) {
- my $dbic_reltable_obj = $dbic_reltable[$index];
- my $hashref_reltable_entry = $hashref_reltable[$index];
-
- check_cols_of($dbic_reltable_obj, $hashref_reltable_entry);
- }
- }
- }
-}
-
-# create a cd without tracks for testing empty has_many relationship
-$schema->resultset('CD')->create({ title => 'Silence is golden', artist => 3, year => 2006 });
-
-# order_by to ensure both resultsets have the rows in the same order
-my $rs_dbic = $schema->resultset('CD')->search(undef,
- {
- prefetch => [ qw/ artist tracks / ],
- order_by => [ 'me.cdid', 'tracks.position' ],
- }
-);
-my $rs_hashrefinf = $schema->resultset('CD')->search(undef,
- {
- prefetch => [ qw/ artist tracks / ],
- order_by => [ 'me.cdid', 'tracks.position' ],
- }
-);
-$rs_hashrefinf->result_class('DBIx::Class::ResultClass::HashRefInflator');
-
-my @dbic = $rs_dbic->all;
-my @hashrefinf = $rs_hashrefinf->all;
-
-for my $index (0..scalar @hashrefinf) {
- my $dbic_obj = $dbic[$index];
- my $datahashref = $hashrefinf[$index];
-
- check_cols_of($dbic_obj, $datahashref);
-}
plan (skip_all => "No suitable serializer found") unless $selected;
-plan (tests => 8);
+plan (tests => 6);
DBICTest::Schema::Serialized->inflate_column( 'serialized',
{ inflate => $selected->{inflater},
deflate => $selected->{deflater},
ok($inflated = $entry->serialized, 'hashref inflation ok');
is_deeply($inflated, $complex1->{serialized}, 'inflated hash matches original');
-my $entry2 = $rs->create({ id => 2, serialized => ''});
-
-eval { $entry2->set_inflated_column('serialized', $complex1->{serialized}) };
-ok(!$@, 'set_inflated_column to a hashref');
-$entry2->update;
-is_deeply($entry2->serialized, $complex1->{serialized}, 'inflated hash matches original');
-
ok($entry->update ({ %{$complex2} }), 'arrayref deflation ok');
ok($inflated = $entry->serialized, 'arrayref inflation ok');
is_deeply($inflated, $complex2->{serialized}, 'inflated array matches original');
use lib qw(t/lib);
use DBICTest;
-use POSIX qw(ceil);
-
my $schema = DBICTest->init_schema();
-plan tests => 879;
+plan tests => 321;
my $employees = $schema->resultset('Employee');
$employees->delete();
DBICTest::Employee->grouping_column('group_id');
$employees->delete();
-foreach my $group_id (1..4) {
+foreach my $group_id (1..3) {
foreach (1..6) {
$employees->create({ name=>'temp', group_id=>$group_id });
}
}
$employees = $employees->search(undef,{order_by=>'group_id,position'});
-foreach my $group_id (1..4) {
+foreach my $group_id (1..3) {
my $group_employees = $employees->search({group_id=>$group_id});
$group_employees->all();
ok( check_rs($group_employees), "group intial positions" );
hammer_rs( $group_employees );
}
-my $group_3 = $employees->search({group_id=>3});
-my $to_group = 1;
-my $to_pos = undef;
-while (my $employee = $group_3->next) {
- $employee->move_to_group($to_group, $to_pos);
- $to_pos++;
- $to_group = $to_group==1 ? 2 : 1;
-}
-foreach my $group_id (1..4) {
- my $group_employees = $employees->search({group_id=>$group_id});
- $group_employees->all();
- ok( check_rs($group_employees), "group positions after move_to_group" );
-}
-
-my $employee = $employees->search({group_id=>4})->first;
-$employee->position(2);
-$employee->update;
-ok( check_rs($employees->search_rs({group_id=>4})), "overloaded update 1" );
-$employee = $employees->search({group_id=>4})->first;
-$employee->update({position=>3});
-ok( check_rs($employees->search_rs({group_id=>4})), "overloaded update 2" );
-$employee = $employees->search({group_id=>4})->first;
-$employee->group_id(1);
-$employee->update;
-ok(
- check_rs($employees->search_rs({group_id=>1})) && check_rs($employees->search_rs({group_id=>4})),
- "overloaded update 3"
-);
-$employee = $employees->search({group_id=>4})->first;
-$employee->update({group_id=>2});
-ok(
- check_rs($employees->search_rs({group_id=>2})) && check_rs($employees->search_rs({group_id=>4})),
- "overloaded update 4"
-);
-$employee = $employees->search({group_id=>4})->first;
-$employee->group_id(1);
-$employee->position(3);
-$employee->update;
-ok(
- check_rs($employees->search_rs({group_id=>1})) && check_rs($employees->search_rs({group_id=>4})),
- "overloaded update 5"
-);
-$employee = $employees->search({group_id=>4})->first;
-$employee->group_id(2);
-$employee->position(undef);
-$employee->update;
-ok(
- check_rs($employees->search_rs({group_id=>2})) && check_rs($employees->search_rs({group_id=>4})),
- "overloaded update 6"
-);
-$employee = $employees->search({group_id=>4})->first;
-$employee->update({group_id=>1,position=>undef});
-ok(
- check_rs($employees->search_rs({group_id=>1})) && check_rs($employees->search_rs({group_id=>4})),
- "overloaded update 7"
-);
-
-# multicol tests begin here
-DBICTest::Employee->grouping_column(['group_id', 'group_id_2']);
-$employees->delete();
-foreach my $group_id (1..4) {
- foreach my $group_id_2 (1..4) {
- foreach (1..4) {
- $employees->create({ name=>'temp', group_id=>$group_id, group_id_2=>$group_id_2 });
- }
- }
-}
-$employees = $employees->search(undef,{order_by=>'group_id,group_id_2,position'});
-
-foreach my $group_id (1..3) {
- foreach my $group_id_2 (1..3) {
- my $group_employees = $employees->search({group_id=>$group_id, group_id_2=>$group_id_2});
- $group_employees->all();
- ok( check_rs($group_employees), "group intial positions" );
- hammer_rs( $group_employees );
- }
-}
-
-# move_to_group, specifying group by hash
-my $group_4 = $employees->search({group_id=>4});
-$to_group = 1;
-my $to_group_2_base = 7;
-my $to_group_2 = 1;
-$to_pos = undef;
-while (my $employee = $group_4->next) {
- $employee->move_to_group({group_id=>$to_group, group_id_2=>$to_group_2}, $to_pos);
- $to_pos++;
- $to_group = ($to_group % 3) + 1;
- $to_group_2_base++;
- $to_group_2 = (ceil($to_group_2_base/3.0) %3) +1
-}
-foreach my $group_id (1..4) {
- foreach my $group_id_2 (1..4) {
- my $group_employees = $employees->search({group_id=>$group_id,group_id_2=>$group_id_2});
- $group_employees->all();
- ok( check_rs($group_employees), "group positions after move_to_group" );
- }
-}
-
-$employees->delete();
-foreach my $group_id (1..4) {
- foreach my $group_id_2 (1..4) {
- foreach (1..4) {
- $employees->create({ name=>'temp', group_id=>$group_id, group_id_2=>$group_id_2 });
- }
- }
-}
-$employees = $employees->search(undef,{order_by=>'group_id,group_id_2,position'});
-
-$employee = $employees->search({group_id=>4, group_id_2=>1})->first;
-$employee->group_id(1);
-$employee->update;
-ok(
- check_rs($employees->search_rs({group_id=>4, group_id_2=>1}))
- && check_rs($employees->search_rs({group_id=>1, group_id_2=>1})),
- "overloaded multicol update 1"
-);
-
-$employee = $employees->search({group_id=>4, group_id_2=>1})->first;
-$employee->update({group_id=>2});
-ok( check_rs($employees->search_rs({group_id=>4, group_id_2=>1}))
- && check_rs($employees->search_rs({group_id=>2, group_id_2=>1})),
- "overloaded multicol update 2"
-);
-
-$employee = $employees->search({group_id=>3, group_id_2=>1})->first;
-$employee->group_id(1);
-$employee->group_id_2(3);
-$employee->update();
-ok( check_rs($employees->search_rs({group_id=>3, group_id_2=>1}))
- && check_rs($employees->search_rs({group_id=>1, group_id_2=>3})),
- "overloaded multicol update 3"
-);
-
-$employee = $employees->search({group_id=>3, group_id_2=>1})->first;
-$employee->update({group_id=>2, group_id_2=>3});
-ok( check_rs($employees->search_rs({group_id=>3, group_id_2=>1}))
- && check_rs($employees->search_rs({group_id=>2, group_id_2=>3})),
- "overloaded multicol update 4"
-);
-
-$employee = $employees->search({group_id=>3, group_id_2=>2})->first;
-$employee->update({group_id=>2, group_id_2=>4, position=>2});
-ok( check_rs($employees->search_rs({group_id=>3, group_id_2=>2}))
- && check_rs($employees->search_rs({group_id=>2, group_id_2=>4})),
- "overloaded multicol update 5"
-);
-
sub hammer_rs {
my $rs = shift;
my $employee;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my ($dsn, $dbuser, $dbpass) = @ENV{map { "DBICTEST_PG_${_}" } qw/DSN USER PASS/};
+
+$dsn = 'dbi:Pg:dbname=postgres;host=localhost' unless $dsn;
+$dbuser = 'postgres' unless $dbuser;
+$dbpass = 'postgres' unless $dbpass;
+
+plan skip_all => 'Set $ENV{DBICTEST_PG_DSN}, _USER and _PASS to run this test'
+ unless ($dsn && $dbuser);
+
+plan tests => 3;
+
+DBICTest::Schema->compose_connection('PGTest' => $dsn, $dbuser, $dbpass);
+
+my $dbh = PGTest->schema->storage->dbh;
+
+$dbh->do(qq[
+
+ CREATE TABLE artist
+ (
+ artistid serial NOT NULL PRIMARY KEY,
+ media bytea NOT NULL,
+ name varchar NULL
+ );
+],{ RaiseError => 1, PrintError => 1 });
+
+
+PGTest::Artist->load_components(qw/
+
+ PK::Auto
+ Core
+/);
+
+PGTest::Artist->add_columns(
+
+ "media", {
+
+ data_type => "bytea",
+ is_nullable => 0,
+ },
+);
+
+# test primary key handling
+my $big_long_string = 'abcd' x 250000;
+
+my $new = PGTest::Artist->create({ media => $big_long_string });
+
+ok($new->artistid, "Created a blob row");
+is($new->media, $big_long_string, "Set the blob correctly.");
+
+my $rs = PGTest::Artist->find({artistid=>$new->artistid});
+
+is($rs->get_column('media'), $big_long_string, "Created the blob correctly.");
+
+$dbh->do("DROP TABLE artist");
+
+
+
__PACKAGE__->add_columns(
'artistid' => {
data_type => 'integer',
- is_auto_increment => 1
+ is_auto_increment => 1,
},
'name' => {
data_type => 'varchar',
data_type => 'integer',
is_nullable => 1,
},
- group_id_2 => {
- data_type => 'integer',
- is_nullable => 1,
- },
name => {
data_type => 'varchar',
size => 100,
employee_id INTEGER PRIMARY KEY NOT NULL,
position integer NOT NULL,
group_id integer,
- group_id_2 integer,
name varchar(100)
);