return shift->_per_row_update_delete (@_);
}
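+# Maximum number of rows sent per INSERT statement; _insert_bulk splits
+# larger datasets into chunks of this size.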
+my $INSERT_BULK_SIZE = 80;
sub _insert_bulk {
my ($self, $source, $cols, $colvalues, $data) = @_;
my $bind_attrs = $self->source_bind_attributes($source);
+  # Drain all full-size chunks up front; each intermediate _execute runs in
+  # void context, and only the final call below returns to the caller.
+ while ( @$data > $INSERT_BULK_SIZE ) {
+ my @this_data = splice @$data, 0, $INSERT_BULK_SIZE;
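+    # Intermediate chunks are executed for side effect only; their return
+    # values are intentionally discarded.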
+ $self->_execute(
+ 'insert_bulk' => [], $source, $bind_attrs, \@this_data, $cols,
+ );
+ }
+
+  # Keep this final call out of the while-loop above: it must be the sub's
+  # return expression so the caller's list/scalar context is preserved.
return $self->_execute(
'insert_bulk' => [], $source, $bind_attrs, $data, $cols,
);
}

is( $schema->resultset('Artist')->find( 100 )->name, 'John' );
+# Verify that inserting 600 rows leaves the returned SQL with only 40 VALUES
+# tuples: rows go in 80 at a time (7 full chunks of 80, then a final chunk
+# of 40), and only the last chunk's statement and binds are returned. All
+# 600 rows must nonetheless end up in the table.
+{
+ my $rsrc = $schema->resultset('Artist')->result_source;
+ my ($rv, $sth, @bind) = $schema->storage->insert_bulk(
+ $rsrc,
+ [qw/ artistid name rank charfield /],
+ [
+ (map { [ 1000 + $_, "X $_", $_ + 1000, "Y $_" ] } 1 .. 600),
+ ],
+ );
+
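+  # $sth and @bind come from the final _execute call, so they describe only
+  # the trailing chunk of 40 rows (rows 561 .. 600).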
+ is_same_sql_bind(
+ $sth->{Statement},
+ \@bind,
+ q{INSERT INTO artist ( artistid, name, rank, charfield ) VALUES } . join(', ', ('( ?, ?, ?, ? )') x 40 ),
+ [
+ map { [ dummy => $_ ] } (
+ map { 1000 + $_, "X $_", $_ + 1000, "Y $_" } 561 .. 600
+ ),
+ ],
+ );
+
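+  # Spot-check the first row of the first chunk and confirm the total count.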
+ is( $schema->resultset('Artist')->find( 1001 )->name, 'X 1' );
+ cmp_ok( $schema->resultset('Artist')->search({ artistid => { '>' => 1000 }})->count, '==', 600 );
+}
+
done_testing;