Revision history for DBIx::Class
+
+ - is_deferrable support on relations used by the SQL::Translator
+ parser (Anders Nor Berle)
+ - Refactored DBIx::Class::Schema::Versioned
+ - Syntax errors from resultset components are now reported correctly
+
+0.08010 2008-03-01 10:30
+ - Fix t/94versioning.t so it passes with latest SQL::Translator
+
+0.08009 2008-01-20 13:30
+ - Made search_rs smarter about when to preserve the cache to fix
+ mm prefetch usage
+ - Added Storage::DBI subclass for MSSQL over ODBC.
+ - Added freeze, thaw and dclone methods to Schema so that thawed
+ objects will get re-attached to the schema.
+ - Moved dbicadmin to JSON::Any wrapped JSON.pm for a sane API
+ - introduced DBIx::Class::set_inflated_columns
+ - DBIx::Class::Row::copy uses set_inflated_columns
+
+0.08008 2007-11-16 14:30:00
- Fixed join merging bug (test from Zby)
- When adding relationships, it will throw an exception if you get the
foreign and self parts the wrong way round in the condition
- ResultSource::reverse_relationship_info can distinguish between
sources using the same table
- Row::insert will now not fall over if passed duplicate related objects
+ - Row::copy will not fall over if you have two relationships to the
+ same source with a unique constraint on it
0.08007 2007-09-04 19:36:00
- patch for Oracle datetime inflation (abram@arin.net)
requires 'Module::Find' => 0;
requires 'Class::Inspector' => 0;
requires 'Class::Accessor::Grouped' => 0.05002;
-requires 'JSON' => 1.00;
+requires 'JSON::Any' => 1.00;
requires 'Scope::Guard' => 0.03;
# Perl 5.8.0 doesn't have utf8::is_utf8()
if( -e 'inc/.author' ) {
build_requires 'DBIx::ContextualFetch';
build_requires 'Class::Trigger';
+ build_requires 'Time::Piece';
+
system('pod2text lib/DBIx/Class.pm > README');
}
auto_install;
WriteAll;
+
+
+if ($Module::Install::AUTHOR) {
+ # Need to do this _after_ WriteAll else it loses track of them
+ Meta->{values}{build_requires} = [ grep {
+ $_->[0] !~ /
+ DBIx::ContextualFetch |
+ Class::Trigger |
+ Time::Piece
+ /x;
+ } @{Meta->{values}{build_requires}} ];
+
+ my @scalar_keys = Module::Install::Metadata::Meta_TupleKeys();
+ sub Module::Install::Metadata::Meta_TupleKeys {
+ return @scalar_keys, 'resources';
+ }
+ Meta->{values}{resources} = [
+ [ 'MailingList', 'http://lists.scsys.co.uk/cgi-bin/mailman/listinfo/dbix-class' ],
+ [ 'IRC', 'irc://irc.perl.org/#dbix-class' ],
+ ];
+ Meta->write;
+}
+
+
+
# i.e. first release of 0.XX *must* be 0.XX000. This avoids fBSD ports
# brain damage and presumably various other packaging systems too
-$VERSION = '0.08007';
+$VERSION = '0.08010';
sub MODIFY_CODE_ATTRIBUTES {
my ($class,$code,@attrs) = @_;
da5id: David Jack Olrik <djo@cpan.org>
+debolaz: Anders Nor Berle <berle@cpan.org>
+
dkubb: Dan Kubb <dan.kubb-cpan@onautopilot.com>
dnm: Justin Wheeler <jwheeler@datademons.com>
Numa: Dan Sully <daniel@cpan.org>
+oyse: Øystein Torget <oystein.torget@dnv.com>
+
paulm: Paul Makepeace
penguin: K J Cheetham
+perigrin: Chris Prather <chris@prather.org>
+
phaylon: Robert Sedlacek <phaylon@dunkelheit.at>
quicksilver: Jules Bean
sszabo: Stephan Szabo <sszabo@bigpanda.com>
+teejay: Aaron Trevena <teejay@cpan.org>
+
Todd Lipcon
Tom Hukins
Constraints
Triggers
ReadOnly
- GetSet
LiveObjectIndex
AttributeAPI
Stringify
Constructor
AccessorMapping
ColumnCase
- HasA
- HasMany
- MightHave
+ Relationships
+ Copy
LazyLoading
AutoUpdate
TempColumns
+ GetSet
Retrieve
Pager
ColumnGroups
- ImaDBI/);
+ ColumnsAsHash
+ AbstractSearch
+ ImaDBI
+ Iterator
+/);
#DBIx::Class::ObjIndexStubs
1;
=head1 SYNOPSIS
- use base qw/DBIx::Class/;
- __PACKAGE__->load_components(qw/CDBICompat Core DB/);
+ package My::CDBI;
+ use base qw/DBIx::Class::CDBICompat/;
+
+ ...continue as Class::DBI...
=head1 DESCRIPTION
DBIx::Class provides a fully featured compatibility layer with L<Class::DBI>
-to ease transition for existing CDBI users. In fact, this class is just a
-receipe containing all the features emulated. If you like, you can choose
-which features to emulate by building your own class and loading it like
-this:
+and some common plugins to ease transition for existing CDBI users.
+
+This is not a wrapper or subclass of DBIx::Class but rather a series of plugins. The result is that even though you're using the Class::DBI emulation layer you are still getting DBIx::Class objects, so you can use all DBIx::Class features and methods via CDBICompat. This allows you to take advantage of DBIx::Class features without having to rewrite your CDBI code.
+
+
+=head2 Plugins
+
+CDBICompat is good enough that many CDBI plugins will work with it, but many of the plugin features are better done with DBIx::Class methods.
+
+=head3 Class::DBI::AbstractSearch
+
+C<search_where()> is fully emulated using DBIC's search. Aside from emulation there's no reason to use C<search_where()>.
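+
+A sketch of the equivalence (C<My::Film> is a hypothetical class):
+
+    # CDBI style, still works under CDBICompat
+    my @films = My::Film->search_where({ rating => 'PG' }, { limit => 10 });
+
+    # the native DBIC equivalent
+    my @same = My::Film->search({ rating => 'PG' }, { rows => 10 });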
+
+=head3 Class::DBI::Plugin::NoCache
+
+C<nocache> is fully emulated.
+
+=head3 Class::DBI::Sweet
+
+The features of CDBI::Sweet are better done using DBIC methods, which are almost exactly the same. It even uses L<Data::Page>.
+
+=head3 Class::DBI::Plugin::DeepAbstractSearch
+
+This is better done using DBIC's native search facilities. The major difference is that DBIC will not infer the join for you; you have to tell it which tables to join.
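+
+A minimal sketch, assuming a hypothetical C<My::CD> class with an C<artist>
+relationship:
+
+    # DeepAbstractSearch would infer the join from 'artist.name';
+    # with DBIC you declare it yourself
+    my @cds = My::CD->search(
+        { 'artist.name' => 'Frank Sinatra' },
+        { join => 'artist' },
+    );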
+
+
+=head2 Choosing Features
+
+In fact, this class is just a recipe containing all the features emulated.
+If you like, you can choose which features to emulate by building your
+own class and loading it like this:
+
+ package My::DB;
__PACKAGE__->load_own_components(qw/CDBICompat/);
this will automatically load the features included in My::DB::CDBICompat,
CDBICompat::MightHave
/);
-=head1 COMPONENTS
-
-=over 4
-
-=item AccessorMapping
-
-=item AttributeAPI
-=item AutoUpdate
+=head1 LIMITATIONS
-Allows you to turn on automatic updates for column values.
+=head2 Unimplemented
-=item ColumnCase
+The following methods and classes are not emulated, maybe in the future.
-=item ColumnGroups
+=over 4
-=item Constraints
+=item Class::DBI::Query
-=item Constructor
+Deprecated in Class::DBI.
-=item DestroyWarning
+=item Class::DBI::Column
-=item GetSet
+Not documented in Class::DBI. CDBICompat's columns() returns a plain string, not an object.
-=item HasA
+=item data_type()
-=item HasMany
+Undocumented CDBI method.
-=item ImaDBI
+=back
-=item LazyLoading
+=head2 Limited Support
-=item LiveObjectIndex
+The following elements of Class::DBI have limited support.
-The live object index tries to ensure there is only one version of a object
-in the perl interpreter.
+=over 4
-=item MightHave
+=item Class::DBI::Relationship
-=item ObjIndexStubs
+The semi-documented Class::DBI::Relationship objects returned by C<meta_info($type, $col)> are mostly emulated except for their C<args> method.
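+
+For example, given a hypothetical C<Film> class with a C<director> has_a,
+the emulated object can be queried like this:
+
+    my $rel = Film->meta_info(has_a => 'director');
+    my $f_class = $rel->foreign_class;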
-=item ReadOnly
+=item Relationships
-=item Retrieve
+Relationships between tables (has_a, has_many...) must be declared after all tables in the relationship have been declared. Thus the usual CDBI idiom of declaring columns and relationships for each class together will not work. They must instead be done like so:
-=item Stringify
+ package Foo;
+ use base qw(Class::DBI);
+
+ Foo->table("foo");
+ Foo->columns( All => qw(this that bar) );
-=item TempColumns
+ package Bar;
+ use base qw(Class::DBI);
+
+ Bar->table("bar");
+ Bar->columns( All => qw(up down) );
-=item Triggers
+ # Now that Foo and Bar are declared it is safe to declare a
+ # relationship between them
+ Foo->has_a( bar => "Bar" );
-=item PassThrough
=back
--- /dev/null
+package # hide from PAUSE
+ DBIx::Class::CDBICompat::AbstractSearch;
+
+use strict;
+use warnings;
+
+=head1 NAME
+
+DBIx::Class::CDBICompat::AbstractSearch
+
+=head1 SYNOPSIS
+
+See DBIx::Class::CDBICompat for directions for use.
+
+=head1 DESCRIPTION
+
+Emulates L<Class::DBI::AbstractSearch>.
+
+=cut
+
+# The keys are mostly the same.
+my %cdbi2dbix = (
+ limit => 'rows',
+);
+
+sub search_where {
+ my $class = shift;
+ my $where = (ref $_[0]) ? $_[0] : { @_ };
+ my $attr = (ref $_[0]) ? $_[1] : {};
+
+ # Translate the keys
+ $attr->{$cdbi2dbix{$_}} = delete $attr->{$_} for keys %cdbi2dbix;
+
+ return $class->resultset_instance->search($where, $attr);
+}
+
+1;
sub mk_group_accessors {
my ($class, $group, @cols) = @_;
- unless ($class->can('accessor_name') || $class->can('mutator_name')) {
+ unless ($class->_can_accessor_name_for || $class->_can_mutator_name_for) {
return $class->next::method($group => @cols);
}
foreach my $col (@cols) {
- my $ro_meth = ($class->can('accessor_name')
- ? $class->accessor_name($col)
- : $col);
- my $wo_meth = ($class->can('mutator_name')
- ? $class->mutator_name($col)
- : $col);
- #warn "$col $ro_meth $wo_meth";
- if ($ro_meth eq $wo_meth) {
+ my $ro_meth = $class->_try_accessor_name_for($col);
+ my $wo_meth = $class->_try_mutator_name_for($col);
+
+ # warn "class: $class / col: $col / ro: $ro_meth / wo: $wo_meth\n";
+ if ($ro_meth eq $wo_meth or # they're the same
+ $wo_meth eq $col) # or only the accessor is custom
+ {
$class->next::method($group => [ $ro_meth => $col ]);
} else {
$class->mk_group_ro_accessors($group => [ $ro_meth => $col ]);
}
}
+# CDBI 3.0.7 decided to change "accessor_name" and "mutator_name" to
+# "accessor_name_for" and "mutator_name_for". This is recent enough
+# that we should support both. CDBI does.
+sub _can_accessor_name_for {
+ my $class = shift;
+ return $class->can("accessor_name") || $class->can("accessor_name_for");
+}
+
+sub _can_mutator_name_for {
+ my $class = shift;
+ return $class->can("mutator_name") || $class->can("mutator_name_for");
+}
+
+sub _try_accessor_name_for {
+ my($class, $column) = @_;
+
+ my $method = $class->_can_accessor_name_for;
+ return $column unless $method;
+ return $class->$method($column);
+}
+
+sub _try_mutator_name_for {
+ my($class, $column) = @_;
+
+ my $method = $class->_can_mutator_name_for;
+ return $column unless $method;
+ return $class->$method($column);
+}
+
+
sub new {
my ($class, $attrs, @rest) = @_;
$class->throw_exception( "create needs a hashref" ) unless ref $attrs eq 'HASH';
foreach my $col ($class->columns) {
- if ($class->can('accessor_name')) {
- my $acc = $class->accessor_name($col);
+ if ($class->_can_accessor_name_for) {
+ my $acc = $class->_try_accessor_name_for($col);
$attrs->{$col} = delete $attrs->{$acc} if exists $attrs->{$acc};
}
- if ($class->can('mutator_name')) {
- my $mut = $class->mutator_name($col);
+ if ($class->_can_mutator_name_for) {
+ my $mut = $class->_try_mutator_name_for($col);
$attrs->{$col} = delete $attrs->{$mut} if exists $attrs->{$mut};
}
}
return \%new_query;
}
+
+# CDBI will never overwrite an accessor, but it only uses one
+# accessor for all column types. DBIC uses many different
+# accessor types so, for example, if you declare a column()
+# and then a has_a() for that same column it must overwrite.
+#
+# To make this work CDBICompat has to decide if an accessor
+# method was put there by itself and only then overwrite.
+{
+ my %our_accessors;
+
+ sub _has_custom_accessor {
+ my($class, $name) = @_;
+
+ no strict 'refs';
+ my $existing_accessor = *{$class .'::'. $name}{CODE};
+ return $existing_accessor && !$our_accessors{$existing_accessor};
+ }
+
+ sub _deploy_accessor {
+ my($class, $name, $accessor) = @_;
+
+ return if $class->_has_custom_accessor($name);
+
+ for my $name ($name, lc $name) {
+ no strict 'refs';
+ no warnings 'redefine';
+ *{$class .'::'. $name} = $accessor;
+ }
+
+ $our_accessors{$accessor}++;
+
+ return 1;
+ }
+}
+
sub _mk_group_accessors {
my ($class, $type, $group, @fields) = @_;
- #warn join(', ', map { ref $_ ? (@$_) : ($_) } @fields);
- my @extra;
- foreach (@fields) {
- my ($acc, $field) = ref $_ ? @$_ : ($_, $_);
- #warn "$acc ".lc($acc)." $field";
- next if defined &{"${class}::${acc}"};
- push(@extra, [ lc $acc => $field ]);
+
+ # So we don't have to do lots of lookups inside the loop.
+ my $maker = ref $type ? $type : $class->can($type);
+
+ # warn "$class $type $group\n";
+ foreach my $field (@fields) {
+ if( $field eq 'DESTROY' ) {
+ carp("Having a data accessor named DESTROY in ".
+ "'$class' is unwise.");
+ }
+
+ my $name = $field;
+
+ ($name, $field) = @$field if ref $field;
+
+ my $accessor = $class->$maker($group, $field);
+ my $alias = "_${name}_accessor";
+
+ # warn " $field $alias\n";
+ {
+ no strict 'refs';
+
+ $class->_deploy_accessor($name, $accessor);
+ $class->_deploy_accessor($alias, $accessor);
+ }
}
- return $class->next::method($type, $group,
- @fields, @extra);
}
sub new {
use strict;
use warnings;
+use Storable 'dclone';
+
use base qw/DBIx::Class::Row/;
__PACKAGE__->mk_classdata('_column_groups' => { });
my $proto = shift;
my $class = ref $proto || $proto;
my $group = shift || "All";
+ $class->_init_result_source_instance();
+
$class->_add_column_group($group => @_) if @_;
return $class->all_columns if $group eq "All";
return $class->primary_column if $group eq "Primary";
sub _register_column_group {
my ($class, $group, @cols) = @_;
- my $groups = { %{$class->_column_groups} };
+ # Must do a complete deep copy else column groups
+ # might accidentally be shared.
+ my $groups = dclone $class->_column_groups;
if ($group eq 'Primary') {
$class->set_primary_key(@cols);
- $groups->{'Essential'}{$_} ||= {} for @cols;
+ $groups->{'Essential'}{$_} ||= 1 for @cols;
}
if ($group eq 'All') {
unless (exists $class->_column_groups->{'Primary'}) {
- $groups->{'Primary'}{$cols[0]} = {};
+ $groups->{'Primary'}{$cols[0]} = 1;
$class->set_primary_key($cols[0]);
}
unless (exists $class->_column_groups->{'Essential'}) {
- $groups->{'Essential'}{$cols[0]} = {};
+ $groups->{'Essential'}{$cols[0]} = 1;
}
}
- $groups->{$group}{$_} ||= {} for @cols;
+ $groups->{$group}{$_} ||= 1 for @cols;
$class->_column_groups($groups);
}
--- /dev/null
+package
+ DBIx::Class::CDBICompat::ColumnsAsHash;
+
+use strict;
+use warnings;
+
+
+=head1 NAME
+
+DBIx::Class::CDBICompat::ColumnsAsHash
+
+=head1 SYNOPSIS
+
+See DBIx::Class::CDBICompat for directions for use.
+
+=head1 DESCRIPTION
+
+Emulates the I<undocumented> behavior of Class::DBI where the object can be accessed as a hash of columns. This is often used as a performance hack.
+
+ my $column = $row->{column};
+
+=head2 Differences from Class::DBI
+
+If C<DBIC_CDBICOMPAT_HASH_WARN> is true it will warn when a column is accessed as a hash key.
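+
+For example (C<title> is a hypothetical column):
+
+    $ENV{DBIC_CDBICOMPAT_HASH_WARN} = 1;
+    my $title = $row->{title};   # carps that the column was fetched as a hash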
+
+=cut
+
+sub new {
+ my $class = shift;
+
+ my $new = $class->next::method(@_);
+
+ $new->_make_columns_as_hash;
+
+ return $new;
+}
+
+sub inflate_result {
+ my $class = shift;
+
+ my $new = $class->next::method(@_);
+
+ $new->_make_columns_as_hash;
+
+ return $new;
+}
+
+
+sub _make_columns_as_hash {
+ my $self = shift;
+
+ for my $col ($self->columns) {
+ if( exists $self->{$col} ) {
+ warn "Skipping mapping $col to a hash key because it exists";
+ next;
+ }
+
+ tie $self->{$col}, 'DBIx::Class::CDBICompat::Tied::ColumnValue',
+ $self, $col;
+ }
+}
+
+
+package DBIx::Class::CDBICompat::Tied::ColumnValue;
+
+use Carp;
+use Scalar::Util qw(weaken isweak);
+
+
+sub TIESCALAR {
+ my($class, $obj, $col) = @_;
+ my $self = [$obj, $col];
+ weaken $self->[0];
+
+ return bless $self, $class;
+}
+
+sub FETCH {
+ my $self = shift;
+ my($obj, $col) = @$self;
+
+ my $class = ref $obj;
+ my $id = $obj->id;
+ carp "Column '$col' of '$class/$id' was fetched as a hash"
+ if $ENV{DBIC_CDBICOMPAT_HASH_WARN};
+
+ return $obj->column_info($col)->{_inflate_info}
+ ? $obj->get_inflated_column($col)
+ : $obj->get_column($col);
+}
+
+sub STORE {
+ my $self = shift;
+ my($obj, $col) = @$self;
+
+ my $class = ref $obj;
+ my $id = $obj->id;
+ carp "Column '$col' of '$class/$id' was stored as a hash"
+ if $ENV{DBIC_CDBICOMPAT_HASH_WARN};
+
+ return $obj->column_info($col)->{_inflate_info}
+ ? $obj->set_inflated_column($col => shift)
+ : $obj->set_column($col => shift);
+}
+
+1;
--- /dev/null
+package # hide from PAUSE
+ DBIx::Class::CDBICompat::Copy;
+
+use strict;
+use warnings;
+
+use Carp;
+
+=head1 NAME
+
+DBIx::Class::CDBICompat::Copy
+
+=head1 SYNOPSIS
+
+See DBIx::Class::CDBICompat for directions for use.
+
+=head1 DESCRIPTION
+
+Emulates C<< Class::DBI->copy($new_id) >>.
+
+=cut
+
+
+# CDBI's copy will take an id in addition to a hash ref.
+sub copy {
+ my($self, $arg) = @_;
+ return $self->next::method($arg) if ref $arg;
+
+ my @primary_columns = $self->primary_columns;
+ croak("Need hash-ref to edit copied column values")
+ if @primary_columns > 1;
+
+ return $self->next::method({ $primary_columns[0] => $arg });
+}
+
+1;
}
sub set {
- return shift->set_column(@_);
+ my($self, %data) = @_;
+
+ # set_columns() is going to do a string comparison before setting.
+ # This breaks on DateTime objects (whose comparison is arguably broken)
+ # so we stringify anything first.
+ for my $key (keys %data) {
+ next unless ref $data{$key};
+ $data{$key} = "$data{$key}";
+ }
+
+ return $self->set_columns(\%data);
}
1;
+++ /dev/null
-package # hide from PAUSE
- DBIx::Class::CDBICompat::HasA;
-
-use strict;
-use warnings;
-
-sub has_a {
- my ($self, $col, $f_class, %args) = @_;
- $self->throw_exception( "No such column ${col}" ) unless $self->has_column($col);
- $self->ensure_class_loaded($f_class);
- if ($args{'inflate'} || $args{'deflate'}) { # Non-database has_a
- if (!ref $args{'inflate'}) {
- my $meth = $args{'inflate'};
- $args{'inflate'} = sub { $f_class->$meth(shift); };
- }
- if (!ref $args{'deflate'}) {
- my $meth = $args{'deflate'};
- $args{'deflate'} = sub { shift->$meth; };
- }
- $self->inflate_column($col, \%args);
- return 1;
- }
-
- $self->belongs_to($col, $f_class);
- return 1;
-}
-
-sub search {
- my $self = shift;
- my $attrs = {};
- if (@_ > 1 && ref $_[$#_] eq 'HASH') {
- $attrs = { %{ pop(@_) } };
- }
- my $where = (@_ ? ((@_ == 1) ? ((ref $_[0] eq "HASH") ? { %{+shift} } : shift)
- : {@_})
- : undef());
- if (ref $where eq 'HASH') {
- foreach my $key (keys %$where) { # has_a deflation hack
- $where->{$key} = ''.$where->{$key}
- if eval { $where->{$key}->isa('DBIx::Class') };
- }
- }
- $self->next::method($where, $attrs);
-}
-
-1;
+++ /dev/null
-package # hide from PAUSE
- DBIx::Class::CDBICompat::HasMany;
-
-use strict;
-use warnings;
-
-sub has_many {
- my ($class, $rel, $f_class, $f_key, $args) = @_;
-
- my @f_method;
-
- if (ref $f_class eq 'ARRAY') {
- ($f_class, @f_method) = @$f_class;
- }
-
- if (ref $f_key eq 'HASH' && !$args) { $args = $f_key; undef $f_key; };
-
- $args ||= {};
- if (delete $args->{no_cascade_delete}) {
- $args->{cascade_delete} = 0;
- }
-
- if( !$f_key and !@f_method ) {
- my $f_source = $f_class->result_source_instance;
- ($f_key) = grep { $f_source->relationship_info($_)->{class} eq $class }
- $f_source->relationships;
- }
-
- $class->next::method($rel, $f_class, $f_key, $args);
-
- if (@f_method) {
- no strict 'refs';
- no warnings 'redefine';
- my $post_proc = sub { my $o = shift; $o = $o->$_ for @f_method; $o; };
- *{"${class}::${rel}"} =
- sub {
- my $rs = shift->search_related($rel => @_);
- $rs->{attrs}{record_filter} = $post_proc;
- return (wantarray ? $rs->all : $rs);
- };
- return 1;
- }
-
-}
-
-1;
use base qw/DBIx::Class/;
__PACKAGE__->mk_classdata('_transform_sql_handler_order'
- => [ qw/TABLE ESSENTIAL JOIN/ ] );
+ => [ qw/TABLE ESSENTIAL JOIN IDENTIFIER/ ] );
__PACKAGE__->mk_classdata('_transform_sql_handlers' =>
{
'ESSENTIAL' =>
sub {
my ($self, $class, $data) = @_;
- return join(' ', $class->columns('Essential')) unless $data;
- return join(' ', $self->{_classes}{$data}->columns('Essential'));
+ $class = $data ? $self->{_classes}{$data} : $class;
+ return join(', ', $class->columns('Essential'));
+ },
+ 'IDENTIFIER' =>
+ sub {
+ my ($self, $class, $data) = @_;
+ $class = $data ? $self->{_classes}{$data} : $class;
+ return join ' AND ', map "$_ = ?", $class->primary_columns;
},
'JOIN' =>
sub {
--- /dev/null
+package DBIx::Class::CDBICompat::Iterator;
+
+use strict;
+use warnings;
+
+=head1 NAME
+
+DBIx::Class::CDBICompat::Iterator
+
+=head1 SYNOPSIS
+
+See DBIx::Class::CDBICompat for directions for use.
+
+=head1 DESCRIPTION
+
+Emulates the extra behaviors of the Class::DBI search iterator.
+
+=head2 Differences from DBIx::Class result set
+
+The CDBI iterator returns true if there were any results, false otherwise. The DBIC result set always returns true.
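+
+So boolean checks written for CDBI keep working (C<My::Film> is hypothetical):
+
+    my $films = My::Film->search({ rating => 'G' });
+    print "No G-rated films\n" unless $films;   # false when there are no rows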
+
+=cut
+
+
+sub _init_result_source_instance {
+ my $class = shift;
+
+ my $table = $class->next::method(@_);
+ $table->resultset_class("DBIx::Class::CDBICompat::Iterator::ResultSet");
+
+ return $table;
+}
+
+
+
+package DBIx::Class::CDBICompat::Iterator::ResultSet;
+
+use strict;
+use warnings;
+
+use base qw(DBIx::Class::ResultSet);
+
+sub _bool {
+ return $_[0]->count;
+}
+
+1;
return $rs;
}
+
+# Emulate that CDBI throws out all changed columns and reloads them on
+# request in case the database modifies the new value (say, via a trigger)
+sub update {
+ my $self = shift;
+
+ my @dirty_columns = keys %{$self->{_dirty_columns}};
+
+ my $ret = $self->next::method(@_);
+ $self->_clear_column_data(@dirty_columns);
+
+ return $ret;
+}
+
+
+# And again for create
+sub create {
+ my $class = shift;
+ my($data) = @_;
+
+ my @columns = keys %$data;
+
+ my $obj = $class->next::method(@_);
+ return $obj unless defined $obj;
+
+ my %primary_cols = map { $_ => 1 } $class->primary_columns;
+ my @data_cols = grep !$primary_cols{$_}, @columns;
+ $obj->_clear_column_data(@data_cols);
+
+ return $obj;
+}
+
+
+sub _clear_column_data {
+ my $self = shift;
+
+ delete $self->{_column_data}{$_} for @_;
+ delete $self->{_inflated_column}{$_} for @_;
+}
+
+
sub get_column {
my ($self, $col) = @_;
if ((ref $self) && (!exists $self->{'_column_data'}{$col})
$self->next::method(@_[1..$#_]);
}
+# CDBI does not explicitly declare auto increment columns, so
+# we just clear out our primary columns before copying.
+sub copy {
+ my($self, $changes) = @_;
+
+ for my $col ($self->primary_columns) {
+ $changes->{$col} = undef unless exists $changes->{$col};
+ }
+
+ return $self->next::method($changes);
+}
+
+sub discard_changes {
+ my($self) = shift;
+
+ delete $self->{_column_data}{$_} for $self->is_changed;
+ delete $self->{_dirty_columns};
+ delete $self->{_relationship_data};
+
+ return $self;
+}
+
sub _ident_cond {
my ($class) = @_;
return join(" AND ", map { "$_ = ?" } $class->primary_columns);
__PACKAGE__->mk_classdata('live_object_index' => { });
__PACKAGE__->mk_classdata('live_object_init_count' => { });
+# Caching is on by default, but a classic CDBI hack to turn it off is to
+# set this variable false.
+$Class::DBI::Weaken_Is_Available = 1
+ unless defined $Class::DBI::Weaken_Is_Available;
+__PACKAGE__->mk_classdata('__nocache' => 0);
+
+sub nocache {
+ my $class = shift;
+
+ return $class->__nocache(@_) if @_;
+
+ return 1 if $Class::DBI::Weaken_Is_Available == 0;
+ return $class->__nocache;
+}
+
# Ripped from Class::DBI 0.999, all credit due to Tony Bowden for this code,
# all blame due to me for whatever bugs I introduced porting it.
delete @$live{ keys %$live };
}
+
# And now the fragments to tie it in to DBIx::Class::Table
sub insert {
my ($self, @rest) = @_;
$self->next::method(@rest);
+
+ return $self if $self->nocache;
+
# Because the insert will die() if it can't insert into the db (or should)
# we can be sure the object *was* inserted if we got this far. In which
# case, given primary keys are unique and ID only returns a
sub inflate_result {
my ($class, @rest) = @_;
my $new = $class->next::method(@rest);
+
+ return $new if $new->nocache;
+
if (my $key = $new->ID) {
#warn "Key $key";
my $live = $class->live_object_index;
return $new;
}
-sub discard_changes {
- my ($self) = @_;
- if (my $key = $self->ID) {
- $self->remove_from_object_index;
- my $ret = $self->next::method;
- $self->live_object_index->{$key} = $self if $self->in_storage;
- return $ret;
- } else {
- return $self->next::method;
- }
-}
-
1;
+++ /dev/null
-package # hide from PAUSE
- DBIx::Class::CDBICompat::MightHave;
-
-use strict;
-use warnings;
-
-sub might_have {
- my ($class, $rel, $f_class, @columns) = @_;
- if (ref $columns[0] || !defined $columns[0]) {
- return $class->next::method($rel, $f_class, @columns);
- } else {
- return $class->next::method($rel, $f_class, undef,
- { proxy => \@columns });
- }
-}
-
-1;
--- /dev/null
+package # hide from PAUSE
+ DBIx::Class::CDBICompat::NoObjectIndex;
+
+use strict;
+use warnings;
+
+=head1 NAME
+
+DBIx::Class::CDBICompat::NoObjectIndex
+
+=head1 SYNOPSIS
+
+ Part of CDBICompat
+
+=head1 DESCRIPTION
+
+Defines empty methods for object indexing. They do nothing.
+
+Using NoObjectIndex instead of LiveObjectIndex and nocache(1) is a little
+faster because it removes code from the object insert and retrieve chains.
+
+=cut
+
+sub nocache { return 1 }
+
+sub purge_dead_from_object_index {}
+
+sub remove_from_object_index {}
+
+sub clear_object_index {}
+
+1;
+++ /dev/null
-package # hide from PAUSE
- DBIx::Class::CDBICompat::ObjIndexStubs;
-
-use strict;
-use warnings;
-
-sub remove_from_object_index { }
-
-sub clear_object_index { }
-
-1;
--- /dev/null
+package
+ DBIx::Class::CDBICompat::Relationship;
+
+use strict;
+use warnings;
+
+
+=head1 NAME
+
+DBIx::Class::CDBICompat::Relationship
+
+=head1 DESCRIPTION
+
+Emulate the Class::DBI::Relationship object returned from C<meta_info()>.
+
+=cut
+
+my %method2key = (
+ name => 'type',
+ class => 'self_class',
+ accessor => 'accessor',
+ foreign_class => 'class',
+ args => 'args',
+);
+
+sub new {
+ my($class, $args) = @_;
+
+ return bless $args, $class;
+}
+
+for my $method (keys %method2key) {
+ my $key = $method2key{$method};
+ my $code = sub {
+ $_[0]->{$key};
+ };
+
+ no strict 'refs';
+ *{$method} = $code;
+}
+
+1;
--- /dev/null
+package # hide from PAUSE
+ DBIx::Class::CDBICompat::Relationships;
+
+use strict;
+use warnings;
+
+use base qw/Class::Data::Inheritable/;
+
+use Clone;
+use DBIx::Class::CDBICompat::Relationship;
+
+__PACKAGE__->mk_classdata('__meta_info' => {});
+
+
+=head1 NAME
+
+DBIx::Class::CDBICompat::Relationships
+
+=head1 DESCRIPTION
+
+Emulate C<has_a>, C<has_many>, C<might_have> and C<meta_info>.
+
+=cut
+
+sub has_a {
+ my ($self, $col, $f_class, %args) = @_;
+ $self->throw_exception( "No such column ${col}" ) unless $self->has_column($col);
+ $self->ensure_class_loaded($f_class);
+
+ my $rel_info;
+
+ if ($args{'inflate'} || $args{'deflate'}) { # Non-database has_a
+ if (!ref $args{'inflate'}) {
+ my $meth = $args{'inflate'};
+ $args{'inflate'} = sub { $f_class->$meth(shift); };
+ }
+ if (!ref $args{'deflate'}) {
+ my $meth = $args{'deflate'};
+ $args{'deflate'} = sub { shift->$meth; };
+ }
+ $self->inflate_column($col, \%args);
+
+ $rel_info = {
+ class => $f_class
+ };
+ }
+ else {
+ $self->belongs_to($col, $f_class);
+ $rel_info = $self->result_source_instance->relationship_info($col);
+ }
+
+ $rel_info->{args} = \%args;
+
+ $self->_extend_meta(
+ has_a => $col,
+ $rel_info
+ );
+
+ return 1;
+}
+
+
+sub has_many {
+ my ($class, $rel, $f_class, $f_key, $args) = @_;
+
+ my @f_method;
+
+ if (ref $f_class eq 'ARRAY') {
+ ($f_class, @f_method) = @$f_class;
+ }
+
+ if (ref $f_key eq 'HASH' && !$args) { $args = $f_key; undef $f_key; };
+
+ $args ||= {};
+ my $cascade = delete $args->{cascade} || '';
+ if (delete $args->{no_cascade_delete} || $cascade eq 'None') {
+ $args->{cascade_delete} = 0;
+ }
+ elsif( $cascade eq 'Delete' ) {
+ $args->{cascade_delete} = 1;
+ }
+ elsif( length $cascade ) {
+ warn "Unemulated cascade option '$cascade' in $class->has_many($rel => $f_class)";
+ }
+
+ if( !$f_key and !@f_method ) {
+ $class->ensure_class_loaded($f_class);
+ my $f_source = $f_class->result_source_instance;
+ ($f_key) = grep { $f_source->relationship_info($_)->{class} eq $class }
+ $f_source->relationships;
+ }
+
+ $class->next::method($rel, $f_class, $f_key, $args);
+
+ my $rel_info = $class->result_source_instance->relationship_info($rel);
+ $args->{mapping} = \@f_method;
+ $args->{foreign_key} = $f_key;
+ $rel_info->{args} = $args;
+
+ $class->_extend_meta(
+ has_many => $rel,
+ $rel_info
+ );
+
+ if (@f_method) {
+ no strict 'refs';
+ no warnings 'redefine';
+ my $post_proc = sub { my $o = shift; $o = $o->$_ for @f_method; $o; };
+ *{"${class}::${rel}"} =
+ sub {
+ my $rs = shift->search_related($rel => @_);
+ $rs->{attrs}{record_filter} = $post_proc;
+ return (wantarray ? $rs->all : $rs);
+ };
+ return 1;
+ }
+
+}
+
+
+sub might_have {
+ my ($class, $rel, $f_class, @columns) = @_;
+
+ my $ret;
+ if (ref $columns[0] || !defined $columns[0]) {
+ $ret = $class->next::method($rel, $f_class, @columns);
+ } else {
+ $ret = $class->next::method($rel, $f_class, undef,
+ { proxy => \@columns });
+ }
+
+ my $rel_info = $class->result_source_instance->relationship_info($rel);
+ $rel_info->{args}{import} = \@columns;
+
+ $class->_extend_meta(
+ might_have => $rel,
+ $rel_info
+ );
+
+ return $ret;
+}
+
+
+sub _extend_meta {
+ my ($class, $type, $rel, $val) = @_;
+ my %hash = %{ Clone::clone($class->__meta_info || {}) };
+
+ $val->{self_class} = $class;
+ $val->{type} = $type;
+ $val->{accessor} = $rel;
+
+ $hash{$type}{$rel} = DBIx::Class::CDBICompat::Relationship->new($val);
+ $class->__meta_info(\%hash);
+}
+
+
+sub meta_info {
+ my ($class, $type, $rel) = @_;
+ my $meta = $class->__meta_info;
+ return $meta unless $type;
+
+ my $type_meta = $meta->{$type};
+ return $type_meta unless $rel;
+ return $type_meta->{$rel};
+}
+
+
+sub search {
+ my $self = shift;
+ my $attrs = {};
+ if (@_ > 1 && ref $_[$#_] eq 'HASH') {
+ $attrs = { %{ pop(@_) } };
+ }
+ my $where = (@_ ? ((@_ == 1) ? ((ref $_[0] eq "HASH") ? { %{+shift} } : shift)
+ : {@_})
+ : undef());
+ if (ref $where eq 'HASH') {
+ foreach my $key (keys %$where) { # has_a deflation hack
+ $where->{$key} = ''.$where->{$key}
+ if eval { $where->{$key}->isa('DBIx::Class') };
+ }
+ }
+ $self->next::method($where, $attrs);
+}
+
+1;
sub retrieve_from_sql {
my ($class, $cond, @rest) = @_;
+
$cond =~ s/^\s*WHERE//i;
- $class->search_literal($cond, @rest);
+
+ if( $cond =~ s/\bLIMIT (\d+)\s*$//i ) {
+ push @rest, { rows => $1 };
+ }
+
+ return $class->search_literal($cond, @rest);
+}
+
+sub construct {
+ my $class = shift;
+ my $obj = $class->resultset_instance->new_result(@_);
+ $obj->in_storage(1);
+
+ return $obj;
}
sub retrieve_all { shift->search }
sub count_all { shift->count }
- # Contributed by Numa. No test for this though.
+
+sub maximum_value_of {
+ my($class, $col) = @_;
+ return $class->resultset_instance->get_column($col)->max;
+}
+
+sub minimum_value_of {
+ my($class, $col) = @_;
+ return $class->resultset_instance->get_column($col)->min;
+}
1;
use warnings;
use base qw/DBIx::Class/;
+use Carp;
+
__PACKAGE__->mk_classdata('_temp_columns' => { });
sub _add_column_group {
my ($class, $group, @cols) = @_;
- if ($group eq 'TEMP') {
- $class->_register_column_group($group => @cols);
- $class->mk_group_accessors('temp' => @cols);
- my %tmp = %{$class->_temp_columns};
- $tmp{$_} = 1 for @cols;
- $class->_temp_columns(\%tmp);
- } else {
- return $class->next::method($group, @cols);
+
+ return $class->next::method($group, @cols) unless $group eq 'TEMP';
+
+ my %new_cols = map { $_ => 1 } @cols;
+ my %tmp_cols = %{$class->_temp_columns};
+
+ for my $existing_col ( grep $new_cols{$_}, $class->columns ) {
+ # Already been declared TEMP
+ next if $tmp_cols{$existing_col};
+
+ carp "Declaring column $existing_col as TEMP but it already exists";
}
+
+ $class->_register_column_group($group => @cols);
+ $class->mk_group_accessors('temp' => @cols);
+
+ $class->_temp_columns({ %tmp_cols, %new_cols });
}
sub new {
my ($class, $attrs, @rest) = @_;
- my %temp;
- foreach my $key (keys %$attrs) {
- $temp{$key} = delete $attrs->{$key} if $class->_temp_columns->{$key};
- }
+
+ my $temp = $class->_extract_temp_data($attrs);
+
my $new = $class->next::method($attrs, @rest);
- foreach my $key (keys %temp) {
- $new->set_temp($key, $temp{$key});
- }
+
+ $new->set_temp($_, $temp->{$_}) for keys %$temp;
+
return $new;
}
+sub _extract_temp_data {
+ my($self, $data) = @_;
+
+ my %temp;
+ foreach my $key (keys %$data) {
+ $temp{$key} = delete $data->{$key} if $self->_temp_columns->{$key};
+ }
+
+ return \%temp;
+}
sub find_column {
my ($class, $col, @rest) = @_;
return $class->next::method($col, @rest);
}
+sub set {
+ my($self, %data) = @_;
+
+ my $temp_data = $self->_extract_temp_data(\%data);
+
+ $self->set_temp($_, $temp_data->{$_}) for keys %$temp_data;
+
+ return $self->next::method(%data);
+}
+
sub get_temp {
my ($self, $column) = @_;
$self->throw_exception( "Can't fetch data as class method" ) unless ref $self;
sub insert {
my $self = shift;
+
+ return $self->create(@_) unless ref $self;
+
$self->call_trigger('before_create');
$self->next::method(@_);
$self->call_trigger('after_create');
=cut
+__PACKAGE__->mk_classdata('_result_source_instance' => []);
+
sub result_source_instance {
my $class = shift;
$class = ref $class || $class;
-
- __PACKAGE__->mk_classdata(qw/_result_source_instance/)
- unless __PACKAGE__->can('_result_source_instance');
-
- return $class->_result_source_instance(@_) if @_;
+ return $class->_result_source_instance([$_[0], $class]) if @_;
- my $source = $class->_result_source_instance;
- return {} unless Scalar::Util::blessed($source);
+ my($source, $result_class) = @{$class->_result_source_instance};
+ return unless Scalar::Util::blessed($source);
- if ($source->result_class ne $class) {
- # Remove old source instance so we dont get deep recursion
- #$DB::single = 1;
- # Need to set it to a non-undef value so that it doesn't just fallback to
- # a parent class's _result_source_instance
- #$class->_result_source_instance({});
- #$class->table($class);
- #$source = $class->_result_source_instance;
+ if ($result_class ne $class) { # new class
+ # Give this new class its own source and register it.
- $DB::single = 1;
$source = $source->new({
%$source,
source_name => $class,
result_class => $class
} );
- $class->_result_source_instance($source);
+ $class->_result_source_instance([$source, $class]);
if (my $coderef = $class->can('schema_instance')) {
$coderef->($class)->register_class($class, $class);
}
=head2 Create a new row in a related table
- my $book->create_related('author', { name => 'Fred'});
+ my $author = $book->create_related('author', { name => 'Fred'});
=head2 Search in a related table
Only searches for books named 'Titanic' by the author in $author.
- my $author->search_related('books', { name => 'Titanic' });
+ my $books_rs = $author->search_related('books', { name => 'Titanic' });
=head2 Delete data in a related table
Deletes only the book named Titanic by the author in $author.
- my $author->delete_related('books', { name => 'Titanic' });
+ $author->delete_related('books', { name => 'Titanic' });
=head2 Ordering a relationship result set
If you always want a relation to be ordered, you can specify this when you
create the relationship.
-To order C<< $book->pages >> by descending page_number.
+To order C<< $book->pages >> by descending page_number, create the relation
+as follows:
- Book->has_many('pages' => 'Page', 'book', { order_by => \'page_number DESC'} );
+ __PACKAGE__->has_many('pages' => 'Page', 'book', { order_by => \'page_number DESC'} );
=head2 Many-to-many relationships
This is straightforward using L<ManyToMany|DBIx::Class::Relationship/many_to_many>:
- package My::DB;
- # ... set up connection ...
-
package My::User;
- use base 'My::DB';
+ use base 'DBIx::Class';
+ __PACKAGE__->load_components('Core');
__PACKAGE__->table('user');
__PACKAGE__->add_columns(qw/id name/);
__PACKAGE__->set_primary_key('id');
__PACKAGE__->many_to_many('addresses' => 'user_address', 'address');
package My::UserAddress;
- use base 'My::DB';
+ use base 'DBIx::Class';
+ __PACKAGE__->load_components('Core');
__PACKAGE__->table('user_address');
__PACKAGE__->add_columns(qw/user address/);
__PACKAGE__->set_primary_key(qw/user address/);
__PACKAGE__->belongs_to('address' => 'My::Address');
package My::Address;
- use base 'My::DB';
+ use base 'DBIx::Class';
+ __PACKAGE__->load_components('Core');
__PACKAGE__->table('address');
__PACKAGE__->add_columns(qw/id street town area_code country/);
__PACKAGE__->set_primary_key('id');
$genus->add_to_species({ name => 'troglodyte' });
$genus->wings(2);
$genus->update;
- $schema->txn_do($coderef2); # Can have a nested transaction
+ $schema->txn_do($coderef2); # Can have a nested transaction. Only the outer will actually commit
return $genus->species;
};
The recommend way of achieving this is to use the
L<make_schema_at|DBIx::Class::Schema::Loader/make_schema_at> method:
- perl -MDBIx::Class::Schema::Loader=make_schema_at,dump_to_dir:./lib -e 'make_schema_at("My::Schema", { debug => 1 }, [ "dbi:Pg:dbname=foo","postgres" ])'
+ perl -MDBIx::Class::Schema::Loader=make_schema_at,dump_to_dir:./lib \
+ -e 'make_schema_at("My::Schema", { debug => 1 }, [ "dbi:Pg:dbname=foo","postgres" ])'
This will create a tree of files rooted at C<./lib/My/Schema/> containing
source definitions for all the tables found in the C<foo> database.
requires that the files for 0.1 as created above are available in the
given directory to diff against.
+=head2 Select from dual
+
+Dummy tables are needed by some databases to allow calling functions
+or expressions that aren't based on table content, for examples of how
+this applies to various database types, see:
+L<http://troels.arvin.dk/db/rdbms/#other-dummy_table>.
+
+Note: If you're using Oracle's dual table don't B<ever> do anything
+other than a select; if you CRUD on your dual table you B<will> break
+your database.
+
+Make a table class as you would for any other table:
+
+ package MyAppDB::Dual;
+ use strict;
+ use warnings;
+ use base 'DBIx::Class';
+ __PACKAGE__->load_components("Core");
+ __PACKAGE__->table("Dual");
+ __PACKAGE__->add_columns(
+ "dummy",
+ { data_type => "VARCHAR2", is_nullable => 0, size => 1 },
+ );
+
+Once you've loaded your table class, select from it using C<select>
+and C<as> instead of C<columns>:
+
+ my $rs = $schema->resultset('Dual')->search(undef,
+ { select => [ 'sysdate' ],
+ as => [ 'now' ]
+ },
+ );
+
+All you have to do now is be careful how you access your resultset; the below
+will not work because there is no column called 'now' in the Dual table class:
+
+ while (my $dual = $rs->next) {
+ print $dual->now."\n";
+ }
+ # Can't locate object method "now" via package "MyAppDB::Dual" at headshot.pl line 23.
+
+You could of course use 'dummy' in C<as> instead of 'now', or C<add_columns> to
+your Dual class for whatever you wanted to select from dual, but that's just
+silly; instead, use C<get_column>:
+
+ while (my $dual = $rs->next) {
+ print $dual->get_column('now')."\n";
+ }
+
+Or use C<cursor>
+
+ my $cursor = $rs->cursor;
+ while (my @vals = $cursor->next) {
+ print $vals[0]."\n";
+ }
+
+Or use L<DBIx::Class::ResultClass::HashRefInflator>
+
+ $rs->result_class('DBIx::Class::ResultClass::HashRefInflator');
+ while ( my $dual = $rs->next ) {
+ print $dual->{now}."\n";
+ }
+
+Here are some example C<select> conditions to illustrate the different syntax
+you could use for doing stuff like
+C<oracles.heavily(nested(functions_can('take', 'lots'), OF), 'args')>
+
+ # get a sequence value
+ select => [ 'A_SEQ.nextval' ],
+
+ # get create table sql
+ select => [ { 'dbms_metadata.get_ddl' => [ "'TABLE'", "'ARTIST'" ]} ],
+
+ # get a random num between 0 and 100
+ select => [ { "trunc" => [ { "dbms_random.value" => [0,100] } ]} ],
+
+ # what year is it?
+ select => [ { 'extract' => [ \'year from sysdate' ] } ],
+
+ # do some math
+ select => [ {'round' => [{'cos' => [ \'180 * 3.14159265359/180' ]}]}],
+
+ # which day of the week were you born on?
+ select => [{'to_char' => [{'to_date' => [ "'25-DEC-1980'", "'dd-mon-yyyy'" ]}, "'day'"]}],
+
+ # select 16 rows from dual
+ select => [ "'hello'" ],
+ as => [ 'world' ],
+ group_by => [ 'cube( 1, 2, 3, 4 )' ],
+
+
+
=head2 Adding Indexes And Functions To Your SQL
Often you will want indexes on columns on your table to speed up searching. To
Add the L<DBIx::Class::Schema::Versioned> schema component to your
Schema class. This will add a new table to your database called
-C<SchemaVersions> which will keep track of which version is installed
+C<dbix_class_schema_versions> which will keep track of which version is installed
and warn if the user tries to run a newer schema version than the
database thinks it has.
join => [ 'room', 'room' ]
-The aliases are: C<room_1> and C<room_2>.
+The aliases are: C<room> and C<room_2>.
=cut
my ($self) = @_;
delete $self->{_dirty_columns};
return unless $self->in_storage; # Don't reload if we aren't real!
- my ($reload) = $self->result_source->resultset->find
- (map { $self->$_ } $self->primary_columns);
+
+ my $reload = $self->result_source->resultset->find(
+ map { $self->$_ } $self->primary_columns
+ );
unless ($reload) { # If we got deleted in the mean-time
$self->in_storage(0);
return $self;
}
- delete @{$self}{keys %$self};
- @{$self}{keys %$reload} = values %$reload;
+
+ %$self = %$reload;
+
+ # Avoid a possible infinite loop with
+ # sub DESTROY { $_[0]->discard_changes }
+ bless $reload, 'Do::Not::Exist';
+
return $self;
}
$class->inflate_column($rel,
{ inflate => sub {
my ($val, $self) = @_;
- return $self->find_or_create_related($rel, {}, {});
+ return $self->find_or_new_related($rel, {}, {});
},
deflate => sub {
my ($val, $self) = @_;
should, set this attribute to a true or false value to override the detection
of when to create constraints.
+=item is_deferrable
+
+Tells L<SQL::Translator> that the foreign key constraint it creates should be
+deferrable. In other words, the user may request that the constraint be ignored
+until the end of the transaction. Currently, only the PostgreSQL producer
+actually supports this.
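+
+For example, as a relationship attribute (the classes here are illustrative):
+
+  __PACKAGE__->belongs_to(
+    'artist', 'My::Schema::Artist', 'artist_id',
+    { is_deferrable => 1 },
+  );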
+
=back
=head2 register_relationship
sub find_or_new_related {
my $self = shift;
- return $self->find_related(@_) || $self->new_related(@_);
+ my $obj = $self->find_related(@_);
+ return defined $obj ? $obj : $self->new_related(@_);
}
=head2 find_or_create_related
);
}
# explicit join condition
- elsif (ref $cond eq 'HASH') {
- my $cond_rel;
- for (keys %$cond) {
- if (m/\./) { # Explicit join condition
- $cond_rel = $cond;
- last;
+ elsif (ref $cond) {
+ if (ref $cond eq 'HASH') { # ARRAY is also valid
+ my $cond_rel;
+ for (keys %$cond) {
+ if (m/\./) { # Explicit join condition
+ $cond_rel = $cond;
+ last;
+ }
+ $cond_rel->{"foreign.$_"} = "self.".$cond->{$_};
}
- $cond_rel->{"foreign.$_"} = "self.".$cond->{$_};
+ $cond = $cond_rel;
}
- my $acc_type = (keys %$cond_rel == 1 and $class->has_column($rel))
- ? 'filter'
- : 'single';
+ my $acc_type = ((ref $cond eq 'HASH')
+ && keys %$cond == 1
+ && $class->has_column($rel))
+ ? 'filter'
+ : 'single';
$class->add_relationship($rel, $f_class,
- $cond_rel,
+ $cond,
{ accessor => $acc_type, %{$attrs || {}} }
);
}
package DBIx::Class::ResultClass::HashRefInflator;
+use strict;
+use warnings;
+
=head1 NAME
DBIx::Class::ResultClass::HashRefInflator
# related sources.
# to avoid empty has_many rels containing one empty hashref
- return if (not keys %$me);
+ return undef if (not keys %$me);
my $def;
last;
}
}
- return unless $def;
+ return undef unless $def;
return { %$me,
map {
( $_ =>
- ref($rest->{$_}[0]) eq 'ARRAY' ? [ map { mk_hash(@$_) } @{$rest->{$_}} ]
- : mk_hash( @{$rest->{$_}} )
+ ref($rest->{$_}[0]) eq 'ARRAY'
+ ? [ grep defined, map mk_hash(@$_), @{$rest->{$_}} ]
+ : mk_hash( @{$rest->{$_}} )
)
} keys %$rest
};
use strict;
use warnings;
use overload
- '0+' => \&count,
- 'bool' => sub { 1; },
+ '0+' => "count",
+ 'bool' => "_bool",
fallback => 1;
use Carp::Clan qw/^DBIx::Class/;
use Data::Page;
use Storable;
use DBIx::Class::ResultSetColumn;
use DBIx::Class::ResultSourceHandle;
+use List::Util ();
use base qw/DBIx::Class/;
__PACKAGE__->mk_group_accessors('simple' => qw/result_class _source_handle/);
__PACKAGE__->belongs_to(artist => 'MyApp::Schema::Artist');
1;
+=head1 OVERLOADING
+
+If a resultset is used as a number it returns the C<count()>. However, if it is used as a boolean it is always true. So if you want to check whether a resultset has any results, use C<if $rs != 0>; C<if $rs> will always be true.
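+
+For example:
+
+  if ($rs != 0) {
+      # there is at least one result
+  }
+
+  my $count = 0 + $rs;   # numification calls count()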
+
=head1 METHODS
=head2 new
sub search_rs {
my $self = shift;
- my $rows;
-
- unless (@_) { # no search, effectively just a clone
- $rows = $self->get_cache;
- }
-
my $attrs = {};
$attrs = pop(@_) if @_ > 1 and ref $_[$#_] eq 'HASH';
my $our_attrs = { %{$self->{attrs}} };
my $having = delete $our_attrs->{having};
my $where = delete $our_attrs->{where};
+ my $rows;
+
+ my %safe = (alias => 1, cache => 1);
+
+ unless (
+ (@_ && defined($_[0])) # @_ == () or (undef)
+ ||
+ (keys %$attrs # empty attrs or only 'safe' attrs
+ && List::Util::first { !$safe{$_} } keys %$attrs)
+ ) {
+ # no search, effectively just a clone
+ $rows = $self->get_cache;
+ }
+
my $new_attrs = { %{$our_attrs}, %{$attrs} };
# merge new attrs into inherited
with to find the number of elements. If passed arguments, does a search
on the resultset and counts the results of that.
-Note: When using C<count> with C<group_by>, L<DBIX::Class> emulates C<GROUP BY>
+Note: When using C<count> with C<group_by>, L<DBIx::Class> emulates C<GROUP BY>
using C<COUNT( DISTINCT( columns ) )>. Some databases (notably SQLite) do
not support C<DISTINCT> with multiple columns. If you are using such a
database, you should only use columns from the main table in your C<group_by>
return $count;
}
+sub _bool {
+ return 1;
+}
+
=head2 count_literal
=over 4
sub throw_exception {
my $self=shift;
- $self->_source_handle->schema->throw_exception(@_);
+ if (ref $self && $self->_source_handle->schema) {
+ $self->_source_handle->schema->throw_exception(@_)
+ } else {
+ croak(@_);
+ }
+
}
# XXX: FIXME: Attributes docs need clearing up
=over 4
Indicates additional columns to be selected from storage. Works the same as
-L<select> but adds columns to the selection.
+L</select> but adds columns to the selection.
=back
=over 4
-Indicates additional column names for those added via L<+select>.
+Indicates additional column names for those added via L</+select>.
=back
below.
For more help on using joins with search, see L<DBIx::Class::Manual::Joining>.
+
=head2 prefetch
=over 4
use strict;
use warnings;
use Storable;
+use Carp;
use base qw/DBIx::Class/;
__PACKAGE__->mk_group_accessors('simple' => qw/schema source_moniker/);
+# Schema to use when thawing.
+our $thaw_schema;
+
=head1 NAME
DBIx::Class::ResultSourceHandle
sub STORABLE_freeze {
my ($self, $cloning) = @_;
+
my $to_serialize = { %$self };
- delete $to_serialize->{schema};
+
+ my $class = $self->schema->class($self->source_moniker);
+ $to_serialize->{schema} = $class;
return (Storable::freeze($to_serialize));
}
=head2 STORABLE_thaw
-Thaws frozen handle.
+Thaws frozen handle. Resets the internal schema reference to the package
+variable C<$thaw_schema>. The recommended way of setting this is to use
+C<< $schema->thaw($ice) >>, which handles this for you.
=cut
+
sub STORABLE_thaw {
my ($self, $cloning,$ice) = @_;
%$self = %{ Storable::thaw($ice) };
+
+ my $class = delete $self->{schema};
+ if( $thaw_schema ) {
+ $self->{schema} = $thaw_schema;
+ }
+ else {
+ my $rs = $class->result_source_instance;
+ $self->{schema} = $rs->schema if $rs;
+ }
+
+ carp "Unable to restore schema" unless $self->{schema};
}
+=head1 AUTHOR
+
+Ash Berlin C<< <ash@cpan.org> >>
+
+=cut
+
1;
__PACKAGE__->mk_classdata('table_alias'); # FIXME: Doesn't actually do
# anything yet!
+sub _init_result_source_instance {
+ my $class = shift;
+
+ $class->mk_classdata('result_source_instance')
+ unless $class->can('result_source_instance');
+
+ my $table = $class->result_source_instance;
+ my $class_has_table_instance = ($table and $table->result_class eq $class);
+ return $table if $class_has_table_instance;
+
+ if( $table ) {
+ $table = $class->table_class->new({
+ %$table,
+ result_class => $class,
+ source_name => undef,
+ schema => undef
+ });
+ }
+ else {
+ $table = $class->table_class->new({
+ name => undef,
+ result_class => $class,
+ source_name => undef,
+ });
+ }
+
+ $class->result_source_instance($table);
+
+ if ($class->can('schema_instance')) {
+ $class =~ m/([^:]+)$/;
+ $class->schema_instance->register_class($class, $class);
+ }
+
+ return $table;
+}
+
=head1 NAME
DBIx::Class::ResultSourceProxy::Table - provides a classdata table
unless (ref $table) {
$table = $class->table_class->new({
$class->can('result_source_instance') ?
- %{$class->result_source_instance} : (),
+ %{$class->result_source_instance||{}} : (),
name => $table,
result_class => $class,
source_name => undef,
my ($class, $attrs) = @_;
$class = ref $class if ref $class;
- my $new = { _column_data => {} };
+ my $new = {
+ _column_data => {},
+ };
bless $new, $class;
if (my $handle = delete $attrs->{-source_handle}) {
next;
}
}
- use Data::Dumper;
$new->throw_exception("No such column $key on $class")
unless $class->has_column($key);
$new->store_column($key => $attrs->{$key});
an entirely new object into the database, use C<create> (see
L<DBIx::Class::ResultSet/create>).
+To fetch an uninserted row object, call
+L<new|DBIx::Class::ResultSet/new> on a resultset.
+
This will also insert any uninserted, related objects held inside this
one, see L<DBIx::Class::ResultSet/create> for more details.
%{$self->{_inflated_column} || {}});
if(!$self->{_rel_in_storage}) {
- $source->storage->txn_begin;
# The guard will save us if we blow out of this scope via die
-
- $rollback_guard = Scope::Guard->new(sub { $source->storage->txn_rollback });
+ $rollback_guard = $source->storage->txn_scope_guard;
## Should all be in relationship_data, but we need to get rid of the
## 'filter' reltype..
}
}
}
- $source->storage->txn_commit;
- $rollback_guard->dismiss;
+ $rollback_guard->commit;
}
$self->in_storage(1);
$obj->in_storage; # Get value
$obj->in_storage(1); # Set value
-Indicates whether the object exists as a row in the database or not
+Indicates whether the object exists as a row in the database or
+not. This is set to true when L<DBIx::Class::ResultSet/find>,
+L<DBIx::Class::ResultSet/create> or L</insert>
+are used.
+
+Creating a row object using L<DBIx::Class::ResultSet/new>, or calling
+L</delete> on one, sets it to false.
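+
+A short sketch (C<$rs> is any resultset for the class):
+
+  my $row = $rs->new_result({ name => 'Indigo' });  # in_storage is false
+  $row->insert;                                     # in_storage is now true
+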
=cut
required.
Also takes an options hashref of C<< column_name => value >> pairs to update
-first. But be aware that this hashref might be edited in place, so dont rely on
-it being the same after a call to C<update>. If you need to preserve the hashref,
-it is sufficient to pass a shallow copy to C<update>, e.g. ( { %{ $href } } )
+first. But be aware that the hashref will be passed to
+C<set_inflated_columns>, which might edit it in place, so don't rely on it being
+the same after a call to C<update>. If you need to preserve the hashref, it is
+sufficient to pass a shallow copy to C<update>, e.g. ( { %{ $href } } )
=cut
$self->throw_exception("Cannot safely update a row in a PK-less table")
if ! keys %$ident_cond;
- if ($upd) {
- foreach my $key (keys %$upd) {
- if (ref $upd->{$key}) {
- my $info = $self->relationship_info($key);
- if ($info && $info->{attrs}{accessor}
- && $info->{attrs}{accessor} eq 'single')
- {
- my $rel = delete $upd->{$key};
- $self->set_from_related($key => $rel);
- $self->{_relationship_data}{$key} = $rel;
- } elsif ($info && $info->{attrs}{accessor}
- && $info->{attrs}{accessor} eq 'multi'
- && ref $upd->{$key} eq 'ARRAY') {
- my $others = delete $upd->{$key};
- foreach my $rel_obj (@$others) {
- if(!Scalar::Util::blessed($rel_obj)) {
- $rel_obj = $self->create_related($key, $rel_obj);
- }
- }
- $self->{_relationship_data}{$key} = $others;
-# $related->{$key} = $others;
- next;
- }
- elsif ($self->has_column($key)
- && exists $self->column_info($key)->{_inflate_info})
- {
- $self->set_inflated_column($key, delete $upd->{$key});
- }
- }
- }
- $self->set_columns($upd);
- }
+ $self->set_inflated_columns($upd) if $upd;
my %to_update = $self->get_dirty_columns;
return $self unless keys %to_update;
my $rows = $self->result_source->storage->update(
reinserted using C<< ->insert() >> before C<< ->update() >> can be used
on it. If you delete an object in a class with a C<has_many>
relationship, all the related objects will be deleted as well. To turn
-this behavior off, pass C<cascade_delete => 0> in the C<$attr>
+this behavior off, pass C<< cascade_delete => 0 >> in the C<$attr>
hashref. Any database-level cascade or restrict will take precedence
over a DBIx-Class-based cascading delete. See also L<DBIx::Class::ResultSet/delete>.
my $val = $obj->get_column($col);
-Gets a column value from a row object. Does not do any queries; the column
-must have already been fetched from the database and stored in the object. If
-there is an inflated value stored that has not yet been deflated, it is deflated
-when the method is invoked.
+Returns a raw column value from the row object, if it has already
+been fetched from the database or set by an accessor.
+
+If an L<inflated value|DBIx::Class::InflateColumn> has been set, it
+will be deflated and returned.
=cut
my %data = $obj->get_columns;
-Does C<get_column>, for all column values at once.
+Does C<get_column>, for all loaded column values at once.
=cut
=head2 get_inflated_columns
- my $inflated_data = $obj->get_inflated_columns;
+ my %inflated_data = $obj->get_inflated_columns;
-Similar to get_columns but objects are returned for inflated columns instead of their raw non-inflated values.
+Similar to get_columns but objects are returned for inflated columns
+instead of their raw non-inflated values.
=cut
$obj->set_column($col => $val);
-Sets a column value. If the new value is different from the old one,
+Sets a raw column value. If the new value is different from the old one,
the column is marked as dirty for when you next call $obj->update.
+If passed an object or reference, this will happily attempt to store the
+value, and a later insert/update will try to stringify/numify as
+appropriate.
+
=cut
sub set_column {
my $old = $self->get_column($column);
my $ret = $self->store_column(@_);
$self->{_dirty_columns}{$column} = 1
- if (defined $old ^ defined $ret) || (defined $old && $old ne $ret);
+ if (defined $old xor defined $ret) || (defined $old && $old ne $ret);
+
+ # XXX clear out the relation cache for this column
+ delete $self->{related_resultsets}{$column};
+
return $ret;
}
return $self;
}
+=head2 set_inflated_columns
+
+ my $copy = $orig->set_inflated_columns({ $col => $val, $rel => $obj, ... });
+
+Sets more than one column value at once, taking care to respect inflations and
+relationships if relevant. Be aware that this hashref might be edited in place,
+so dont rely on it being the same after a call to C<set_inflated_columns>. If
+you need to preserve the hashref, it is sufficient to pass a shallow copy to
+C<set_inflated_columns>, e.g. ( { %{ $href } } )
+
+=cut
+
+sub set_inflated_columns {
+ my ( $self, $upd ) = @_;
+ foreach my $key (keys %$upd) {
+ if (ref $upd->{$key}) {
+ my $info = $self->relationship_info($key);
+ if ($info && $info->{attrs}{accessor}
+ && $info->{attrs}{accessor} eq 'single')
+ {
+ my $rel = delete $upd->{$key};
+ $self->set_from_related($key => $rel);
+ $self->{_relationship_data}{$key} = $rel;
+ } elsif ($info && $info->{attrs}{accessor}
+ && $info->{attrs}{accessor} eq 'multi'
+ && ref $upd->{$key} eq 'ARRAY') {
+ my $others = delete $upd->{$key};
+ foreach my $rel_obj (@$others) {
+ if(!Scalar::Util::blessed($rel_obj)) {
+ $rel_obj = $self->create_related($key, $rel_obj);
+ }
+ }
+ $self->{_relationship_data}{$key} = $others;
+# $related->{$key} = $others;
+ next;
+ }
+ elsif ($self->has_column($key)
+ && exists $self->column_info($key)->{_inflate_info})
+ {
+ $self->set_inflated_column($key, delete $upd->{$key});
+ }
+ }
+ }
+ $self->set_columns($upd);
+}
+
=head2 copy
my $copy = $orig->copy({ change => $to, ... });
bless $new, ref $self;
$new->result_source($self->result_source);
- $new->set_columns($changes);
+ $new->set_inflated_columns($changes);
$new->insert;
+
+ # It's possible we'll have 2 relations to the same Source. We need to make
+ # sure we don't try to insert the same row twice else we'll violate unique
+ # constraints
+ my $rels_copied = {};
+
foreach my $rel ($self->result_source->relationships) {
my $rel_info = $self->result_source->relationship_info($rel);
- if ($rel_info->{attrs}{cascade_copy}) {
- my $resolved = $self->result_source->resolve_condition(
- $rel_info->{cond}, $rel, $new);
- foreach my $related ($self->search_related($rel)) {
- $related->copy($resolved);
- }
+
+ next unless $rel_info->{attrs}{cascade_copy};
+
+ my $resolved = $self->result_source->resolve_condition(
+ $rel_info->{cond}, $rel, $new
+ );
+
+ my $copied = $rels_copied->{ $rel_info->{source} } ||= {};
+ foreach my $related ($self->search_related($rel)) {
+ my $id_str = join("\0", $related->id);
+ next if $copied->{$id_str};
+ $copied->{$id_str} = 1;
+ my $rel_copy = $related->copy($resolved);
}
+
}
return $new;
}
$obj->update_or_insert
-Updates the object if it's already in the db, else inserts it.
+Updates the object if it's already in the database, according to
+L</in_storage>, else inserts it.
=head2 insert_or_update
}
}
+=head2 id
+
+Returns the primary key(s) for a row. Can't be called as a class method.
+Actually implemented in L<DBIx::Class::PK>.
+
+=head2 discard_changes
+
+Re-selects the row from the database, losing any changes that had
+been made.
+
+This method can also be used to refresh from storage, retrieving any
+changes made since the row was last read from storage. Actually
+implemented in L<DBIx::Class::PK>.
+
+=cut
+
1;
=head1 AUTHORS
$self->storage->txn_do(@_);
}
+=head2 txn_scope_guard
+
+Runs C<txn_scope_guard> on the schema's storage.
+
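+A minimal usage sketch:
+
+  my $guard = $schema->txn_scope_guard;
+  # ... several inserts/updates here ...
+  $guard->commit;  # without this, the transaction rolls back on scope exit
+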
+=cut
+
+sub txn_scope_guard {
+ my $self = shift;
+
+ $self->storage or $self->throw_exception
+ ('txn_scope_guard called on $schema without storage');
+
+ $self->storage->txn_scope_guard(@_);
+}
+
=head2 txn_begin
Begins a transaction (does nothing if AutoCommit is off). Equivalent to
For an example of what you can do with this, see
L<DBIx::Class::Manual::Cookbook/Adding Indexes And Functions To Your SQL>.
+=head2 thaw
+
+Provided as the recommended way of thawing schema objects. You can call
+C<Storable::thaw> directly if you wish, but the thawed objects will not have a
+reference to any schema, so are rather useless.
+
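+A round-trip sketch:
+
+  my $frozen = $schema->freeze($row);
+  my $thawed = $schema->thaw($frozen);  # re-attached to $schema
+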
+=cut
+
+sub thaw {
+ my ($self, $obj) = @_;
+ local $DBIx::Class::ResultSourceHandle::thaw_schema = $self;
+ return Storable::thaw($obj);
+}
+
+=head2 freeze
+
+This doesn't actually do anything more than call L<Storable/freeze>; it is
+just provided here for symmetry.
+
=cut
+sub freeze {
+ return Storable::freeze($_[1]);
+}
+
+=head2 dclone
+
+Recommended way of dcloning objects. This is needed to properly maintain
+references to the schema object (which itself is B<not> cloned).
+
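+For example:
+
+  my $clone = $schema->dclone($row);  # deep copy, still attached to $schema
+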
+=cut
+
+sub dclone {
+ my ($self, $obj) = @_;
+ local $DBIx::Class::ResultSourceHandle::thaw_schema = $self;
+ return Storable::dclone($obj);
+}
+
1;
=head1 AUTHORS
use warnings;
__PACKAGE__->load_components(qw/ Core/);
-__PACKAGE__->table('SchemaVersions');
+__PACKAGE__->table('dbix_class_schema_versions');
__PACKAGE__->add_columns
- ( 'Version' => {
+ ( 'version' => {
'data_type' => 'VARCHAR',
'is_auto_increment' => 0,
'default_value' => undef,
'is_foreign_key' => 0,
- 'name' => 'Version',
+ 'name' => 'version',
'is_nullable' => 0,
'size' => '10'
},
- 'Installed' => {
+ 'installed' => {
'data_type' => 'VARCHAR',
'is_auto_increment' => 0,
'default_value' => undef,
'is_foreign_key' => 0,
- 'name' => 'Installed',
+ 'name' => 'installed',
'is_nullable' => 0,
'size' => '20'
},
);
+__PACKAGE__->set_primary_key('version');
+
+package DBIx::Class::Version::TableCompat;
+use base 'DBIx::Class';
+__PACKAGE__->load_components(qw/ Core/);
+__PACKAGE__->table('SchemaVersions');
+
+__PACKAGE__->add_columns
+ ( 'Version' => {
+ 'data_type' => 'VARCHAR',
+ },
+ 'Installed' => {
+ 'data_type' => 'VARCHAR',
+ },
+ );
__PACKAGE__->set_primary_key('Version');
package DBIx::Class::Version;
__PACKAGE__->register_class('Table', 'DBIx::Class::Version::Table');
+package DBIx::Class::VersionCompat;
+use base 'DBIx::Class::Schema';
+use strict;
+use warnings;
+
+__PACKAGE__->register_class('TableCompat', 'DBIx::Class::Version::TableCompat');
+
# ---------------------------------------------------------------------------
+
+=head1 NAME
+
+DBIx::Class::Schema::Versioned - DBIx::Class::Schema plugin for Schema upgrades
+
+=head1 SYNOPSIS
+
+ package Library::Schema;
+ use base qw/DBIx::Class::Schema/;
+ # load Library::Schema::CD, Library::Schema::Book, Library::Schema::DVD
+ __PACKAGE__->load_classes(qw/CD Book DVD/);
+
+ __PACKAGE__->load_components(qw/+DBIx::Class::Schema::Versioned/);
+ __PACKAGE__->upgrade_directory('/path/to/upgrades/');
+ __PACKAGE__->backup_directory('/path/to/backups/');
+
+
+=head1 DESCRIPTION
+
+This module is a component designed to extend L<DBIx::Class::Schema>
+classes, to enable them to upgrade to newer schema layouts. To use this
+module, you need to have called C<create_ddl_dir> on your Schema to
+create your upgrade files to include with your delivery.
+
+A table called I<dbix_class_schema_versions> is created and maintained by the
+module. It has two fields, 'version' and 'installed', which record each
+VERSION of your Schema and the date and time it was installed.
+
+The actual upgrade is called manually by calling C<upgrade> on your
+schema object. Code is run at connect time to determine whether an
+upgrade is needed; if so, a warning "Versions out of sync" is
+produced.
+
+So you'll probably want to write a script which generates your DDLs and diffs
+and another which executes the upgrade.
+
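+A minimal pair of scripts might look like this (connection details and paths
+are illustrative):
+
+  # gen_ddl.pl - run at release time to create the DDL and diff files
+  my $schema = Library::Schema->connect(@connect_info);
+  $schema->create_ddl_dir('MySQL', undef, '/path/to/upgrades/', '1.0');
+
+  # upgrade.pl - run at deployment time
+  my $schema = Library::Schema->connect(@connect_info);
+  $schema->upgrade();
+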
+NB: At the moment, only SQLite and MySQL are supported. This is due to
+spotty behaviour in the SQL::Translator producers; please help us by
+testing and fixing them.
+
+=head1 METHODS
+
+=head2 upgrade_directory
+
+Use this to set the directory your upgrade files are stored in.
+
+=head2 backup_directory
+
+Use this to set the directory you want your backups stored in.
+
+=cut
+
package DBIx::Class::Schema::Versioned;
use strict;
__PACKAGE__->mk_classdata('_filedata');
__PACKAGE__->mk_classdata('upgrade_directory');
__PACKAGE__->mk_classdata('backup_directory');
+__PACKAGE__->mk_classdata('do_backup');
+__PACKAGE__->mk_classdata('do_diff_on_init');
+
+=head2 schema_version
+
+Returns the current schema class' $VERSION; does B<not> use $schema->VERSION,
+since that varies in results depending on whether version.pm is installed
+and, if so, whether the pure-perl or XS implementation is used. If you want
+this to change, bug the version.pm author to make vpp and vxs behave the same.
+
+=cut
sub schema_version {
my ($self) = @_;
return $version;
}
-sub connection {
- my $self = shift;
- $self->next::method(@_);
- $self->_on_connect;
- return $self;
-}
-
-sub _on_connect
-{
- my ($self) = @_;
- my $vschema = DBIx::Class::Version->connect(@{$self->storage->connect_info()});
- my $vtable = $vschema->resultset('Table');
- my $pversion;
-
- if(!$self->_source_exists($vtable))
- {
-# $vschema->storage->debug(1);
- $vschema->storage->ensure_connected();
- $vschema->deploy();
- $pversion = 0;
- }
- else
- {
- my $psearch = $vtable->search(undef,
- { select => [
- { 'max' => 'Installed' },
- ],
- as => ['maxinstall'],
- })->first;
- $pversion = $vtable->search({ Installed => $psearch->get_column('maxinstall'),
- })->first;
- $pversion = $pversion->Version if($pversion);
- }
-# warn("Previous version: $pversion\n");
- if($pversion eq $self->schema_version)
- {
- warn "This version is already installed\n";
- return 1;
- }
-
-## use IC::DT?
-
- if(!$pversion)
- {
- $vtable->create({ Version => $self->schema_version,
- Installed => strftime("%Y-%m-%d %H:%M:%S", gmtime())
- });
- ## If we let the user do this, where does the Version table get updated?
- warn "No previous version found, calling deploy to install this version.\n";
- $self->deploy();
- return 1;
- }
-
- my $file = $self->ddl_filename(
- $self->storage->sqlt_type,
- $self->upgrade_directory,
- $self->schema_version
- );
- if(!$file)
- {
- # No upgrade path between these two versions
- return 1;
- }
+=head2 get_db_version
- $file = $self->ddl_filename(
- $self->storage->sqlt_type,
- $self->upgrade_directory,
- $self->schema_version,
- $pversion,
- );
-# $file =~ s/@{[ $self->schema_version ]}/"${pversion}-" . $self->schema_version/e;
- if(!-f $file)
- {
- warn "Upgrade not possible, no upgrade file found ($file)\n";
- return;
- }
+Returns the version that your database is currently at. This is determined by the values in the
+dbix_class_schema_versions table that $self->upgrade writes to.
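+
+For example:
+
+  my $db_version = $schema->get_db_version;  # 0 if the DB is unversioned
+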
- my $fh;
- open $fh, "<$file" or warn("Can't open upgrade file, $file ($!)");
- my @data = split(/;\n/, join('', <$fh>));
- close($fh);
- @data = grep { $_ && $_ !~ /^-- / } @data;
- @data = grep { $_ !~ /^(BEGIN TRANACTION|COMMIT)/m } @data;
+=cut
- $self->_filedata(\@data);
+sub get_db_version
+{
+ my ($self, $rs) = @_;
- ## Don't do this yet, do only on command?
- ## If we do this later, where does the Version table get updated??
- warn "Versions out of sync. This is " . $self->schema_version .
- ", your database contains version $pversion, please call upgrade on your Schema.\n";
-# $self->upgrade($pversion, $self->schema_version);
+ my $vtable = $self->{vschema}->resultset('Table');
+ my $version = 0;
+ eval {
+ my $stamp = $vtable->get_column('installed')->max;
+ $version = $vtable->search({ installed => $stamp })->first->version;
+ };
+ return $version;
}
sub _source_exists
return 1;
}
+=head2 backup
+
+This is an overridable method which is called just before the upgrade, to
+allow you to make a backup of the database. By default this method attempts
+to call C<< $self->storage->backup >> to run the standard backup for the
+database type in use.
+
+This method should return the name of the backup file, if appropriate.
+
+=cut
+
sub backup
{
my ($self) = @_;
$self->storage->backup($self->backup_directory());
}
-sub upgrade
-{
- my ($self) = @_;
-
- ## overridable sub, per default just run all the commands.
-
- $self->backup();
+# is this just a waste of time? if not then merge with DBI.pm
+sub _create_db_to_schema_diff {
+ my $self = shift;
- $self->run_upgrade(qr/create/i);
- $self->run_upgrade(qr/alter table .*? add/i);
- $self->run_upgrade(qr/alter table .*? (?!drop)/i);
- $self->run_upgrade(qr/alter table .*? drop/i);
- $self->run_upgrade(qr/drop/i);
-# $self->run_upgrade(qr//i);
+ my %driver_to_db_map = (
+ 'mysql' => 'MySQL'
+ );
- my $vschema = DBIx::Class::Version->connect(@{$self->storage->connect_info()});
- my $vtable = $vschema->resultset('Table');
- $vtable->create({ Version => $self->schema_version,
- Installed => strftime("%Y-%m-%d %H:%M:%S", gmtime())
- });
-}
+ my $db = $driver_to_db_map{$self->storage->dbh->{Driver}->{Name}};
+ unless ($db) {
+ print "Sorry, this is an unsupported DB\n";
+ return;
+ }
+  eval { require SQL::Translator; SQL::Translator->VERSION('0.09') };
+  if ($@) {
+    $self->throw_exception("SQL::Translator 0.09 required");
+  }
-sub run_upgrade
-{
- my ($self, $stm) = @_;
-# print "Reg: $stm\n";
- my @statements = grep { $_ =~ $stm } @{$self->_filedata};
-# print "Statements: ", join("\n", @statements), "\n";
- $self->_filedata([ grep { $_ !~ /$stm/i } @{$self->_filedata} ]);
+ my $db_tr = SQL::Translator->new({
+ add_drop_table => 1,
+ parser => 'DBI',
+ parser_args => { dbh => $self->storage->dbh }
+ });
+
+ $db_tr->producer($db);
+ my $dbic_tr = SQL::Translator->new;
+ $dbic_tr->parser('SQL::Translator::Parser::DBIx::Class');
+ $dbic_tr = $self->storage->configure_sqlt($dbic_tr, $db);
+ $dbic_tr->data($self);
+ $dbic_tr->producer($db);
+
+ $db_tr->schema->name('db_schema');
+ $dbic_tr->schema->name('dbic_schema');
+
+ # is this really necessary?
+ foreach my $tr ($db_tr, $dbic_tr) {
+ my $data = $tr->data;
+ $tr->parser->($tr, $$data);
+ }
- for (@statements)
+ my $diff = SQL::Translator::Diff::schema_diff($db_tr->schema, $db,
+ $dbic_tr->schema, $db,
+ { ignore_constraint_names => 1, ignore_index_names => 1, caseopt => 1 });
+
+ my $filename = $self->ddl_filename(
+ $db,
+ $self->upgrade_directory,
+ $self->schema_version,
+ 'PRE',
+ );
+ my $file;
+ if(!open($file, ">$filename"))
{
- $self->storage->debugobj->query_start($_) if $self->storage->debug;
- $self->storage->dbh->do($_) or warn "SQL was:\n $_";
- $self->storage->debugobj->query_end($_) if $self->storage->debug;
+ $self->throw_exception("Can't open $filename for writing ($!)");
}
+ print $file $diff;
+ close($file);
- return 1;
+ print "WARNING: There may be differences between your DB and your DBIC schema. Please review and if necessary run the SQL in $filename to sync your DB.\n";
}
-1;
+=head2 upgrade
-=head1 NAME
+Call this to attempt to upgrade your database from the version it is at to the version
+this DBIC schema is at.
-DBIx::Class::Schema::Versioned - DBIx::Class::Schema plugin for Schema upgrades
+It requires an SQL diff file to exist in $schema->upgrade_directory; normally
+you will have created this using $schema->create_ddl_dir.
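+
+For example, upgrading a MySQL database from 1.0 to 2.0 will look for a file
+named along these lines:
+
+  /path/to/upgrades/Library-Schema-1.0-2.0-MySQL.sql
+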
-=head1 SYNOPSIS
+=cut
- package Library::Schema;
- use base qw/DBIx::Class::Schema/;
- # load Library::Schema::CD, Library::Schema::Book, Library::Schema::DVD
- __PACKAGE__->load_classes(qw/CD Book DVD/);
+sub upgrade
+{
+ my ($self) = @_;
+ my $db_version = $self->get_db_version();
- __PACKAGE__->load_components(qw/+DBIx::Class::Schema::Versioned/);
- __PACKAGE__->upgrade_directory('/path/to/upgrades/');
- __PACKAGE__->backup_directory('/path/to/backups/');
+ # db unversioned
+ unless ($db_version) {
+    # set version in dbix_class_schema_versions table; we can't actually
+    # upgrade as we don't know what version the DB is at
+ $self->_create_db_to_schema_diff() if ($self->do_diff_on_init);
- sub backup
- {
- my ($self) = @_;
- # my special backup process
+ # create versions table and version row
+ $self->{vschema}->deploy;
+ $self->_set_db_version;
+ return;
}
- sub upgrade
- {
- my ($self) = @_;
-
- ## overridable sub, per default just runs all the commands.
-
- $self->run_upgrade(qr/create/i);
- $self->run_upgrade(qr/alter table .*? add/i);
- $self->run_upgrade(qr/alter table .*? (?!drop)/i);
- $self->run_upgrade(qr/alter table .*? drop/i);
- $self->run_upgrade(qr/drop/i);
- $self->run_upgrade(qr//i);
+ # db and schema at same version. do nothing
+ if ($db_version eq $self->schema_version) {
+ print "Upgrade not necessary\n";
+ return;
}
-=head1 DESCRIPTION
-
-This module is a component designed to extend L<DBIx::Class::Schema>
-classes, to enable them to upgrade to newer schema layouts. To use this
-module, you need to have called C<create_ddl_dir> on your Schema to
-create your upgrade files to include with your delivery.
-
-A table called I<SchemaVersions> is created and maintained by the
-module. This contains two fields, 'Version' and 'Installed', which
-contain each VERSION of your Schema, and the date+time it was installed.
-
-If you would like to influence which levels of version change need
-upgrades in your Schema, you can override the method C<ddl_filename>
-in L<DBIx::Class::Schema>. Return a false value if there is no upgrade
-path between the two versions supplied. By default, every change in
-your VERSION is regarded as needing an upgrade.
-
-The actual upgrade is called manually by calling C<upgrade> on your
-schema object. Code is run at connect time to determine whether an
-upgrade is needed, if so, a warning "Versions out of sync" is
-produced.
-
-NB: At the moment, SQLite upgrading is rather spotty, as SQL::Translator::Diff
-returns SQL statements that SQLite does not support.
+  # strangely the first time this is called can
+  # differ from subsequent times, so we call it
+  # here to be sure.
+  # XXX - just fix it
+ $self->storage->sqlt_type;
+
+ my $upgrade_file = $self->ddl_filename(
+ $self->storage->sqlt_type,
+ $self->upgrade_directory,
+ $self->schema_version,
+ $db_version,
+ );
+
+ unless (-f $upgrade_file) {
+ warn "Upgrade not possible, no upgrade file found ($upgrade_file), please create one\n";
+ return;
+ }
+ # backup if necessary then apply upgrade
+ $self->_filedata($self->_read_sql_file($upgrade_file));
+ $self->backup() if($self->do_backup);
+ $self->txn_do(sub { $self->do_upgrade() });
-=head1 METHODS
+ # set row in dbix_class_schema_versions table
+ $self->_set_db_version;
+}
-=head2 backup
+sub _set_db_version {
+ my $self = shift;
-This is an overwritable method which is called just before the upgrade, to
-allow you to make a backup of the database. Per default this method attempts
-to call C<< $self->storage->backup >>, to run the standard backup on each
-database type.
+ my $vtable = $self->{vschema}->resultset('Table');
+ $vtable->create({ version => $self->schema_version,
+ installed => strftime("%Y-%m-%d %H:%M:%S", gmtime())
+ });
-This method should return the name of the backup file, if appropriate.
+}
-C<backup> is called from C<upgrade>, make sure you call it, if you write your
-own <upgrade> method.
+sub _read_sql_file {
+ my $self = shift;
+ my $file = shift || return;
+
+  open my $fh, '<', $file
+    or do { warn "Can't open upgrade file, $file ($!)"; return };
+ my @data = split(/\n/, join('', <$fh>));
+ @data = grep(!/^--/, @data);
+ @data = split(/;/, join('', @data));
+ close($fh);
+ @data = grep { $_ && $_ !~ /^-- / } @data;
+  @data = grep { $_ !~ /^(BEGIN TRANSACTION|COMMIT)/m } @data;
+ return \@data;
+}
-=head2 upgrade
+=head2 do_upgrade
This is an overridable method used to run your upgrade. The freeform method
allows you to run your upgrade any way you please; you can call C<run_upgrade>
commands, then migrate your data from old to new tables/formats, then
issue the DROP commands when you are finished.
+
+By default, it will run the whole upgrade file as is.
+
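+For example, a custom override might look like this sketch:
+
+  sub do_upgrade {
+    my ($self) = @_;
+    $self->run_upgrade(qr/create/i);  # new tables first
+    $self->run_upgrade(qr/alter/i);   # then alterations
+    # ... migrate data from old to new tables here ...
+    $self->run_upgrade(qr/drop/i);    # drops last
+  }
+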
+=cut
+
+sub do_upgrade
+{
+ my ($self) = @_;
+
+  ## overridable sub; by default just run all the commands.
+ $self->run_upgrade(qr/create/i);
+ $self->run_upgrade(qr/alter table .*? add/i);
+ $self->run_upgrade(qr/alter table .*? (?!drop)/i);
+ $self->run_upgrade(qr/alter table .*? drop/i);
+ $self->run_upgrade(qr/drop/i);
+}
+
=head2 run_upgrade
$self->run_upgrade(qr/create/i);
Runs a set of SQL statements matching a passed in regular expression. The
idea is that this method can be called any number of times from your
C<upgrade> method, running whichever commands you specify via the
-regex in the parameter.
+regex in the parameter. This will only work reliably when called from within
+the overridable C<do_upgrade> method, since C<upgrade> loads the SQL file
+data this method operates on.
-=head2 upgrade_directory
+=cut
-Use this to set the directory your upgrade files are stored in.
+sub run_upgrade
+{
+ my ($self, $stm) = @_;
-=head2 backup_directory
+ return unless ($self->_filedata);
+ my @statements = grep { $_ =~ $stm } @{$self->_filedata};
+ $self->_filedata([ grep { $_ !~ /$stm/i } @{$self->_filedata} ]);
-Use this to set the directory you want your backups stored in.
+ for (@statements)
+ {
+ $self->storage->debugobj->query_start($_) if $self->storage->debug;
+ $self->storage->dbh->do($_) or warn "SQL was:\n $_";
+ $self->storage->debugobj->query_end($_) if $self->storage->debug;
+ }
-=head2 schema_version
+ return 1;
+}
+
+=head2 connection
+
+Overridden method. This checks the DBIC schema version against the DB version
+and warns if they are not the same or if the DB is unversioned. It also
+provides compatibility between the old versions table (SchemaVersions) and
+the new one (dbix_class_schema_versions).
+
+To avoid the checks on connect, set the env var DBIC_NO_VERSION_CHECK. This can be
+useful for scripts.
+
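+For example, from the shell (script name illustrative):
+
+  DBIC_NO_VERSION_CHECK=1 perl my_script.pl
+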
+=cut
+
+sub connection {
+ my $self = shift;
+ $self->next::method(@_);
+ $self->_on_connect;
+ return $self;
+}
+
+sub _on_connect
+{
+ my ($self) = @_;
+ $self->{vschema} = DBIx::Class::Version->connect(@{$self->storage->connect_info()});
+ my $vtable = $self->{vschema}->resultset('Table');
+
+ # check for legacy versions table and move to new if exists
+ my $vschema_compat = DBIx::Class::VersionCompat->connect(@{$self->storage->connect_info()});
+ unless ($self->_source_exists($vtable)) {
+ my $vtable_compat = $vschema_compat->resultset('TableCompat');
+ if ($self->_source_exists($vtable_compat)) {
+ $self->{vschema}->deploy;
+      $vtable->create({ installed => $_->Installed, version => $_->Version })
+        for $vtable_compat->all;
+ $self->storage->dbh->do("DROP TABLE " . $vtable_compat->result_source->from);
+ }
+ }
+
+ # useful when connecting from scripts etc
+ return if ($ENV{DBIC_NO_VERSION_CHECK});
+
+ my $pversion = $self->get_db_version();
+
+ if($pversion eq $self->schema_version)
+ {
+# warn "This version is already installed\n";
+ return 1;
+ }
+
+ if(!$pversion)
+ {
+ warn "Your DB is currently unversioned. Please call upgrade on your schema to sync the DB.\n";
+ return 1;
+ }
+
+ warn "Versions out of sync. This is " . $self->schema_version .
+ ", your database contains version $pversion, please call upgrade on your Schema.\n";
+}
+
+1;
-Returns the current schema class' $VERSION; does -not- use $schema->VERSION
-since that varies in results depending on if version.pm is installed, and if
-so the perl or XS versions. If you want this to change, bug the version.pm
-author to make vpp and vxs behave the same.
-=head1 AUTHOR
+=head1 AUTHORS
Jess Robinson <castaway@desert-island.demon.co.uk>
+Luke Saunders <luke@shadowcatsystems.co.uk>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
use Storable;
sub STORABLE_freeze {
- my ($self,$cloning) = @_;
+ my ($self, $cloning) = @_;
my $to_serialize = { %$self };
+
delete $to_serialize->{result_source};
+ delete $to_serialize->{related_resultsets};
+ delete $to_serialize->{_inflated_column};
+
return (Storable::freeze($to_serialize));
}
sub STORABLE_thaw {
- my ($self,$cloning,$serialized) = @_;
+ my ($self, $cloning, $serialized) = @_;
+
%$self = %{ Storable::thaw($serialized) };
$self->result_source($self->result_source_instance)
if $self->can('result_source_instance');
use Scalar::Util qw/weaken/;
use Carp::Clan qw/^DBIx::Class/;
use IO::File;
+use DBIx::Class::Storage::TxnScopeGuard;
__PACKAGE__->mk_group_accessors('simple' => qw/debug debugobj schema/);
__PACKAGE__->mk_group_accessors('inherited' => 'cursor_class');
sub txn_rollback { die "Virtual method!" }
+=head2 txn_scope_guard
+
+Returns a L<DBIx::Class::Storage::TxnScopeGuard> object, which begins a
+transaction and rolls it back automatically unless its C<commit> method is
+called before the guard is destroyed.
+
+=cut
+
+sub txn_scope_guard {
+ return DBIx::Class::Storage::TxnScopeGuard->new($_[0]);
+}
+
=head2 sql_maker
Returns a C<sql_maker> object - normally an object of class
} else {
$self->throw_exception("rows attribute must be positive if present")
if (defined($attrs->{rows}) && !($attrs->{rows} > 0));
+
+ # MySQL actually recommends this approach. I cringe.
+ $attrs->{rows} = 2**48 if not defined $attrs->{rows} and defined $attrs->{offset};
push @args, $attrs->{rows}, $attrs->{offset};
}
$version ||= $schema->VERSION || '1.x';
$sqltargs = { ( add_drop_table => 1 ), %{$sqltargs || {}} };
- $self->throw_exception(q{Can't create a ddl file without SQL::Translator 0.08: '}
+ $self->throw_exception(q{Can't create a ddl file without SQL::Translator 0.09: '}
. $self->_check_sqlt_message . q{'})
if !$self->_check_sqlt_version;
my $sqlt = SQL::Translator->new({
-# debug => 1,
add_drop_table => 1,
});
+
+ $sqlt->parser('SQL::Translator::Parser::DBIx::Class');
+ my $sqlt_schema = $sqlt->translate({ data => $schema }) or die $sqlt->error;
+
foreach my $db (@$databases)
{
$sqlt->reset();
- $sqlt->parser('SQL::Translator::Parser::DBIx::Class');
-# $sqlt->parser_args({'DBIx::Class' => $schema);
$sqlt = $self->configure_sqlt($sqlt, $db);
- $sqlt->data($schema);
+ $sqlt->{schema} = $sqlt_schema;
$sqlt->producer($db);
my $file;
if(-e $filename)
{
warn("$filename already exists, skipping $db");
- next;
- }
-
- my $output = $sqlt->translate;
- if(!$output)
- {
- warn("Failed to translate to $db, skipping. (" . $sqlt->error . ")");
- next;
- }
- if(!open($file, ">$filename"))
- {
- $self->throw_exception("Can't open $filename for writing ($!)");
+ next unless ($preversion);
+ } else {
+ my $output = $sqlt->translate;
+ if(!$output)
+ {
+ warn("Failed to translate to $db, skipping. (" . $sqlt->error . ")");
next;
- }
- print $file $output;
- close($file);
-
+ }
+ if(!open($file, ">$filename"))
+ {
+ $self->throw_exception("Can't open $filename for writing ($!)");
+ next;
+ }
+ print $file $output;
+ close($file);
+ }
if($preversion)
{
require SQL::Translator::Diff;
warn("No previous schema file found ($prefilename)");
next;
}
- #### We need to reparse the SQLite file we just wrote, so that
- ## Diff doesnt get all confoosed, and Diff is *very* confused.
- ## FIXME: rip Diff to pieces!
-# my $target_schema = $sqlt->schema;
-# unless ( $target_schema->name ) {
-# $target_schema->name( $filename );
-# }
- my @input;
- push @input, {file => $prefilename, parser => $db};
- push @input, {file => $filename, parser => $db};
- my ( $source_schema, $source_db, $target_schema, $target_db ) = map {
- my $file = $_->{'file'};
- my $parser = $_->{'parser'};
+ my $difffile = $schema->ddl_filename($db, $dir, $version, $preversion);
+ print STDERR "Diff: $difffile: $db, $dir, $version, $preversion \n";
+ if(-e $difffile)
+ {
+ warn("$difffile already exists, skipping");
+ next;
+ }
+
+ my $source_schema;
+ {
my $t = SQL::Translator->new;
$t->debug( 0 );
$t->trace( 0 );
- $t->parser( $parser ) or die $t->error;
- my $out = $t->translate( $file ) or die $t->error;
- my $schema = $t->schema;
- unless ( $schema->name ) {
- $schema->name( $file );
+ $t->parser( $db ) or die $t->error;
+ my $out = $t->translate( $prefilename ) or die $t->error;
+ $source_schema = $t->schema;
+ unless ( $source_schema->name ) {
+ $source_schema->name( $prefilename );
}
- ($schema, $parser);
- } @input;
+ }
+
+  # The "new" style of producers has sane normalization and can support
+  # diffing a SQL file against a DBIC->SQLT schema. Old style ones don't,
+  # so for those we have to diff parsed SQL against parsed SQL.
+ my $dest_schema = $sqlt_schema;
+
+ unless ( "SQL::Translator::Producer::$db"->can('preprocess_schema') ) {
+ my $t = SQL::Translator->new;
+ $t->debug( 0 );
+ $t->trace( 0 );
+ $t->parser( $db ) or die $t->error;
+ my $out = $t->translate( $filename ) or die $t->error;
+ $dest_schema = $t->schema;
+ $dest_schema->name( $filename )
+ unless $dest_schema->name;
+ }
my $diff = SQL::Translator::Diff::schema_diff($source_schema, $db,
- $target_schema, $db,
+ $dest_schema, $db,
{}
);
- my $difffile = $schema->ddl_filename($db, $dir, $version, $preversion);
- print STDERR "Diff: $difffile: $db, $dir, $version, $preversion \n";
- if(-e $difffile)
- {
- warn("$difffile already exists, skipping");
- next;
- }
if(!open $file, ">$difffile")
{
$self->throw_exception("Can't write to $difffile ($!)");
return join('', @rows);
}
- $self->throw_exception(q{Can't deploy without SQL::Translator 0.08: '}
+ $self->throw_exception(q{Can't deploy without SQL::Translator 0.09: '}
. $self->_check_sqlt_message . q{'})
if !$self->_check_sqlt_version;
my $_check_sqlt_message; # private
sub _check_sqlt_version {
return $_check_sqlt_version if defined $_check_sqlt_version;
- eval 'use SQL::Translator 0.08';
- $_check_sqlt_message = $@ ? $@ : '';
- $_check_sqlt_version = $@ ? 0 : 1;
+  eval 'use SQL::Translator 0.09';
+  $_check_sqlt_message = $@ || '';
+  $_check_sqlt_version = !$@;
}
sub _check_sqlt_message {
--- /dev/null
+package DBIx::Class::Storage::DBI::ODBC::Microsoft_SQL_Server;
+use strict;
+use warnings;
+
+use base qw/DBIx::Class::Storage::DBI/;
+
+sub _prep_for_execute {
+ my $self = shift;
+ my ($op, $extra_bind, $ident, $args) = @_;
+
+ my ($sql, $bind) = $self->SUPER::_prep_for_execute(@_);
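+  # append the identity fetch so it runs in the same execute as the INSERT,
+  # as SCOPE_IDENTITY() requires (see IMPLEMENTATION NOTES below)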
+ $sql .= ';SELECT SCOPE_IDENTITY()' if $op eq 'insert';
+
+ return ($sql, $bind);
+}
+
+sub insert {
+ my ($self, $source, $to_insert) = @_;
+
+ my $bind_attributes = $self->source_bind_attributes($source);
+ my (undef, $sth) = $self->_execute( 'insert' => [], $source, $bind_attributes, $to_insert);
+ $self->{_scope_identity} = $sth->fetchrow_array;
+
+ return $to_insert;
+}
+
+sub last_insert_id { shift->{_scope_identity} }
+
+sub sqlt_type { 'SQLServer' }
+
+sub _sql_maker_opts {
+ my ( $self, $opts ) = @_;
+
+ if ( $opts ) {
+ $self->{_sql_maker_opts} = { %$opts };
+ }
+
+ return { limit_dialect => 'Top', %{$self->{_sql_maker_opts}||{}} };
+}
+
+sub build_datetime_parser {
+ my $self = shift;
+ my $type = "DateTime::Format::Strptime";
+ eval "use ${type}";
+ $self->throw_exception("Couldn't load ${type}: $@") if $@;
+ return $type->new( pattern => '%F %T' );
+}
+
+1;
+
+__END__
+
+=head1 NAME
+
+DBIx::Class::Storage::ODBC::Microsoft_SQL_Server - Support specific to
+Microsoft SQL Server over ODBC
+
+=head1 DESCRIPTION
+
+This class implements support specific to Microsoft SQL Server over ODBC,
+including auto-increment primary keys and SQL::Abstract::Limit dialect. It
+is loaded automatically by DBIx::Class::Storage::DBI::ODBC when it
+detects a MSSQL back-end.
+
+=head1 IMPLEMENTATION NOTES
+
+Microsoft SQL Server supports three methods of retrieving the IDENTITY
+value for an inserted row: IDENT_CURRENT, @@IDENTITY, and SCOPE_IDENTITY().
+SCOPE_IDENTITY is used here because it is the safest. However, it must
+be called in the same execute statement, not just the same connection.
+
+So, this implementation appends a SELECT SCOPE_IDENTITY() statement
+onto each INSERT to accommodate that requirement.
+
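+For example, the SQL issued for an insert by this storage ends up looking
+like:
+
+  INSERT INTO artist (name) VALUES (?);SELECT SCOPE_IDENTITY()
+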
+=head1 METHODS
+
+=head2 insert
+
+=head2 last_insert_id
+
+=head2 sqlt_type
+
+=head2 build_datetime_parser
+
+The resulting parser handles the MSSQL C<DATETIME> type, but is almost
+certainly not sufficient for the other MSSQL 2008 date/time types.
+
+=head1 AUTHORS
+
+Marc Mims C<< <marc@questright.com> >>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
# We need to pass a copy of $global_options, since connect_info clears it
# while processing options
- $self->write_source->connect_info( [ @{$info->[0]}, { %$global_options } ] );
+ $self->write_source->connect_info( @{$info->[0]}, { %$global_options } );
- @dsns = map { ($_->[3]->{priority} || 10) => $_ } @{$info}[1..@$info-1];
+ @dsns = map { ($_->[3]->{priority} || 10) => $_ } @{$info->[0]}[1..@{$info->[0]}-1];
$global_options->{dsns} = \@dsns;
$self->read_source->connect_info( [ 'dbi:Multi:', undef, undef, { %$global_options } ] );
# my $dbfile = file($dbname);
my ($vol, $dbdir, $file) = File::Spec->splitpath($dbname);
# my $file = $dbfile->basename();
- $file = strftime("%y%m%d%h%M%s", localtime()) . $file;
+ $file = strftime("%Y-%m-%d-%H_%M_%S", localtime()) . $file;
$file = "B$file" while(-f $file);
mkdir($dir) unless -d $dir;
use warnings;
use base qw/Class::Accessor::Grouped/;
+use IO::File;
__PACKAGE__->mk_group_accessors(simple => qw/callback debugfh/);
--- /dev/null
+package # Hide from pause for now - till we get it working
+ DBIx::Class::Storage::TxnScopeGuard;
+
+use strict;
+use warnings;
+
+sub new {
+ my ($class, $storage) = @_;
+
+ $storage->txn_begin;
+ bless [ 0, $storage ], ref $class || $class;
+}
+
+sub commit {
+ my $self = shift;
+
+ $self->[1]->txn_commit;
+ $self->[0] = 1;
+}
+
+sub DESTROY {
+ my ($dismiss, $storage) = @{$_[0]};
+
+ return if $dismiss;
+
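+  # Preserve any exception that is currently propagating; the rollback
+  # below must not clobber it (hence the local $@ around the eval)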
+ my $exception = $@;
+
+ local $@;
+ eval { $storage->txn_rollback };
+ my $rollback_exception = $@;
+ if($rollback_exception) {
+ my $exception_class = "DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION";
+
+ $storage->throw_exception(
+ "Transaction aborted: ${exception}. "
+ . "Rollback failed: ${rollback_exception}"
+ ) unless $rollback_exception =~ /$exception_class/;
+ }
+}
+
+1;
+
+__END__
+
+=head1 NAME
+
+DBIx::Class::Storage::TxnScopeGuard
+
+=head1 SYNOPSIS
+
+ sub foo {
+ my ($self, $schema) = @_;
+
+ my $guard = $schema->txn_scope_guard;
+
+ # Multiple database operations here
+
+ $guard->commit;
+ }
+
+=head1 DESCRIPTION
+
+An object that behaves much like L<Scope::Guard>, but hardcoded to do the
+right thing with transactions in DBIx::Class.
+
+=head1 METHODS
+
+=head2 new
+
+Creating an instance of this class will start a new transaction. Expects a
+L<DBIx::Class::Storage> object as its only argument.
+
+=head2 commit
+
+Commit the transaction, and stop guarding the scope. If this method is not
+called (e.g. because an exception was thrown) and this object goes out of
+scope, then the transaction is rolled back.
+
+=cut
+
+=head1 SEE ALSO
+
+L<DBIx::Class::Schema/txn_scope_guard>.
+
+=head1 AUTHOR
+
+Ash Berlin, 2008.
+
+Inspired by L<Scope::Guard> by chocolateboy.
+
+This module is free software. It may be used, redistributed and/or modified
+under the same terms as Perl itself.
+
+=cut
use strict;
use warnings;
-use vars qw($DEBUG $VERSION @EXPORT_OK);
+use vars qw($DEBUG @EXPORT_OK);
$DEBUG = 0 unless defined $DEBUG;
-$VERSION = sprintf "%d.%02d", q$Revision 1.0$ =~ /(\d+)\.(\d+)/;
use Exporter;
use Data::Dumper;
sub parse {
my ($tr, $data) = @_;
my $args = $tr->parser_args;
- my $dbixschema = $args->{'DBIx::Schema'} || $data;
- $dbixschema ||= $args->{'package'};
+    my $dbicschema = $args->{'DBIx::Class::Schema'} || $args->{"DBIx::Schema"} || $data;
+ $dbicschema ||= $args->{'package'};
my $limit_sources = $args->{'sources'};
- die 'No DBIx::Schema' unless ($dbixschema);
- if (!ref $dbixschema) {
- eval "use $dbixschema;";
- die "Can't load $dbixschema ($@)" if($@);
+ die 'No DBIx::Class::Schema' unless ($dbicschema);
+ if (!ref $dbicschema) {
+ eval "use $dbicschema;";
+ die "Can't load $dbicschema ($@)" if($@);
}
my $schema = $tr->schema;
my $table_no = 0;
-# print Dumper($dbixschema->registered_classes);
-
- #foreach my $tableclass ($dbixschema->registered_classes)
+ $schema->name( ref($dbicschema) . " v" . ($dbicschema->VERSION || '1.x'))
+ unless ($schema->name);
my %seen_tables;
- my @monikers = $dbixschema->sources;
+ my @monikers = sort $dbicschema->sources;
if ($limit_sources) {
my $ref = ref $limit_sources || '';
die "'sources' parameter must be an array or hash ref" unless $ref eq 'ARRAY' || $ref eq 'HASH';
}
- foreach my $moniker (@monikers)
+ foreach my $moniker (sort @monikers)
{
- my $source = $dbixschema->source($moniker);
+ my $source = $dbicschema->source($moniker);
+    # It's possible to have multiple DBIC sources using the same table
next if $seen_tables{$source->name}++;
my $table = $schema->add_table(
$table->primary_key($source->primary_columns);
my @primary = $source->primary_columns;
+ foreach my $field (@primary) {
+ my $index = $table->add_index(
+ name => $field,
+ fields => [$field],
+ type => 'NORMAL',
+ );
+ }
my %unique_constraints = $source->unique_constraints;
- foreach my $uniq (keys %unique_constraints) {
+ foreach my $uniq (sort keys %unique_constraints) {
if (!$source->compare_relationship_keys($unique_constraints{$uniq}, \@primary)) {
$table->add_constraint(
type => 'unique',
name => "$uniq",
fields => $unique_constraints{$uniq}
);
+
+ my $index = $table->add_index(
+      # TODO: Pick a better name that won't conflict
+ name => $unique_constraints{$uniq}->[0],
+ fields => $unique_constraints{$uniq},
+ type => 'NORMAL',
+ );
+
}
}
my %created_FK_rels;
- foreach my $rel (@rels)
+ foreach my $rel (sort @rels)
{
my $rel_info = $source->relationship_info($rel);
$on_update = $otherrelationship->{'attrs'}->{cascade_copy} ? 'CASCADE' : '';
}
+ my $is_deferrable = $rel_info->{attrs}{is_deferrable} || 0;
+
# Make sure we don't create the same foreign key constraint twice
my $key_test = join("\x00", @keys);
# If the sets are different, then we assume it's a foreign key from
# us to another table.
# OR: If is_foreign_key_constraint attr is explicitly set (or set to false) on the relation
- if ( ! exists $created_FK_rels{$rel_table}->{$key_test} &&
- ( exists $rel_info->{attrs}{is_foreign_key_constraint} &&
- $rel_info->{attrs}{is_foreign_key_constraint} ||
- !$source->compare_relationship_keys(\@keys, \@primary)
- )
- )
- {
- $created_FK_rels{$rel_table}->{$key_test} = 1;
- $table->add_constraint(
- type => 'foreign_key',
- name => "fk_$keys[0]",
- fields => \@keys,
- reference_fields => \@refkeys,
- reference_table => $rel_table,
- on_delete => $on_delete,
- on_update => $on_update
- );
+ next if ( exists $created_FK_rels{$rel_table}->{$key_test} );
+ if ( exists $rel_info->{attrs}{is_foreign_key_constraint}) {
+      # respect the explicit setting: skip if set to 0, add the constraint if set to 1
+ next unless ($rel_info->{attrs}{is_foreign_key_constraint});
+ } else {
+      # don't add a constraint for might_have-type relationships
+ # next if ($rel_info->{attrs}{accessor} eq 'single' && exists $rel_info->{attrs}{join_type} && uc($rel_info->{attrs}{join_type}) eq 'LEFT');
+ # not sure about this one
+ next if $source->compare_relationship_keys(\@keys, \@primary);
+ }
+
+ $created_FK_rels{$rel_table}->{$key_test} = 1;
+ if (scalar(@keys)) {
+ $table->add_constraint(
+ type => 'foreign_key',
+ name => $table->name . "_fk_$keys[0]",
+ fields => \@keys,
+ reference_fields => \@refkeys,
+ reference_table => $rel_table,
+ on_delete => $on_delete,
+ on_update => $on_update,
+ deferrable => $is_deferrable,
+ );
+
+ my $index = $table->add_index(
+ name => join('_', @keys),
+ fields => \@keys,
+ type => 'NORMAL',
+ );
}
}
}
}
}
- if ($dbixschema->can('sqlt_deploy_hook')) {
- $dbixschema->sqlt_deploy_hook($schema);
+ if ($dbicschema->can('sqlt_deploy_hook')) {
+ $dbicschema->sqlt_deploy_hook($schema);
}
return 1;
use Getopt::Long;
use Pod::Usage;
-use JSON qw( jsonToObj );
+use JSON::Any;
-$JSON::BareKey = 1;
-$JSON::QuotApos = 1;
+
+my $json = JSON::Any->new(allow_barekey => 1, allow_singlequote => 1);
GetOptions(
'schema=s' => \my $schema_class,
die('No schema specified') if(!$schema_class);
eval("require $schema_class");
die('Unable to load schema') if ($@);
-$connect = jsonToObj( $connect ) if ($connect);
+$connect = $json->jsonToObj( $connect ) if ($connect);
my $schema = $schema_class->connect(
( $connect ? @$connect : () )
);
my $resultset = eval{ $schema->resultset($resultset_class) };
die('Unable to load the class with the schema') if ($@);
-$set = jsonToObj( $set ) if ($set);
-$where = jsonToObj( $where ) if ($where);
-$attrs = jsonToObj( $attrs ) if ($attrs);
+$set = $json->jsonToObj( $set ) if ($set);
+$where = $json->jsonToObj( $where ) if ($where);
+$attrs = $json->jsonToObj( $attrs ) if ($attrs);
if ($op eq 'insert') {
die('Do not use the where option with the insert op') if ($where);
die('Do not use the attrs option with the insert op') if ($attrs);
my $obj = $resultset->create( $set );
- print ''.ref($resultset).' ID: '.join(',',$obj->id())."\n";
+ print ''.ref($resultset).' ID: '.join(',',$obj->id())."\n" if (!$quiet);
}
elsif ($op eq 'update') {
$resultset = $resultset->search( ($where||{}) );
]
},
'DBIx::Class::CDBICompat::AccessorMapping' => { skip => 1 },
+ 'DBIx::Class::CDBICompat::AbstractSearch' => {
+ ignore => [qw(search_where)]
+ },
'DBIx::Class::CDBICompat::AttributeAPI' => { skip => 1 },
'DBIx::Class::CDBICompat::AutoUpdate' => { skip => 1 },
+ 'DBIx::Class::CDBICompat::ColumnsAsHash' => {
+ ignore => [qw(inflate_result new update)]
+ },
'DBIx::Class::CDBICompat::ColumnCase' => { skip => 1 },
'DBIx::Class::CDBICompat::ColumnGroups' => { skip => 1 },
'DBIx::Class::CDBICompat::Constraints' => { skip => 1 },
'DBIx::Class::CDBICompat::Constructor' => { skip => 1 },
+ 'DBIx::Class::CDBICompat::Copy' => {
+ ignore => [qw(copy)]
+ },
'DBIx::Class::CDBICompat::DestroyWarning' => { skip => 1 },
'DBIx::Class::CDBICompat::GetSet' => { skip => 1 },
'DBIx::Class::CDBICompat::HasA' => { skip => 1 },
'DBIx::Class::CDBICompat::LazyLoading' => { skip => 1 },
'DBIx::Class::CDBICompat::LiveObjectIndex' => { skip => 1 },
'DBIx::Class::CDBICompat::MightHave' => { skip => 1 },
- 'DBIx::Class::CDBICompat::ObjIndexStubs' => { skip => 1 },
+ 'DBIx::Class::CDBICompat::NoObjectIndex' => { skip => 1 },
'DBIx::Class::CDBICompat::Pager' => { skip => 1 },
'DBIx::Class::CDBICompat::ReadOnly' => { skip => 1 },
+ 'DBIx::Class::CDBICompat::Relationship' => { skip => 1 },
+ 'DBIx::Class::CDBICompat::Relationships' => { skip => 1 },
'DBIx::Class::CDBICompat::Retrieve' => { skip => 1 },
'DBIx::Class::CDBICompat::Stringify' => { skip => 1 },
'DBIx::Class::CDBICompat::TempColumns' => { skip => 1 },
eval "use DBD::SQLite";
plan $@
? ( skip_all => 'needs DBD::SQLite for testing' )
- : ( tests => 3 );
+ : ( tests => 4 );
}
use lib qw(t/lib);
use_ok( 'DBICTest' );
-
use_ok( 'DBICTest::Schema' );
+my $schema = DBICTest->init_schema;
{
my $warnings;
local $SIG{__WARN__} = sub { $warnings .= $_[0] };
- eval { DBICTest::CD->create({ title => 'vacation in antarctica' }) };
+ eval {
+ $schema->resultset('CD')
+ ->create({ title => 'vacation in antarctica' })
+ };
+ like $@, qr/NULL/; # as opposed to some other error
ok( $warnings !~ /uninitialized value/, "No warning from Storage" );
}
eval { require DateTime };
plan skip_all => "Need DateTime for inflation tests" if $@;
-plan tests => 20;
+plan tests => 21;
$schema->class('CD')
#DBICTest::Schema::CD
$cd->discard_changes;
is($cd->year->year, $before_year + 1, 'discard_changes clears the inflated value');
+
+my $copy = $cd->copy({ year => $now, title => "zemoose" });
+
+isnt( $copy->year->year, $before_year, "copy" );
# eval { $cd->store_inflated_column('year', \'year + 1') };
# print STDERR "ERROR: $@" if($@);
# clean up our mess
END {
- # Set the metadata back for the last_updated_on column
- $schema->class('Track')->add_column( 'last_updated_on' => $col_metadata );
-
if($dbh) {
$dbh->do("DROP TABLE track");
}
my $dbh = $schema->storage->dbh;
-$dbh->do("DROP TABLE artist", { RaiseError => 0, PrintError => 0 });
+eval { $dbh->do("DROP TABLE artist") };
$dbh->do("CREATE TABLE artist (artistid INTEGER GENERATED BY DEFAULT AS IDENTITY (START WITH 1, INCREMENT BY 1), name VARCHAR(255), charfield CHAR(10))");
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_MSSQL_ODBC_${_}" } qw/DSN USER PASS/};
+
+plan skip_all => 'Set $ENV{DBICTEST_MSSQL_ODBC_DSN}, _USER and _PASS to run this test'
+ unless ($dsn && $user);
+
+plan tests => 12;
+
+my $schema = DBICTest::Schema->connect($dsn, $user, $pass, {AutoCommit => 1});
+
+$schema->storage->ensure_connected;
+isa_ok( $schema->storage, 'DBIx::Class::Storage::DBI::ODBC::Microsoft_SQL_Server' );
+
+my $dbh = $schema->storage->dbh;
+
+eval { $dbh->do("DROP TABLE artist") };
+
+ $dbh->do(<<'');
+CREATE TABLE artist (
+ artistid INT IDENTITY NOT NULL,
+ name VARCHAR(255),
+ charfield CHAR(10),
+ primary key(artistid)
+)
+
+my %seen_id;
+
+# test primary key handling
+my $new = $schema->resultset('Artist')->create({ name => 'foo' });
+ok($new->artistid > 0, "Auto-PK worked");
+
+$seen_id{$new->artistid}++;
+
+# test LIMIT support
+for (1..6) {
+ $new = $schema->resultset('Artist')->create({ name => 'Artist ' . $_ });
+ is ( $seen_id{$new->artistid}, undef, "id for Artist $_ is unique" );
+ $seen_id{$new->artistid}++;
+}
+
+my $it = $schema->resultset('Artist')->search( {}, {
+ rows => 3,
+ order_by => 'artistid',
+});
+
+is( $it->count, 3, "LIMIT count ok" );
+is( $it->next->name, "foo", "iterator->next ok" );
+$it->next;
+is( $it->next->name, "Artist 2", "iterator->next ok" );
+is( $it->next, undef, "next past end of resultset ok" );
+
+
+# clean up our mess
+END {
+ $dbh->do('DROP TABLE artist') if $dbh;
+}
+
BEGIN {
eval "use DBD::SQLite";
- plan $@ ? (skip_all => 'needs DBD::SQLite for testing') : (tests => 9);
-}
+ plan $@ ? (skip_all => 'needs DBD::SQLite for testing') : (tests => 10);
+}
# test LIMIT
my $it = $schema->resultset("CD")->search( {},
);
is( $cds[0]->title, "Spoonful of bees", "software offset ok" );
+
+@cds = $schema->resultset("CD")->search( {},
+ {
+ offset => 2,
+ order_by => 'year' }
+);
+is( $cds[0]->title, "Spoonful of bees", "offset with no limit" );
+
+
# based on a failing criteria submitted by waswas
# requires SQL::Abstract >= 1.20
$it = $schema->resultset("CD")->search(
is($cd->producers->first->name, 'Matt S Trout', 'many_to_many accessor ok');
-TODO: {
- local $TODO = 'use prefetched values for many_to_many accessor';
-
- is($queries, 1, 'many_to_many accessor with nested prefetch ran exactly 1 query');
-}
+is($queries, 1, 'many_to_many accessor with nested prefetch ran exactly 1 query');
$queries = 0;
use warnings;
use Test::More;
+use Test::Exception;
use lib qw(t/lib);
use DBICTest;
my $schema = DBICTest->init_schema();
-plan tests => 54;
+plan tests => 67;
my $code = sub {
my ($artist, @cd_titles) = @_;
my $err = $@;
ok(($err eq ''), 'Pre-connection nested transactions.');
}
+
+# Test txn_rollback with nested
+{
+ local $TODO = "Work out how this should work";
+ my $local_schema = DBICTest->init_schema();
+
+ my $artist_rs = $local_schema->resultset('Artist');
+ throws_ok {
+
+ $local_schema->txn_begin;
+ $artist_rs->create({ name => 'Test artist rollback 1'});
+ $local_schema->txn_begin;
+ is($local_schema->storage->transaction_depth, 2, "Correct transaction depth");
+ $artist_rs->create({ name => 'Test artist rollback 2'});
+ $local_schema->txn_rollback;
+ } qr/Not sure what this should be.... something tho/, "Rolled back okay";
+ is($local_schema->storage->transaction_depth, 0, "Correct transaction depth");
+
+ ok(!$artist_rs->find({ name => 'Test artist rollback 1'}), "Test Artist not created")
+ || $artist_rs->find({ name => 'Test artist rollback 1'})->delete;
+}
+
+# Test txn_scope_guard
+{
+ local $TODO = "Work out how this should work";
+ my $schema = DBICTest->init_schema();
+
+ is($schema->storage->transaction_depth, 0, "Correct transaction depth");
+ my $artist_rs = $schema->resultset('Artist');
+ throws_ok {
+ my $guard = $schema->txn_scope_guard;
+
+
+ $artist_rs->create({
+ name => 'Death Cab for Cutie',
+ made_up_column => 1,
+ });
+
+ $guard->commit;
+  } qr/No such column made_up_column.*?line 16/, "Error propagated okay";
+
+ ok(!$artist_rs->find({name => 'Death Cab for Cutie'}), "Artist not created");
+
+ my $inner_exception;
+ eval {
+ outer($schema, 1);
+ };
+  is($@, $inner_exception, "Nested exceptions propagated");
+
+ ok(!$artist_rs->find({name => 'Death Cab for Cutie'}), "Artist not created");
+
+
+ eval {
+    # The 0 arg says don't die, just let the scope guard go out of scope,
+    # forcing a txn_rollback to happen
+ outer($schema, 0);
+ };
+ is($@, "Not sure what we want here, but something", "Rollback okay");
+
+ ok(!$artist_rs->find({name => 'Death Cab for Cutie'}), "Artist not created");
+
+ sub outer {
+ my ($schema) = @_;
+
+ my $guard = $schema->txn_scope_guard;
+ $schema->resultset('Artist')->create({
+ name => 'Death Cab for Cutie',
+ });
+ inner(@_);
+ $guard->commit;
+ }
+
+ sub inner {
+ my ($schema, $fatal) = @_;
+ my $guard = $schema->txn_scope_guard;
+
+ my $artist = $artist_rs->find({ name => 'Death Cab for Cutie' });
+
+ is($schema->storage->transaction_depth, 2, "Correct transaction depth");
+ undef $@;
+ eval {
+ $artist->cds->create({
+ title => 'Plans',
+ year => 2005,
+ $fatal ? ( foo => 'bar' ) : ()
+ });
+ };
+ if ($@) {
+      # Record what got thrown so we can test it propagates out properly.
+ $inner_exception = $@;
+ die $@;
+ }
+
+    # See what happens if we don't $guard->commit;
+ }
+}
use Test::More;
use lib qw(t/lib);
use DBICTest;
-use Storable;
+use Storable qw(dclone freeze thaw);
my $schema = DBICTest->init_schema();
-plan tests => 1;
+my %stores = (
+ dclone_method => sub { return $schema->dclone($_[0]) },
+ dclone_func => sub { return dclone($_[0]) },
+ "freeze/thaw_method" => sub {
+ my $ice = $schema->freeze($_[0]);
+ return $schema->thaw($ice);
+ },
+ "freeze/thaw_func" => sub {
+ thaw(freeze($_[0]));
+ },
+);
-my $artist = $schema->resultset('Artist')->find(1);
-my $copy = eval { Storable::dclone($artist) };
-is_deeply($copy, $artist, 'serialize row object works');
+plan tests => (7 * keys %stores);
+for my $name (keys %stores) {
+ my $store = $stores{$name};
+
+ my $artist = $schema->resultset('Artist')->find(1);
+
+ # Test that the procedural versions will work if there's a registered
+ # schema as with CDBICompat objects and that the methods work
+ # without.
+ if( $name =~ /func/ ) {
+ $artist->result_source_instance->schema($schema);
+ DBICTest::CD->result_source_instance->schema($schema);
+ }
+ else {
+ $artist->result_source_instance->schema(undef);
+ DBICTest::CD->result_source_instance->schema(undef);
+ }
+
+ my $copy = eval { $store->($artist) };
+ is_deeply($copy, $artist, "serialize row object works: $name");
+
+ # Test that an object with a related_resultset can be serialized.
+ my @cds = $artist->related_resultset("cds");
+
+ ok $artist->{related_resultsets}, 'has key: related_resultsets';
+
+ $copy = eval { $store->($artist) };
+ for my $key (keys %$artist) {
+ next if $key eq 'related_resultsets';
+ next if $key eq '_inflated_column';
+ is_deeply($copy->{$key}, $artist->{$key},
+ qq[serialize with related_resultset "$key"]);
+ }
+
+ ok eval { $copy->discard_changes; 1 } or diag $@;
+ is($copy->id, $artist->id, "IDs still match ");
+}
my $schema = DBICTest->init_schema;
-plan tests => 56;
+plan tests => 77;
my $translator = SQL::Translator->new(
parser_args => {
'display' => 'twokeys->cd',
'selftable' => 'twokeys', 'foreigntable' => 'cd',
'selfcols' => ['cd'], 'foreigncols' => ['cdid'],
- on_delete => '', on_update => '',
+ on_delete => '', on_update => '', deferrable => 0,
},
{
'display' => 'twokeys->artist',
'selftable' => 'twokeys', 'foreigntable' => 'artist',
'selfcols' => ['artist'], 'foreigncols' => ['artistid'],
- on_delete => 'CASCADE', on_update => 'CASCADE',
+ on_delete => 'CASCADE', on_update => 'CASCADE', deferrable => 0,
},
],
'display' => 'fourkeys_to_twokeys->twokeys',
'selftable' => 'fourkeys_to_twokeys', 'foreigntable' => 'twokeys',
'selfcols' => ['t_artist', 't_cd'], 'foreigncols' => ['artist', 'cd'],
- on_delete => 'CASCADE', on_update => 'CASCADE',
+ on_delete => 'CASCADE', on_update => 'CASCADE', deferrable => 0,
},
{
'display' => 'fourkeys_to_twokeys->fourkeys',
'selftable' => 'fourkeys_to_twokeys', 'foreigntable' => 'fourkeys',
'selfcols' => [qw(f_foo f_bar f_hello f_goodbye)],
'foreigncols' => [qw(foo bar hello goodbye)],
- on_delete => 'CASCADE', on_update => 'CASCADE',
+ on_delete => 'CASCADE', on_update => 'CASCADE', deferrable => 0,
},
],
'display' => 'cd_to_producer->cd',
'selftable' => 'cd_to_producer', 'foreigntable' => 'cd',
'selfcols' => ['cd'], 'foreigncols' => ['cdid'],
- on_delete => 'CASCADE', on_update => 'CASCADE',
+ on_delete => 'CASCADE', on_update => 'CASCADE', deferrable => 0,
},
{
'display' => 'cd_to_producer->producer',
'selftable' => 'cd_to_producer', 'foreigntable' => 'producer',
'selfcols' => ['producer'], 'foreigncols' => ['producerid'],
- on_delete => '', on_update => '',
+ on_delete => '', on_update => '', deferrable => 0,
},
],
'display' => 'self_ref_alias->self_ref for self_ref',
'selftable' => 'self_ref_alias', 'foreigntable' => 'self_ref',
'selfcols' => ['self_ref'], 'foreigncols' => ['id'],
- on_delete => 'CASCADE', on_update => 'CASCADE',
+ on_delete => 'CASCADE', on_update => 'CASCADE', deferrable => 0,
},
{
'display' => 'self_ref_alias->self_ref for alias',
'selftable' => 'self_ref_alias', 'foreigntable' => 'self_ref',
'selfcols' => ['alias'], 'foreigncols' => ['id'],
- on_delete => '', on_update => '',
+ on_delete => '', on_update => '', deferrable => 0,
},
],
'display' => 'cd->artist',
'selftable' => 'cd', 'foreigntable' => 'artist',
'selfcols' => ['artist'], 'foreigncols' => ['artistid'],
- on_delete => 'CASCADE', on_update => 'CASCADE',
+ on_delete => 'CASCADE', on_update => 'CASCADE', deferrable => 1,
},
],
'display' => 'artist_undirected_map->artist for id1',
'selftable' => 'artist_undirected_map', 'foreigntable' => 'artist',
'selfcols' => ['id1'], 'foreigncols' => ['artistid'],
- on_delete => 'CASCADE', on_update => '',
+ on_delete => 'CASCADE', on_update => '', deferrable => 0,
},
{
'display' => 'artist_undirected_map->artist for id2',
'selftable' => 'artist_undirected_map', 'foreigntable' => 'artist',
'selfcols' => ['id2'], 'foreigncols' => ['artistid'],
- on_delete => 'CASCADE', on_update => '',
+ on_delete => 'CASCADE', on_update => '', deferrable => 0,
},
],
'display' => 'track->cd',
'selftable' => 'track', 'foreigntable' => 'cd',
'selfcols' => ['cd'], 'foreigncols' => ['cdid'],
- on_delete => 'CASCADE', on_update => 'CASCADE',
+ on_delete => 'CASCADE', on_update => 'CASCADE', deferrable => 0,
},
],
'display' => 'treelike->treelike for parent',
'selftable' => 'treelike', 'foreigntable' => 'treelike',
'selfcols' => ['parent'], 'foreigncols' => ['id'],
- on_delete => 'CASCADE', on_update => 'CASCADE',
+ on_delete => 'CASCADE', on_update => 'CASCADE', deferrable => 0,
},
],
'display' => 'twokeytreelike->twokeytreelike for parent1,parent2',
'selftable' => 'twokeytreelike', 'foreigntable' => 'twokeytreelike',
'selfcols' => ['parent1', 'parent2'], 'foreigncols' => ['id1','id2'],
- on_delete => '', on_update => '',
+ on_delete => '', on_update => '', deferrable => 0,
},
],
'display' => 'tags->cd',
'selftable' => 'tags', 'foreigntable' => 'cd',
'selfcols' => ['cd'], 'foreigncols' => ['cdid'],
- on_delete => 'CASCADE', on_update => 'CASCADE',
+ on_delete => 'CASCADE', on_update => 'CASCADE', deferrable => 0,
},
],
'display' => 'bookmark->link',
'selftable' => 'bookmark', 'foreigntable' => 'link',
'selfcols' => ['link'], 'foreigncols' => ['id'],
- on_delete => '', on_update => '',
+ on_delete => '', on_update => '', deferrable => 0,
},
],
+ # ForceForeign
+ forceforeign => [
+ {
+ 'display' => 'forceforeign->artist',
+ 'selftable' => 'forceforeign', 'foreigntable' => 'artist',
+ 'selfcols' => ['artist'], 'foreigncols' => ['artist_id'],
+ on_delete => '', on_update => '', deferrable => 0,
+ },
+ ],
+
);
my %unique_constraints = (
);
my $tschema = $translator->schema();
-
# Test that the $schema->sqlt_deploy_hook was called okay and that it removed
# the 'link' table
ok( !defined($tschema->get_table('link')), "Link table was removed by hook");
ok( !defined($constraint), 'nonexistent FOREIGN KEY constraint not found' );
$constraint = get_constraint('UNIQUE', 'cd', ['artist']);
ok( !defined($constraint), 'nonexistent UNIQUE constraint not found' );
+$constraint = get_constraint('FOREIGN KEY', 'forceforeign', ['cd'], 'cd', ['cdid']);
+ok( !defined($constraint), 'forced nonexistent FOREIGN KEY constraint not found' );
for my $expected_constraints (keys %fk_constraints) {
for my $expected_constraint (@{ $fk_constraints{$expected_constraints} }) {
"on_delete parameter correct for `$desc'" );
is( $got->on_update, $expected->{on_update},
"on_update parameter correct for `$desc'" );
+ is( $got->deferrable, $expected->{deferrable},
+ "is_deferrable parameter correct for `$desc'" );
}
my $schema = DBICTest->init_schema();
-eval 'require JSON';
-plan skip_all => 'Install JSON to run this test' if ($@);
+eval 'require JSON::Any';
+plan skip_all => 'Install JSON::Any to run this test' if ($@);
eval 'require Text::CSV_XS';
if ($@) {
plan tests => 5;
-# double quotes round the arguments and single-quote within to make sure the
-# tests run on windows as well
+# the script supports double quotes around the arguments and single quotes
+# within, to make sure it runs on windows as well, but only if JSON::Any
+# picks the right module
+
my $employees = $schema->resultset('Employee');
-my $cmd = qq|$^X script/dbicadmin --schema=DBICTest::Schema --class=Employee --tlibs --connect="['dbi:SQLite:dbname=t/var/DBIxClass.db','','',{AutoCommit:1}]" --force --tlibs|;
+my @cmd = ($^X, qw|script/dbicadmin --quiet --schema=DBICTest::Schema --class=Employee --tlibs|, q|--connect=["dbi:SQLite:dbname=t/var/DBIxClass.db","","",{"AutoCommit":1}]|, qw|--force --tlibs|);
-`$cmd --op=insert --set="{name:'Matt'}"`;
+system(@cmd, qw|--op=insert --set={"name":"Matt"}|);
ok( ($employees->count()==1), 'insert count' );
my $employee = $employees->find(1);
ok( ($employee->name() eq 'Matt'), 'insert valid' );
-`$cmd --op=update --set="{name:'Trout'}"`;
+system(@cmd, qw|--op=update --set={"name":"Trout"}|);
$employee = $employees->find(1);
ok( ($employee->name() eq 'Trout'), 'update' );
-`$cmd --op=insert --set="{name:'Aran'}"`;
-my $data = `$cmd --op=select --attrs="{order_by:'name'}"`;
+system(@cmd, qw|--op=insert --set={"name":"Aran"}|);
+
+open(my $fh, "-|", @cmd, qw|--op=select --attrs={"order_by":"name"}|) or die $!;
+my $data = do { local $/; <$fh> };
+close($fh);
ok( ($data=~/Aran.*Trout/s), 'select with attrs' );
-`$cmd --op=delete --where="{name:'Trout'}"`;
+system(@cmd, qw|--op=delete --where={"name":"Trout"}|);
ok( ($employees->count()==1), 'delete' );
my $schema = DBICTest->init_schema();
-plan tests => 19;
+plan tests => 20;
# Test ensure_class_found
ok( $schema->ensure_class_found('DBIx::Class::Schema'),
'load_optional_class(DBICTest::SyntaxErrorComponent2) threw ok' );
}
+
+eval {
+ package Fake::ResultSet;
+
+ use base 'DBIx::Class::ResultSet';
+
+ __PACKAGE__->load_components('+DBICTest::SyntaxErrorComponent3');
+};
+
+# Make sure the errors in components of resultset classes are reported right.
+like($@, qr!\Qsyntax error at t/lib/DBICTest/SyntaxErrorComponent3.pm!, "Errors from RS components reported right");
+
1;
--- /dev/null
+use strict;
+use warnings;
+use lib qw(t/lib);
+
+use File::Copy;
+
+use DBICTest;
+
+use Test::More;
+
+BEGIN {
+ eval "use DBD::Multi";
+ plan $@
+ ? ( skip_all => 'needs DBD::Multi for testing' )
+ : ( tests => 3 );
+}
+
+my $schema = DBICTest->init_schema();
+
+$schema->storage_type( '::DBI::Replication' );
+
+
+my $db_file1 = "t/var/DBIxClass.db";
+my $db_file2 = "t/var/DBIxClass_slave1.db";
+my $db_file3 = "t/var/DBIxClass_slave2.db";
+my $dsn1 = $ENV{"DBICTEST_DSN"} || "dbi:SQLite:${db_file1}";
+my $dsn2 = $ENV{"DBICTEST_DSN2"} || "dbi:SQLite:${db_file2}";
+my $dsn3 = $ENV{"DBICTEST_DSN3"} || "dbi:SQLite:${db_file3}";
+
+$schema->connect( [
+ [ $dsn1, '', '', { AutoCommit => 1 } ],
+ [ $dsn2, '', '', { priority => 10 } ],
+ [ $dsn3, '', '', { priority => 10 } ]
+ ]
+ );
+
+$schema->populate('Artist', [
+ [ qw/artistid name/ ],
+ [ 4, 'Ozric Tentacles']
+ ]);
+
+my $new_artist1 = $schema->resultset('Artist')->find(4);
+
+isa_ok ($new_artist1, 'DBICTest::Artist');
+
+# reconnect
+my $schema2 = $schema->connect( [
+ [ $dsn1, '', '', { AutoCommit => 1 } ],
+ [ $dsn2, '', '', { priority => 10 } ],
+ [ $dsn3, '', '', { priority => 10 } ]
+ ]
+ );
+
+# try and read (should fail)
+eval { my $new_artist2 = $schema2->resultset('Artist')->find(4); };
+ok($@, 'read after disconnect fails because it uses slave 1 which we have neglected to "replicate" yet');
+
+# try and read (should succeed after faked synchronisation)
+copy($db_file1, $db_file2);
+$schema2 = $schema->connect( [
+ [ $dsn1, '', '', { AutoCommit => 1 } ],
+ [ $dsn2, '', '', { priority => 10 } ],
+ [ $dsn3, '', '', { priority => 10 } ]
+ ]
+ );
+my $new_artist3 = $schema2->resultset('Artist')->find(4);
+isa_ok ($new_artist3, 'DBICTest::Artist');
+
+unlink $db_file2;
use warnings;
use Test::More;
use File::Spec;
+use File::Copy;
+
+#warn "$dsn $user $pass";
+my ($dsn, $user, $pass);
BEGIN {
- eval "use DBD::SQLite; use SQL::Translator 0.08;";
+ ($dsn, $user, $pass) = @ENV{map { "DBICTEST_MYSQL_${_}" } qw/DSN USER PASS/};
+
+ plan skip_all => 'Set $ENV{DBICTEST_MYSQL_DSN}, _USER and _PASS to run this test'
+ unless ($dsn);
+
+
+ eval "use DBD::mysql; use SQL::Translator 0.08;";
plan $@
- ? ( skip_all => 'needs DBD::SQLite and SQL::Translator 0.08 for testing' )
- : ( tests => 6 );
+ ? ( skip_all => 'needs DBD::mysql and SQL::Translator 0.08 for testing' )
+ : ( tests => 13 );
}
-use lib qw(t/lib);
+my $version_table_name = 'dbix_class_schema_versions';
+my $old_table_name = 'SchemaVersions';
+use lib qw(t/lib);
use_ok('DBICVersionOrig');
-my $db_file = "t/var/versioning.db";
-unlink($db_file) if -e $db_file;
-unlink($db_file . "-journal") if -e $db_file . "-journal";
-mkdir("t/var") unless -d "t/var";
-unlink('t/var/DBICVersion-Schema-1.0-SQLite.sql');
+my $schema_orig = DBICVersion::Schema->connect($dsn, $user, $pass);
+eval { $schema_orig->storage->dbh->do('drop table ' . $version_table_name) };
+eval { $schema_orig->storage->dbh->do('drop table ' . $old_table_name) };
-my $schema_orig = DBICVersion::Schema->connect(
- "dbi:SQLite:$db_file",
- undef,
- undef,
- { AutoCommit => 1 },
-);
-# $schema->storage->ensure_connected();
+is($schema_orig->ddl_filename('MySQL', 't/var', '1.0'), File::Spec->catfile('t', 'var', 'DBICVersion-Schema-1.0-MySQL.sql'), 'Filename creation working');
+unlink('t/var/DBICVersion-Schema-1.0-MySQL.sql') if (-e 't/var/DBICVersion-Schema-1.0-MySQL.sql');
+$schema_orig->create_ddl_dir('MySQL', undef, 't/var');
-is($schema_orig->ddl_filename('SQLite', 't/var', '1.0'), File::Spec->catfile('t', 'var', 'DBICVersion-Schema-1.0-SQLite.sql'), 'Filename creation working');
-$schema_orig->create_ddl_dir('SQLite', undef, 't/var');
+ok(-f 't/var/DBICVersion-Schema-1.0-MySQL.sql', 'Created DDL file');
+$schema_orig->deploy({ add_drop_table => 1 });
+$schema_orig->upgrade();
-ok(-f 't/var/DBICVersion-Schema-1.0-SQLite.sql', 'Created DDL file');
-## do this here or let Versioned.pm do it?
-# $schema->deploy();
-
-my $tvrs = $schema_orig->resultset('Table');
+my $tvrs = $schema_orig->{vschema}->resultset('Table');
is($schema_orig->_source_exists($tvrs), 1, 'Created schema from DDL file');
eval "use DBICVersionNew";
-my $schema_new = DBICVersion::Schema->connect(
- "dbi:SQLite:$db_file",
- undef,
- undef,
- { AutoCommit => 1 },
-);
-
-unlink('t/var/DBICVersion-Schema-2.0-SQLite.sql');
-unlink('t/var/DBICVersion-Schema-1.0-2.0-SQLite.sql');
-$schema_new->create_ddl_dir('SQLite', undef, 't/var', '1.0');
-ok(-f 't/var/DBICVersion-Schema-1.0-2.0-SQLite.sql', 'Created DDL upgrade file');
-
-## create new to pick up filedata for upgrade files we just made (on_connect)
-my $schema_upgrade = DBICVersion::Schema->connect(
- "dbi:SQLite:$db_file",
- undef,
- undef,
- { AutoCommit => 1 },
-);
-
-## do this here or let Versioned.pm do it?
-$schema_upgrade->upgrade();
-$tvrs = $schema_upgrade->resultset('Table');
-is($schema_upgrade->_source_exists($tvrs), 1, 'Upgraded schema from DDL file');
-
-unlink($db_file) if -e $db_file;
-unlink($db_file . "-journal") if -e $db_file . "-journal";
-unlink('t/var/DBICVersion-Schema-1.0-SQLite.sql');
-unlink('t/var/DBICVersion-Schema-2.0-SQLite.sql');
-unlink('t/var/DBICVersion-Schema-1.0-2.0-SQLite.sql');
-unlink(<t/var/backup/*>);
+{
+ unlink('t/var/DBICVersion-Schema-2.0-MySQL.sql');
+ unlink('t/var/DBICVersion-Schema-1.0-2.0-MySQL.sql');
+
+ my $schema_upgrade = DBICVersion::Schema->connect($dsn, $user, $pass);
+ is($schema_upgrade->get_db_version(), '1.0', 'get_db_version ok');
+ is($schema_upgrade->schema_version, '2.0', 'schema version ok');
+ $schema_upgrade->create_ddl_dir('MySQL', '2.0', 't/var', '1.0');
+ ok(-f 't/var/DBICVersion-Schema-1.0-2.0-MySQL.sql', 'Created DDL file');
+ $schema_upgrade->upgrade();
+ is($schema_upgrade->get_db_version(), '2.0', 'db version number upgraded');
+
+ eval {
+ $schema_upgrade->storage->dbh->do('select NewVersionName from TestVersion');
+ };
+ is($@, '', 'new column created');
+}
+
+{
+ my $schema_version = DBICVersion::Schema->connect($dsn, $user, $pass);
+ eval {
+ $schema_version->storage->dbh->do('select * from ' . $version_table_name);
+ };
+ is($@, '', 'version table exists');
+
+ eval {
+ $schema_version->storage->dbh->do("DROP TABLE IF EXISTS $old_table_name");
+ $schema_version->storage->dbh->do("RENAME TABLE $version_table_name TO $old_table_name");
+ };
+ is($@, '', 'versions table renamed to old style table');
+
+ $schema_version = DBICVersion::Schema->connect($dsn, $user, $pass);
+ is($schema_version->get_db_version, '2.0', 'transition from old table name to new okay');
+
+ eval {
+ $schema_version->storage->dbh->do('select * from ' . $old_table_name);
+ };
+ ok($@, 'old version table gone');
+
+}
--- /dev/null
+#!/usr/bin/perl -w
+
+use Test::More;
+
+use strict;
+use warnings;
+
+BEGIN {
+ eval "use DBIx::Class::CDBICompat;";
+ if ($@) {
+ plan (skip_all => "Class::Trigger and DBIx::ContextualFetch required: $@");
+ next;
+ }
+ eval "use DBD::SQLite";
+ plan $@ ? (skip_all => 'needs DBD::SQLite for testing') : (tests => 10);
+}
+
+INIT {
+ use lib 't/testlib';
+ use Film;
+}
+
+
+Film->create({ Title => $_, Rating => "PG" }) for ("Superman", "Super Fuzz");
+Film->create({ Title => "Batman", Rating => "PG13" });
+
+my $superman = Film->search_where( Title => "Superman" );
+is $superman->next->Title, "Superman", "search_where() as iterator";
+is $superman->next, undef;
+
+{
+ my @supers = Film->search_where({ title => { 'like' => 'Super%' } });
+ is_deeply [sort map $_->Title, @supers],
+ [sort ("Super Fuzz", "Superman")], 'like';
+}
+
+
+my @all = Film->search_where({}, { order_by => "Title ASC" });
+is_deeply ["Batman", "Super Fuzz", "Superman"],
+ [map $_->Title, @all],
+ "order_by ASC";
+
+@all = Film->search_where({}, { order_by => "Title DESC" });
+is_deeply ["Superman", "Super Fuzz", "Batman"],
+ [map $_->Title, @all],
+ "order_by DESC";
+
+@all = Film->search_where({ Rating => "PG" }, { limit => 1, order_by => "Title ASC" });
+is_deeply ["Super Fuzz"],
+ [map $_->Title, @all],
+ "where, limit";
+
+@all = Film->search_where({}, { limit => 2, order_by => "Title ASC" });
+is_deeply ["Batman", "Super Fuzz"],
+ [map $_->Title, @all],
+ "limit";
+
+@all = Film->search_where({}, { offset => 1, order_by => "Title ASC" });
+is_deeply ["Super Fuzz", "Superman"],
+ [map $_->Title, @all],
+ "offset";
+
+@all = Film->search_where({}, { limit => 1, offset => 1, order_by => "Title ASC" });
+is_deeply ["Super Fuzz"],
+ [map $_->Title, @all],
+ "limit + offset";
+
+@all = Film->search_where({}, { limit => 2, offset => 1,
+ limit_dialect => "Top", order_by => "Title ASC"
+ });
+is_deeply ["Super Fuzz", "Superman"],
+ [map $_->Title, @all],
+ "limit_dialect ignored";
+
BEGIN {
eval "use DBIx::Class::CDBICompat;";
- plan $@ ? (skip_all => 'Class::Trigger and DBIx::ContextualFetch required') : (tests=> 24);
+ plan $@ ? (skip_all => "Class::Trigger and DBIx::ContextualFetch required: $@") : (tests=> 24);
}
ok $@, "Can't get title with no object";
}
-eval { my $duh = Film->create; };
+eval { my $duh = Film->insert; };
like $@, qr/create needs a hashref/, "needs a hashref";
ok +Film->create_test_film;
}
eval {
- my $ishtar = Film->create({ Title => 'Ishtar', Director => 'Elaine May' });
+ my $ishtar = Film->insert({ Title => 'Ishtar', Director => 'Elaine May' });
my $mandn =
- Film->create({ Title => 'Mikey and Nicky', Director => 'Elaine May' });
+ Film->insert({ Title => 'Mikey and Nicky', Director => 'Elaine May' });
my $new_leaf =
- Film->create({ Title => 'A New Leaf', Director => 'Elaine May' });
+ Film->insert({ Title => 'A New Leaf', Director => 'Elaine May' });
#use Data::Dumper; die Dumper(Film->search( Director => 'Elaine May' ));
cmp_ok(Film->search(Director => 'Elaine May'), '==', 3,
{ # update deleted object
my $rt = "Royal Tenenbaums";
- my $ten = Film->create({ title => $rt, Rating => "R" });
+ my $ten = Film->insert({ title => $rt, Rating => "R" });
$ten->rating(18);
Film->set_sql(drt => "DELETE FROM __TABLE__ WHERE title = ?");
Film->sql_drt->execute($rt);
# Primary key of 0
{
- my $zero = Film->create({ Title => 0, Rating => "U" });
+ my $zero = Film->insert({ Title => 0, Rating => "U" });
ok defined $zero, "Create 0";
ok my $ret = Film->retrieve(0), "Retrieve 0";
is $ret->Title, 0, "Title OK";
{
{
- ok my $byebye = DeletingFilm->create(
+ ok my $byebye = DeletingFilm->insert(
{
Title => 'Goodbye Norma Jean',
Rating => 'PG',
}
SKIP: {
- #skip "DBIx::Class doesn't yet have a live objects index", 3;
- #skip "Scalar::Util::weaken not available", 3
- #if !$Class::DBI::Weaken_Is_Available;
+ skip "Caching has been removed", 5
+ if Film->isa("DBIx::Class::CDBICompat::NoObjectIndex");
# my bad taste is your bad taste
my $btaste = Film->retrieve('Bad Taste');
isnt Scalar::Util::refaddr($btaste2), Scalar::Util::refaddr($btaste4),
"Clearing cache and retrieving again gives new object";
- $btaste=Film->create({
+ $btaste=Film->insert({
Title => 'Bad Taste 2',
Director => 'Peter Jackson',
Rating => 'R',
+#!/usr/bin/perl -w
+
use strict;
use Test::More;
-
+use Test::Warn;
#----------------------------------------------------------------------
# Test lazy loading
next;
}
eval "use DBD::SQLite";
- plan $@ ? (skip_all => 'needs DBD::SQLite for testing') : (tests => 25);
+ plan $@ ? (skip_all => 'needs DBD::SQLite for testing') : (tests => 35);
}
INIT {
};
ok($@, $@);
+
+warning_is {
+ Lazy->columns( TEMP => qw(that) );
+} "Declaring column that as TEMP but it already exists";
+
+# Test that create() and update() throw out columns that changed
+{
+ my $l = Lazy->create({
+ this => 99,
+ that => 2,
+ oop => 3,
+ opop => 4,
+ });
+
+ ok $l->db_Main->do(qq{
+ UPDATE @{[ $l->table ]}
+ SET oop = ?
+ WHERE this = ?
+ }, undef, 87, $l->this);
+
+ is $l->oop, 87;
+
+ $l->oop(32);
+ $l->update;
+
+ ok $l->db_Main->do(qq{
+ UPDATE @{[ $l->table ]}
+ SET oop = ?
+ WHERE this = ?
+ }, undef, 23, $l->this);
+
+ is $l->oop, 23;
+
+ $l->delete;
+}
+
+
+# Now again for inflated values
+SKIP: {
+ skip "Requires Date::Simple", 5 unless eval "use Date::Simple; 1; ";
+ Lazy->has_a(
+ orp => 'Date::Simple',
+ inflate => sub { Date::Simple->new($_[0] . '-01-01') },
+ deflate => 'format'
+ );
+
+ my $l = Lazy->create({
+ this => 89,
+ that => 2,
+ orp => 1998,
+ });
+
+ ok $l->db_Main->do(qq{
+ UPDATE @{[ $l->table ]}
+ SET orp = ?
+ WHERE this = ?
+ }, undef, 1987, $l->this);
+
+ is $l->orp, '1987-01-01';
+
+ $l->orp(2007);
+ is $l->orp, '2007-01-01'; # make sure it's inflated
+ $l->update;
+
+ ok $l->db_Main->do(qq{
+ UPDATE @{[ $l->table ]}
+ SET orp = ?
+ WHERE this = ?
+ }, undef, 1942, $l->this);
+
+ is $l->orp, '1942-01-01';
+
+ $l->delete;
+}
BEGIN {
eval "use DBIx::Class::CDBICompat;";
if ($@) {
- plan (skip_all => 'Class::Trigger and DBIx::ContextualFetch required');
+ plan (skip_all => "Class::Trigger and DBIx::ContextualFetch required: $@");
next;
}
eval "use DBD::SQLite";
--- /dev/null
+#!/usr/bin/perl -w
+
+use strict;
+use Test::More;
+
+BEGIN {
+ eval "use DBIx::Class::CDBICompat;";
+ plan $@ ? (skip_all => 'Class::Trigger and DBIx::ContextualFetch required')
+ : (tests=> 3);
+}
+
+package A;
+@A::ISA = qw(DBIx::Class::CDBICompat);
+__PACKAGE__->columns(Primary => 'id');
+
+package A::B;
+@A::B::ISA = 'A';
+__PACKAGE__->columns(All => qw(id b1));
+
+package A::C;
+@A::C::ISA = 'A';
+__PACKAGE__->columns(All => qw(id c1 c2 c3));
+
+package main;
+is join (' ', sort A->columns), 'id', "A columns";
+is join (' ', sort A::B->columns), 'b1 id', "A::B columns";
+is join (' ', sort A::C->columns), 'c1 c2 c3 id', "A::C columns";
next;
}
eval "use DBD::SQLite";
- plan $@ ? (skip_all => 'needs DBD::SQLite for testing') : (tests => 18);
+ plan $@ ? (skip_all => 'needs DBD::SQLite for testing') : (tests => 22);
}
use lib 't/testlib';
}
-
+{
+ my $host = Film->create({ title => "Gwoemul" });
+ $host->blurb("Monsters are real.");
+ my $info = $host->info;
+ is $info->blurb, "Monsters are real.";
+
+ $host->discard_changes;
+ is $host->info->id, $info->id,
+ 'relationships still valid after discard_changes';
+
+ ok $host->info->delete;
+ $host->discard_changes;
+ ok !$host->info, 'relationships rechecked after discard_changes';
+}
\ No newline at end of file
BEGIN {
eval "use DBIx::Class::CDBICompat;";
if ($@) {
+ diag $@;
plan (skip_all => 'Class::Trigger and DBIx::ContextualFetch required');
next;
}
eval "use DBD::SQLite";
- plan $@ ? (skip_all => 'needs DBD::SQLite for testing') : (tests => 53);
+ plan $@ ? (skip_all => 'needs DBD::SQLite for testing') : (tests => 54);
}
INIT {
use lib 't/testlib';
require Film;
require Actor;
+ require Director;
Actor->has_a(film => 'Film');
+ Film->has_a(director => 'Director');
sub Class::DBI::sheep { ok 0; }
}
return $col;
}
-sub Actor::accessor_name {
+sub Actor::accessor_name_for {
my ($class, $col) = @_;
return "movie" if lc $col eq "film";
return $col;
}
+# This is a class with accessor_name_for() but no corresponding mutator_name_for()
+sub Director::accessor_name_for {
+ my($class, $col) = @_;
+ return "nutty_as_a_fruitcake" if lc $col eq "isinsane";
+ return $col;
+}
+
my $data = {
Title => 'Bad Taste',
Director => 'Peter Jackson',
}
-SKIP: { # have non persistent accessor?
- #skip "Compat layer doesn't handle TEMP columns yet", 11;
+
+# Make sure a class with an accessor_name_for() method has a similar mutator.
+{
+ my $aki = Director->create({
+ name => "Aki Kaurismaki",
+ });
+
+ $aki->nutty_as_a_fruitcake(1);
+ is $aki->nutty_as_a_fruitcake, 1,
+ "a custom accessor without a custom mutator is setable";
+ $aki->update;
+}
+
+{
Film->columns(TEMP => qw/nonpersistent/);
ok(Film->find_column('nonpersistent'), "nonpersistent is a column");
ok(!Film->has_real_column('nonpersistent'), " - but it's not real");
}
}
-SKIP: { # was bug with TEMP and no Essential
- #skip "Compat layer doesn't have TEMP columns yet", 5;
+{
is_deeply(
- Actor->columns('Essential'),
- Actor->columns('Primary'),
+ [Actor->columns('Essential')],
+ [Actor->columns('Primary')],
"Actor has no specific essential columns"
);
ok(Actor->find_column('nonpersistent'), "nonpersistent is a column");
isa_ok $pj => "Actor";
}
-SKIP: {
- #skip "Compat layer doesn't handle read-only objects yet", 10;
+{
Film->autoupdate(1);
my $naked = Film->create({ title => 'Naked' });
my $sandl = Film->create({ title => 'Secrets and Lies' });
next;
}
eval "use DBD::SQLite";
- plan $@ ? (skip_all => 'needs DBD::SQLite for testing') : (tests => 17);
+ plan $@ ? (skip_all => 'needs DBD::SQLite for testing') : (tests => 20);
}
use lib 't/testlib';
my @cols = Film->columns('Essential');
is_deeply \@cols, ['title'], "1 Column in essential";
is +Film->transform_sql('__ESSENTIAL__'), 'title', '__ESSENTIAL__ expansion';
+
+ # This provides a more interesting test
+ Film->columns(Essential => qw(title rating));
+ is +Film->transform_sql('__ESSENTIAL__'), 'title, rating',
+ 'multi-col __ESSENTIAL__ expansion';
}
my $f1 = Film->create({ title => 'A', director => 'AA', rating => 'PG' });
};
{
+ Film->set_sql(
+ by_id => qq{
+ SELECT __ESSENTIAL__
+ FROM __TABLE__
+ WHERE __IDENTIFIER__
+ }
+ );
+
+ my $film = Film->retrieve_all->first;
+ my @found = Film->search_by_id($film->id);
+ is @found, 1;
+ is $found[0]->id, $film->id;
+}
+
+
+{
Actor->has_a(film => "Film");
Film->set_sql(
namerate => qq{
is $apg[1]->title, "B", "and B";
}
-#} # end SKIP block
BEGIN {
eval "use DBIx::Class::CDBICompat;";
if ($@) {
- plan (skip_all => 'Class::Trigger and DBIx::ContextualFetch required');
+ plan (skip_all => "Class::Trigger and DBIx::ContextualFetch required: $@");
next;
}
eval "use DBD::SQLite";
- plan $@ ? (skip_all => 'needs DBD::SQLite for testing') : (tests => 33);
+ plan $@ ? (skip_all => 'needs DBD::SQLite for testing') : (tests => 37);
}
use lib 't/testlib';
is $it->next->title, "Film 2", "And 2 is still next";
}
-SKIP: {
- #skip "Iterator doesn't yet have slice support", 19;
{
my $it = Film->retrieve_all;
is $it->next->title, "Film 2", "And 2 is still next";
}
-} # End SKIP
+{
+ my $it = Film->retrieve_all;
+ is $it, $it->count, "iterator returns count as a scalar";
+ ok $it, "iterator returns true when there are results";
+}
+
+{
+ my $it = Film->search( Title => "something which does not exist" );
+ is $it, 0;
+ ok !$it, "iterator returns false when no results";
+}
--- /dev/null
+$| = 1;
+use strict;
+
+use Test::More;
+
+eval { require Time::Piece::MySQL };
+plan skip_all => "Need Time::Piece::MySQL for this test" if $@;
+
+eval { require 't/testlib/Log.pm' };
+plan skip_all => "Need MySQL for this test" if $@;
+
+plan tests => 2;
+
+package main;
+
+my $log = Log->insert( { message => 'initial message' } );
+ok eval { $log->datetime_stamp }, "Have datetime";
+diag $@ if $@;
+
+$log->message( 'a revised message' );
+$log->update;
+ok eval { $log->datetime_stamp }, "Have datetime after update";
+diag $@ if $@;
+
--- /dev/null
+use strict;
+use Test::More;
+
+BEGIN {
+ eval "use DBIx::Class::CDBICompat;";
+ if ($@) {
+ plan (skip_all => 'Class::Trigger and DBIx::ContextualFetch required');
+ next;
+ }
+ eval "use DBD::SQLite";
+ plan $@ ? (skip_all => 'needs DBD::SQLite for testing') : (tests => 12);
+}
+
+INIT {
+ use lib 't/testlib';
+ use Film;
+ use Director;
+}
+
+{ # Cascade on delete
+ Director->has_many(nasties => 'Film');
+
+ my $dir = Director->insert({
+ name => "Lewis Teague",
+ });
+ my $kk = $dir->add_to_nasties({
+ Title => 'Alligator'
+ });
+ is $kk->director, $dir, "Director set OK";
+ is $dir->nasties, 1, "We have one nasty";
+
+ ok $dir->delete;
+ ok !Film->retrieve("Alligator"), "has_many cascade deletes by default";
+}
+
+
+# Two ways of saying not to cascade
+for my $args ({ no_cascade_delete => 1 }, { cascade => "None" }) {
+ Director->has_many(nasties => 'Film', $args);
+
+ my $dir = Director->insert({
+ name => "Lewis Teague",
+ });
+ my $kk = $dir->add_to_nasties({
+ Title => 'Alligator'
+ });
+ is $kk->director, $dir, "Director set OK";
+ is $dir->nasties, 1, "We have one nasty";
+
+ ok $dir->delete;
+ ok +Film->retrieve("Alligator"), "has_many with @{[ keys %$args ]} => @{[ values %$args ]}";
+ $kk->delete;
+}
+
+
+#{ # Fail on cascade
+# local $TODO = 'cascade => "Fail" unimplemented';
+#
+# Director->has_many(nasties => Film => { cascade => 'Fail' });
+#
+# my $dir = Director->insert({ name => "Nasty Noddy" });
+# my $kk = $dir->add_to_nasties({ Title => 'Killer Killers' });
+# is $kk->director, $dir, "Director set OK";
+# is $dir->nasties, 1, "We have one nasty";
+#
+# ok !eval { $dir->delete };
+# like $@, qr/1/, "Can't delete while films exist";
+#
+# my $rr = $dir->add_to_nasties({ Title => 'Revenge of the Revengers' });
+# ok !eval { $dir->delete };
+# like $@, qr/2/, "Still can't delete";
+#
+# $dir->nasties->delete_all;
+# ok eval { $dir->delete };
+# is $@, '', "Can delete once films are gone";
+#}
--- /dev/null
+#!/usr/bin/perl -w
+
+use strict;
+BEGIN {
+ eval "require Time::Piece";
+ use Test::More skip_all => "Time::Piece required for this test";
+}
+
+use Test::More tests => 12;
+use Test::Warn;
+
+package Temp::DBI;
+use base qw(DBIx::Class::CDBICompat);
+Temp::DBI->columns(All => qw(id date));
+
+my $strptime_inflate = sub {
+ Time::Piece->strptime(shift, "%Y-%m-%d")
+};
+Temp::DBI->has_a(
+ date => 'Time::Piece',
+ inflate => $strptime_inflate
+);
+
+
+package Temp::Person;
+use base 'Temp::DBI';
+Temp::Person->table('people');
+Temp::Person->columns(Info => qw(name pet));
+Temp::Person->has_a( pet => 'Temp::Pet' );
+
+package Temp::Pet;
+use base 'Temp::DBI';
+Temp::Pet->table('pets');
+Temp::Pet->columns(Info => qw(name));
+Temp::Pet->has_many(owners => 'Temp::Person');
+
+package main;
+
+{
+ my $pn_meta = Temp::Person->meta_info('has_a');
+ is_deeply [sort keys %$pn_meta], [qw/date pet/], "Person has Date and Pet";
+}
+
+{
+ my $pt_meta = Temp::Pet->meta_info;
+ is_deeply [keys %{$pt_meta->{has_a}}], [qw/date/], "Pet has Date";
+ is_deeply [keys %{$pt_meta->{has_many}}], [qw/owners/], "And owners";
+}
+
+{
+ my $pet = Temp::Person->meta_info( has_a => 'pet' );
+ is $pet->class, 'Temp::Person';
+ is $pet->foreign_class, 'Temp::Pet';
+ is $pet->accessor, 'pet';
+ is $pet->name, 'has_a';
+}
+
+{
+ my $owners = Temp::Pet->meta_info( has_many => 'owners' );
+
+ is_deeply $owners->args, {
+ foreign_key => 'pet',
+ mapping => [],
+ };
+}
+
+{
+ my $date = Temp::Pet->meta_info( has_a => 'date' );
+ is $date->class, 'Temp::DBI';
+ is $date->foreign_class, 'Time::Piece';
+ is $date->accessor, 'date';
+ is $date->args->{inflate}, $strptime_inflate;
+}
--- /dev/null
+use strict;
+use Test::More;
+
+BEGIN {
+ eval "use DBD::SQLite";
+ plan $@
+ ? (skip_all => 'needs DBD::SQLite for testing')
+ : (tests => 6);
+}
+
+use lib 't/testlib';
+require Film;
+
+sub Film::accessor_name_for {
+ my ($class, $col) = @_;
+ return "sheep" if lc $col eq "numexplodingsheep";
+ return $col;
+}
+
+my $data = {
+ Title => 'Bad Taste',
+ Director => 'Peter Jackson',
+ Rating => 'R',
+};
+
+my $bt;
+eval {
+ my $data = $data;
+ $data->{sheep} = 1;
+ ok $bt = Film->insert($data), "Modified accessor - with
+accessor";
+ isa_ok $bt, "Film";
+};
+is $@, '', "No errors";
+
+eval {
+ ok $bt->sheep(2), 'Modified accessor, set';
+ ok $bt->update, 'Update';
+};
+is $@, '', "No errors";
+
--- /dev/null
+#!/usr/bin/perl -w
+
+use strict;
+use Test::More;
+use Test::Warn;
+
+BEGIN {
+ eval "use DBIx::Class::CDBICompat;";
+ plan $@ ? (skip_all => "Class::Trigger and DBIx::ContextualFetch required: $@")
+ : ('no_plan');
+}
+
+use lib 't/testlib';
+use Film;
+
+my $waves = Film->insert({
+ Title => "Breaking the Waves",
+ Director => 'Lars von Trier',
+ Rating => 'R'
+});
+
+local $ENV{DBIC_CDBICOMPAT_HASH_WARN} = 0;
+
+{
+ local $ENV{DBIC_CDBICOMPAT_HASH_WARN} = 1;
+
+ warnings_like {
+ my $rating = $waves->{rating};
+ $waves->Rating("PG");
+ is $rating, "R", 'evaluation of column value is not deferred';
+ } qr{^Column 'rating' of 'Film/$waves' was fetched as a hash at \Q$0};
+
+ warnings_like {
+ is $waves->{title}, $waves->Title, "columns can be accessed as hashes";
+ } qr{^Column 'title' of 'Film/$waves' was fetched as a hash at\b};
+
+ $waves->Rating("G");
+
+ warnings_like {
+ is $waves->{rating}, "G", "updating via the accessor updates the hash";
+ } qr{^Column 'rating' of 'Film/$waves' was fetched as a hash at\b};
+
+
+ warnings_like {
+ $waves->{rating} = "PG";
+ } qr{^Column 'rating' of 'Film/$waves' was stored as a hash at\b};
+
+ $waves->update;
+ my @films = Film->search( Rating => "PG", Title => "Breaking the Waves" );
+ is @films, 1, "column updated as hash was saved";
+}
+
+warning_is {
+ $waves->{rating}
+} '', 'DBIC_CDBICOMPAT_HASH_WARN controls warnings';
+
+
+{
+ $waves->rating("R");
+ $waves->update;
+
+ no warnings 'redefine';
+ local *Film::rating = sub {
+ return "wibble";
+ };
+
+ is $waves->{rating}, "R";
+}
+
+
+{
+ no warnings 'redefine';
+ no warnings 'once';
+ local *Actor::accessor_name_for = sub {
+ my($class, $col) = @_;
+ return "movie" if lc $col eq "film";
+ return $col;
+ };
+
+ require Actor;
+ Actor->has_a( film => "Film" );
+
+ my $actor = Actor->insert({
+ name => 'Emily Watson',
+ film => $waves,
+ });
+
+ ok !eval { $actor->film };
+ is $actor->{film}->id, $waves->id,
+ 'hash access still works despite lack of accessor';
+}
+
+
+# Emulate that Class::DBI inflates immediately
+SKIP: {
+ skip "Need MySQL to run this test", 3 unless eval { require MyFoo };
+
+ my $foo = MyFoo->insert({
+ name => 'Whatever',
+ tdate => '1949-02-01',
+ });
+ isa_ok $foo, 'MyFoo';
+
+ isa_ok $foo->{tdate}, 'Date::Simple';
+ is $foo->{tdate}->year, 1949;
+}
\ No newline at end of file
--- /dev/null
+#!/usr/bin/perl -w
+
+use strict;
+use Test::More;
+
+BEGIN {
+ eval "use DBIx::Class::CDBICompat;";
+ plan $@ ? (skip_all => "Class::Trigger and DBIx::ContextualFetch required: $@")
+ : (tests=> 5);
+}
+
+{
+ package Thing;
+
+ use base 'DBIx::Class::Test::SQLite';
+
+ Thing->columns(TEMP => qw[foo bar]);
+ Thing->columns(All => qw[thing_id yarrow flower]);
+ sub foo { 42 }
+ sub yarrow { "hock" }
+}
+
+is_deeply( [sort Thing->columns("TEMP")],
+ [sort qw(foo bar)],
+ "TEMP columns set"
+);
+my $thing = Thing->construct(
+ { thing_id => 23, foo => "this", bar => "that" }
+);
+
+is( $thing->id, 23 );
+is( $thing->yarrow, "hock", 'custom accessor not overwritten by column' );
+is( $thing->foo, 42, 'custom routine not overwritten by temp column' );
+is( $thing->bar, "that", 'temp column accessor generated' );
--- /dev/null
+#!/usr/bin/perl -w
+
+use strict;
+use Test::More;
+
+BEGIN {
+ eval "use DBIx::Class::CDBICompat;";
+ plan $@ ? (skip_all => "Class::Trigger and DBIx::ContextualFetch required: $@")
+ : (tests=> 4);
+}
+
+INIT {
+ use lib 't/testlib';
+ use Film;
+}
+
+Film->insert({
+ Title => "Breaking the Waves",
+ Director => 'Lars von Trier',
+ Rating => 'R'
+});
+
+my $film = Film->construct({
+ Title => "Breaking the Waves",
+ Director => 'Lars von Trier',
+});
+
+isa_ok $film, "Film";
+is $film->title, "Breaking the Waves";
+is $film->director, "Lars von Trier";
+is $film->rating, "R", "constructed objects can get missing data from the db";
\ No newline at end of file
--- /dev/null
+#!/usr/bin/perl -w
+
+use strict;
+use Test::More;
+
+BEGIN {
+ eval "use DBIx::Class::CDBICompat;";
+ plan $@ ? (skip_all => "Class::Trigger and DBIx::ContextualFetch required: $@")
+ : (tests=> 4);
+}
+
+INIT {
+ use lib 't/testlib';
+}
+
+{
+ package # hide from PAUSE
+ MyFilm;
+
+ use base 'DBIx::Class::Test::SQLite';
+ use strict;
+
+ __PACKAGE__->set_table('Movies');
+ __PACKAGE__->columns(All => qw(id title));
+
+ sub create_sql {
+ return qq{
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ title VARCHAR(255)
+ }
+ }
+}
+
+my $film = MyFilm->create({ title => "For Your Eyes Only" });
+ok $film->id;
+
+my $new_film = $film->copy;
+ok $new_film->id;
+isnt $new_film->id, $film->id, "copy() gets new primary key";
+
+$new_film = $film->copy(42);
+is $new_film->id, 42, "copy() with new id";
+
--- /dev/null
+use strict;
+
+use Test::More;
+
+BEGIN {
+ eval "use DBIx::Class::CDBICompat;";
+ plan $@ ? (skip_all => "Class::Trigger and DBIx::ContextualFetch required: $@")
+ : ('no_plan');
+}
+
+
+{
+ package Thing;
+ use base qw(DBIx::Class::CDBICompat);
+}
+
+{
+ package Stuff;
+ use base qw(DBIx::Class::CDBICompat);
+}
+
+# There was a bug where looking at a column group before any were
+# set would cause them to be shared across classes.
+is_deeply [Stuff->columns("Essential")], [];
+Thing->columns(Essential => qw(foo bar baz));
+is_deeply [Stuff->columns("Essential")], [];
+
+1;
--- /dev/null
+use strict;
+use Test::More;
+
+
+BEGIN {
+ eval "use DBIx::Class::CDBICompat;";
+ plan skip_all => 'Class::Trigger and DBIx::ContextualFetch required' if $@;
+ eval "use DBD::SQLite";
+ plan $@ ? (skip_all => 'needs DBD::SQLite for testing') : (tests => 3);
+}
+
+
+use lib 't/testlib';
+use Director;
+
+# Test that has_many() will load the foreign class.
+ok !Class::Inspector->loaded( 'Film' );
+ok eval { Director->has_many( films => 'Film' ); 1; } || diag $@;
+
+my $shan_hua = Director->create({
+ Name => "Shan Hua",
+});
+
+my $inframan = Film->create({
+ Title => "Inframan",
+ Director => "Shan Hua",
+});
+my $guillotine2 = Film->create({
+ Title => "Flying Guillotine 2",
+ Director => "Shan Hua",
+});
+my $guillotine = Film->create({
+ Title => "Master of the Flying Guillotine",
+ Director => "Yu Wang",
+});
+
+is_deeply [sort $shan_hua->films], [sort $inframan, $guillotine2];
\ No newline at end of file
--- /dev/null
+#!/usr/bin/perl -w
+
+use strict;
+use Test::More;
+
+BEGIN {
+ eval "use DBIx::Class::CDBICompat;";
+ plan $@ ? (skip_all => 'Class::Trigger and DBIx::ContextualFetch required')
+ : (tests=> 2);
+}
+
+package Foo;
+
+use base qw(DBIx::Class::CDBICompat);
+
+eval {
+ Foo->table("foo");
+ Foo->columns(Essential => qw(foo bar));
+ #Foo->has_a( bar => "This::Does::Not::Exist::Yet" );
+};
+#::is $@, '';
+::is(Foo->table, "foo");
+::is_deeply [sort map lc, Foo->columns], [sort map lc, qw(foo bar)];
--- /dev/null
+#!/usr/bin/perl -w
+
+use strict;
+use Test::More;
+
+#----------------------------------------------------------------------
+# Test database failures
+#----------------------------------------------------------------------
+
+BEGIN {
+ eval "use DBIx::Class::CDBICompat;";
+ if ($@) {
+ plan (skip_all => 'Class::Trigger and DBIx::ContextualFetch required');
+ next;
+ }
+ eval "use DBD::SQLite";
+ plan $@ ? (skip_all => 'needs DBD::SQLite for testing') : (tests => 2);
+}
+
+use lib 't/testlib';
+use Film;
+
+Film->create({
+ title => "Bad Taste",
+ numexplodingsheep => 10,
+});
+
+Film->create({
+ title => "Evil Alien Conquerers",
+ numexplodingsheep => 2,
+});
+
+is( Film->maximum_value_of("numexplodingsheep"), 10 );
+is( Film->minimum_value_of("numexplodingsheep"), 2 );
--- /dev/null
+#!/usr/bin/perl -w
+
+use strict;
+use Test::More;
+
+BEGIN {
+ eval "use DBIx::Class::CDBICompat;";
+ plan $@ ? (skip_all => "Class::Trigger and DBIx::ContextualFetch required: $@")
+ : (tests=> 3);
+}
+
+{
+ package Thing;
+
+ use base 'DBIx::Class::Test::SQLite';
+
+ Thing->columns(TEMP => qw[foo bar baz]);
+ Thing->columns(All => qw[some real stuff]);
+}
+
+my $thing = Thing->construct({ foo => 23, some => 42, baz => 99 });
+$thing->set( foo => "wibble", some => "woosh" );
+is $thing->foo, "wibble";
+is $thing->some, "woosh";
+is $thing->baz, 99;
+
+$thing->discard_changes;
--- /dev/null
+use strict;
+use Test::More;
+$| = 1;
+
+BEGIN {
+ eval "use DBIx::Class::CDBICompat;";
+ if ($@) {
+ plan (skip_all => 'Class::Trigger and DBIx::ContextualFetch required');
+ }
+
+ eval "use DBD::SQLite";
+ plan skip_all => 'needs DBD::SQLite for testing' if $@;
+}
+
+INIT {
+ use lib 't/testlib';
+ use Film;
+}
+
+plan skip_all => "Object cache is turned off"
+ if Film->isa("DBIx::Class::CDBICompat::NoObjectIndex");
+
+plan tests => 5;
+
+
+ok +Film->create({
+ Title => 'This Is Spinal Tap',
+ Director => 'Rob Reiner',
+ Rating => 'R',
+});
+
+{
+ my $film1 = Film->retrieve( "This Is Spinal Tap" );
+ my $film2 = Film->retrieve( "This Is Spinal Tap" );
+
+ $film1->Director("Marty DiBergi");
+ is $film2->Director, "Marty DiBergi", 'retrieve returns the same object';
+
+ $film1->discard_changes;
+}
+
+{
+ Film->nocache(1);
+
+ my $film1 = Film->retrieve( "This Is Spinal Tap" );
+ my $film2 = Film->retrieve( "This Is Spinal Tap" );
+
+ $film1->Director("Marty DiBergi");
+ is $film2->Director, "Rob Reiner",
+ 'caching turned off';
+
+ $film1->discard_changes;
+}
+
+{
+ Film->nocache(0);
+
+ my $film1 = Film->retrieve( "This Is Spinal Tap" );
+ my $film2 = Film->retrieve( "This Is Spinal Tap" );
+
+ $film1->Director("Marty DiBergi");
+ is $film2->Director, "Marty DiBergi",
+ 'caching back on';
+
+ $film1->discard_changes;
+}
+
+
+{
+ Film->nocache(1);
+
+ local $Class::DBI::Weaken_Is_Available = 0;
+
+ my $film1 = Film->retrieve( "This Is Spinal Tap" );
+ my $film2 = Film->retrieve( "This Is Spinal Tap" );
+
+ $film1->Director("Marty DiBergi");
+ is $film2->Director, "Rob Reiner",
+ 'CDBI::Weaken_Is_Available turns off all caching';
+
+ $film1->discard_changes;
+}
--- /dev/null
+#!/usr/bin/perl -w
+
+use strict;
+use Test::More;
+
+BEGIN {
+ eval "use DBIx::Class::CDBICompat;";
+ plan $@ ? (skip_all => "Class::Trigger and DBIx::ContextualFetch required: $@")
+ : (tests=> 3);
+}
+
+INIT {
+ use lib 't/testlib';
+ use Film;
+}
+
+for my $title ("Bad Taste", "Braindead", "Forgotten Silver") {
+ Film->insert({ Title => $title, Director => 'Peter Jackson' });
+}
+
+Film->insert({ Title => "Transformers", Director => "Michael Bay"});
+
+{
+ my @films = Film->retrieve_from_sql(qq[director = "Peter Jackson" LIMIT 2]);
+ is @films, 2, "retrieve_from_sql with LIMIT";
+ is( $_->director, "Peter Jackson" ) for @films;
+}
--- /dev/null
+#!/usr/bin/perl -w
+
+use strict;
+use Test::More;
+use Test::NoWarnings;
+
+BEGIN {
+ eval "use DBIx::Class::CDBICompat;";
+ plan skip_all => "Class::Trigger and DBIx::ContextualFetch required: $@"
+ if $@;
+ plan skip_all => "DateTime required" unless eval { require DateTime };
+ plan tests => 1;
+}
+
+{
+ package Thing;
+
+ use base 'DBIx::Class::Test::SQLite';
+
+ Thing->columns(All => qw[thing_id this that date]);
+}
+
+my $thing = Thing->construct({ thing_id => 23, this => 42 });
+$thing->set( this => undef );
+$thing->discard_changes;
--- /dev/null
+#!/usr/bin/perl -w
+
+use strict;
+use Test::More;
+use Test::Exception;
+
+BEGIN {
+ eval "use DBIx::Class::CDBICompat;";
+ plan skip_all => "Class::Trigger and DBIx::ContextualFetch required: $@"
+ if $@;
+ plan skip_all => "DateTime required" unless eval { require DateTime };
+ plan tests => 1;
+}
+
+{
+ package Thing;
+
+ use base 'DBIx::Class::Test::SQLite';
+
+ Thing->columns(All => qw[thing_id this that date]);
+}
+
+my $thing = Thing->construct({ thing_id => 23, date => "01-02-1994" });
+my $date = DateTime->now;
+lives_ok {
+ $thing->set( date => $date );
+ $thing->set( date => $date );
+};
+
+
+
+$thing->discard_changes;
--- /dev/null
+#!/usr/bin/perl -w
+
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+plan tests => 5;
+
+my $cd = $schema->resultset("CD")->find(2);
+ok $cd->liner_notes;
+ok keys %{$cd->{_relationship_data}}, "_relationship_data populated";
+
+$cd->discard_changes;
+ok $cd->liner_notes, 'relationships still valid after discarding changes';
+
+ok $cd->liner_notes->delete;
+$cd->discard_changes;
+ok !$cd->liner_notes, 'discard_changes resets relationship';
\ No newline at end of file
--- /dev/null
+#!/usr/bin/perl -w
+
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+plan tests => 1;
+
+{
+ my @warnings;
+ local $SIG{__WARN__} = sub { push @warnings, @_; };
+ {
+ # Test that this doesn't cause infinite recursion.
+ local *DBICTest::Artist::DESTROY;
+ local *DBICTest::Artist::DESTROY = sub { $_[0]->discard_changes };
+
+ my $artist = $schema->resultset("Artist")->create( {
+ artistid => 10,
+ name => "artist number 10",
+ });
+
+ $artist->name("Wibble");
+
+ print "# About to call DESTROY\n";
+ }
+ is_deeply \@warnings, [];
+}
\ No newline at end of file
),
qw/SelfRefAlias TreeLike TwoKeyTreeLike Event EventTZ NoPrimaryKey/,
qw/Collection CollectionObject TypedObject/,
- qw/Owners BooksInLibrary/
+ qw/Owners BooksInLibrary/,
+ qw/ForceForeign/
);
sub sqlt_deploy_hook {
__PACKAGE__->set_primary_key('cdid');
__PACKAGE__->add_unique_constraint([ qw/artist title/ ]);
-__PACKAGE__->belongs_to( artist => 'DBICTest::Schema::Artist' );
+__PACKAGE__->belongs_to( artist => 'DBICTest::Schema::Artist', undef, { is_deferrable => 1 } );
__PACKAGE__->has_many( tracks => 'DBICTest::Schema::Track' );
__PACKAGE__->has_many(
--- /dev/null
+package # hide from PAUSE
+ DBICTest::Schema::ForceForeign;
+
+use base 'DBIx::Class::Core';
+
+__PACKAGE__->table('forceforeign');
+__PACKAGE__->add_columns(
+ 'artist' => { data_type => 'integer' },
+ 'cd' => { data_type => 'integer' },
+);
+__PACKAGE__->set_primary_key(qw/artist/);
+
+# Normally this would not appear as a FK constraint
+# since it uses the PK
+__PACKAGE__->might_have(
+ 'artist_1', 'DBICTest::Schema::Artist', {
+ 'foreign.artist_id' => 'self.artist',
+ }, {
+ is_foreign_key_constraint => 1,
+ },
+);
+
+# Normally this would appear as a FK constraint
+__PACKAGE__->might_have(
+ 'cd_1', 'DBICTest::Schema::CD', {
+ 'foreign.cdid' => 'self.cd',
+ }, {
+ is_foreign_key_constraint => 0,
+ },
+);
+
+1;
--- /dev/null
+package DBICErrorTest::SyntaxError;
+
+use strict;
+
+I'm a syntax error!
'is_auto_increment' => 0,
'default_value' => undef,
'is_foreign_key' => 0,
+ 'is_nullable' => 0,
+ 'size' => '10'
+ },
+ 'NewVersionName' => {
+ 'data_type' => 'VARCHAR',
+ 'is_auto_increment' => 0,
+ 'default_value' => undef,
+ 'is_foreign_key' => 0,
'is_nullable' => 1,
'size' => '20'
- },
+ }
);
__PACKAGE__->set_primary_key('Version');
--- /dev/null
+#!/usr/bin/perl -w
+
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+plan tests => 2;
+
+my $bookmark = $schema->resultset("Bookmark")->find(1);
+my $link = $bookmark->link;
+my $link_id = $link->id;
+
+my $new_link = $schema->resultset("Link")->new({
+ id => 42,
+ url => "http://monstersarereal.com",
+ title => "monstersarereal.com"
+});
+
+# Changing a relationship by id rather than by object would cause
+# old related_resultsets to be used.
+$bookmark->link($new_link->id);
+is $bookmark->link->id, $new_link->id;
+
+$bookmark->update;
+is $bookmark->link->id, $new_link->id;
--- /dev/null
+#!/usr/bin/perl -w
+
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+plan tests => 3;
+
+my $bookmark = $schema->resultset("Bookmark")->find(1);
+my $link = $bookmark->link;
+my $link_id = $link->id;
+ok $link->id;
+
+$link->delete;
+is $schema->resultset("Link")->search(id => $link_id)->count, 0,
+ "link $link_id was deleted";
+
+# Get a fresh object with nothing cached
+$bookmark = $schema->resultset("Bookmark")->find($bookmark->id);
+
+# This would create a new link row if none existed
+$bookmark->link;
+
+is $schema->resultset("Link")->search(id => $link_id)->count, 0,
+ 'accessor did not create a link object where there was none';
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+plan tests => 6;
+
+{
+ my $rs = $schema->resultset("CD")->search({});
+
+ ok $rs->count;
+ is $rs, $rs->count, "resultset as number with results";
+ ok $rs, "resultset as boolean always true";
+}
+
+{
+ my $rs = $schema->resultset("CD")->search({ title => "Does not exist" });
+
+ ok !$rs->count;
+ is $rs, $rs->count, "resultset as number without results";
+ ok $rs, "resultset as boolean always true";
+}
\ No newline at end of file
MyBase;
use strict;
-use base qw(DBIx::Class);
+use base qw(DBIx::Class::CDBICompat);
+
+use DBI;
use vars qw/$dbh/;
inflate => sub { Date::Simple->new(shift) },
deflate => 'format',
);
-__PACKAGE__->find_column('tdate')->placeholder("IF(1, CURDATE(), ?)");
+#__PACKAGE__->find_column('tdate')->placeholder("IF(1, CURDATE(), ?)");
sub create_sql {
return qq{
PgBase;
use strict;
-use base 'DBIx::Class';
+use base 'DBIx::Class::CDBICompat';
my $db = $ENV{DBD_PG_DBNAME} || 'template1';
my $user = $ENV{DBD_PG_USER} || 'postgres';