use strict;
use warnings;
use base qw/Class::Accessor::Grouped Class::C3::Componentised/;
+use MRO::Compat;
use mro 'c3';
use Carp::Clan qw/^DBIx::Class/;
-use DBIx::Class::Schema::Loader::RelBuilder;
-use Data::Dump qw/ dump /;
-use POSIX qw//;
-use File::Spec qw//;
-use Cwd qw//;
-use Digest::MD5 qw//;
-use Lingua::EN::Inflect::Number qw//;
-use Lingua::EN::Inflect::Phrase qw//;
-use File::Temp qw//;
+use DBIx::Class::Schema::Loader::RelBuilder ();
+use Data::Dump 'dump';
+use POSIX ();
+use File::Spec ();
+use Cwd ();
+use Digest::MD5 ();
+use Lingua::EN::Inflect::Number ();
+use Lingua::EN::Inflect::Phrase ();
+use String::ToIdentifier::EN ();
+use String::ToIdentifier::EN::Unicode ();
+use File::Temp ();
use Class::Unload;
use Class::Inspector ();
use Scalar::Util 'looks_like_number';
-use File::Slurp 'read_file';
-use DBIx::Class::Schema::Loader::Utils qw/split_name dumper_squashed eval_package_without_redefine_warnings class_path/;
+use DBIx::Class::Schema::Loader::Column;
+use DBIx::Class::Schema::Loader::Utils qw/split_name dumper_squashed eval_package_without_redefine_warnings class_path slurp_file sigwarn_silencer firstidx uniq/;
use DBIx::Class::Schema::Loader::Optional::Dependencies ();
use Try::Tiny;
use DBIx::Class ();
-use Encode qw/encode/;
-use List::MoreUtils 'all';
+use Encode qw/encode decode/;
+use List::Util qw/all any none/;
+use File::Temp 'tempfile';
use namespace::clean;
-our $VERSION = '0.07010';
+our $VERSION = '0.07047';
__PACKAGE__->mk_group_ro_accessors('simple', qw/
schema
additional_base_classes
left_base_classes
components
+ schema_components
skip_relationships
skip_load_external
moniker_map
result_base_class
result_roles
use_moose
+ use_moo
+ only_autoclean
overwrite_modifications
+ dry_run
+ generated_classes
+ omit_version
+ omit_timestamp
relationship_attrs
- db_schema
_tables
classes
_upgrading_classes
datetime_locale
config_file
loader_class
- qualify_objects
- tables
+ table_comments_table
+ column_comments_table
class_to_table
+ moniker_to_table
uniq_to_primary
+ quiet
+ allow_extra_m2m_cols
/);
preserve_case
col_collision_map
rel_collision_map
+ rel_name_map
real_dump_directory
result_components_map
result_roles_map
datetime_undef_if_invalid
_result_class_methods
naming_set
+ filter_generated_code
+ db_schema
+ qualify_objects
+ moniker_parts
+ moniker_part_separator
+ moniker_part_map
/);
+my $CURRENT_V = 'v7';
+
+my @CLASS_ARGS = qw(
+ schema_components schema_base_class result_base_class
+ additional_base_classes left_base_classes additional_classes components
+ result_roles
+);
+
+my $CR = "\x0d";
+my $LF = "\x0a";
+my $CRLF = "\x0d\x0a";
+
=head1 NAME
DBIx::Class::Schema::Loader::Base - Base DBIx::Class::Schema::Loader Implementation.
=head1 SYNOPSIS
-See L<DBIx::Class::Schema::Loader>
+See L<DBIx::Class::Schema::Loader>.
=head1 DESCRIPTION
The option also takes a hashref:
- naming => { relationships => 'v7', monikers => 'v7' }
+ naming => {
+ relationships => 'v8',
+ monikers => 'v8',
+ column_accessors => 'v8',
+ force_ascii => 1,
+ }
+
+or
+
+ naming => { ALL => 'v8', force_ascii => 1 }
The keys are:
=over 4
+=item ALL
+
+Set L</relationships>, L</monikers> and L</column_accessors> to the specified
+value.
+
=item relationships
How to name relationship accessors.
How to name column accessors in Result classes.
+=item force_ascii
+
+For L</v8> mode and later, uses L<String::ToIdentifier::EN> instead of
+L<String::ToIdentifier::EN::Unicode> to force monikers and other identifiers to
+ASCII.
+
=back
The values can be:
=item v7
This mode is identical to C<v6> mode, except that monikerization of CamelCase
-table names is also done correctly.
+table names is also done better (but best in v8.)
-CamelCase column names in case-preserving mode will also be handled correctly
-for relationship name inflection. See L</preserve_case>.
+CamelCase column names in case-preserving mode will also be handled better
+for relationship name inflection (but best in v8.) See L</preserve_case>.
In this mode, CamelCase L</column_accessors> are normalized based on case
transition instead of just being lowercased, so C<FooId> becomes C<foo_id>.
-If you don't have any CamelCase table or column names, you can upgrade without
-breaking any of your code.
+=item v8
+
+(EXPERIMENTAL)
+
+The default mode is L</v7>; to get L</v8> mode, you have to specify it in
+L</naming> explicitly until C<0.08> comes out.
+
+L</monikers> and L</column_accessors> are created using
+L<String::ToIdentifier::EN::Unicode> or L<String::ToIdentifier::EN> if
+L</force_ascii> is set; this is only significant for names with non-C<\w>
+characters such as C<.>.
+
+CamelCase identifiers with words in all caps, e.g. C<VLANValidID> are supported
+correctly in this mode.
+
+For relationships, belongs_to accessors are made from column names by stripping
+postfixes other than C<_id> as well, for example just C<Id>, C<_?ref>, C<_?cd>,
+C<_?code> and C<_?num>, case insensitively.
=item preserve
__PACKAGE__->naming('v7');
+=head2 quiet
+
+If true, will not print the usual C<Dumping manual schema ... Schema dump
+completed.> messages. Does not affect warnings (except for warnings related to
+L</really_erase_my_files>.)
+
+=head2 dry_run
+
+If true, don't actually write out the generated files. This can only be
+used with static schema generation.
+
=head2 generate_pod
By default POD will be generated for columns and relationships, using database
metadata for the text if available and supported.
-Reading database metadata (e.g. C<COMMENT ON TABLE some_table ...>) is only
-supported for Postgres right now.
+Comment metadata can be stored in two ways.
+
+The first is that you can create two tables named C<table_comments> and
+C<column_comments> respectively. These tables must exist in the same database
+and schema as the tables they describe. They both need to have columns named
+C<table_name> and C<comment_text>. The second one needs to have a column named
+C<column_name>. Then data stored in these tables will be used as a source of
+metadata about tables and comments.
+
+(If you wish you can change the name of these tables with the parameters
+L</table_comments_table> and L</column_comments_table>.)
+
+As a fallback you can use built-in commenting mechanisms. Currently this is
+only supported for PostgreSQL, Oracle and MySQL. To create comments in
+PostgreSQL you add statements of the form C<COMMENT ON TABLE some_table IS
+'...'>, the same syntax is used in Oracle. To create comments in MySQL you add
+C<COMMENT '...'> to the end of the column or table definition. Note that MySQL
+restricts the length of comments, and also does not handle complex Unicode
+characters properly.
Set this to C<0> to turn off all POD generation.
The default is C<60>
+=head2 table_comments_table
+
+The table to look for comments about tables in. By default C<table_comments>.
+See L</generate_pod> for details.
+
+This must not be a fully qualified name; the table will be looked for in the
+same database and schema as the table whose comment is being retrieved.
+
+=head2 column_comments_table
+
+The table to look for comments about columns in. By default C<column_comments>.
+See L</generate_pod> for details.
+
+This must not be a fully qualified name; the table will be looked for in the
+same database and schema as the table/column whose comment is being retrieved.
+
=head2 relationship_attrs
-Hashref of attributes to pass to each generated relationship, listed
-by type. Also supports relationship type 'all', containing options to
-pass to all generated relationships. Attributes set for more specific
-relationship types override those set in 'all'.
+Hashref of attributes to pass to each generated relationship, listed by type.
+Also supports relationship type 'all', containing options to pass to all
+generated relationships. Attributes set for more specific relationship types
+override those set in 'all', and any attributes specified by this option
+override the introspected attributes of the foreign key if any.
For example:
- relationship_attrs => {
- belongs_to => { is_deferrable => 0 },
- },
+ relationship_attrs => {
+ has_many => { cascade_delete => 1, cascade_copy => 1 },
+ might_have => { cascade_delete => 1, cascade_copy => 1 },
+ },
+
+Use this to turn on L<DBIx::Class> cascades for your
+L<has_many|DBIx::Class::Relationship/has_many> and
+L<might_have|DBIx::Class::Relationship/might_have> relationships; they default
+to off.
+
+Can also be a coderef, for more precise control, in which case the coderef gets
+this hash of parameters (as a list):
-use this to turn off DEFERRABLE on your foreign key constraints.
+ rel_name # the name of the relationship
+ rel_type # the type of the relationship: 'belongs_to', 'has_many' or 'might_have'
+ local_source # the DBIx::Class::ResultSource object for the source the rel is *from*
+ remote_source # the DBIx::Class::ResultSource object for the source the rel is *to*
+ local_table # the DBIx::Class::Schema::Loader::Table object for the table of the source the rel is from
+ local_cols # an arrayref of column names of columns used in the rel in the source it is from
+ remote_table # the DBIx::Class::Schema::Loader::Table object for the table of the source the rel is to
+ remote_cols # an arrayref of column names of columns used in the rel in the source it is to
+ attrs # the attributes that would be set
+
+it should return the new hashref of attributes, or nothing for no changes.
+
+For example:
+
+ relationship_attrs => sub {
+ my %p = @_;
+
+ say "the relationship name is: $p{rel_name}";
+ say "the relationship is a: $p{rel_type}";
+ say "the local class is: ", $p{local_source}->result_class;
+ say "the remote class is: ", $p{remote_source}->result_class;
+ say "the local table is: ", $p{local_table}->sql_name;
+ say "the rel columns in the local table are: ", (join ", ", @{$p{local_cols}});
+ say "the remote table is: ", $p{remote_table}->sql_name;
+ say "the rel columns in the remote table are: ", (join ", ", @{$p{remote_cols}});
+
+ if ($p{local_table} eq 'dogs' && @{$p{local_cols}} == 1 && $p{local_cols}[0] eq 'name') {
+ $p{attrs}{could_be_snoopy} = 1;
+
+            return $p{attrs};
+ }
+ },
+
+These are the default attributes:
+
+ has_many => {
+ cascade_delete => 0,
+ cascade_copy => 0,
+ },
+ might_have => {
+ cascade_delete => 0,
+ cascade_copy => 0,
+ },
+ belongs_to => {
+ on_delete => 'CASCADE',
+ on_update => 'CASCADE',
+ is_deferrable => 1,
+ },
+
+For L<belongs_to|DBIx::Class::Relationship/belongs_to> relationships, these
+defaults are overridden by the attributes introspected from the foreign key in
+the database, if this information is available (and the driver is capable of
+retrieving it.)
+
+This information overrides the defaults mentioned above, and is then itself
+overridden by the user's L</relationship_attrs> for C<belongs_to> if any are
+specified.
+
+In general, for most databases, for a plain foreign key with no rules, the
+values for a L<belongs_to|DBIx::Class::Relationship/belongs_to> relationship
+will be:
+
+ on_delete => 'NO ACTION',
+ on_update => 'NO ACTION',
+ is_deferrable => 0,
+
+In the cases where an attribute is not supported by the DB, a value matching
+the actual behavior is used, for example Oracle does not support C<ON UPDATE>
+rules, so C<on_update> is set to C<NO ACTION>. This is done so that the
+behavior of the schema is preserved when cross deploying to a different RDBMS
+such as SQLite for testing.
+
+In the cases where the DB does not support C<DEFERRABLE> foreign keys, the
+value is set to C<1> if L<DBIx::Class> has a working C<<
+$storage->with_deferred_fk_checks >>. This is done so that the same
+L<DBIx::Class> code can be used, and cross deployed from and to such databases.
=head2 debug
=head2 db_schema
Set the name of the schema to load (schema in the sense that your database
-vendor means it). Does not currently support loading more than one schema
-name.
+vendor means it).
+
+Can be set to an arrayref of schema names for multiple schemas, or the special
+value C<%> for all schemas.
+
+For MSSQL, Sybase ASE, and Informix can be set to a hashref of databases as
+keys and arrays of owners as values, set to the value:
+
+ { '%' => '%' }
+
+for all owners in all databases.
+
+Name clashes resulting from the same table name in different databases/schemas
+will be resolved automatically by prefixing the moniker with the database
+and/or schema.
+
+To prefix/suffix all monikers with the database and/or schema, see
+L</moniker_parts>.
+
+=head2 moniker_parts
+
+The database table names are represented by the
+L<DBIx::Class::Schema::Loader::Table> class in the loader, the
+L<DBIx::Class::Schema::Loader::Table::Sybase> class for Sybase ASE and
+L<DBIx::Class::Schema::Loader::Table::Informix> for Informix.
+
+Monikers are created normally based on just the
+L<name|DBIx::Class::Schema::Loader::DBObject/name> property, corresponding to
+the table name, but can consist of other parts of the fully qualified name of
+the table.
+
+The L</moniker_parts> option is an arrayref of methods on the table class
+corresponding to parts of the fully qualified table name, defaulting to
+C<['name']>, in the order those parts are used to create the moniker name.
+The parts are joined together using L</moniker_part_separator>.
+
+The C<'name'> entry B<must> be present.
+
+Below is a table of supported databases and possible L</moniker_parts>.
+
+=over 4
+
+=item * DB2, Firebird, mysql, Oracle, Pg, SQLAnywhere, SQLite, MS Access
+
+C<schema>, C<name>
+
+=item * Informix, MSSQL, Sybase ASE
+
+C<database>, C<schema>, C<name>
+
+=back
+
+=head2 moniker_part_separator
+
+String used to join L</moniker_parts> when creating the moniker.
+Defaults to the empty string. Use C<::> to get a separate namespace per
+database and/or schema.
=head2 constraint
-Only load tables matching regex. Best specified as a qr// regex.
+Only load matching tables.
+
+These can be specified either as a regex (preferably on the C<qr//>
+form), or as an arrayref of arrayrefs. Regexes are matched against
+the (unqualified) table name, while arrayrefs are matched according to
+L</moniker_parts>.
+
+For example:
+
+ db_schema => [qw(some_schema other_schema)],
+ moniker_parts => [qw(schema name)],
+ constraint => [
+ [ qr/\Asome_schema\z/ => qr/\A(?:foo|bar)\z/ ],
+ [ qr/\Aother_schema\z/ => qr/\Abaz\z/ ],
+ ],
+
+In this case only the tables C<foo> and C<bar> in C<some_schema> and
+C<baz> in C<other_schema> will be dumped.
=head2 exclude
-Exclude tables matching regex. Best specified as a qr// regex.
+Exclude matching tables.
+
+The tables to exclude are specified in the same way as for the
+L</constraint> option.
=head2 moniker_map
-Overrides the default table name to moniker translation. Can be either
-a hashref of table keys and moniker values, or a coderef for a translator
-function taking a single scalar table name argument and returning
-a scalar moniker. If the hash entry does not exist, or the function
-returns a false value, the code falls back to default behavior
-for that table name.
+Overrides the default table name to moniker translation. Either
+
+=over
+
+=item *
+
+a nested hashref, which will be traversed according to L</moniker_parts>
+
+For example:
+
+ moniker_parts => [qw(schema name)],
+ moniker_map => {
+ foo => {
+ bar => "FooishBar",
+ },
+ },
+
+In which case the table C<bar> in the C<foo> schema would get the moniker
+C<FooishBar>.
+
+=item *
+
+a hashref of unqualified table name keys and moniker values
+
+=item *
+
+a coderef that returns the moniker, which is called with the following
+arguments:
+
+=over
+
+=item *
+
+the L<DBIx::Class::Schema::Loader::Table> object for the table
+
+=item *
+
+the default moniker that DBIC would ordinarily give this table
+
+=item *
+
+a coderef that can be called with either of the hashref forms to get
+the moniker mapped accordingly. This is useful if you need to handle
+some monikers specially, but want to use the hashref form for the
+rest.
+
+=back
+
+=back
+
+If the hash entry does not exist, or the function returns a false
+value, the code falls back to default behavior for that table name.
The default behavior is to split on case transition and non-alphanumeric
boundaries, singularize the resulting phrase, then join the titlecased words
stations_visited | StationVisited
routeChange | RouteChange
+=head2 moniker_part_map
+
+Map for overriding the monikerization of individual L</moniker_parts>.
+The keys are the moniker part to override, the value is either a
+hashref or coderef for mapping the corresponding part of the
+moniker. If a coderef is used, it gets called with the moniker part
+and the hash key the code ref was found under.
+
+For example:
+
+ moniker_part_map => {
+ schema => sub { ... },
+ },
+
+Given the table C<foo.bar>, the code ref would be called with the
+arguments C<foo> and C<schema>, plus a coderef similar to the one
+described in L</moniker_map>.
+
+L</moniker_map> takes precedence over this.
+
=head2 col_accessor_map
-Same as moniker_map, but for column accessor names. If a coderef is
-passed, the code is called with arguments of
+Same as moniker_map, but for column accessor names. The nested
+hashref form is traversed according to L</moniker_parts>, with an
+extra level at the bottom for the column name. If a coderef is
+passed, the code is called with the following arguments:
+
+=over
+
+=item *
+
+the L<DBIx::Class::Schema::Loader::Column> object for the column
+
+=item *
+
+the default accessor name that DBICSL would ordinarily give this column
- the name of the column in the underlying database,
- default accessor name that DBICSL would ordinarily give this column,
- {
- table_class => name of the DBIC class we are building,
- table_moniker => calculated moniker for this table (after moniker_map if present),
- table_name => name of the database table,
- full_table_name => schema-qualified name of the database table (RDBMS specific),
- schema_class => name of the schema class we are building,
- column_info => hashref of column info (data_type, is_nullable, etc),
+=item *
+
+a hashref of this form:
+
+ {
+ table_class => name of the DBIC class we are building,
+ table_moniker => calculated moniker for this table (after moniker_map if present),
+ table => the DBIx::Class::Schema::Loader::Table object for the table,
+ full_table_name => schema-qualified name of the database table (RDBMS specific),
+ schema_class => name of the schema class we are building,
+ column_info => hashref of column info (data_type, is_nullable, etc),
}
+=item *
+
+a coderef that can be called with a hashref map
+
+=back
+
+=head2 rel_name_map
+
+Similar in idea to moniker_map, but different in the details. It can be
+a hashref or a code ref.
+
+If it is a hashref, keys can be either the default relationship name, or the
+moniker. The keys that are the default relationship name should map to the
+name you want to change the relationship to. Keys that are monikers should map
+to hashes mapping relationship names to their translation. You can do both at
+once, and the more specific moniker version will be picked up first. So, for
+instance, you could have
+
+ {
+ bar => "baz",
+ Foo => {
+ bar => "blat",
+ },
+ }
+
+and relationships that would have been named C<bar> will now be named C<baz>
+except that in the table whose moniker is C<Foo> it will be named C<blat>.
+
+If it is a coderef, it will be passed a hashref of this form:
+
+ {
+ name => default relationship name,
+ type => the relationship type eg: C<has_many>,
+ local_class => name of the DBIC class we are building,
+ local_moniker => moniker of the DBIC class we are building,
+ local_columns => columns in this table in the relationship,
+ remote_class => name of the DBIC class we are related to,
+ remote_moniker => moniker of the DBIC class we are related to,
+ remote_columns => columns in the other table in the relationship,
+ # for type => "many_to_many" only:
+ link_class => name of the DBIC class for the link table,
+ link_moniker => moniker of the DBIC class for the link table,
+ link_rel_name => name of the relationship to the link table,
+ }
+
+In addition it is passed a coderef that can be called with a hashref map.
+
+DBICSL will try to use the value returned as the relationship name.
+
=head2 inflect_plural
Just like L</moniker_map> above (can be hash/code-ref, falls back to default
Base class for your schema classes. Defaults to 'DBIx::Class::Schema'.
+=head2 schema_components
+
+List of components to load into the Schema class.
+
=head2 result_base_class
Base class for your table classes (aka result classes). Defaults to
=head2 components
-List of additional components to be loaded into all of your table
+List of additional components to be loaded into all of your Result
classes. A good example would be
L<InflateColumn::DateTime|DBIx::Class::InflateColumn::DateTime>
loads the given components into every Result class, this option allows you to
load certain components for specified Result classes. For example:
- result_components_map => {
- StationVisited => '+YourApp::Schema::Component::StationVisited',
- RouteChange => [
- '+YourApp::Schema::Component::RouteChange',
- 'InflateColumn::DateTime',
- ],
- }
-
+ result_components_map => {
+ StationVisited => '+YourApp::Schema::Component::StationVisited',
+ RouteChange => [
+ '+YourApp::Schema::Component::RouteChange',
+ 'InflateColumn::DateTime',
+ ],
+ }
+
You may use this in conjunction with L</components>.
=head2 result_roles
applies the given roles to every Result class, this option allows you to apply
certain roles for specified Result classes. For example:
- result_roles_map => {
- StationVisited => [
- 'YourApp::Role::Building',
- 'YourApp::Role::Destination',
- ],
- RouteChange => 'YourApp::Role::TripEvent',
- }
-
+ result_roles_map => {
+ StationVisited => [
+ 'YourApp::Role::Building',
+ 'YourApp::Role::Destination',
+ ],
+ RouteChange => 'YourApp::Role::TripEvent',
+ }
+
You may use this in conjunction with L</result_roles>.
=head2 use_namespaces
Again, you should be using version control on your schema classes. Be
careful with this option.
+=head2 omit_version
+
+Omit the package version from the signature comment.
+
+=head2 omit_timestamp
+
+Omit the creation timestamp from the signature comment.
+
=head2 custom_column_info
Hook for adding extra attributes to the
Must be a coderef that returns a hashref with the extra attributes.
-Receives the table name, column name and column_info.
+Receives the L<DBIx::Class::Schema::Loader::Table> object, column name
+and column_info.
For example:
- custom_column_info => sub {
- my ($table_name, $column_name, $column_info) = @_;
+ custom_column_info => sub {
+ my ($table, $column_name, $column_info) = @_;
- if ($column_name eq 'dog' && $column_info->{default_value} eq 'snoopy') {
- return { is_snoopy => 1 };
- }
- },
+ if ($column_name eq 'dog' && $column_info->{default_value} eq 'snoopy') {
+ return { is_snoopy => 1 };
+ }
+ },
This attribute can also be used to set C<inflate_datetime> on a non-datetime
column so it also receives the L</datetime_timezone> and/or L</datetime_locale>.
=head2 preserve_case
-Usually column names are lowercased, to make them easier to work with in
-L<DBIx::Class>. This option lets you turn this behavior off, if the driver
-supports it.
+Normally database names are lowercased and split by underscore, use this option
+if you have CamelCase database names.
Drivers for case sensitive databases like Sybase ASE or MSSQL with a
case-sensitive collation will turn this option on unconditionally.
-Currently the drivers for SQLite, mysql, MSSQL and Firebird/InterBase support
-setting this option.
+B<NOTE:> L</naming> = C<v8> is highly recommended with this option as the
+semantics of this mode are much improved for CamelCase database names.
+
+L</naming> = C<v7> or greater is required with this option.
=head2 qualify_objects
Set to true to prepend the L</db_schema> to table names for C<<
__PACKAGE__->table >> calls, and to some other things like Oracle sequences.
+This attribute is automatically set to true for multi db_schema configurations,
+unless explicitly set to false by the user.
+
=head2 use_moose
Creates Schema and Result classes that use L<Moose>, L<MooseX::NonMoose> and
-L<namespace::autoclean>. The default content after the md5 sum also makes the
-classes immutable.
+L<MooseX::MarkAsMethods> (or L<namespace::autoclean>, see below). The default
+content after the md5 sum also makes the classes immutable.
It is safe to upgrade your existing Schema to this option.
+=head2 use_moo
+
+Creates Schema and Result classes that use L<Moo> and
+L<namespace::autoclean>.
+
+It is safe to upgrade your existing Schema to this option.
+
+=head2 only_autoclean
+
+By default, we use L<MooseX::MarkAsMethods> to remove imported functions from
+your generated classes. It uses L<namespace::autoclean> to do this, after
+telling your object's metaclass that any operator L<overload>s in your class
+are methods, which will cause namespace::autoclean to spare them from removal.
+
+This prevents the "Hey, where'd my overloads go?!" effect.
+
+If you don't care about operator overloads, enabling this option falls back to
+just using L<namespace::autoclean> itself.
+
+If none of the above made any sense, or you don't have some pressing need to
+only use L<namespace::autoclean>, leaving this set to the default is
+recommended.
+
=head2 col_collision_map
This option controls how accessors for column names which collide with perl
on tables to primary keys, assuming there is only one largest unique
constraint.
+=head2 allow_extra_m2m_cols
+
+Generate C<many_to_many> relationship bridges even if the link table has
+extra columns other than the foreign keys. The primary key must still
+equal the union of the foreign keys.
+
+
+=head2 filter_generated_code
+
+An optional hook that lets you filter the generated text for various classes
+through a function that change it in any way that you want. The function will
+receive the type of file, C<schema> or C<result>, class and code; and returns
+the new code to use instead. For instance you could add custom comments, or do
+anything else that you want.
+
+The option can also be set to a string, which is then used as a filter program,
+e.g. C<perltidy>.
+
+If this exists but fails to return text matching C</\bpackage\b/>, no file will
+be generated.
+
+ filter_generated_code => sub {
+ my ($type, $class, $text) = @_;
+ ...
+ return $new_code;
+ }
+
+You can also use this option to set L<perltidy markers|perltidy/Skipping
+Selected Sections of Code> in your generated classes. This will leave
+the generated code in the default format, but will allow you to tidy
+your classes at any point in future, without worrying about changing the
+portions of the file which are checksummed, since C<perltidy> will just
+ignore all text between the markers.
+
+ filter_generated_code => sub {
+ return "#<<<\n$_[2]\n#>>>";
+ }
+
=head1 METHODS
None of these methods are intended for direct invocation by regular
=cut
-my $CURRENT_V = 'v7';
-
-my @CLASS_ARGS = qw(
- schema_base_class result_base_class additional_base_classes
- left_base_classes additional_classes components result_roles
-);
-
-# ensure that a peice of object data is a valid arrayref, creating
+# ensure that a piece of object data is a valid arrayref, creating
# an empty one or encapsulating whatever's there.
sub _ensure_arrayref {
my $self = shift;
}
}
- $self->result_components_map($self->{result_component_map})
- if defined $self->{result_component_map};
+ if (defined $self->{result_component_map}) {
+ if (defined $self->result_components_map) {
+ croak "Specify only one of result_components_map or result_component_map";
+ }
+ $self->result_components_map($self->{result_component_map})
+ }
+
+ if (defined $self->{result_role_map}) {
+ if (defined $self->result_roles_map) {
+ croak "Specify only one of result_roles_map or result_role_map";
+ }
+ $self->result_roles_map($self->{result_role_map})
+ }
- $self->result_roles_map($self->{result_role_map})
- if defined $self->{result_role_map};
+ croak "Specify only one of use_moose or use_moo"
+ if $self->use_moose and $self->use_moo;
- croak "the result_roles and result_roles_map options may only be used in conjunction with use_moose=1"
- if ((not defined $self->use_moose) || (not $self->use_moose))
+ croak "the result_roles and result_roles_map options may only be used in conjunction with use_moose=1 or use_moo=1"
+ if ((not $self->use_moose) && (not $self->use_moo))
&& ((defined $self->result_roles) || (defined $self->result_roles_map));
- $self->_ensure_arrayref(qw/additional_classes
+ $self->_ensure_arrayref(qw/schema_components
+ additional_classes
additional_base_classes
left_base_classes
components
}
$self->_validate_result_roles_map;
- if ($self->use_moose) {
- if (not DBIx::Class::Schema::Loader::Optional::Dependencies->req_ok_for('use_moose')) {
- die sprintf "You must install the following CPAN modules to enable the use_moose option: %s.\n",
- DBIx::Class::Schema::Loader::Optional::Dependencies->req_missing_for('use_moose');
+ for my $use_oo (qw(use_moose use_moo)) {
+ if ($self->$use_oo) {
+ if (not DBIx::Class::Schema::Loader::Optional::Dependencies->req_ok_for($use_oo)) {
+ die sprintf "You must install the following CPAN modules to enable the $use_oo option: %s.\n",
+ DBIx::Class::Schema::Loader::Optional::Dependencies->req_missing_for($use_oo);
+ }
}
}
+ $self->{_tables} = {};
$self->{monikers} = {};
- $self->{tables} = {};
+ $self->{moniker_to_table} = {};
$self->{class_to_table} = {};
$self->{classes} = {};
$self->{_upgrading_classes} = {};
+ $self->{generated_classes} = [];
$self->{schema_class} ||= ( ref $self->{schema} || $self->{schema} );
$self->{schema} ||= $self->{schema_class};
+ $self->{table_comments_table} ||= 'table_comments';
+ $self->{column_comments_table} ||= 'column_comments';
croak "dump_overwrite is deprecated. Please read the"
. " DBIx::Class::Schema::Loader::Base documentation"
if $self->{dump_overwrite};
$self->{dynamic} = ! $self->{dump_directory};
+
+ croak "dry_run can only be used with static schema generation"
+ if $self->dynamic and $self->dry_run;
+
$self->{temp_directory} ||= File::Temp::tempdir( 'dbicXXXX',
TMPDIR => 1,
CLEANUP => 1,
column_accessors => $naming_ver,
};
}
+ elsif (ref $self->naming eq 'HASH' && exists $self->naming->{ALL}) {
+ my $val = delete $self->naming->{ALL};
+
+ $self->naming->{$_} = $val
+ foreach qw/relationships monikers column_accessors/;
+ }
if ($self->naming) {
- for (values %{ $self->naming }) {
- $_ = $CURRENT_V if $_ eq 'current';
+ foreach my $key (qw/relationships monikers column_accessors/) {
+ $self->naming->{$key} = $CURRENT_V if ($self->naming->{$key}||'') eq 'current';
}
}
$self->{naming} ||= {};
}
}
- $self;
+ if (my $rel_collision_map = $self->rel_collision_map) {
+ if (my $reftype = ref $rel_collision_map) {
+ if ($reftype ne 'HASH') {
+ croak "Invalid type $reftype for option 'rel_collision_map'";
+ }
+ }
+ else {
+ $self->rel_collision_map({ '(.*)' => $rel_collision_map });
+ }
+ }
+
+ if (defined(my $rel_name_map = $self->rel_name_map)) {
+ my $reftype = ref $rel_name_map;
+ if ($reftype ne 'HASH' && $reftype ne 'CODE') {
+ croak "Invalid type $reftype for option 'rel_name_map', must be HASH or CODE";
+ }
+ }
+
+ if (defined(my $filter = $self->filter_generated_code)) {
+ my $reftype = ref $filter;
+ if ($reftype && $reftype ne 'CODE') {
+ croak "Invalid type $reftype for option 'filter_generated_code, must be a scalar or a CODE reference";
+ }
+ }
+
+ if (defined $self->db_schema) {
+ if (ref $self->db_schema eq 'ARRAY') {
+ if (@{ $self->db_schema } > 1 && not defined $self->{qualify_objects}) {
+ $self->{qualify_objects} = 1;
+ }
+ elsif (@{ $self->db_schema } == 0) {
+ $self->{db_schema} = undef;
+ }
+ }
+ elsif (not ref $self->db_schema) {
+ if ($self->db_schema eq '%' && not defined $self->{qualify_objects}) {
+ $self->{qualify_objects} = 1;
+ }
+
+ $self->{db_schema} = [ $self->db_schema ];
+ }
+ }
+
+ if (not $self->moniker_parts) {
+ $self->moniker_parts(['name']);
+ }
+ else {
+ if (not ref $self->moniker_parts) {
+ $self->moniker_parts([ $self->moniker_parts ]);
+ }
+ if (ref $self->moniker_parts ne 'ARRAY') {
+ croak 'moniker_parts must be an arrayref';
+ }
+ if (none { $_ eq 'name' } @{ $self->moniker_parts }) {
+ croak "moniker_parts option *must* contain 'name'";
+ }
+ }
+
+ if (not defined $self->moniker_part_separator) {
+ $self->moniker_part_separator('');
+ }
+ if (not defined $self->moniker_part_map) {
+ $self->moniker_part_map({});
+ }
+
+ return $self;
}
sub _check_back_compat {
return unless -e $filename;
my ($old_gen, $old_md5, $old_ver, $old_ts, $old_custom) =
- $self->_parse_generated_file($filename);
+ $self->_parse_generated_file($filename);
return unless $old_ver;
- # determine if the existing schema was dumped with use_moose => 1
- if (! defined $self->use_moose) {
- $self->{use_moose} = 1 if $old_gen =~ /^ (?!\s*\#) use \s+ Moose/xm;
+ # determine if the existing schema was dumped with use_moo(se) => 1
+ for my $oo (qw(Moose Moo)) {
+ my $use_oo = "use_".lc($oo);
+ if (! defined $self->$use_oo) {
+ $self->{$use_oo} = 1 if $old_gen =~ /^ (?!\s*\#) use \s+ \Q$oo\E\b/xm;
+ }
}
my $load_classes = ($old_gen =~ /^__PACKAGE__->load_classes;/m) ? 1 : 0;
foreach my $c (@classes) {
# components default to being under the DBIx::Class namespace unless they
- # are preceeded with a '+'
+ # are preceded with a '+'
if ( $key =~ m/component/ && $c !~ s/^\+// ) {
$c = 'DBIx::Class::' . $c;
}
foreach my $prefix (@INC) {
my $fullpath = File::Spec->catfile($prefix, $file);
+ # abs_path pure-perl fallback warns for non-existent files
+ local $SIG{__WARN__} = sigwarn_silencer(qr/^stat\(.*\Q$file\E\)/);
return $fullpath if -f $fullpath
- # abs_path throws on Windows for nonexistant files
+ # abs_path throws on Windows for nonexistent files
and (try { Cwd::abs_path($fullpath) }) ne
((try { Cwd::abs_path(File::Spec->catfile($self->dump_directory, $file)) }) || '');
}
warn qq/# Loaded external class definition for '$class'\n/
if $self->debug;
- my $code = $self->_rewrite_old_classnames(scalar read_file($real_inc_path, binmode => ':encoding(UTF-8)'));
+ my $code = $self->_rewrite_old_classnames(slurp_file $real_inc_path);
if ($self->dynamic) { # load the class too
eval_package_without_redefine_warnings($class, $code);
}
$self->_ext_stmt($class,
- qq|# These lines were loaded from '$real_inc_path' found in \@INC.\n|
- .qq|# They are now part of the custom portion of this file\n|
- .qq|# for you to hand-edit. If you do not either delete\n|
- .qq|# this section or remove that file from \@INC, this section\n|
- .qq|# will be repeated redundantly when you re-create this\n|
- .qq|# file again via Loader! See skip_load_external to disable\n|
- .qq|# this feature.\n|
+ qq|# These lines were loaded from '$real_inc_path' found in \@INC.\n|
+ .qq|# They are now part of the custom portion of this file\n|
+ .qq|# for you to hand-edit. If you do not either delete\n|
+ .qq|# this section or remove that file from \@INC, this section\n|
+ .qq|# will be repeated redundantly when you re-create this\n|
+ .qq|# file again via Loader! See skip_load_external to disable\n|
+ .qq|# this feature.\n|
);
chomp $code;
$self->_ext_stmt($class, $code);
$self->_ext_stmt($class,
- qq|# End of lines loaded from '$real_inc_path' |
+ qq|# End of lines loaded from '$real_inc_path'|
);
}
if ($old_real_inc_path) {
- my $code = read_file($old_real_inc_path, binmode => ':encoding(UTF-8)');
+ my $code = slurp_file $old_real_inc_path;
$self->_ext_stmt($class, <<"EOF");
chomp $code;
$self->_ext_stmt($class, $code);
$self->_ext_stmt($class,
- qq|# End of lines loaded from '$old_real_inc_path' |
+ qq|# End of lines loaded from '$old_real_inc_path'|
);
}
}
sub load {
my $self = shift;
- $self->_load_tables(
- $self->_tables_list({ constraint => $self->constraint, exclude => $self->exclude })
- );
+ $self->_load_tables($self->_tables_list);
}
=head2 rescan
$self->_relbuilder->{schema} = $schema;
my @created;
- my @current = $self->_tables_list({ constraint => $self->constraint, exclude => $self->exclude });
+ my @current = $self->_tables_list;
foreach my $table (@current) {
- if(!exists $self->{_tables}->{$table}) {
+ if(!exists $self->_tables->{$table->sql_name}) {
push(@created, $table);
}
}
my %current;
- @current{@current} = ();
- foreach my $table (keys %{ $self->{_tables} }) {
- if (not exists $current{$table}) {
- $self->_unregister_source_for_table($table);
+ @current{map $_->sql_name, @current} = ();
+ foreach my $table (values %{ $self->_tables }) {
+ if (not exists $current{$table->sql_name}) {
+ $self->_remove_table($table);
}
}
my $loaded = $self->_load_tables(@current);
- return map { $self->monikers->{$_} } @created;
+ foreach my $table (@created) {
+ $self->monikers->{$table->sql_name} = $self->_table2moniker($table);
+ }
+
+ return map { $self->monikers->{$_->sql_name} } @created;
}
sub _relbuilder {
return if $self->{skip_relationships};
return $self->{relbuilder} ||= do {
-
- no warnings 'uninitialized';
my $relbuilder_suff =
{qw{
v4 ::Compat::v0_040
v5 ::Compat::v0_05
v6 ::Compat::v0_06
+ v7 ::Compat::v0_07
}}
- ->{ $self->naming->{relationships}};
+ ->{$self->naming->{relationships}||$CURRENT_V} || '';
my $relbuilder_class = 'DBIx::Class::Schema::Loader::RelBuilder'.$relbuilder_suff;
$self->ensure_class_loaded($relbuilder_class);
- $relbuilder_class->new( $self );
-
+ $relbuilder_class->new($self);
};
}
sub _load_tables {
my ($self, @tables) = @_;
- # Save the new tables to the tables list
+ # Save the new tables to the tables list and compute monikers
foreach (@tables) {
- $self->{_tables}->{$_} = 1;
+ $self->_tables->{$_->sql_name} = $_;
+ $self->monikers->{$_->sql_name} = $self->_table2moniker($_);
}
- $self->_make_src_class($_) for @tables;
-
- # sanity-check for moniker clashes
+ # check for moniker clashes
my $inverse_moniker_idx;
- for (keys %{$self->monikers}) {
- push @{$inverse_moniker_idx->{$self->monikers->{$_}}}, $_;
+ foreach my $imtable (values %{ $self->_tables }) {
+ push @{ $inverse_moniker_idx->{$self->monikers->{$imtable->sql_name}} }, $imtable;
}
my @clashes;
- for (keys %$inverse_moniker_idx) {
- my $tables = $inverse_moniker_idx->{$_};
- if (@$tables > 1) {
- push @clashes, sprintf ("tables %s reduced to the same source moniker '%s'",
- join (', ', map { "'$_'" } @$tables),
- $_,
- );
- }
+ foreach my $moniker (keys %$inverse_moniker_idx) {
+ my $imtables = $inverse_moniker_idx->{$moniker};
+ if (@$imtables > 1) {
+ my $different_databases =
+ $imtables->[0]->can('database') && (uniq map $_->database||'', @$imtables) > 1;
+
+ my $different_schemas =
+ (uniq map $_->schema||'', @$imtables) > 1;
+
+ if ($different_databases || $different_schemas) {
+ my ($use_schema, $use_database) = (1, 0);
+
+ if ($different_databases) {
+ $use_database = 1;
+
+ # If any monikers are in the same database, we have to distinguish by
+ # both schema and database.
+ my %db_counts;
+ $db_counts{$_}++ for map $_->database, @$imtables;
+ $use_schema = any { $_ > 1 } values %db_counts;
+ }
+
+ foreach my $tbl (@$imtables) { delete $self->monikers->{$tbl->sql_name}; }
+
+ my $moniker_parts = [ @{ $self->moniker_parts } ];
+
+ my $have_schema = any { $_ eq 'schema' } @{ $self->moniker_parts };
+ my $have_database = any { $_ eq 'database' } @{ $self->moniker_parts };
+
+ unshift @$moniker_parts, 'schema' if $use_schema && !$have_schema;
+ unshift @$moniker_parts, 'database' if $use_database && !$have_database;
+
+ local $self->{moniker_parts} = $moniker_parts;
+
+ my %new_monikers;
+
+ foreach my $tbl (@$imtables) { $new_monikers{$tbl->sql_name} = $self->_table2moniker($tbl); }
+ foreach my $name (map $_->sql_name, @$imtables) { $self->monikers->{$name} = $new_monikers{$name}; }
+
+ # check if there are still clashes
+ my %by_moniker;
+
+ while (my ($t, $m) = each %new_monikers) {
+ push @{ $by_moniker{$m} }, $t;
+ }
+
+ foreach my $m (grep @{ $by_moniker{$_} } > 1, keys %by_moniker) {
+ push @clashes, sprintf ("tried disambiguating by moniker_parts, but tables %s still reduced to the same source moniker '%s'",
+ join (', ', @{ $by_moniker{$m} }),
+ $m,
+ );
+ }
+ }
+ else {
+ push @clashes, sprintf ("tables %s reduced to the same source moniker '%s'",
+ join (', ', map $_->sql_name, @$imtables),
+ $moniker,
+ );
+ }
+ }
}
if (@clashes) {
- die 'Unable to load schema - chosen moniker/class naming style results in moniker clashes. '
- . 'Either change the naming style, or supply an explicit moniker_map: '
- . join ('; ', @clashes)
- . "\n"
- ;
+ die 'Unable to load schema - chosen moniker/class naming style results in moniker clashes. '
+ . 'Change the naming style, or supply an explicit moniker_map: '
+ . join ('; ', @clashes)
+ . "\n"
+ ;
}
-
- $self->_setup_src_meta($_) for @tables;
+ foreach my $tbl (@tables) { $self->_make_src_class($tbl); }
+ foreach my $tbl (@tables) { $self->_setup_src_meta($tbl); }
if(!$self->skip_relationships) {
# The relationship loader needs a working schema
- $self->{quiet} = 1;
+ local $self->{quiet} = 1;
local $self->{dump_directory} = $self->{temp_directory};
+ local $self->{generated_classes} = [];
+ local $self->{dry_run} = 0;
$self->_reload_classes(\@tables);
$self->_load_relationships(\@tables);
- $self->{quiet} = 0;
# Remove that temp dir from INC so it doesn't get reloaded
@INC = grep $_ ne $self->dump_directory, @INC;
}
- $self->_load_roles($_) for @tables;
-
- $self->_load_external($_)
- for map { $self->classes->{$_} } @tables;
+ foreach my $tbl (@tables) { $self->_load_roles($tbl); }
+ foreach my $tbl (map { $self->classes->{$_->sql_name} } @tables) { $self->_load_external($tbl); }
# Reload without unloading first to preserve any symbols from external
# packages.
# so that we don't repeat custom sections
@INC = grep $_ ne $self->dump_directory, @INC;
- $self->_dump_to_dir(map { $self->classes->{$_} } @tables);
+ $self->_dump_to_dir(map { $self->classes->{$_->sql_name} } @tables);
unshift @INC, $self->dump_directory;
-
+
+ return if $self->dry_run;
+
my @to_register;
my %have_source = map { $_ => $self->schema->source($_) }
$self->schema->sources;
for my $table (@tables) {
- my $moniker = $self->monikers->{$table};
- my $class = $self->classes->{$table};
-
+ my $moniker = $self->monikers->{$table->sql_name};
+ my $class = $self->classes->{$table->sql_name};
+
{
no warnings 'redefine';
local *Class::C3::reinitialize = sub {}; # to speed things up, reinitialized below
}
sub _moose_metaclass {
- return undef unless $INC{'Class/MOP.pm'}; # if CMOP is not loaded the class could not have loaded in the 1st place
+ return undef unless $INC{'Class/MOP.pm'}; # if CMOP is not loaded the class could not have loaded in the 1st place
- my $class = $_[1];
+ my $class = $_[1];
- my $mc = try { Class::MOP::class_of($class) }
- or return undef;
+ my $mc = try { Class::MOP::class_of($class) }
+ or return undef;
- return $mc->isa('Moose::Meta::Class') ? $mc : undef;
+ return $mc->isa('Moose::Meta::Class') ? $mc : undef;
}
# We use this instead of ensure_class_loaded when there are package symbols we
eval_package_without_redefine_warnings ($class, "require $class");
}
catch {
- my $source = read_file($self->_get_dump_filename($class), binmode => ':encoding(UTF-8)');
+ my $source = slurp_file $self->_get_dump_filename($class);
die "Failed to reload class $class: $_.\n\nCLASS SOURCE:\n\n$source";
};
}
sub _ensure_dump_subdirs {
my ($self, $class) = (@_);
+ return if $self->dry_run;
+
my @name_parts = split(/::/, $class);
pop @name_parts; # we don't care about the very last element,
# which is a filename
my $target_dir = $self->dump_directory;
warn "Dumping manual schema for $schema_class to directory $target_dir ...\n"
- unless $self->{dynamic} or $self->{quiet};
+ unless $self->dynamic or $self->quiet;
my $schema_text =
- qq|package $schema_class;\n\n|
+ qq|use utf8;\n|
+ . qq|package $schema_class;\n\n|
. qq|# Created by DBIx::Class::Schema::Loader\n|
. qq|# DO NOT MODIFY THE FIRST PART OF THIS FILE\n\n|;
+ my $autoclean
+ = $self->only_autoclean
+ ? 'namespace::autoclean'
+ : 'MooseX::MarkAsMethods autoclean => 1'
+ ;
+
if ($self->use_moose) {
- $schema_text.= qq|use Moose;\nuse namespace::autoclean;\nextends '$schema_base_class';\n\n|;
+ $schema_text.= qq|use Moose;\nuse $autoclean;\nextends '$schema_base_class';\n\n|;
+ }
+ elsif ($self->use_moo) {
+ $schema_text .= qq|use Moo;\nuse namespace::autoclean;\nextends '$schema_base_class';\n\n|;
}
else {
$schema_text .= qq|use strict;\nuse warnings;\n\nuse base '$schema_base_class';\n\n|;
}
+ my @schema_components = @{ $self->schema_components || [] };
+
+ if (@schema_components) {
+ my $schema_components = dump @schema_components;
+ $schema_components = "($schema_components)" if @schema_components == 1;
+
+ $schema_text .= "__PACKAGE__->load_components${schema_components};\n\n";
+ }
+
if ($self->use_namespaces) {
$schema_text .= qq|__PACKAGE__->load_namespaces|;
my $namespace_options;
my @attr = qw/resultset_namespace default_resultset_class/;
- unshift @attr, 'result_namespace' unless (not $self->result_namespace) || $self->result_namespace eq 'Result';
+ unshift @attr, 'result_namespace'
+ if $self->result_namespace && $self->result_namespace ne 'Result';
for my $attr (@attr) {
if ($self->$attr) {
my $result_base_class = $self->result_base_class || 'DBIx::Class::Core';
foreach my $src_class (@classes) {
- my $src_text =
- qq|package $src_class;\n\n|
+ my $src_text =
+ qq|use utf8;\n|
+ . qq|package $src_class;\n\n|
. qq|# Created by DBIx::Class::Schema::Loader\n|
. qq|# DO NOT MODIFY THE FIRST PART OF THIS FILE\n\n|;
$src_text .= $self->_base_class_pod($result_base_class)
unless $result_base_class eq 'DBIx::Class::Core';
- if ($self->use_moose) {
- $src_text.= qq|use Moose;\nuse MooseX::NonMoose;\nuse namespace::autoclean;|;
+ if ($self->use_moose || $self->use_moo) {
+ $src_text.= $self->use_moose
+ ? qq|use Moose;\nuse MooseX::NonMoose;\nuse $autoclean;|
+ : qq|use Moo;\nuse namespace::autoclean;|
+ ;
# these options 'use base' which is compile time
if (@{ $self->left_base_classes } || @{ $self->additional_base_classes }) {
}
}
else {
- $src_text .= qq|use base '$result_base_class';\n|;
+ $src_text .= qq|use base '$result_base_class';\n|;
}
$self->_write_classfile($src_class, $src_text);
}
}
- warn "Schema dump completed.\n" unless $self->{dynamic} or $self->{quiet};
-
+ warn "Schema dump completed.\n" unless $self->dynamic or $self->quiet;
}
sub _sig_comment {
my ($self, $version, $ts) = @_;
return qq|\n\n# Created by DBIx::Class::Schema::Loader|
- . qq| v| . $version
- . q| @ | . $ts
+ . (defined($version) ? q| v| . $version : '')
+ . (defined($ts) ? q| @ | . $ts : '')
. qq|\n# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:|;
}
my $filename = $self->_get_dump_filename($class);
$self->_ensure_dump_subdirs($class);
- if (-f $filename && $self->really_erase_my_files) {
+ if (-f $filename && $self->really_erase_my_files && !$self->dry_run) {
warn "Deleting existing file '$filename' due to "
- . "'really_erase_my_files' setting\n" unless $self->{quiet};
+ . "'really_erase_my_files' setting\n" unless $self->quiet;
unlink($filename);
}
my $custom_content = $old_custom || '';
- # prepend extra custom content from a *renamed* class (singularization effect)
+ # Use custom content from a renamed class, the class names in it are
+ # rewritten below.
if (my $renamed_class = $self->_upgrading_classes->{$class}) {
my $old_filename = $self->_get_dump_filename($renamed_class);
if (-f $old_filename) {
- my $extra_custom = ($self->_parse_generated_file ($old_filename))[4];
-
- $extra_custom =~ s/\n\n# You can replace.*\n1;\n//;
-
- $custom_content = join ("\n", '', $extra_custom, $custom_content)
- if $extra_custom;
+ $custom_content = ($self->_parse_generated_file ($old_filename))[4];
- unlink $old_filename;
+ unlink $old_filename unless $self->dry_run;
}
}
$text .= qq|$_\n|
for @{$self->{_dump_storage}->{$class} || []};
- # Check and see if the dump is infact differnt
+ if ($self->filter_generated_code) {
+ my $filter = $self->filter_generated_code;
+
+ if (ref $filter eq 'CODE') {
+ $text = $filter->(
+ ($is_schema ? 'schema' : 'result'),
+ $class,
+ $text
+ );
+ }
+ else {
+ my ($fh, $temp_file) = tempfile();
+
+ binmode $fh, ':encoding(UTF-8)';
+ print $fh $text;
+ close $fh;
+
+ open my $out, qq{$filter < "$temp_file"|}
+ or croak "Could not open pipe to $filter: $!";
+
+ $text = decode('UTF-8', do { local $/; <$out> });
+
+ $text =~ s/$CR?$LF/\n/g;
+
+ close $out;
+
+ my $exit_code = $? >> 8;
+
+ unlink $temp_file
+ or croak "Could not remove temporary file '$temp_file': $!";
+
+ if ($exit_code != 0) {
+ croak "filter '$filter' exited non-zero: $exit_code";
+ }
+ }
+ if (not $text or not $text =~ /\bpackage\b/) {
+ warn("$class skipped due to filter") if $self->debug;
+ return;
+ }
+ }
+
+ # Check and see if the dump is in fact different
my $compare_to;
if ($old_md5) {
- $compare_to = $text . $self->_sig_comment($old_ver, $old_ts);
- if (Digest::MD5::md5_base64(encode 'UTF-8', $compare_to) eq $old_md5) {
- return unless $self->_upgrading_from && $is_schema;
- }
+ $compare_to = $text . $self->_sig_comment($old_ver, $old_ts);
+ if (Digest::MD5::md5_base64(encode 'UTF-8', $compare_to) eq $old_md5) {
+ return unless $self->_upgrading_from && $is_schema;
+ }
}
+ push @{$self->generated_classes}, $class;
+
+ return if $self->dry_run;
+
$text .= $self->_sig_comment(
- $self->version_to_dump,
- POSIX::strftime('%Y-%m-%d %H:%M:%S', localtime)
+ $self->omit_version ? undef : $self->version_to_dump,
+ $self->omit_timestamp ? undef : POSIX::strftime('%Y-%m-%d %H:%M:%S', localtime)
);
- open(my $fh, '>:encoding(UTF-8)', $filename)
+ open(my $fh, '>:raw:encoding(UTF-8)', $filename)
or croak "Cannot open '$filename' for writing: $!";
# Write the top half and its MD5 sum
if (not $is_schema) {
return qq|\n__PACKAGE__->meta->make_immutable;|;
}
-
+
return qq|\n__PACKAGE__->meta->make_immutable(inline_constructor => 0);|;
}
or croak "Cannot open '$fn' for reading: $!";
my $mark_re =
- qr{^(# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:)([A-Za-z0-9/+]{22})\n};
+ qr{^(# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:)([A-Za-z0-9/+]{22})\r?\n};
- my ($md5, $ts, $ver, $gen);
+ my ($real_md5, $ts, $ver, $gen);
+ local $_;
while(<$fh>) {
if(/$mark_re/) {
my $pre_md5 = $1;
- $md5 = $2;
+ my $mark_md5 = $2;
# Pull out the version and timestamp from the line above
- ($ver, $ts) = $gen =~ m/^# Created by DBIx::Class::Schema::Loader v(.*?) @ (.*?)\Z/m;
+ ($ver, $ts) = $gen =~ m/^# Created by DBIx::Class::Schema::Loader( v[\d.]+)?( @ [\d-]+ [\d:]+)?\r?\Z/m;
+ $ver =~ s/^ v// if $ver;
+ $ts =~ s/^ @ // if $ts;
$gen .= $pre_md5;
- croak "Checksum mismatch in '$fn', the auto-generated part of the file has been modified outside of this loader. Aborting.\nIf you want to overwrite these modifications, set the 'overwrite_modifications' loader option.\n"
- if !$self->overwrite_modifications && Digest::MD5::md5_base64(encode 'UTF-8', $gen) ne $md5;
-
+ $real_md5 = Digest::MD5::md5_base64(encode 'UTF-8', $gen);
+ if ($real_md5 ne $mark_md5) {
+ if ($self->overwrite_modifications) {
+ # Setting this to something that is not a valid MD5 forces
+ # the file to be rewritten.
+ $real_md5 = 'not an MD5';
+ }
+ else {
+ croak "Checksum mismatch in '$fn', the auto-generated part of the file has been modified outside of this loader. Aborting.\nIf you want to overwrite these modifications, set the 'overwrite_modifications' loader option.\n";
+ }
+ }
last;
}
else {
}
my $custom = do { local $/; <$fh> }
- if $md5;
+ if $real_md5;
- close ($fh);
+ $custom ||= '';
+ $custom =~ s/$CRLF|$LF/\n/g;
- return ($gen, $md5, $ver, $ts, $custom);
+ close $fh;
+
+ return ($gen, $real_md5, $ver, $ts, $custom);
}
sub _use {
my $schema = $self->schema;
my $schema_class = $self->schema_class;
- my $table_moniker = $self->_table2moniker($table);
+ my $table_moniker = $self->monikers->{$table->sql_name};
my @result_namespace = ($schema_class);
if ($self->use_namespaces) {
my $result_namespace = $self->result_namespace || 'Result';
);
}
- my $old_class = join(q{::}, @result_namespace,
- $self->_table2moniker($table));
+ my $old_table_moniker = do {
+ local $self->naming->{monikers} = $upgrading_v;
+ $self->_table2moniker($table);
+ };
+
+ my $old_class = join(q{::}, @result_namespace, $old_table_moniker);
$self->_upgrading_classes->{$table_class} = $old_class
unless $table_class eq $old_class;
}
- $self->classes->{$table} = $table_class;
- $self->monikers->{$table} = $table_moniker;
- $self->tables->{$table_moniker} = $table;
+ $self->classes->{$table->sql_name} = $table_class;
+ $self->moniker_to_table->{$table_moniker} = $table;
$self->class_to_table->{$table_class} = $table;
$self->_pod_class_list($table_class, 'ADDITIONAL CLASSES USED', @{$self->additional_classes});
}
sub _is_result_class_method {
- my ($self, $name, $table_name) = @_;
+ my ($self, $name, $table) = @_;
- my $table_moniker = $table_name ? $self->monikers->{$table_name} : '';
+ my $table_moniker = $table ? $self->monikers->{$table->sql_name} : '';
$self->_result_class_methods({})
if not defined $self->_result_class_methods;
push @roles, @{ $self->result_roles_map->{$table_moniker} }
if exists $self->result_roles_map->{$table_moniker};
- for my $class ($base, @components,
- ($self->use_moose ? 'Moose::Object' : ()), @roles) {
+ for my $class (
+ $base, @components, @roles,
+ ($self->use_moose ? 'Moose::Object' : ()),
+ ($self->use_moo ? 'Moo::Object' : ()),
+ ) {
$self->ensure_class_loaded($class);
push @methods, @{ Class::Inspector->methods($class) || [] };
sub _resolve_col_accessor_collisions {
my ($self, $table, $col_info) = @_;
- my $table_name = ref $table ? $$table : $table;
-
while (my ($col, $info) = each %$col_info) {
my $accessor = $info->{accessor} || $col;
next if $accessor eq 'id'; # special case (very common column)
- if ($self->_is_result_class_method($accessor, $table_name)) {
+ if ($self->_is_result_class_method($accessor, $table)) {
my $mapped = 0;
if (my $map = $self->col_collision_map) {
if (not $mapped) {
warn <<"EOF";
-Column '$col' in table '$table_name' collides with an inherited method.
+Column '$col' in table '$table' collides with an inherited method.
See "COLUMN ACCESSOR COLLISIONS" in perldoc DBIx::Class::Schema::Loader::Base .
EOF
$info->{accessor} = undef;
}
}
-# use the same logic to run moniker_map, col_accessor_map, and
-# relationship_name_map
+# use the same logic to run moniker_map, col_accessor_map
sub _run_user_map {
my ( $self, $map, $default_code, $ident, @extra ) = @_;
my $default_ident = $default_code->( $ident, @extra );
my $new_ident;
if( $map && ref $map eq 'HASH' ) {
- $new_ident = $map->{ $ident };
+ if (my @parts = try { @{ $ident } }) {
+ my $part_map = $map;
+ while (@parts) {
+ my $part = shift @parts;
+ last unless exists $part_map->{ $part };
+ if ( !ref $part_map->{ $part } && !@parts ) {
+ $new_ident = $part_map->{ $part };
+ last;
+ }
+ elsif ( ref $part_map->{ $part } eq 'HASH' ) {
+ $part_map = $part_map->{ $part };
+ }
+ }
+ }
+ if( !$new_ident && !ref $map->{ $ident } ) {
+ $new_ident = $map->{ $ident };
+ }
}
elsif( $map && ref $map eq 'CODE' ) {
- $new_ident = $map->( $ident, $default_ident, @extra );
+ my $cb = sub {
+ my ($cb_map) = @_;
+ croak "reentered map must be a hashref"
+ unless 'HASH' eq ref($cb_map);
+ return $self->_run_user_map($cb_map, $default_code, $ident, @extra);
+ };
+ $new_ident = $map->( $ident, $default_ident, @extra, $cb );
}
$new_ident ||= $default_ident;
sub _default_column_accessor_name {
my ( $self, $column_name ) = @_;
- my $accessor_name = $column_name;
- $accessor_name =~ s/\W+/_/g;
+ my $preserve = ($self->naming->{column_accessors}||'') eq 'preserve';
+
+ my $v = $self->_get_naming_v('column_accessors');
- if ((($self->naming->{column_accessors}||'') =~ /(\d+)/ && $1 < 7) || (not $self->preserve_case)) {
+ my $accessor_name = $preserve ?
+ $self->_to_identifier('column_accessors', $column_name) # assume CamelCase
+ :
+ $self->_to_identifier('column_accessors', $column_name, '_');
+
+ $accessor_name =~ s/\W+/_/g; # only if naming < v8, otherwise to_identifier
+ # takes care of it
+
+ if ($preserve) {
+ return $accessor_name;
+ }
+ elsif ($v < 7 || (not $self->preserve_case)) {
# older naming just lc'd the col accessor and that's all.
return lc $accessor_name;
}
- elsif (($self->naming->{column_accessors}||'') eq 'preserve') {
- return $accessor_name;
- }
- return join '_', map lc, split_name $column_name;
+ return join '_', map lc, split_name $column_name, $v;
}
sub _make_column_accessor_name {
sub { $self->_default_column_accessor_name( shift ) },
$column_name,
$column_context_info,
- );
+ );
return $accessor;
}
-sub _quote {
- my ($self, $identifier) = @_;
-
- my $qt = $self->schema->storage->sql_maker->quote_char || '';
-
- if (ref $qt) {
- return $qt->[0] . $identifier . $qt->[1];
- }
-
- return "${qt}${identifier}${qt}";
+sub _table_is_view {
+ #my ($self, $table) = @_;
+ return 0;
}
+sub _view_definition { undef }
+
# Set up metadata (cols, pks, etc)
sub _setup_src_meta {
my ($self, $table) = @_;
my $schema = $self->schema;
my $schema_class = $self->schema_class;
- my $table_class = $self->classes->{$table};
- my $table_moniker = $self->monikers->{$table};
+ my $table_class = $self->classes->{$table->sql_name};
+ my $table_moniker = $self->monikers->{$table->sql_name};
- my $table_name = $table;
+ # Must come before ->table
+ $self->_dbic_stmt($table_class, 'table_class', 'DBIx::Class::ResultSource::View')
+ if my $is_view = $self->_table_is_view($table);
- my $sql_maker = $self->schema->storage->sql_maker;
- my $name_sep = $sql_maker->name_sep;
+ $self->_dbic_stmt($table_class, 'table', $table->dbic_name);
- if ($name_sep && $table_name =~ /\Q$name_sep\E/) {
- $table_name = \ $self->_quote($table_name);
+ # Must come after ->table
+ if ($is_view and my $view_def = $self->_view_definition($table)) {
+ $self->_dbic_stmt($table_class, 'result_source_instance->view_definition', $view_def);
}
- my $full_table_name = ($self->qualify_objects ?
- ($self->_quote($self->db_schema) . '.') : '')
- . (ref $table_name ? $$table_name : $table_name);
-
- # be careful to not create refs Data::Dump can "optimize"
- $full_table_name = \do {"".$full_table_name} if ref $table_name;
-
- $self->_dbic_stmt($table_class, 'table', $full_table_name);
-
my $cols = $self->_table_columns($table);
my $col_info = $self->__columns_info_for($table);
my $context = {
table_class => $table_class,
table_moniker => $table_moniker,
- table_name => $table_name,
- full_table_name => $full_table_name,
+ table_name => $table, # bugwards compatibility, RT#84050
+ table => $table,
+ full_table_name => $table->dbic_name,
schema_class => $schema_class,
column_info => $info,
};
+ my $col_obj = DBIx::Class::Schema::Loader::Column->new(
+ table => $table,
+ name => $col,
+ );
- $info->{accessor} = $self->_make_column_accessor_name( $col, $context );
+ $info->{accessor} = $self->_make_column_accessor_name( $col_obj, $context );
}
$self->_resolve_col_accessor_collisions($table, $col_info);
$self->_dbic_stmt($table_class, 'set_primary_key', @$pks)
if @$pks;
+ # Sort unique constraints by constraint name for repeatable results (rels
+ # are sorted as well elsewhere.)
+ @uniqs = sort { $a->[0] cmp $b->[0] } @uniqs;
+
foreach my $uniq (@uniqs) {
my ($name, $cols) = @$uniq;
$self->_dbic_stmt($table_class,'add_unique_constraint', $name, $cols);
sub tables {
my $self = shift;
- return keys %{$self->_tables};
+ return values %{$self->_tables};
+}
+
+sub _get_naming_v {
+ my ($self, $naming_key) = @_;
+
+ my $v;
+
+ if (($self->naming->{$naming_key}||'') =~ /^v(\d+)\z/) {
+ $v = $1;
+ }
+ else {
+ ($v) = $CURRENT_V =~ /^v(\d+)\z/;
+ }
+
+ return $v;
+}
+
+sub _to_identifier {
+ my ($self, $naming_key, $name, $sep_char, $force) = @_;
+
+ my $v = $self->_get_naming_v($naming_key);
+
+ my $to_identifier = $self->naming->{force_ascii} ?
+ \&String::ToIdentifier::EN::to_identifier
+ : \&String::ToIdentifier::EN::Unicode::to_identifier;
+
+ return $v >= 8 || $force ? $to_identifier->($name, $sep_char) : $name;
}
# Make a moniker from a table
sub _default_table2moniker {
- no warnings 'uninitialized';
my ($self, $table) = @_;
- if ($self->naming->{monikers} eq 'v4') {
- return join '', map ucfirst, split /[\W_]+/, lc $table;
- }
- elsif ($self->naming->{monikers} eq 'v5') {
- return join '', map ucfirst, split /[\W_]+/,
- Lingua::EN::Inflect::Number::to_S(lc $table);
- }
- elsif ($self->naming->{monikers} eq 'v6') {
- (my $as_phrase = lc $table) =~ s/_+/ /g;
- my $inflected = Lingua::EN::Inflect::Phrase::to_S($as_phrase);
+ my $v = $self->_get_naming_v('monikers');
- return join '', map ucfirst, split /\W+/, $inflected;
- }
+ my @moniker_parts = @{ $self->moniker_parts };
+ my @name_parts = map $table->$_, @moniker_parts;
- my @words = map lc, split_name $table;
- my $as_phrase = join ' ', @words;
+ my $name_idx = firstidx { $_ eq 'name' } @{ $self->moniker_parts };
- my $inflected = $self->naming->{monikers} eq 'plural' ?
- Lingua::EN::Inflect::Phrase::to_PL($as_phrase)
- :
- $self->naming->{monikers} eq 'preserve' ?
- $as_phrase
- :
- Lingua::EN::Inflect::Phrase::to_S($as_phrase);
+ my @all_parts;
+
+ foreach my $i (0 .. $#name_parts) {
+ my $part = $name_parts[$i];
+
+ my $moniker_part = $self->_run_user_map(
+ $self->moniker_part_map->{$moniker_parts[$i]},
+ sub { '' },
+ $part, $moniker_parts[$i],
+ );
+ if (length $moniker_part) {
+ push @all_parts, $moniker_part;
+ next;
+ }
+
+ if ($i != $name_idx || $v >= 8) {
+ $part = $self->_to_identifier('monikers', $part, '_', 1);
+ }
+
+ if ($i == $name_idx && $v == 5) {
+ $part = Lingua::EN::Inflect::Number::to_S($part);
+ }
+
+ my @part_parts = map lc, $v > 6 ?
+ # use v8 semantics for all moniker parts except name
+ ($i == $name_idx ? split_name $part, $v : split_name $part)
+ : split /[\W_]+/, $part;
+
+ if ($i == $name_idx && $v >= 6) {
+ my $as_phrase = join ' ', @part_parts;
+
+ my $inflected = ($self->naming->{monikers}||'') eq 'plural' ?
+ Lingua::EN::Inflect::Phrase::to_PL($as_phrase)
+ :
+ ($self->naming->{monikers}||'') eq 'preserve' ?
+ $as_phrase
+ :
+ Lingua::EN::Inflect::Phrase::to_S($as_phrase);
+
+ @part_parts = split /\s+/, $inflected;
+ }
+
+ push @all_parts, join '', map ucfirst, @part_parts;
+ }
- return join '', map ucfirst, split /\W+/, $inflected;
+ return join $self->moniker_part_separator, @all_parts;
}
sub _table2moniker {
$self->moniker_map,
sub { $self->_default_table2moniker( shift ) },
$table
- );
+ );
}
sub _load_relationships {
my @tables;
foreach my $table (@$tables) {
+ my $local_moniker = $self->monikers->{$table->sql_name};
+
my $tbl_fk_info = $self->_table_fk_info($table);
+
foreach my $fkdef (@$tbl_fk_info) {
+ $fkdef->{local_table} = $table;
+ $fkdef->{local_moniker} = $local_moniker;
$fkdef->{remote_source} =
- $self->monikers->{delete $fkdef->{remote_table}};
+ $self->monikers->{$fkdef->{remote_table}->sql_name};
}
my $tbl_uniq_info = $self->_table_uniq_info($table);
- my $local_moniker = $self->monikers->{$table};
-
push @tables, [ $local_moniker, $tbl_fk_info, $tbl_uniq_info ];
}
my $rel_stmts = $self->_relbuilder->generate_code(\@tables);
foreach my $src_class (sort keys %$rel_stmts) {
- my $src_stmts = $rel_stmts->{$src_class};
- foreach my $stmt (@$src_stmts) {
- $self->_dbic_stmt($src_class,$stmt->{method},@{$stmt->{args}});
+ # sort by rel name
+ my @src_stmts = map $_->[2],
+ sort {
+ $a->[0] <=> $b->[0]
+ ||
+ $a->[1] cmp $b->[1]
+ } map [
+ ($_->{method} eq 'many_to_many' ? 1 : 0),
+ $_->{args}[0],
+ $_,
+ ], @{ $rel_stmts->{$src_class} };
+
+ foreach my $stmt (@src_stmts) {
+ $self->_dbic_stmt($src_class,$stmt->{method}, @{$stmt->{args}});
}
}
}
sub _load_roles {
my ($self, $table) = @_;
- my $table_moniker = $self->monikers->{$table};
- my $table_class = $self->classes->{$table};
+ my $table_moniker = $self->monikers->{$table->sql_name};
+ my $table_class = $self->classes->{$table->sql_name};
my @roles = @{ $self->result_roles || [] };
push @roles, @{ $self->result_roles_map->{$table_moniker} }
if exists $self->result_roles_map->{$table_moniker};
if (@roles) {
- $self->_pod_class_list($table_class, 'L<Moose> ROLES APPLIED', @roles);
+ my $class = $self->use_moose ? 'Moose' : 'Moo';
+ $self->_pod_class_list($table_class, "L<$class> ROLES APPLIED", @roles);
$self->_with($table_class, @roles);
}
looks_like_number($s) ? $s : qq{'$s'};
" $_: $s"
- } sort keys %$attrs,
+ } sort keys %$attrs,
);
if (my $comment = $self->__column_comment($self->class_to_table->{$class}, $col_counter, $name)) {
$self->_pod( $class, $comment );
}
}
$self->_pod_cut( $class );
- } elsif ( $method =~ /^(belongs_to|has_many|might_have)$/ ) {
+ } elsif ( $method =~ /^(?:belongs_to|has_many|might_have)\z/ ) {
$self->_pod( $class, "=head1 RELATIONS" ) unless $self->{_relations_started} { $class } ;
my ( $accessor, $rel_class ) = @_;
$self->_pod( $class, "=head2 $accessor" );
$self->_pod( $class, "Related object: L<$rel_class>" );
$self->_pod_cut( $class );
$self->{_relations_started} { $class } = 1;
+ } elsif ( $method eq 'many_to_many' ) {
+ $self->_pod( $class, "=head1 RELATIONS" ) unless $self->{_relations_started} { $class } ;
+ my ( $accessor, $rel1, $rel2 ) = @_;
+ $self->_pod( $class, "=head2 $accessor" );
+ $self->_pod( $class, 'Type: many_to_many' );
+ $self->_pod( $class, "Composing rels: L</$rel1> -> $rel2" );
+ $self->_pod_cut( $class );
+ $self->{_relations_started} { $class } = 1;
}
elsif ($method eq 'add_unique_constraint') {
$self->_pod($class, '=head1 UNIQUE CONSTRAINTS')
unless $self->{_uniqs_started}{$class};
-
+
my ($name, $cols) = @_;
$self->_pod($class, "=head2 C<$name>");
$self->_pod($class, '=over 4');
-
+
foreach my $col (@$cols) {
$self->_pod($class, "=item \* L</$col>");
}
elsif ($method eq 'set_primary_key') {
$self->_pod($class, "=head1 PRIMARY KEY");
$self->_pod($class, '=over 4');
-
+
foreach my $col (@_) {
$self->_pod($class, "=item \* L</$col>");
}
sub _base_class_pod {
my ($self, $base_class) = @_;
- return unless $self->generate_pod;
+ return '' unless $self->generate_pod;
- return <<"EOF"
-=head1 BASE CLASS: L<$base_class>
-
-=cut
-
-EOF
+ return "\n=head1 BASE CLASS: L<$base_class>\n\n=cut\n\n";
}
sub _filter_comment {
if (my $code = $self->can('_table_comment')) {
return $self->_filter_comment($self->$code(@_));
}
-
+
return '';
}
return $self->preserve_case ? $name : uc($name);
}
-sub _unregister_source_for_table {
+sub _remove_table {
my ($self, $table) = @_;
try {
- local $@;
my $schema = $self->schema;
# in older DBIC it's a private method
my $unregister = $schema->can('unregister_source') || $schema->can('_unregister_source');
- $schema->$unregister($self->_table2moniker($table));
- delete $self->monikers->{$table};
- delete $self->classes->{$table};
- delete $self->_upgrading_classes->{$table};
- delete $self->{_tables}{$table};
+ $schema->$unregister(delete $self->monikers->{$table->sql_name});
+ delete $self->_upgrading_classes->{delete $self->classes->{$table->sql_name}};
+ delete $self->_tables->{$table->sql_name};
};
}
contain multiple entries per table for the original and normalized table
names, as above in L</monikers>.
+=head2 generated_classes
+
+Returns an arrayref of classes that were actually generated (i.e. not
+skipped because there were no changes).
+
+=head1 NON-ENGLISH DATABASES
+
+If you use the loader on a database with table and column names in a language
+other than English, you will want to turn off the English language specific
+heuristics.
+
+To do so, use something like this in your loader options:
+
+ naming => { monikers => 'v4' },
+ inflect_singular => sub { "$_[0]_rel" },
+ inflect_plural => sub { "$_[0]_rel" },
+
=head1 COLUMN ACCESSOR COLLISIONS
Occasionally you may have a column name that collides with a perl method, such
=head1 SEE ALSO
-L<DBIx::Class::Schema::Loader>
+L<DBIx::Class::Schema::Loader>, L<dbicdump>
-=head1 AUTHOR
+=head1 AUTHORS
-See L<DBIx::Class::Schema::Loader/AUTHOR> and L<DBIx::Class::Schema::Loader/CONTRIBUTORS>.
+See L<DBIx::Class::Schema::Loader/AUTHORS>.
=head1 LICENSE