use Class::Unload;
use Class::Inspector ();
use Scalar::Util 'looks_like_number';
-use DBIx::Class::Schema::Loader::Utils qw/split_name dumper_squashed eval_package_without_redefine_warnings class_path slurp_file/;
+use DBIx::Class::Schema::Loader::Utils qw/split_name dumper_squashed eval_package_without_redefine_warnings class_path slurp_file sigwarn_silencer firstidx uniq/;
use DBIx::Class::Schema::Loader::Optional::Dependencies ();
use Try::Tiny;
use DBIx::Class ();
use Encode qw/encode decode/;
-use List::MoreUtils qw/all any firstidx uniq/;
+use List::Util qw/all any none/;
use File::Temp 'tempfile';
use namespace::clean;
-our $VERSION = '0.07035';
+our $VERSION = '0.07043';
__PACKAGE__->mk_group_ro_accessors('simple', qw/
schema
use_moose
only_autoclean
overwrite_modifications
+ dry_run
+ generated_classes
+ omit_version
+ omit_timestamp
relationship_attrs
moniker_to_table
uniq_to_primary
quiet
+ allow_extra_m2m_cols
/);
db_schema
qualify_objects
moniker_parts
+ moniker_part_separator
+ moniker_part_map
/);
my $CURRENT_V = 'v7';
=item force_ascii
For L</v8> mode and later, uses L<String::ToIdentifier::EN> instead of
-L<String::ToIdentifier::EM::Unicode> to force monikers and other identifiers to
+L<String::ToIdentifier::EN::Unicode> to force monikers and other identifiers to
ASCII.
=back
completed.> messages. Does not affect warnings (except for warnings related to
L</really_erase_my_files>.)
+=head2 dry_run
+
+If true, don't actually write out the generated files. This can only be
+used with static schema generation.
+
=head2 generate_pod
By default POD will be generated for columns and relationships, using database
For example:
- relationship_attrs => {
- has_many => { cascade_delete => 1, cascade_copy => 1 },
- might_have => { cascade_delete => 1, cascade_copy => 1 },
- },
+ relationship_attrs => {
+ has_many => { cascade_delete => 1, cascade_copy => 1 },
+ might_have => { cascade_delete => 1, cascade_copy => 1 },
+ },
use this to turn L<DBIx::Class> cascades to on on your
L<has_many|DBIx::Class::Relationship/has_many> and
The L</moniker_parts> option is an arrayref of methods on the table class
corresponding to parts of the fully qualified table name, defaulting to
C<['name']>, in the order those parts are used to create the moniker name.
+The parts are joined together using L</moniker_part_separator>.
The C<'name'> entry B<must> be present.
=back
+=head2 moniker_part_separator
+
+String used to join L</moniker_parts> when creating the moniker.
+Defaults to the empty string. Use C<::> to get a separate namespace per
+database and/or schema.
+
=head2 constraint
-Only load tables matching regex. Best specified as a qr// regex.
+Only load matching tables.
=head2 exclude
-Exclude tables matching regex. Best specified as a qr// regex.
+Exclude matching tables.
+
+These can be specified either as a regex (preferably in the C<qr//>
+form), or as an arrayref of arrayrefs. Regexes are matched against
+the (unqualified) table name, while arrayrefs are matched according to
+L</moniker_parts>.
+
+For example:
+
+ db_schema => [qw(some_schema other_schema)],
+ moniker_parts => [qw(schema name)],
+ constraint => [
+ [ qr/\Asome_schema\z/ => qr/\A(?:foo|bar)\z/ ],
+ [ qr/\Aother_schema\z/ => qr/\Abaz\z/ ],
+ ],
+
+In this case only the tables C<foo> and C<bar> in C<some_schema> and
+C<baz> in C<other_schema> will be dumped.
=head2 moniker_map
-Overrides the default table name to moniker translation. Can be either a
-hashref of table keys and moniker values, or a coderef for a translator
-function taking a L<table object|DBIx::Class::Schema::Loader::Table> argument
-(which stringifies to the unqualified table name) and returning a scalar
-moniker. If the hash entry does not exist, or the function returns a false
+Overrides the default table name to moniker translation. Either
+
+=over
+
+=item *
+
+a nested hashref, which will be traversed according to L</moniker_parts>
+
+For example:
+
+ moniker_parts => [qw(schema name)],
+ moniker_map => {
+ foo => {
+ bar => "FooishBar",
+ },
+ },
+
+In which case the table C<bar> in the C<foo> schema would get the moniker
+C<FooishBar>.
+
+=item *
+
+a hashref of unqualified table name keys and moniker values
+
+=item *
+
+a coderef for a translator function taking a L<table
+object|DBIx::Class::Schema::Loader::Table> argument (which stringifies to the
+unqualified table name) and returning a scalar moniker
+
+The function is also passed a coderef that can be called with either
+of the hashref forms to get the moniker mapped accordingly. This is
+useful if you need to handle some monikers specially, but want to use
+the hashref form for the rest.
+
+=back
+
+If the hash entry does not exist, or the function returns a false
value, the code falls back to default behavior for that table name.
The default behavior is to split on case transition and non-alphanumeric
stations_visited | StationVisited
routeChange | RouteChange
+=head2 moniker_part_map
+
+Map for overriding the monikerization of individual L</moniker_parts>.
+The keys are the moniker part to override, the value is either a
+hashref or coderef for mapping the corresponding part of the
+moniker. If a coderef is used, it gets called with the moniker part
+and the hash key the code ref was found under.
+
+For example:
+
+ moniker_part_map => {
+ schema => sub { ... },
+ },
+
+Given the table C<foo.bar>, the code ref would be called with the
+arguments C<foo> and C<schema>, plus a coderef similar to the one
+described in L</moniker_map>.
+
+L</moniker_map> takes precedence over this.
+
=head2 col_accessor_map
Same as moniker_map, but for column accessor names. If a coderef is
passed, the code is called with arguments of
- the name of the column in the underlying database,
- default accessor name that DBICSL would ordinarily give this column,
- {
- table_class => name of the DBIC class we are building,
- table_moniker => calculated moniker for this table (after moniker_map if present),
- table => table object of interface DBIx::Class::Schema::Loader::Table,
- full_table_name => schema-qualified name of the database table (RDBMS specific),
- schema_class => name of the schema class we are building,
- column_info => hashref of column info (data_type, is_nullable, etc),
- }
+ the name of the column in the underlying database,
+ default accessor name that DBICSL would ordinarily give this column,
+ {
+ table_class => name of the DBIC class we are building,
+ table_moniker => calculated moniker for this table (after moniker_map if present),
+ table => table object of interface DBIx::Class::Schema::Loader::Table,
+ full_table_name => schema-qualified name of the database table (RDBMS specific),
+ schema_class => name of the schema class we are building,
+ column_info => hashref of column info (data_type, is_nullable, etc),
+ }
+    a coderef that can be called with a hashref map
the L<table object|DBIx::Class::Schema::Loader::Table> stringifies to the
unqualified table name.
and relationships that would have been named C<bar> will now be named C<baz>
except that in the table whose moniker is C<Foo> it will be named C<blat>.
-If it is a coderef, the argument passed will be a hashref of this form:
+If it is a coderef, it will be passed a hashref of this form:
{
name => default relationship name,
link_rel_name => name of the relationship to the link table
}
+In addition it is passed a coderef that can be called with a hashref map.
+
DBICSL will try to use the value returned as the relationship name.
=head2 inflect_plural
loads the given components into every Result class, this option allows you to
load certain components for specified Result classes. For example:
- result_components_map => {
- StationVisited => '+YourApp::Schema::Component::StationVisited',
- RouteChange => [
- '+YourApp::Schema::Component::RouteChange',
- 'InflateColumn::DateTime',
- ],
- }
+ result_components_map => {
+ StationVisited => '+YourApp::Schema::Component::StationVisited',
+ RouteChange => [
+ '+YourApp::Schema::Component::RouteChange',
+ 'InflateColumn::DateTime',
+ ],
+ }
You may use this in conjunction with L</components>.
applies the given roles to every Result class, this option allows you to apply
certain roles for specified Result classes. For example:
- result_roles_map => {
- StationVisited => [
- 'YourApp::Role::Building',
- 'YourApp::Role::Destination',
- ],
- RouteChange => 'YourApp::Role::TripEvent',
- }
+ result_roles_map => {
+ StationVisited => [
+ 'YourApp::Role::Building',
+ 'YourApp::Role::Destination',
+ ],
+ RouteChange => 'YourApp::Role::TripEvent',
+ }
You may use this in conjunction with L</result_roles>.
Again, you should be using version control on your schema classes. Be
careful with this option.
+=head2 omit_version
+
+Omit the package version from the signature comment.
+
+=head2 omit_timestamp
+
+Omit the creation timestamp from the signature comment.
+
=head2 custom_column_info
Hook for adding extra attributes to the
For example:
- custom_column_info => sub {
- my ($table, $column_name, $column_info) = @_;
+ custom_column_info => sub {
+ my ($table, $column_name, $column_info) = @_;
- if ($column_name eq 'dog' && $column_info->{default_value} eq 'snoopy') {
- return { is_snoopy => 1 };
- }
- },
+ if ($column_name eq 'dog' && $column_info->{default_value} eq 'snoopy') {
+ return { is_snoopy => 1 };
+ }
+ },
This attribute can also be used to set C<inflate_datetime> on a non-datetime
column so it also receives the L</datetime_timezone> and/or L</datetime_locale>.
on tables to primary keys, assuming there is only one largest unique
constraint.
+=head2 allow_extra_m2m_cols
+
+Generate C<many_to_many> relationship bridges even if the link table has
+extra columns other than the foreign keys. The primary key must still
+equal the union of the foreign keys.
+
+
=head2 filter_generated_code
An optional hook that lets you filter the generated text for various classes
filter_generated_code => sub {
my ($type, $class, $text) = @_;
- ...
- return $new_code;
+ ...
+ return $new_code;
+ }
+
+You can also use this option to set L<perltidy markers|perltidy/Skipping
+Selected Sections of Code> in your generated classes. This will leave
+the generated code in the default format, but will allow you to tidy
+your classes at any point in future, without worrying about changing the
+portions of the file which are checksummed, since C<perltidy> will just
+ignore all text between the markers.
+
+ filter_generated_code => sub {
+ return "#<<<\n$_[2]\n#>>>";
}
=head1 METHODS
=cut
-# ensure that a peice of object data is a valid arrayref, creating
+# ensure that a piece of object data is a valid arrayref, creating
# an empty one or encapsulating whatever's there.
sub _ensure_arrayref {
my $self = shift;
$self->{class_to_table} = {};
$self->{classes} = {};
$self->{_upgrading_classes} = {};
+ $self->{generated_classes} = [];
$self->{schema_class} ||= ( ref $self->{schema} || $self->{schema} );
$self->{schema} ||= $self->{schema_class};
if $self->{dump_overwrite};
$self->{dynamic} = ! $self->{dump_directory};
+
+ croak "dry_run can only be used with static schema generation"
+ if $self->dynamic and $self->dry_run;
+
$self->{temp_directory} ||= File::Temp::tempdir( 'dbicXXXX',
TMPDIR => 1,
CLEANUP => 1,
if (ref $self->moniker_parts ne 'ARRAY') {
croak 'moniker_parts must be an arrayref';
}
- if ((firstidx { $_ eq 'name' } @{ $self->moniker_parts }) == -1) {
+ if (none { $_ eq 'name' } @{ $self->moniker_parts }) {
croak "moniker_parts option *must* contain 'name'";
}
}
+ if (not defined $self->moniker_part_separator) {
+ $self->moniker_part_separator('');
+ }
+ if (not defined $self->moniker_part_map) {
+ $self->moniker_part_map({}),
+ }
+
return $self;
}
return unless -e $filename;
my ($old_gen, $old_md5, $old_ver, $old_ts, $old_custom) =
- $self->_parse_generated_file($filename);
+ $self->_parse_generated_file($filename);
return unless $old_ver;
foreach my $c (@classes) {
# components default to being under the DBIx::Class namespace unless they
- # are preceeded with a '+'
+ # are preceded with a '+'
if ( $key =~ m/component/ && $c !~ s/^\+// ) {
$c = 'DBIx::Class::' . $c;
}
foreach my $prefix (@INC) {
my $fullpath = File::Spec->catfile($prefix, $file);
+ # abs_path pure-perl fallback warns for non-existent files
+ local $SIG{__WARN__} = sigwarn_silencer(qr/^stat\(.*\Q$file\E\)/);
return $fullpath if -f $fullpath
- # abs_path throws on Windows for nonexistant files
+ # abs_path throws on Windows for nonexistent files
and (try { Cwd::abs_path($fullpath) }) ne
((try { Cwd::abs_path(File::Spec->catfile($self->dump_directory, $file)) }) || '');
}
}
$self->_ext_stmt($class,
- qq|# These lines were loaded from '$real_inc_path' found in \@INC.\n|
- .qq|# They are now part of the custom portion of this file\n|
- .qq|# for you to hand-edit. If you do not either delete\n|
- .qq|# this section or remove that file from \@INC, this section\n|
- .qq|# will be repeated redundantly when you re-create this\n|
- .qq|# file again via Loader! See skip_load_external to disable\n|
- .qq|# this feature.\n|
+ qq|# These lines were loaded from '$real_inc_path' found in \@INC.\n|
+ .qq|# They are now part of the custom portion of this file\n|
+ .qq|# for you to hand-edit. If you do not either delete\n|
+ .qq|# this section or remove that file from \@INC, this section\n|
+ .qq|# will be repeated redundantly when you re-create this\n|
+ .qq|# file again via Loader! See skip_load_external to disable\n|
+ .qq|# this feature.\n|
);
chomp $code;
$self->_ext_stmt($class, $code);
$self->_ext_stmt($class,
- qq|# End of lines loaded from '$real_inc_path' |
+ qq|# End of lines loaded from '$real_inc_path'|
);
}
chomp $code;
$self->_ext_stmt($class, $code);
$self->_ext_stmt($class,
- qq|# End of lines loaded from '$old_real_inc_path' |
+ qq|# End of lines loaded from '$old_real_inc_path'|
);
}
}
sub load {
my $self = shift;
- $self->_load_tables(
- $self->_tables_list({ constraint => $self->constraint, exclude => $self->exclude })
- );
+ $self->_load_tables($self->_tables_list);
}
=head2 rescan
$self->_relbuilder->{schema} = $schema;
my @created;
- my @current = $self->_tables_list({ constraint => $self->constraint, exclude => $self->exclude });
+ my @current = $self->_tables_list;
foreach my $table (@current) {
if(!exists $self->_tables->{$table->sql_name}) {
my $moniker_parts = [ @{ $self->moniker_parts } ];
- my $have_schema = 1 if any { $_ eq 'schema' } @{ $self->moniker_parts };
- my $have_database = 1 if any { $_ eq 'database' } @{ $self->moniker_parts };
+ my $have_schema = any { $_ eq 'schema' } @{ $self->moniker_parts };
+ my $have_database = any { $_ eq 'database' } @{ $self->moniker_parts };
unshift @$moniker_parts, 'schema' if $use_schema && !$have_schema;
unshift @$moniker_parts, 'database' if $use_database && !$have_database;
# The relationship loader needs a working schema
local $self->{quiet} = 1;
local $self->{dump_directory} = $self->{temp_directory};
+ local $self->{generated_classes} = [];
+ local $self->{dry_run} = 0;
$self->_reload_classes(\@tables);
$self->_load_relationships(\@tables);
unshift @INC, $self->dump_directory;
+ return if $self->dry_run;
+
my @to_register;
my %have_source = map { $_ => $self->schema->source($_) }
$self->schema->sources;
}
sub _moose_metaclass {
- return undef unless $INC{'Class/MOP.pm'}; # if CMOP is not loaded the class could not have loaded in the 1st place
+ return undef unless $INC{'Class/MOP.pm'}; # if CMOP is not loaded the class could not have loaded in the 1st place
- my $class = $_[1];
+ my $class = $_[1];
- my $mc = try { Class::MOP::class_of($class) }
- or return undef;
+ my $mc = try { Class::MOP::class_of($class) }
+ or return undef;
- return $mc->isa('Moose::Meta::Class') ? $mc : undef;
+ return $mc->isa('Moose::Meta::Class') ? $mc : undef;
}
# We use this instead of ensure_class_loaded when there are package symbols we
sub _ensure_dump_subdirs {
my ($self, $class) = (@_);
+ return if $self->dry_run;
+
my @name_parts = split(/::/, $class);
pop @name_parts; # we don't care about the very last element,
# which is a filename
}
}
else {
- $src_text .= qq|use base '$result_base_class';\n|;
+ $src_text .= qq|use base '$result_base_class';\n|;
}
$self->_write_classfile($src_class, $src_text);
sub _sig_comment {
my ($self, $version, $ts) = @_;
return qq|\n\n# Created by DBIx::Class::Schema::Loader|
- . qq| v| . $version
- . q| @ | . $ts
+ . (defined($version) ? q| v| . $version : '')
+ . (defined($ts) ? q| @ | . $ts : '')
. qq|\n# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:|;
}
my $filename = $self->_get_dump_filename($class);
$self->_ensure_dump_subdirs($class);
- if (-f $filename && $self->really_erase_my_files) {
+ if (-f $filename && $self->really_erase_my_files && !$self->dry_run) {
warn "Deleting existing file '$filename' due to "
. "'really_erase_my_files' setting\n" unless $self->quiet;
unlink($filename);
if (-f $old_filename) {
$custom_content = ($self->_parse_generated_file ($old_filename))[4];
- unlink $old_filename;
+ unlink $old_filename unless $self->dry_run;
}
}
croak "filter '$filter' exited non-zero: $exit_code";
}
}
- if (not $text or not $text =~ /\bpackage\b/) {
- warn("$class skipped due to filter") if $self->debug;
- return;
- }
+ if (not $text or not $text =~ /\bpackage\b/) {
+ warn("$class skipped due to filter") if $self->debug;
+ return;
+ }
}
# Check and see if the dump is in fact different
my $compare_to;
if ($old_md5) {
- $compare_to = $text . $self->_sig_comment($old_ver, $old_ts);
- if (Digest::MD5::md5_base64(encode 'UTF-8', $compare_to) eq $old_md5) {
- return unless $self->_upgrading_from && $is_schema;
- }
+ $compare_to = $text . $self->_sig_comment($old_ver, $old_ts);
+ if (Digest::MD5::md5_base64(encode 'UTF-8', $compare_to) eq $old_md5) {
+ return unless $self->_upgrading_from && $is_schema;
+ }
}
+ push @{$self->generated_classes}, $class;
+
+ return if $self->dry_run;
+
$text .= $self->_sig_comment(
- $self->version_to_dump,
- POSIX::strftime('%Y-%m-%d %H:%M:%S', localtime)
+ $self->omit_version ? undef : $self->version_to_dump,
+ $self->omit_timestamp ? undef : POSIX::strftime('%Y-%m-%d %H:%M:%S', localtime)
);
- open(my $fh, '>:encoding(UTF-8)', $filename)
+ open(my $fh, '>:raw:encoding(UTF-8)', $filename)
or croak "Cannot open '$filename' for writing: $!";
# Write the top half and its MD5 sum
my $mark_re =
qr{^(# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:)([A-Za-z0-9/+]{22})\r?\n};
- my ($md5, $ts, $ver, $gen);
+ my ($real_md5, $ts, $ver, $gen);
+ local $_;
while(<$fh>) {
if(/$mark_re/) {
my $pre_md5 = $1;
- $md5 = $2;
+ my $mark_md5 = $2;
# Pull out the version and timestamp from the line above
- ($ver, $ts) = $gen =~ m/^# Created by DBIx::Class::Schema::Loader v(.*?) @ (.*?)\r?\Z/m;
+ ($ver, $ts) = $gen =~ m/^# Created by DBIx::Class::Schema::Loader( v[\d.]+)?( @ [\d-]+ [\d:]+)?\r?\Z/m;
+ $ver =~ s/^ v// if $ver;
+ $ts =~ s/^ @ // if $ts;
$gen .= $pre_md5;
+ $real_md5 = Digest::MD5::md5_base64(encode 'UTF-8', $gen);
croak "Checksum mismatch in '$fn', the auto-generated part of the file has been modified outside of this loader. Aborting.\nIf you want to overwrite these modifications, set the 'overwrite_modifications' loader option.\n"
- if !$self->overwrite_modifications && Digest::MD5::md5_base64(encode 'UTF-8', $gen) ne $md5;
+ if !$self->overwrite_modifications && $real_md5 ne $mark_md5;
last;
}
}
my $custom = do { local $/; <$fh> }
- if $md5;
+ if $real_md5;
$custom ||= '';
$custom =~ s/$CRLF|$LF/\n/g;
close $fh;
- return ($gen, $md5, $ver, $ts, $custom);
+ return ($gen, $real_md5, $ver, $ts, $custom);
}
sub _use {
push @roles, @{ $self->result_roles_map->{$table_moniker} }
if exists $self->result_roles_map->{$table_moniker};
- for my $class ($base, @components,
- ($self->use_moose ? 'Moose::Object' : ()), @roles) {
+ for my $class (
+ $base, @components, @roles,
+ ($self->use_moose ? 'Moose::Object' : ()),
+ ) {
$self->ensure_class_loaded($class);
push @methods, @{ Class::Inspector->methods($class) || [] };
my $default_ident = $default_code->( $ident, @extra );
my $new_ident;
if( $map && ref $map eq 'HASH' ) {
- $new_ident = $map->{ $ident };
+ if (my @parts = try{ @{ $ident } }) {
+ my $part_map = $map;
+ while (@parts) {
+ my $part = shift @parts;
+ last unless exists $part_map->{ $part };
+ if ( !ref $part_map->{ $part } && !@parts ) {
+ $new_ident = $part_map->{ $part };
+ last;
+ }
+ elsif ( ref $part_map->{ $part } eq 'HASH' ) {
+ $part_map = $part_map->{ $part };
+ }
+ }
+ }
+ if( !$new_ident && !ref $map->{ $ident } ) {
+ $new_ident = $map->{ $ident };
+ }
}
elsif( $map && ref $map eq 'CODE' ) {
- $new_ident = $map->( $ident, $default_ident, @extra );
+ my $cb = sub {
+ my ($cb_map) = @_;
+ croak "reentered map must be a hashref"
+ unless 'HASH' eq ref($cb_map);
+ return $self->_run_user_map($cb_map, $default_code, $ident, @extra);
+ };
+ $new_ident = $map->( $ident, $default_ident, @extra, $cb );
}
$new_ident ||= $default_ident;
sub { $self->_default_column_accessor_name( shift ) },
$column_name,
$column_context_info,
- );
+ );
return $accessor;
}
+# Predicate: should this table be dumped as a
+# DBIx::Class::ResultSource::View rather than a plain table source?
+# (Used by _setup_src_meta to emit a table_class statement.)
+# Base implementation always answers "no"; presumably DBD-specific
+# subclasses override this for real view detection — confirm in drivers.
+sub _table_is_view {
+    #my ($self, $table) = @_;
+    return 0;
+}
+
# Set up metadata (cols, pks, etc)
sub _setup_src_meta {
my ($self, $table) = @_;
my $table_class = $self->classes->{$table->sql_name};
my $table_moniker = $self->monikers->{$table->sql_name};
+ $self->_dbic_stmt($table_class, 'table_class', 'DBIx::Class::ResultSource::View')
+ if $self->_table_is_view($table);
+
$self->_dbic_stmt($table_class, 'table', $table->dbic_name);
my $cols = $self->_table_columns($table);
my $v = $self->_get_naming_v('monikers');
- my @name_parts = map $table->$_, @{ $self->moniker_parts };
+ my @moniker_parts = @{ $self->moniker_parts };
+ my @name_parts = map $table->$_, @moniker_parts;
my $name_idx = firstidx { $_ eq 'name' } @{ $self->moniker_parts };
foreach my $i (0 .. $#name_parts) {
my $part = $name_parts[$i];
+ my $moniker_part = $self->_run_user_map(
+ $self->moniker_part_map->{$moniker_parts[$i]},
+ sub { '' },
+ $part, $moniker_parts[$i],
+ );
+ if (length $moniker_part) {
+ push @all_parts, $moniker_part;
+ next;
+ }
+
if ($i != $name_idx || $v >= 8) {
$part = $self->_to_identifier('monikers', $part, '_', 1);
}
@part_parts = split /\s+/, $inflected;
}
- push @all_parts, map ucfirst, @part_parts;
+ push @all_parts, join '', map ucfirst, @part_parts;
}
- return join '', @all_parts;
+ return join $self->moniker_part_separator, @all_parts;
}
sub _table2moniker {
$self->moniker_map,
sub { $self->_default_table2moniker( shift ) },
$table
- );
+ );
}
sub _load_relationships {
looks_like_number($s) ? $s : qq{'$s'};
" $_: $s"
- } sort keys %$attrs,
+ } sort keys %$attrs,
);
if (my $comment = $self->__column_comment($self->class_to_table->{$class}, $col_counter, $name)) {
$self->_pod( $class, $comment );
return '' unless $self->generate_pod;
- return <<"EOF"
-=head1 BASE CLASS: L<$base_class>
-
-=cut
-
-EOF
+ return "\n=head1 BASE CLASS: L<$base_class>\n\n=cut\n\n";
}
sub _filter_comment {
contain multiple entries per table for the original and normalized table
names, as above in L</monikers>.
+=head2 generated_classes
+
+Returns an arrayref of classes that were actually generated (i.e. not
+skipped because there were no changes).
+
=head1 NON-ENGLISH DATABASES
If you use the loader on a database with table and column names in a language
L<DBIx::Class::Schema::Loader>, L<dbicdump>
-=head1 AUTHOR
+=head1 AUTHORS
-See L<DBIx::Class::Schema::Loader/AUTHOR> and L<DBIx::Class::Schema::Loader/CONTRIBUTORS>.
+See L<DBIx::Class::Schema::Loader/AUTHORS>.
=head1 LICENSE