allocate body types with "ghost fields".
"ghost fields" are fields that are unused in certain types, and
-consequently dont need to actually exist. They are declared because
+consequently don't need to actually exist. They are declared because
they're part of a "base type", which allows use of functions as
methods. The simplest examples are AVs and HVs, 2 aggregate types
which don't use the fields which support SCALAR semantics.
-For these types, the arenas are carved up into *_allocated size
+For these types, the arenas are carved up into appropriately sized
chunks, we thus avoid wasted memory for those unaccessed members.
When bodies are allocated, we adjust the pointer back in memory by the
-size of the bit not allocated, so it's as if we allocated the full
+size of the part not allocated, so it's as if we allocated the full
structure. (But things will all go boom if you write to the part that
is "not there", because you'll be overwriting the last members of the
preceding structure in memory.)
-We calculate the correction using the STRUCT_OFFSET macro. For
-example, if xpv_allocated is the same structure as XPV then the two
-OFFSETs sum to zero, and the pointer is unchanged. If the allocated
-structure is smaller (no initial NV actually allocated) then the net
-effect is to subtract the size of the NV from the pointer, to return a
-new pointer as if an initial NV were actually allocated.
+We calculate the correction using the STRUCT_OFFSET macro on the first
+member present. If the allocated structure is smaller (no initial NV
+actually allocated) then the net effect is to subtract the size of the NV
+from the pointer, to return a new pointer as if an initial NV were actually
+allocated. (We were using structures named *_allocated for this, but
+this turned out to be a subtle bug, because a structure without an NV
+could have a lower alignment constraint, but the compiler is allowed to
+optimise accesses based on the alignment constraint of the actual pointer
+to the full structure, for example, using a single 64 bit load instruction
+because it "knows" that two adjacent 32 bit members will be 8-byte aligned.)
This is the same trick as was used for NV and IV bodies. Ironically it
doesn't need to be used for NV bodies any more, because NV is now at
Body_size determines how big a body is, and therefore how many fit into
each arena. Offset carries the body-pointer adjustment needed for
-*_allocated body types, and is used in *_allocated macros.
+"ghost fields", and is used in *_allocated macros.
But its main purpose is to parameterize info needed in
Perl_sv_upgrade(). The info here dramatically simplifies the function
-vs the implementation in 5.8.7, making it table-driven. All fields
+vs the implementation in 5.8.8, making it table-driven. All fields
are used for this, except for arena_size.
For the sv-types that have no bodies, arenas are not used, so those
? FIT_ARENAn (count, body_size) \
: FIT_ARENA0 (body_size)
-/* A macro to work out the offset needed to subtract from a pointer to (say)
-
-typedef struct {
- STRLEN xpv_cur;
- STRLEN xpv_len;
-} xpv_allocated;
-
-to make its members accessible via a pointer to (say)
-
-struct xpv {
- NV xnv_nv;
- STRLEN xpv_cur;
- STRLEN xpv_len;
-};
-
-*/
-
-#define relative_STRUCT_OFFSET(longer, shorter, member) \
- (STRUCT_OFFSET(shorter, member) - STRUCT_OFFSET(longer, member))
-
/* Calculate the length to copy. Specifically work out the length less any
final padding the compiler needed to add. See the comment in sv_upgrade
for why copying the padding proved to be a bug. */
FIT_ARENA(0, sizeof(NV)) },
/* 8 bytes on most ILP32 with IEEE doubles */
- { sizeof(xpv_allocated),
- copy_length(XPV, xpv_len)
- - relative_STRUCT_OFFSET(xpv_allocated, XPV, xpv_cur),
- + relative_STRUCT_OFFSET(xpv_allocated, XPV, xpv_cur),
- SVt_PV, FALSE, NONV, HASARENA, FIT_ARENA(0, sizeof(xpv_allocated)) },
+ { sizeof(XPV) - STRUCT_OFFSET(XPV, xpv_cur),
+ copy_length(XPV, xpv_len) - STRUCT_OFFSET(XPV, xpv_cur),
+ + STRUCT_OFFSET(XPV, xpv_cur),
+ SVt_PV, FALSE, NONV, HASARENA,
+ FIT_ARENA(0, sizeof(XPV) - STRUCT_OFFSET(XPV, xpv_cur)) },
/* 12 */
- { sizeof(xpviv_allocated),
- copy_length(XPVIV, xiv_u)
- - relative_STRUCT_OFFSET(xpviv_allocated, XPVIV, xpv_cur),
- + relative_STRUCT_OFFSET(xpviv_allocated, XPVIV, xpv_cur),
- SVt_PVIV, FALSE, NONV, HASARENA, FIT_ARENA(0, sizeof(xpviv_allocated)) },
+ { sizeof(XPVIV) - STRUCT_OFFSET(XPV, xpv_cur),
+ copy_length(XPVIV, xiv_u) - STRUCT_OFFSET(XPV, xpv_cur),
+ + STRUCT_OFFSET(XPVIV, xpv_cur),
+ SVt_PVIV, FALSE, NONV, HASARENA,
+ FIT_ARENA(0, sizeof(XPV) - STRUCT_OFFSET(XPV, xpv_cur)) },
/* 20 */
{ sizeof(XPVNV), copy_length(XPVNV, xiv_u), 0, SVt_PVNV, FALSE, HADNV,
HASARENA, FIT_ARENA(0, sizeof(XPVMG)) },
/* something big */
- { sizeof(struct regexp_allocated), sizeof(struct regexp_allocated),
- + relative_STRUCT_OFFSET(struct regexp_allocated, regexp, xpv_cur),
+ { sizeof(regexp) - STRUCT_OFFSET(regexp, xpv_cur),
+ sizeof(regexp) - STRUCT_OFFSET(regexp, xpv_cur),
+ + STRUCT_OFFSET(regexp, xpv_cur),
SVt_REGEXP, FALSE, NONV, HASARENA,
- FIT_ARENA(0, sizeof(struct regexp_allocated))
+ FIT_ARENA(0, sizeof(regexp) - STRUCT_OFFSET(regexp, xpv_cur))
},
/* 48 */
{ sizeof(XPVLV), sizeof(XPVLV), 0, SVt_PVLV, TRUE, HADNV,
HASARENA, FIT_ARENA(0, sizeof(XPVLV)) },
- { sizeof(xpvav_allocated),
- copy_length(XPVAV, xmg_stash)
- - relative_STRUCT_OFFSET(xpvav_allocated, XPVAV, xav_fill),
- + relative_STRUCT_OFFSET(xpvav_allocated, XPVAV, xav_fill),
- SVt_PVAV, TRUE, NONV, HASARENA, FIT_ARENA(0, sizeof(xpvav_allocated)) },
+ { sizeof(XPVAV) - STRUCT_OFFSET(XPVAV, xav_fill),
+ copy_length(XPVAV, xmg_stash) - STRUCT_OFFSET(XPVAV, xav_fill),
+ + STRUCT_OFFSET(XPVAV, xav_fill),
+ SVt_PVAV, TRUE, NONV, HASARENA,
+ FIT_ARENA(0, sizeof(XPVAV) - STRUCT_OFFSET(XPVAV, xav_fill)) },
- { sizeof(xpvhv_allocated),
- copy_length(XPVHV, xmg_stash)
- - relative_STRUCT_OFFSET(xpvhv_allocated, XPVHV, xhv_fill),
- + relative_STRUCT_OFFSET(xpvhv_allocated, XPVHV, xhv_fill),
- SVt_PVHV, TRUE, NONV, HASARENA, FIT_ARENA(0, sizeof(xpvhv_allocated)) },
+ { sizeof(XPVHV) - STRUCT_OFFSET(XPVHV, xhv_fill),
+ copy_length(XPVHV, xmg_stash) - STRUCT_OFFSET(XPVHV, xhv_fill),
+ + STRUCT_OFFSET(XPVHV, xhv_fill),
+ SVt_PVHV, TRUE, NONV, HASARENA,
+ FIT_ARENA(0, sizeof(XPVHV) - STRUCT_OFFSET(XPVHV, xhv_fill)) },
/* 56 */
- { sizeof(xpvcv_allocated), sizeof(xpvcv_allocated),
- + relative_STRUCT_OFFSET(xpvcv_allocated, XPVCV, xpv_cur),
- SVt_PVCV, TRUE, NONV, HASARENA, FIT_ARENA(0, sizeof(xpvcv_allocated)) },
-
- { sizeof(xpvfm_allocated), sizeof(xpvfm_allocated),
- + relative_STRUCT_OFFSET(xpvfm_allocated, XPVFM, xpv_cur),
- SVt_PVFM, TRUE, NONV, NOARENA, FIT_ARENA(20, sizeof(xpvfm_allocated)) },
+ { sizeof(XPVCV) - STRUCT_OFFSET(XPVCV, xpv_cur),
+ sizeof(XPVCV) - STRUCT_OFFSET(XPVCV, xpv_cur),
+ + STRUCT_OFFSET(XPVCV, xpv_cur),
+ SVt_PVCV, TRUE, NONV, HASARENA,
+ FIT_ARENA(0, sizeof(XPVCV) - STRUCT_OFFSET(XPVCV, xpv_cur)) },
+
+ { sizeof(XPVFM) - STRUCT_OFFSET(XPVFM, xpv_cur),
+ sizeof(XPVFM) - STRUCT_OFFSET(XPVFM, xpv_cur),
+ + STRUCT_OFFSET(XPVFM, xpv_cur),
+ SVt_PVFM, TRUE, NONV, NOARENA,
+ FIT_ARENA(20, sizeof(XPVFM) - STRUCT_OFFSET(XPVFM, xpv_cur)) },
/* XPVIO is 84 bytes, fits 48x */
- { sizeof(xpvio_allocated), sizeof(xpvio_allocated),
- + relative_STRUCT_OFFSET(xpvio_allocated, XPVIO, xpv_cur),
- SVt_PVIO, TRUE, NONV, HASARENA, FIT_ARENA(24, sizeof(xpvio_allocated)) },
+ { sizeof(XPVIO) - STRUCT_OFFSET(XPVIO, xpv_cur),
+ sizeof(XPVIO) - STRUCT_OFFSET(XPVIO, xpv_cur),
+ + STRUCT_OFFSET(XPVIO, xpv_cur),
+ SVt_PVIO, TRUE, NONV, HASARENA,
+ FIT_ARENA(24, sizeof(XPVIO) - STRUCT_OFFSET(XPVIO, xpv_cur)) },
};
#define new_body_type(sv_type) \
PERL_ARGS_ASSERT_SV_UPGRADE;
+ if (old_type == new_type)
+ return;
+
+    /* This clause was purposefully added ahead of the early return above to fix
+ the shared string hackery for (sort {$a <=> $b} keys %hash), with the
+ inference by Nick I-S that it would fix other troublesome cases. See
+ changes 7162, 7163 (f130fd4589cf5fbb24149cd4db4137c8326f49c1 and parent)
+
+ Given that shared hash key scalars are no longer PVIV, but PV, there is
+ no longer need to unshare so as to free up the IVX slot for its proper
+ purpose. So it's safe to move the early return earlier. */
+
if (new_type != SVt_PV && SvIsCOW(sv)) {
sv_force_normal_flags(sv, 0);
}
- if (old_type == new_type)
- return;
-
old_body = SvANY(sv);
/* Copying structures onto other structures that have been neatly zeroed
SvNV_set(sv, 0);
#endif
- if (new_type == SVt_PVIO)
+ if (new_type == SVt_PVIO) {
+ IO * const io = MUTABLE_IO(sv);
+ GV *iogv = gv_fetchpvs("FileHandle::", 0, SVt_PVHV);
+
+ SvOBJECT_on(io);
+ /* Clear the stashcache because a new IO could overrule a package
+ name */
+ hv_clear(PL_stashcache);
+
+ /* unless exists($main::{FileHandle}) and
+ defined(%main::FileHandle::) */
+ if (!(iogv && GvHV(iogv) && HvARRAY(GvHV(iogv))))
+ iogv = gv_fetchpvs("IO::Handle::", GV_ADD, SVt_PVHV);
+ SvSTASH_set(io, MUTABLE_HV(SvREFCNT_inc(GvHV(iogv))));
IoPAGE_LEN(sv) = 60;
+ }
if (old_type < SVt_PV) {
/* referant will be NULL unless the old type was SVt_IV emulating
SVt_RV */
return TRUE;
}
-STATIC char *
-S_glob_2pv(pTHX_ GV * const gv, STRLEN * const len)
-{
- const U32 wasfake = SvFLAGS(gv) & SVf_FAKE;
- SV *const buffer = sv_newmortal();
-
- PERL_ARGS_ASSERT_GLOB_2PV;
-
- /* FAKE globs can get coerced, so need to turn this off temporarily if it
- is on. */
- SvFAKE_off(gv);
- gv_efullname3(buffer, gv, "*");
- SvFLAGS(gv) |= wasfake;
-
- assert(SvPOK(buffer));
- if (len) {
- *len = SvCUR(buffer);
- }
- return SvPVX(buffer);
-}
-
/* Actually, ISO C leaves conversion of UV to IV undefined, but
until proven guilty, assume that things are not that bad... */
#endif
}
else {
- if (isGV_with_GP(sv))
- return glob_2pv(MUTABLE_GV(sv), lp);
+ if (isGV_with_GP(sv)) {
+ GV *const gv = MUTABLE_GV(sv);
+ const U32 wasfake = SvFLAGS(gv) & SVf_FAKE;
+ SV *const buffer = sv_newmortal();
+
+ /* FAKE globs can get coerced, so need to turn this off temporarily
+ if it is on. */
+ SvFAKE_off(gv);
+ gv_efullname3(buffer, gv, "*");
+ SvFLAGS(gv) |= wasfake;
+
+ assert(SvPOK(buffer));
+ if (lp) {
+ *lp = SvCUR(buffer);
+ }
+ return SvPVX(buffer);
+ }
if (lp)
*lp = 0;
SV **location;
U8 import_flag = 0;
const U32 stype = SvTYPE(sref);
+ bool mro_changes = FALSE;
PERL_ARGS_ASSERT_GLOB_ASSIGN_REF;
goto common;
case SVt_PVAV:
location = (SV **) &GvAV(dstr);
+ if (strEQ(GvNAME((GV*)dstr), "ISA"))
+ mro_changes = TRUE;
import_flag = GVf_IMPORTED_AV;
goto common;
case SVt_PVIO:
goto common;
case SVt_PVFM:
location = (SV **) &GvFORM(dstr);
+ goto common;
default:
location = &GvSV(dstr);
import_flag = GVf_IMPORTED_SV;
SvREFCNT_dec(dref);
if (SvTAINTED(sstr))
SvTAINT(dstr);
+ if (mro_changes) mro_isa_changed_in(GvSTASH(dstr));
return;
}
}
#ifdef PERL_OLD_COPY_ON_WRITE
if (!isSwipe) {
- /* I believe I should acquire a global SV mutex if
- it's a COW sv (not a shared hash key) to stop
- it going un copy-on-write.
- If the source SV has gone un copy on write between up there
- and down here, then (assert() that) it is of the correct
- form to make it copy on write again */
if ((sflags & (SVf_FAKE | SVf_READONLY))
!= (SVf_FAKE | SVf_READONLY)) {
SvREADONLY_on(sstr);
SvCUR_set(dstr, cur);
SvREADONLY_on(dstr);
SvFAKE_on(dstr);
- /* Relesase a global SV mutex. */
}
else
{ /* Passes the swipe test. */
#ifdef PERL_OLD_COPY_ON_WRITE
if (SvREADONLY(sv)) {
- /* At this point I believe I should acquire a global SV mutex. */
if (SvFAKE(sv)) {
const char * const pvx = SvPVX_const(sv);
const STRLEN len = SvLEN(sv);
}
else if (IN_PERL_RUNTIME)
Perl_croak(aTHX_ "%s", PL_no_modify);
- /* At this point I believe that I can drop the global SV mutex. */
}
#else
if (SvREADONLY(sv)) {
case PERL_MAGIC_qr:
vtable = &PL_vtbl_regexp;
break;
- case PERL_MAGIC_hints:
- /* As this vtable is all NULL, we can reuse it. */
case PERL_MAGIC_sig:
vtable = &PL_vtbl_sig;
break;
case PERL_MAGIC_hintselem:
vtable = &PL_vtbl_hintselem;
break;
+ case PERL_MAGIC_hints:
+ vtable = &PL_vtbl_hints;
+ break;
case PERL_MAGIC_ext:
/* Reserved for use by extensions not perl internals. */
/* Useful for attaching extension internal data to perl vars. */
SV_CHECK_THINKFIRST_COW_DROP(sv);
if (SvREFCNT(nsv) != 1) {
- Perl_croak(aTHX_ "panic: reference miscount on nsv in sv_replace() (%"
- UVuf " != 1)", (UV) SvREFCNT(nsv));
+ Perl_croak(aTHX_ "panic: reference miscount on nsv in sv_replace()"
+ " (%" UVuf " != 1)", (UV) SvREFCNT(nsv));
}
if (SvMAGICAL(sv)) {
if (SvMAGICAL(nsv))
#ifdef PERL_OLD_COPY_ON_WRITE
else if (SvPVX_const(sv)) {
if (SvIsCOW(sv)) {
- /* I believe I need to grab the global SV mutex here and
- then recheck the COW status. */
if (DEBUG_C_TEST) {
PerlIO_printf(Perl_debug_log, "Copy on write: clear\n");
sv_dump(sv);
unshare_hek(SvSHARED_HEK_FROM_PV(SvPVX_const(sv)));
}
- /* And drop it here. */
SvFAKE_off(sv);
} else if (SvLEN(sv)) {
Safefree(SvPVX_const(sv));
d = SvPVX(sv);
while (isALPHA(*d)) d++;
while (isDIGIT(*d)) d++;
- if (*d) {
+ if (d < SvEND(sv)) {
#ifdef PERL_PRESERVE_IVUV
/* Got to punt this as an integer if needs be, but we don't issue
warnings. Probably ought to make the sv_iv_please() that does
sv_setnv(sv,Atof(SvPVX_const(sv)) - 1.0); /* punt */
}
+/* this define is used to eliminate a chunk of duplicated but shared logic
+ * it has the suffix __SV_C to signal that it isn't API, and isn't meant to be
+ * used anywhere but here - yves
+ */
+#define PUSH_EXTEND_MORTAL__SV_C(AnSv) \
+ STMT_START { \
+ EXTEND_MORTAL(1); \
+ PL_tmps_stack[++PL_tmps_ix] = (AnSv); \
+ } STMT_END
+
/*
=for apidoc sv_mortalcopy
new_SV(sv);
sv_setsv(sv,oldstr);
- EXTEND_MORTAL(1);
- PL_tmps_stack[++PL_tmps_ix] = sv;
+ PUSH_EXTEND_MORTAL__SV_C(sv);
SvTEMP_on(sv);
return sv;
}
new_SV(sv);
SvFLAGS(sv) = SVs_TEMP;
- EXTEND_MORTAL(1);
- PL_tmps_stack[++PL_tmps_ix] = sv;
+ PUSH_EXTEND_MORTAL__SV_C(sv);
return sv;
}
assert(!(flags & ~(SVf_UTF8|SVs_TEMP)));
new_SV(sv);
sv_setpvn(sv,s,len);
- SvFLAGS(sv) |= (flags & SVf_UTF8);
- return (flags & SVs_TEMP) ? sv_2mortal(sv) : sv;
+
+    /* This code used to call sv_2mortal(), however we now unroll the call to sv_2mortal()
+     * and do what it does ourselves here.
+     * Since we have asserted that flags can only have the SVf_UTF8 and/or SVs_TEMP flags
+     * set above we can use it to enable the sv flags directly (bypassing SvTEMP_on), which
+     * in turn means we don't need to mask out the SVf_UTF8 flag below, which means that we
+     * eliminate quite a few more steps than it looks - Yves (explaining patch by gfx)
+ */
+
+ SvFLAGS(sv) |= flags;
+
+ if(flags & SVs_TEMP){
+ PUSH_EXTEND_MORTAL__SV_C(sv);
+ }
+
+ return sv;
}
/*
return NULL;
if (SvREADONLY(sv) && SvIMMORTAL(sv))
return sv;
- EXTEND_MORTAL(1);
- PL_tmps_stack[++PL_tmps_ix] = sv;
+ PUSH_EXTEND_MORTAL__SV_C(sv);
SvTEMP_on(sv);
return sv;
}
if (args) {
eptr = va_arg(*args, char*);
if (eptr)
-#ifdef MACOS_TRADITIONAL
- /* On MacOS, %#s format is used for Pascal strings */
- if (alt)
- elen = *eptr++;
- else
-#endif
elen = strlen(eptr);
else {
eptr = (char *)nullstr;
else {
eptr = SvPV_const(argsv, elen);
if (DO_UTF8(argsv)) {
- I32 old_precis = precis;
+ STRLEN old_precis = precis;
if (has_precis && precis < elen) {
- I32 p = precis;
+ STRLEN ulen = sv_len_utf8(argsv);
+ I32 p = precis > ulen ? ulen : precis;
sv_pos_u2b(argsv, &p, 0); /* sticks at end */
precis = p;
}
}
string:
- if (has_precis && elen > precis)
+ if (has_precis && precis < elen)
elen = precis;
break;
/* Certain cases in Perl_ss_dup have been merged, by relying on the fact
that currently av_dup, gv_dup and hv_dup are the same as sv_dup.
- If this changes, please unmerge ss_dup. */
+ If this changes, please unmerge ss_dup.
+ Likewise, sv_dup_inc_multiple() relies on this fact. */
#define sv_dup_inc(s,t) SvREFCNT_inc(sv_dup(s,t))
#define sv_dup_inc_NN(s,t) SvREFCNT_inc_NN(sv_dup(s,t))
#define av_dup(s,t) MUTABLE_AV(sv_dup((const SV *)s,t))
Copy(proto->nexttype, parser->nexttype, 5, I32);
parser->nexttoke = proto->nexttoke;
#endif
+
+ /* XXX should clone saved_curcop here, but we aren't passed
+ * proto_perl; so do it in perl_clone_using instead */
+
return parser;
}
ptr_table_store(PL_ptr_table, gp, ret);
/* clone */
- ret->gp_refcnt = 0; /* must be before any other dups! */
+ /* ret->gp_refcnt must be 0 before any other dups are called. We're relying
+ on Newxz() to do this for us. */
ret->gp_sv = sv_dup_inc(gp->gp_sv, param);
ret->gp_io = io_dup_inc(gp->gp_io, param);
ret->gp_form = cv_dup_inc(gp->gp_form, param);
MAGIC *
Perl_mg_dup(pTHX_ MAGIC *mg, CLONE_PARAMS *const param)
{
- MAGIC *mgprev = (MAGIC*)NULL;
- MAGIC *mgret;
+ MAGIC *mgret = NULL;
+ MAGIC **mgprev_p = &mgret;
PERL_ARGS_ASSERT_MG_DUP;
- if (!mg)
- return (MAGIC*)NULL;
- /* look for it in the table first */
- mgret = (MAGIC*)ptr_table_fetch(PL_ptr_table, mg);
- if (mgret)
- return mgret;
-
for (; mg; mg = mg->mg_moremagic) {
MAGIC *nmg;
- Newxz(nmg, 1, MAGIC);
- if (mgprev)
- mgprev->mg_moremagic = nmg;
- else
- mgret = nmg;
- nmg->mg_virtual = mg->mg_virtual; /* XXX copy dynamic vtable? */
- nmg->mg_private = mg->mg_private;
- nmg->mg_type = mg->mg_type;
- nmg->mg_flags = mg->mg_flags;
+ Newx(nmg, 1, MAGIC);
+ *mgprev_p = nmg;
+ mgprev_p = &(nmg->mg_moremagic);
+
+ /* There was a comment "XXX copy dynamic vtable?" but as we don't have
+ dynamic vtables, I'm not sure why Sarathy wrote it. The comment dates
+ from the original commit adding Perl_mg_dup() - revision 4538.
+ Similarly there is the annotation "XXX random ptr?" next to the
+ assignment to nmg->mg_ptr. */
+ *nmg = *mg;
+
/* FIXME for plugins
- if (mg->mg_type == PERL_MAGIC_qr) {
- nmg->mg_obj = MUTABLE_SV(CALLREGDUPE((REGEXP*)mg->mg_obj, param));
+ if (nmg->mg_type == PERL_MAGIC_qr) {
+ nmg->mg_obj = MUTABLE_SV(CALLREGDUPE((REGEXP*)nmg->mg_obj, param));
}
else
*/
- if(mg->mg_type == PERL_MAGIC_backref) {
+ if(nmg->mg_type == PERL_MAGIC_backref) {
/* The backref AV has its reference count deliberately bumped by
1. */
nmg->mg_obj
- = SvREFCNT_inc(av_dup_inc((const AV *) mg->mg_obj, param));
+ = SvREFCNT_inc(av_dup_inc((const AV *) nmg->mg_obj, param));
}
else {
- nmg->mg_obj = (mg->mg_flags & MGf_REFCOUNTED)
- ? sv_dup_inc(mg->mg_obj, param)
- : sv_dup(mg->mg_obj, param);
- }
- nmg->mg_len = mg->mg_len;
- nmg->mg_ptr = mg->mg_ptr; /* XXX random ptr? */
- if (mg->mg_ptr && mg->mg_type != PERL_MAGIC_regex_global) {
- if (mg->mg_len > 0) {
- nmg->mg_ptr = SAVEPVN(mg->mg_ptr, mg->mg_len);
- if (mg->mg_type == PERL_MAGIC_overload_table &&
- AMT_AMAGIC((AMT*)mg->mg_ptr))
+ nmg->mg_obj = (nmg->mg_flags & MGf_REFCOUNTED)
+ ? sv_dup_inc(nmg->mg_obj, param)
+ : sv_dup(nmg->mg_obj, param);
+ }
+
+ if (nmg->mg_ptr && nmg->mg_type != PERL_MAGIC_regex_global) {
+ if (nmg->mg_len > 0) {
+ nmg->mg_ptr = SAVEPVN(nmg->mg_ptr, nmg->mg_len);
+ if (nmg->mg_type == PERL_MAGIC_overload_table &&
+ AMT_AMAGIC((AMT*)nmg->mg_ptr))
{
- const AMT * const amtp = (AMT*)mg->mg_ptr;
AMT * const namtp = (AMT*)nmg->mg_ptr;
- I32 i;
- for (i = 1; i < NofAMmeth; i++) {
- namtp->table[i] = cv_dup_inc(amtp->table[i], param);
- }
+ sv_dup_inc_multiple((SV**)(namtp->table),
+ (SV**)(namtp->table), NofAMmeth, param);
}
}
- else if (mg->mg_len == HEf_SVKEY)
- nmg->mg_ptr = (char*)sv_dup_inc((const SV *)mg->mg_ptr, param);
+ else if (nmg->mg_len == HEf_SVKEY)
+ nmg->mg_ptr = (char*)sv_dup_inc((const SV *)nmg->mg_ptr, param);
}
- if ((mg->mg_flags & MGf_DUP) && mg->mg_virtual && mg->mg_virtual->svt_dup) {
+ if ((nmg->mg_flags & MGf_DUP) && nmg->mg_virtual && nmg->mg_virtual->svt_dup) {
CALL_FPTR(nmg->mg_virtual->svt_dup)(aTHX_ nmg, param);
}
- mgprev = nmg;
}
return mgret;
}
PTR_TBL_t *tbl;
PERL_UNUSED_CONTEXT;
- Newxz(tbl, 1, PTR_TBL_t);
+ Newx(tbl, 1, PTR_TBL_t);
tbl->tbl_max = 511;
tbl->tbl_items = 0;
Newxz(tbl->tbl_ary, tbl->tbl_max + 1, PTR_TBL_ENT_t*);
}
}
+/* duplicate a list of SVs. source and dest may point to the same memory. */
+static SV **
+S_sv_dup_inc_multiple(pTHX_ SV *const *source, SV **dest,
+ SSize_t items, CLONE_PARAMS *const param)
+{
+ PERL_ARGS_ASSERT_SV_DUP_INC_MULTIPLE;
+
+ while (items-- > 0) {
+ *dest++ = sv_dup_inc(*source++, param);
+ }
+
+ return dest;
+}
+
/* duplicate an SV of any type (including AV, HV etc) */
SV *
LvTARG(dstr) = sv_dup_inc(LvTARG(dstr), param);
case SVt_PVGV:
if(isGV_with_GP(sstr)) {
- if (GvNAME_HEK(dstr))
- GvNAME_HEK(dstr) = hek_dup(GvNAME_HEK(dstr), param);
+ GvNAME_HEK(dstr) = hek_dup(GvNAME_HEK(dstr), param);
/* Don't call sv_add_backref here as it's going to be
created as part of the magic cloning of the symbol
table. */
AvARRAY(MUTABLE_AV(dstr)) = dst_ary;
AvALLOC((const AV *)dstr) = dst_ary;
if (AvREAL((const AV *)sstr)) {
- while (items-- > 0)
- *dst_ary++ = sv_dup_inc(*src_ary++, param);
+ dst_ary = sv_dup_inc_multiple(src_ary, dst_ary, items,
+ param);
}
else {
while (items-- > 0)
SvFLAGS(dstr) |= SVf_OOK;
hvname = saux->xhv_name;
- daux->xhv_name = hvname ? hek_dup(hvname, param) : hvname;
+ daux->xhv_name = hek_dup(hvname, param);
daux->xhv_riter = saux->xhv_riter;
daux->xhv_eiter = saux->xhv_eiter
PL_parser = parser_dup(proto_perl->Iparser, param);
+ /* XXX this only works if the saved cop has already been cloned */
+ if (proto_perl->Iparser) {
+ PL_parser->saved_curcop = (COP*)any_dup(
+ proto_perl->Iparser->saved_curcop,
+ proto_perl);
+ }
+
PL_subline = proto_perl->Isubline;
PL_subname = sv_dup_inc(proto_perl->Isubname, param);
PL_glob_index = proto_perl->Iglob_index;
PL_srand_called = proto_perl->Isrand_called;
- PL_bitcount = NULL; /* reinits on demand */
if (proto_perl->Ipsig_pend) {
Newxz(PL_psig_pend, SIG_SIZE, int);
PL_psig_pend = (int*)NULL;
}
- if (proto_perl->Ipsig_ptr) {
- Newxz(PL_psig_ptr, SIG_SIZE, SV*);
- Newxz(PL_psig_name, SIG_SIZE, SV*);
- for (i = 1; i < SIG_SIZE; i++) {
- PL_psig_ptr[i] = sv_dup_inc(proto_perl->Ipsig_ptr[i], param);
- PL_psig_name[i] = sv_dup_inc(proto_perl->Ipsig_name[i], param);
- }
+ if (proto_perl->Ipsig_name) {
+ Newx(PL_psig_name, 2 * SIG_SIZE, SV*);
+ sv_dup_inc_multiple(proto_perl->Ipsig_name, PL_psig_name, 2 * SIG_SIZE,
+ param);
+ PL_psig_ptr = PL_psig_name + SIG_SIZE;
}
else {
PL_psig_ptr = (SV**)NULL;
PL_tmps_ix = proto_perl->Itmps_ix;
PL_tmps_max = proto_perl->Itmps_max;
PL_tmps_floor = proto_perl->Itmps_floor;
- Newxz(PL_tmps_stack, PL_tmps_max, SV*);
- i = 0;
- while (i <= PL_tmps_ix) {
- PL_tmps_stack[i] = sv_dup_inc(proto_perl->Itmps_stack[i], param);
- ++i;
- }
+ Newx(PL_tmps_stack, PL_tmps_max, SV*);
+ sv_dup_inc_multiple(proto_perl->Itmps_stack, PL_tmps_stack, PL_tmps_ix,
+ param);
/* next PUSHMARK() sets *(PL_markstack_ptr+1) */
i = proto_perl->Imarkstack_max - proto_perl->Imarkstack;
SV * const nsv = MUTABLE_SV(ptr_table_fetch(PL_ptr_table,
proto_perl->Itmps_stack[i]));
if (nsv && !SvREFCNT(nsv)) {
- EXTEND_MORTAL(1);
- PL_tmps_stack[++PL_tmps_ix] = SvREFCNT_inc_simple(nsv);
+ PUSH_EXTEND_MORTAL__SV_C(SvREFCNT_inc_simple(nsv));
}
}
}
/* Pluggable optimizer */
PL_peepp = proto_perl->Ipeepp;
+ /* op_free() hook */
+ PL_opfreehook = proto_perl->Iopfreehook;
PL_stashcache = newHV();
PL_registered_mros = hv_dup_inc(proto_perl->Iregistered_mros, param);
- if (!(flags & CLONEf_KEEP_PTR_TABLE)) {
- ptr_table_free(PL_ptr_table);
- PL_ptr_table = NULL;
- }
-
/* Call the ->CLONE method, if it exists, for each of the stashes
identified by sv_dup() above.
*/
}
}
+ if (!(flags & CLONEf_KEEP_PTR_TABLE)) {
+ ptr_table_free(PL_ptr_table);
+ PL_ptr_table = NULL;
+ }
+
+
SvREFCNT_dec(param->stashes);
/* orphaned? eg threads->new inside BEGIN or use */
Need a better fix at dome point. DAPM 11/2007 */
break;
+ case OP_FLIP:
+ case OP_FLOP:
+ {
+ GV * const gv = gv_fetchpvs(".", GV_NOTQUAL, SVt_PV);
+ if (gv && GvSV(gv) == uninit_sv)
+ return newSVpvs_flags("$.", SVs_TEMP);
+ goto do_op;
+ }
case OP_POS:
/* def-ness of rval pos() is independent of the def-ness of its arg */