At the time of very final cleanup, sv_free_arenas() is called from
perl_destruct() to physically free all the arenas allocated since the
-start of the interpreter. Note that this also clears PL_he_arenaroot,
-which is otherwise dealt with in hv.c.
+start of the interpreter.
Manipulation of any of the PL_*root pointers is protected by enclosing
LOCK_SV_MUTEX; ... UNLOCK_SV_MUTEX calls which should Do the Right Thing
PL_body_roots[i] = 0;
}
- free_arena(he);
-
Safefree(PL_nice_chunk);
PL_nice_chunk = Nullch;
PL_nice_chunk_size = 0;
struct body_details {
size_t size; /* Size to allocate */
size_t copy; /* Size of structure to copy (may be shorter) */
- int offset;
+ size_t offset;
bool cant_upgrade; /* Can upgrade this type */
bool zero_nv; /* zero the NV when upgrading from this */
bool arena; /* Allocated from an arena */
#define HADNV FALSE
#define NONV TRUE
+#ifdef PURIFY
+/* With -DPURIFY we allocate everything directly, and don't use arenas.
+ This seems a rather elegant way to simplify some of the code below. */
+#define HASARENA FALSE
+#else
#define HASARENA TRUE
+#endif
#define NOARENA FALSE
+/* A macro to work out the offset needed to subtract from a pointer to (say)
+
+typedef struct {
+ STRLEN xpv_cur;
+ STRLEN xpv_len;
+} xpv_allocated;
+
+to make its members accessible via a pointer to (say)
+
+struct xpv {
+ NV xnv_nv;
+ STRLEN xpv_cur;
+ STRLEN xpv_len;
+};
+
+*/
+
+#define relative_STRUCT_OFFSET(longer, shorter, member) \
+ (STRUCT_OFFSET(shorter, member) - STRUCT_OFFSET(longer, member))
+
+/* Calculate the length to copy. Specifically work out the length less any
+ final padding the compiler needed to add. See the comment in sv_upgrade
+ for why copying the padding proved to be a bug. */
+
+#define copy_length(type, last_member) \
+ STRUCT_OFFSET(type, last_member) \
+ + sizeof (((type*)SvANY((SV*)0))->last_member)
+
static const struct body_details bodies_by_type[] = {
{0, 0, 0, FALSE, NONV, NOARENA},
/* IVs are in the head, so the allocation size is 0 */
- {0, sizeof(IV), -STRUCT_OFFSET(XPVIV, xiv_iv), FALSE, NONV, NOARENA},
+ {0, sizeof(IV), STRUCT_OFFSET(XPVIV, xiv_iv), FALSE, NONV, NOARENA},
/* 8 bytes on most ILP32 with IEEE doubles */
{sizeof(NV), sizeof(NV), 0, FALSE, HADNV, HASARENA},
/* RVs are in the head now */
{0, 0, 0, FALSE, NONV, NOARENA},
/* 8 bytes on most ILP32 with IEEE doubles */
{sizeof(xpv_allocated),
- STRUCT_OFFSET(XPV, xpv_len) + sizeof (((XPV*)SvANY((SV*)0))->xpv_len)
- + STRUCT_OFFSET(xpv_allocated, xpv_cur) - STRUCT_OFFSET(XPV, xpv_cur),
- + STRUCT_OFFSET(xpv_allocated, xpv_cur) - STRUCT_OFFSET(XPV, xpv_cur)
- , FALSE, NONV, HASARENA},
+ copy_length(XPV, xpv_len)
+ + relative_STRUCT_OFFSET(XPV, xpv_allocated, xpv_cur),
+ - relative_STRUCT_OFFSET(XPV, xpv_allocated, xpv_cur),
+ FALSE, NONV, HASARENA},
/* 12 */
{sizeof(xpviv_allocated),
- STRUCT_OFFSET(XPVIV, xiv_u) + sizeof (((XPVIV*)SvANY((SV*)0))->xiv_u)
- + STRUCT_OFFSET(xpviv_allocated, xpv_cur) - STRUCT_OFFSET(XPVIV, xpv_cur),
- + STRUCT_OFFSET(xpviv_allocated, xpv_cur) - STRUCT_OFFSET(XPVIV, xpv_cur)
- , FALSE, NONV, HASARENA},
+ copy_length(XPVIV, xiv_u)
+ + relative_STRUCT_OFFSET(XPVIV, xpviv_allocated, xpv_cur),
+ - relative_STRUCT_OFFSET(XPVIV, xpviv_allocated, xpv_cur),
+ FALSE, NONV, HASARENA},
/* 20 */
- {sizeof(XPVNV),
- STRUCT_OFFSET(XPVNV, xiv_u) + sizeof (((XPVNV*)SvANY((SV*)0))->xiv_u),
- 0, FALSE, HADNV, HASARENA},
+ {sizeof(XPVNV), copy_length(XPVNV, xiv_u), 0, FALSE, HADNV, HASARENA},
/* 28 */
- {sizeof(XPVMG),
- STRUCT_OFFSET(XPVMG, xmg_stash) + sizeof (((XPVMG*)SvANY((SV*)0))->xmg_stash),
- 0, FALSE, HADNV, HASARENA},
+ {sizeof(XPVMG), copy_length(XPVMG, xmg_stash), 0, FALSE, HADNV, HASARENA},
/* 36 */
{sizeof(XPVBM), sizeof(XPVBM), 0, TRUE, HADNV, HASARENA},
/* 48 */
/* 64 */
{sizeof(XPVLV), sizeof(XPVLV), 0, TRUE, HADNV, HASARENA},
/* 20 */
- {sizeof(xpvav_allocated), sizeof(xpvav_allocated),
- STRUCT_OFFSET(xpvav_allocated, xav_fill)
- - STRUCT_OFFSET(XPVAV, xav_fill), TRUE, HADNV, HASARENA},
+ {sizeof(xpvav_allocated),
+ copy_length(XPVAV, xmg_stash)
+ + relative_STRUCT_OFFSET(XPVAV, xpvav_allocated, xav_fill),
+ - relative_STRUCT_OFFSET(XPVAV, xpvav_allocated, xav_fill),
+ TRUE, HADNV, HASARENA},
/* 20 */
- {sizeof(xpvhv_allocated), sizeof(xpvhv_allocated),
- STRUCT_OFFSET(xpvhv_allocated, xhv_fill)
- - STRUCT_OFFSET(XPVHV, xhv_fill), TRUE, HADNV, HASARENA},
+ {sizeof(xpvhv_allocated),
+ copy_length(XPVHV, xmg_stash)
+ + relative_STRUCT_OFFSET(XPVHV, xpvhv_allocated, xhv_fill),
+ - relative_STRUCT_OFFSET(XPVHV, xpvhv_allocated, xhv_fill),
+ TRUE, HADNV, HASARENA},
/* 76 */
{sizeof(XPVCV), sizeof(XPVCV), 0, TRUE, HADNV, HASARENA},
/* 80 */
#define new_body_type(sv_type) \
(void *)((char *)S_new_body(aTHX_ bodies_by_type[sv_type].size, sv_type)\
- + bodies_by_type[sv_type].offset)
+ - bodies_by_type[sv_type].offset)
#define del_body_type(p, sv_type) \
del_body(p, &PL_body_roots[sv_type])
#define new_body_allocated(sv_type) \
(void *)((char *)S_new_body(aTHX_ bodies_by_type[sv_type].size, sv_type)\
- + bodies_by_type[sv_type].offset)
+ - bodies_by_type[sv_type].offset)
#define del_body_allocated(p, sv_type) \
- del_body(p - bodies_by_type[sv_type].offset, &PL_body_roots[sv_type])
+ del_body(p + bodies_by_type[sv_type].offset, &PL_body_roots[sv_type])
#define my_safemalloc(s) (void*)safemalloc(s)
#define new_XNV() my_safemalloc(sizeof(XPVNV))
#define del_XNV(p) my_safefree(p)
-#define new_XPV() my_safemalloc(sizeof(XPV))
-#define del_XPV(p) my_safefree(p)
-
-#define new_XPVIV() my_safemalloc(sizeof(XPVIV))
-#define del_XPVIV(p) my_safefree(p)
-
#define new_XPVNV() my_safemalloc(sizeof(XPVNV))
#define del_XPVNV(p) my_safefree(p)
-#define new_XPVCV() my_safemalloc(sizeof(XPVCV))
-#define del_XPVCV(p) my_safefree(p)
-
#define new_XPVAV() my_safemalloc(sizeof(XPVAV))
#define del_XPVAV(p) my_safefree(p)
#define new_XPVGV() my_safemalloc(sizeof(XPVGV))
#define del_XPVGV(p) my_safefree(p)
-#define new_XPVLV() my_safemalloc(sizeof(XPVLV))
-#define del_XPVLV(p) my_safefree(p)
-
-#define new_XPVBM() my_safemalloc(sizeof(XPVBM))
-#define del_XPVBM(p) my_safefree(p)
-
#else /* !PURIFY */
#define new_XNV() new_body_type(SVt_NV)
#define del_XNV(p) del_body_type(p, SVt_NV)
-#define new_XPV() new_body_allocated(SVt_PV)
-#define del_XPV(p) del_body_allocated(p, SVt_PV)
-
-#define new_XPVIV() new_body_allocated(SVt_PVIV)
-#define del_XPVIV(p) del_body_allocated(p, SVt_PVIV)
-
#define new_XPVNV() new_body_type(SVt_PVNV)
#define del_XPVNV(p) del_body_type(p, SVt_PVNV)
-#define new_XPVCV() new_body_type(SVt_PVCV)
-#define del_XPVCV(p) del_body_type(p, SVt_PVCV)
-
#define new_XPVAV() new_body_allocated(SVt_PVAV)
#define del_XPVAV(p) del_body_allocated(p, SVt_PVAV)
#define new_XPVGV() new_body_type(SVt_PVGV)
#define del_XPVGV(p) del_body_type(p, SVt_PVGV)
-#define new_XPVLV() new_body_type(SVt_PVLV)
-#define del_XPVLV(p) del_body_type(p, SVt_PVLV)
-
-#define new_XPVBM() new_body_type(SVt_PVBM)
-#define del_XPVBM(p) del_body_type(p, SVt_PVBM)
-
#endif /* PURIFY */
/* no arena for you! */
-#define new_NOARENA(s) my_safecalloc(s)
-
-#define new_XPVFM() my_safemalloc(sizeof(XPVFM))
-#define del_XPVFM(p) my_safefree(p)
-
-#define new_XPVIO() my_safemalloc(sizeof(XPVIO))
-#define del_XPVIO(p) my_safefree(p)
-
-
+#define new_NOARENA(details) \
+ my_safemalloc((details)->size + (details)->offset)
+#define new_NOARENAZ(details) \
+ my_safecalloc((details)->size + (details)->offset)
/*
=for apidoc sv_upgrade
{
void* old_body;
void* new_body;
- size_t new_body_length;
- size_t new_body_offset;
const U32 old_type = SvTYPE(sv);
const struct body_details *const old_type_details
= bodies_by_type + old_type;
old_body = SvANY(sv);
- new_body_offset = 0;
- new_body_length = ~0;
/* Copying structures onto other structures that have been neatly zeroed
has a subtle gotcha. Consider XPVMG
}
break;
+
+ case SVt_PVIV:
+ /* XXX Is this still needed? Was it ever needed? Surely as there is
+ no route from NV to PVIV, NOK can never be true */
+ assert(!SvNOKp(sv));
+ assert(!SvNOK(sv));
case SVt_PVIO:
case SVt_PVFM:
- new_body = new_NOARENA(new_type_details->size);
- new_body_length = new_type_details->copy;
- goto post_zero;
-
case SVt_PVBM:
case SVt_PVGV:
case SVt_PVCV:
case SVt_PVLV:
case SVt_PVMG:
case SVt_PVNV:
- new_body_length = bodies_by_type[new_type].size;
- goto new_body;
-
- case SVt_PVIV:
- new_body_offset = - bodies_by_type[SVt_PVIV].offset;
- new_body_length = sizeof(XPVIV) - new_body_offset;
- /* XXX Is this still needed? Was it ever needed? Surely as there is
- no route from NV to PVIV, NOK can never be true */
- assert(!SvNOKp(sv));
- assert(!SvNOK(sv));
- goto new_body_no_NV;
case SVt_PV:
- new_body_offset = - bodies_by_type[SVt_PV].offset;
- new_body_length = sizeof(XPV) - new_body_offset;
- new_body_no_NV:
- /* PV and PVIV don't have an NV slot. */
- new_body:
- assert(new_body_length);
-#ifndef PURIFY
- /* This points to the start of the allocated area. */
- new_body_inline(new_body, new_body_length, new_type);
-#else
- /* We always allocated the full length item with PURIFY */
- new_body_length += new_body_offset;
- new_body_offset = 0;
- new_body = my_safemalloc(new_body_length);
-
-#endif
- Zero(new_body, new_body_length, char);
- post_zero:
- new_body = ((char *)new_body) - new_body_offset;
+ assert(new_type_details->size);
+ /* We always allocated the full length item with PURIFY. To do this
+ we fake things so that arena is false for all 16 types. */
+ if(new_type_details->arena) {
+ /* This points to the start of the allocated area. */
+ new_body_inline(new_body, new_type_details->size, new_type);
+ Zero(new_body, new_type_details->size, char);
+ new_body = ((char *)new_body) - new_type_details->offset;
+ } else {
+ new_body = new_NOARENAZ(new_type_details);
+ }
SvANY(sv) = new_body;
if (old_type_details->copy) {
- Copy((char *)old_body - old_type_details->offset,
- (char *)new_body - old_type_details->offset,
+ Copy((char *)old_body + old_type_details->offset,
+ (char *)new_body + old_type_details->offset,
old_type_details->copy, char);
}
#ifdef PURIFY
my_safefree(old_body);
#else
- del_body((void*)((char*)old_body - old_type_details->offset),
+ del_body((void*)((char*)old_body + old_type_details->offset),
&PL_body_roots[old_type]);
#endif
}
Perl_sv_clear(pTHX_ register SV *sv)
{
dVAR;
- void** old_body_arena;
- size_t old_body_offset;
const U32 type = SvTYPE(sv);
+ const struct body_details *const sv_type_details
+ = bodies_by_type + type;
assert(sv);
assert(SvREFCNT(sv) == 0);
if (type <= SVt_IV)
return;
- old_body_arena = 0;
- old_body_offset = 0;
-
if (SvOBJECT(sv)) {
if (PL_defstash) { /* Still have a symbol table? */
dSP;
Safefree(IoTOP_NAME(sv));
Safefree(IoFMT_NAME(sv));
Safefree(IoBOTTOM_NAME(sv));
- /* PVIOs aren't from arenas */
goto freescalar;
case SVt_PVBM:
- old_body_arena = &PL_body_roots[SVt_PVBM];
goto freescalar;
case SVt_PVCV:
- old_body_arena = &PL_body_roots[SVt_PVCV];
case SVt_PVFM:
- /* PVFMs aren't from arenas */
cv_undef((CV*)sv);
goto freescalar;
case SVt_PVHV:
hv_undef((HV*)sv);
- old_body_arena = &PL_body_roots[SVt_PVHV];
- old_body_offset = STRUCT_OFFSET(XPVHV, xhv_fill);
break;
case SVt_PVAV:
av_undef((AV*)sv);
- old_body_arena = &PL_body_roots[SVt_PVAV];
- old_body_offset = STRUCT_OFFSET(XPVAV, xav_fill);
break;
case SVt_PVLV:
if (LvTYPE(sv) == 'T') { /* for tie: return HE to pool */
}
else if (LvTYPE(sv) != 't') /* unless tie: unrefcnted fake SV** */
SvREFCNT_dec(LvTARG(sv));
- old_body_arena = &PL_body_roots[SVt_PVLV];
goto freescalar;
case SVt_PVGV:
gp_free((GV*)sv);
have a back reference to us, which needs to be cleared. */
if (GvSTASH(sv))
sv_del_backref((SV*)GvSTASH(sv), sv);
- old_body_arena = &PL_body_roots[SVt_PVGV];
- goto freescalar;
case SVt_PVMG:
- old_body_arena = &PL_body_roots[SVt_PVMG];
- goto freescalar;
case SVt_PVNV:
- old_body_arena = &PL_body_roots[SVt_PVNV];
- goto freescalar;
case SVt_PVIV:
- old_body_arena = &PL_body_roots[SVt_PVIV];
- old_body_offset = STRUCT_OFFSET(XPVIV, xpv_cur);
freescalar:
/* Don't bother with SvOOK_off(sv); as we're only going to free it. */
if (SvOOK(sv)) {
SvPV_set(sv, SvPVX_mutable(sv) - SvIVX(sv));
/* Don't even bother with turning off the OOK flag. */
}
- goto pvrv_common;
case SVt_PV:
- old_body_arena = &PL_body_roots[SVt_PV];
- old_body_offset = STRUCT_OFFSET(XPV, xpv_cur);
case SVt_RV:
- pvrv_common:
if (SvROK(sv)) {
SV *target = SvRV(sv);
if (SvWEAKREF(sv))
#endif
break;
case SVt_NV:
- old_body_arena = PL_body_roots[SVt_NV];
break;
}
SvFLAGS(sv) &= SVf_BREAK;
SvFLAGS(sv) |= SVTYPEMASK;
-#ifndef PURIFY
- if (old_body_arena) {
- del_body(((char *)SvANY(sv) + old_body_offset), old_body_arena);
+ if (sv_type_details->arena) {
+ del_body(((char *)SvANY(sv) + sv_type_details->offset),
+ &PL_body_roots[type]);
+ }
+ else if (sv_type_details->size) {
+ my_safefree(SvANY(sv));
}
- else
-#endif
- if (type > SVt_RV) {
- my_safefree(SvANY(sv));
- }
}
/*
default:
{
/* These are all the types that need complex bodies allocating. */
- size_t new_body_length;
- size_t new_body_offset = 0;
void *new_body;
- svtype sv_type = SvTYPE(sstr);
+ const svtype sv_type = SvTYPE(sstr);
+ const struct body_details *const sv_type_details
+ = bodies_by_type + sv_type;
switch (sv_type) {
default:
(IV)SvTYPE(sstr));
break;
- case SVt_PVIO:
- new_body = new_XPVIO();
- new_body_length = sizeof(XPVIO);
- break;
- case SVt_PVFM:
- new_body = new_XPVFM();
- new_body_length = sizeof(XPVFM);
- break;
-
- case SVt_PVHV:
- new_body_offset = - bodies_by_type[SVt_PVHV].offset;
-
- new_body_length = STRUCT_OFFSET(XPVHV, xmg_stash)
- + sizeof (((XPVHV*)SvANY(sstr))->xmg_stash)
- - new_body_offset;
- goto new_body;
- case SVt_PVAV:
- new_body_offset = - bodies_by_type[SVt_PVAV].offset;
-
- new_body_length = STRUCT_OFFSET(XPVHV, xmg_stash)
- + sizeof (((XPVHV*)SvANY(sstr))->xmg_stash)
- - new_body_offset;
- goto new_body;
case SVt_PVGV:
if (GvUNIQUE((GV*)sstr)) {
/* Do sharing here, and fall through */
}
+ case SVt_PVIO:
+ case SVt_PVFM:
+ case SVt_PVHV:
+ case SVt_PVAV:
case SVt_PVBM:
case SVt_PVCV:
case SVt_PVLV:
case SVt_PVMG:
case SVt_PVNV:
- new_body_length = bodies_by_type[sv_type].size;
- goto new_body;
-
case SVt_PVIV:
- new_body_offset = - bodies_by_type[SVt_PVIV].offset;
- new_body_length = sizeof(XPVIV) - new_body_offset;
- goto new_body;
case SVt_PV:
- new_body_offset = - bodies_by_type[SVt_PV].offset;
- new_body_length = sizeof(XPV) - new_body_offset;
- new_body:
- assert(new_body_length);
-#ifndef PURIFY
- new_body_inline(new_body, new_body_length, SvTYPE(sstr));
-
- new_body = (void*)((char*)new_body - new_body_offset);
-#else
- /* We always allocated the full length item with PURIFY */
- new_body_length += new_body_offset;
- new_body_offset = 0;
- new_body = my_safemalloc(new_body_length);
-#endif
+ assert(sv_type_details->copy);
+ if (sv_type_details->arena) {
+ new_body_inline(new_body, sv_type_details->copy, sv_type);
+ new_body
+ = (void*)((char*)new_body - sv_type_details->offset);
+ } else {
+ new_body = new_NOARENA(sv_type_details);
+ }
}
assert(new_body);
SvANY(dstr) = new_body;
- Copy(((char*)SvANY(sstr)) + new_body_offset,
- ((char*)SvANY(dstr)) + new_body_offset,
- new_body_length, char);
+#ifndef PURIFY
+ Copy(((char*)SvANY(sstr)) + sv_type_details->offset,
+ ((char*)SvANY(dstr)) + sv_type_details->offset,
+ sv_type_details->copy, char);
+#else
+ Copy(((char*)SvANY(sstr)),
+ ((char*)SvANY(dstr)),
+ sv_type_details->size + sv_type_details->offset, char);
+#endif
- if (SvTYPE(sstr) != SVt_PVAV && SvTYPE(sstr) != SVt_PVHV)
+ if (sv_type != SVt_PVAV && sv_type != SVt_PVHV)
Perl_rvpv_dup(aTHX_ dstr, sstr, param);
/* The Copy above means that all the source (unduplicated) pointers
pointers in either, but it's possible that there's less cache
missing by always going for the destination.
FIXME - instrument and check that assumption */
- if (SvTYPE(sstr) >= SVt_PVMG) {
+ if (sv_type >= SVt_PVMG) {
if (SvMAGIC(dstr))
SvMAGIC_set(dstr, mg_dup(SvMAGIC(dstr), param));
if (SvSTASH(dstr))
SvSTASH_set(dstr, hv_dup_inc(SvSTASH(dstr), param));
}
- switch (SvTYPE(sstr)) {
+ /* The cast silences a GCC warning about unhandled types. */
+ switch ((int)sv_type) {
case SVt_PV:
break;
case SVt_PVIV:
Zero(&PL_body_arenaroots, 1, PL_body_arenaroots);
Zero(&PL_body_roots, 1, PL_body_roots);
- PL_he_arenaroot = NULL;
- PL_he_root = NULL;
-
PL_nice_chunk = NULL;
PL_nice_chunk_size = 0;
PL_sv_count = 0;