{
dVAR;
XPVHV* xhv;
- U32 n_links;
HE *entry;
HE **oentry;
SV *sv;
#ifdef DYNAMIC_ENV_FETCH /* if it's an %ENV lookup, we may get it on the fly */
|| (SvRMAGICAL((SV*)hv) && mg_find((SV*)hv, PERL_MAGIC_env))
#endif
- )
- Newz(503, HvARRAY(hv),
+ ) {
+ char *array;
+ Newz(503, array,
PERL_HV_ARRAY_ALLOC_BYTES(xhv->xhv_max+1 /* HvMAX(hv)+1 */),
- HE*);
+ char);
+ HvARRAY(hv) = (HE**)array;
+ }
#ifdef DYNAMIC_ENV_FETCH
else if (action & HV_FETCH_ISEXISTS) {
/* for an %ENV exists, if we do an insert it's by a recursive
}
masked_flags = (flags & HVhek_MASK);
- n_links = 0;
#ifdef DYNAMIC_ENV_FETCH
if (!HvARRAY(hv)) entry = Null(HE*);
{
entry = (HvARRAY(hv))[hash & (I32) HvMAX(hv)];
}
- for (; entry; ++n_links, entry = HeNEXT(entry)) {
+ for (; entry; entry = HeNEXT(entry)) {
if (HeHASH(entry) != hash) /* strings can't be equal */
continue;
if (HeKLEN(entry) != (I32)klen)
/* Not sure if we can get here. I think the only case of oentry being
NULL is for %ENV with dynamic env fetch. But that should disappear
with magic in the previous code. */
- Newz(503, HvARRAY(hv),
+ char *array;
+ Newz(503, array,
PERL_HV_ARRAY_ALLOC_BYTES(xhv->xhv_max+1 /* HvMAX(hv)+1 */),
- HE*);
+ char);
+ HvARRAY(hv) = (HE**)array;
}
oentry = &(HvARRAY(hv))[hash & (I32) xhv->xhv_max];
if (masked_flags & HVhek_ENABLEHVKFLAGS)
HvHASKFLAGS_on(hv);
- xhv->xhv_keys++; /* HvKEYS(hv)++ */
- if (!n_links) { /* initial entry? */
- xhv->xhv_fill++; /* HvFILL(hv)++ */
- } else if ((xhv->xhv_keys > (IV)xhv->xhv_max)
- || ((n_links > HV_MAX_LENGTH_BEFORE_SPLIT) && !HvREHASH(hv))) {
- /* Use only the old HvKEYS(hv) > HvMAX(hv) condition to limit bucket
- splits on a rehashed hash, as we're not going to split it again,
- and if someone is lucky (evil) enough to get all the keys in one
- list they could exhaust our memory as we repeatedly double the
- number of buckets on every entry. Linear search feels a less worse
- thing to do. */
- hsplit(hv);
+ {
+ const HE *counter = HeNEXT(entry);
+
+ xhv->xhv_keys++; /* HvKEYS(hv)++ */
+ if (!counter) { /* initial entry? */
+ xhv->xhv_fill++; /* HvFILL(hv)++ */
+ } else if (xhv->xhv_keys > (IV)xhv->xhv_max) {
+ hsplit(hv);
+ } else if(!HvREHASH(hv)) {
+ U32 n_links = 1;
+
+ while ((counter = HeNEXT(counter)))
+ n_links++;
+
+ if (n_links > HV_MAX_LENGTH_BEFORE_SPLIT) {
+ /* Use only the old HvKEYS(hv) > HvMAX(hv) condition to limit
+ bucket splits on a rehashed hash, as we're not going to
+ split it again, and if someone is lucky (evil) enough to
+ get all the keys in one list they could exhaust our memory
+ as we repeatedly double the number of buckets on every
+ entry. Linear search feels a less worse thing to do. */
+ hsplit(hv);
+ }
+ }
}
return entry;
{
dVAR;
register XPVHV* xhv;
- register I32 i;
register HE *entry;
register HE **oentry;
+ HE *const *first_entry;
SV *sv;
bool is_utf8;
int masked_flags;
masked_flags = (k_flags & HVhek_MASK);
- oentry = &(HvARRAY(hv))[hash & (I32) HvMAX(hv)];
+ first_entry = oentry = &(HvARRAY(hv))[hash & (I32) HvMAX(hv)];
entry = *oentry;
- i = 1;
- for (; entry; i=0, oentry = &HeNEXT(entry), entry = *oentry) {
+ for (; entry; oentry = &HeNEXT(entry), entry = *oentry) {
if (HeHASH(entry) != hash) /* strings can't be equal */
continue;
if (HeKLEN(entry) != (I32)klen)
HvPLACEHOLDERS(hv)++;
} else {
*oentry = HeNEXT(entry);
- if (i && !*oentry)
+ if(!*first_entry) {
xhv->xhv_fill--; /* HvFILL(hv)-- */
- if (xhv->xhv_aux && entry
- == ((struct xpvhv_aux *)xhv->xhv_aux)->xhv_eiter /* HvEITER(hv) */)
+ }
+ if (SvOOK(hv) && entry == HvAUX(hv)->xhv_eiter /* HvEITER(hv) */)
HvLAZYDEL_on(hv);
else
hv_free_ent(hv, entry);
/*PerlIO_printf(PerlIO_stderr(), "hsplit called for %p which had %d\n",
hv, (int) oldsize);*/
- if (HvPLACEHOLDERS(hv) && !SvREADONLY(hv)) {
+ if (HvPLACEHOLDERS_get(hv) && !SvREADONLY(hv)) {
/* Can make this clear any placeholders first for non-restricted hashes,
even though Storable rebuilds restricted hashes by putting in all the
placeholders (first) before turning on the readonly flag, because
PL_nomemok = TRUE;
#if defined(STRANGE_MALLOC) || defined(MYMALLOC)
- Renew(a, PERL_HV_ARRAY_ALLOC_BYTES(newsize), char);
+ Renew(a, PERL_HV_ARRAY_ALLOC_BYTES(newsize)
+ + (SvOOK(hv) ? sizeof(struct xpvhv_aux) : 0), char);
if (!a) {
PL_nomemok = FALSE;
return;
}
+ if (SvOOK(hv)) {
+ Copy(HvAUX(hv), &a[newsize * sizeof(HE*)], 1, struct xpvhv_aux);
+ }
#else
- New(2, a, PERL_HV_ARRAY_ALLOC_BYTES(newsize), char);
+ New(2, a, PERL_HV_ARRAY_ALLOC_BYTES(newsize)
+ + (SvOOK(hv) ? sizeof(struct xpvhv_aux) : 0), char);
if (!a) {
PL_nomemok = FALSE;
return;
}
Copy(HvARRAY(hv), a, oldsize * sizeof(HE*), char);
+ if (SvOOK(hv)) {
+ Copy(HvAUX(hv), &a[newsize * sizeof(HE*)], 1, struct xpvhv_aux);
+ }
if (oldsize >= 64) {
offer_nice_chunk(HvARRAY(hv),
- PERL_HV_ARRAY_ALLOC_BYTES(oldsize));
+ PERL_HV_ARRAY_ALLOC_BYTES(oldsize)
+ + (SvOOK(hv) ? sizeof(struct xpvhv_aux) : 0));
}
else
Safefree(HvARRAY(hv));
longest_chain, HvTOTALKEYS(hv), HvFILL(hv), 1+HvMAX(hv));*/
++newsize;
- Newz(2, a, PERL_HV_ARRAY_ALLOC_BYTES(newsize), char);
+ Newz(2, a, PERL_HV_ARRAY_ALLOC_BYTES(newsize)
+ + (SvOOK(hv) ? sizeof(struct xpvhv_aux) : 0), char);
+ if (SvOOK(hv)) {
+ Copy(HvAUX(hv), &a[newsize * sizeof(HE*)], 1, struct xpvhv_aux);
+ }
+
was_shared = HvSHAREKEYS(hv);
xhv->xhv_fill = 0;
if (a) {
PL_nomemok = TRUE;
#if defined(STRANGE_MALLOC) || defined(MYMALLOC)
- Renew(a, PERL_HV_ARRAY_ALLOC_BYTES(newsize), char);
+ Renew(a, PERL_HV_ARRAY_ALLOC_BYTES(newsize)
+ + (SvOOK(hv) ? sizeof(struct xpvhv_aux) : 0), char);
if (!a) {
PL_nomemok = FALSE;
return;
}
+ if (SvOOK(hv)) {
+ Copy(HvAUX(hv), &a[newsize * sizeof(HE*)], 1, struct xpvhv_aux);
+ }
#else
- New(2, a, PERL_HV_ARRAY_ALLOC_BYTES(newsize), char);
+ New(2, a, PERL_HV_ARRAY_ALLOC_BYTES(newsize)
+ + (SvOOK(hv) ? sizeof(struct xpvhv_aux) : 0), char);
if (!a) {
PL_nomemok = FALSE;
return;
}
Copy(HvARRAY(hv), a, oldsize * sizeof(HE*), char);
+ if (SvOOK(hv)) {
+ Copy(HvAUX(hv), &a[newsize * sizeof(HE*)], 1, struct xpvhv_aux);
+ }
if (oldsize >= 64) {
offer_nice_chunk(HvARRAY(hv),
- PERL_HV_ARRAY_ALLOC_BYTES(oldsize));
+ PERL_HV_ARRAY_ALLOC_BYTES(oldsize)
+ + (SvOOK(hv) ? sizeof(struct xpvhv_aux) : 0));
}
else
Safefree(HvARRAY(hv));
xhv->xhv_max = 7; /* HvMAX(hv) = 7 (start with 8 buckets) */
xhv->xhv_fill = 0; /* HvFILL(hv) = 0 */
- xhv->xhv_aux = 0;
return hv;
}
HvHASKFLAGS_off(hv);
HvREHASH_off(hv);
reset:
- if (xhv->xhv_aux) {
+ if (SvOOK(hv)) {
HvEITER_set(hv, NULL);
}
}
Perl_hv_clear_placeholders(pTHX_ HV *hv)
{
dVAR;
- I32 items = (I32)HvPLACEHOLDERS(hv);
+ I32 items = (I32)HvPLACEHOLDERS_get(hv);
I32 i = HvMAX(hv);
if (items == 0)
if (--items == 0) {
/* Finished. */
- HvTOTALKEYS(hv) -= (IV)HvPLACEHOLDERS(hv);
+ HvTOTALKEYS(hv) -= (IV)HvPLACEHOLDERS_get(hv);
if (HvKEYS(hv) == 0)
HvHASKFLAGS_off(hv);
- HvPLACEHOLDERS(hv) = 0;
+ HvPLACEHOLDERS_set(hv, 0);
return;
}
} else {
I32 riter;
I32 max;
struct xpvhv_aux *iter;
-
if (!hv)
return;
if (!HvARRAY(hv))
return;
+ iter = SvOOK(hv) ? HvAUX(hv) : 0;
+
riter = 0;
max = HvMAX(hv);
array = HvARRAY(hv);
/* make everyone else think the array is empty, so that the destructors
* called for freed entries can't recusively mess with us */
HvARRAY(hv) = Null(HE**);
+ SvFLAGS(hv) &= ~SVf_OOK;
+
HvFILL(hv) = 0;
((XPVHV*) SvANY(hv))->xhv_keys = 0;
entry = array[riter];
}
}
- HvARRAY(hv) = array;
- iter = ((XPVHV*) SvANY(hv))->xhv_aux;
+ if (SvOOK(hv)) {
+ /* Someone attempted to iterate or set the hash name while we had
+ the array set to 0. */
+ assert(HvARRAY(hv));
+
+ if (HvAUX(hv)->xhv_name)
+ unshare_hek_or_pvn(HvAUX(hv)->xhv_name, 0, 0, 0);
+ /* SvOOK_off calls sv_backoff, which isn't correct. */
+
+ Safefree(HvARRAY(hv));
+ HvARRAY(hv) = 0;
+ SvFLAGS(hv) &= ~SVf_OOK;
+ }
+
+ /* FIXME - things will still go horribly wrong (or at least leak) if
+ people attempt to add elements to the hash while we're undef()ing it */
if (iter) {
entry = iter->xhv_eiter; /* HvEITER(hv) */
if (entry && HvLAZYDEL(hv)) { /* was deleted earlier? */
HvLAZYDEL_off(hv);
hv_free_ent(hv, entry);
}
- if (iter->xhv_name)
- unshare_hek_or_pvn(iter->xhv_name, 0, 0, 0);
- Safefree(iter);
- ((XPVHV*) SvANY(hv))->xhv_aux = 0;
+ iter->xhv_riter = -1; /* HvRITER(hv) = -1 */
+ iter->xhv_eiter = Null(HE*); /* HvEITER(hv) = Null(HE*) */
+ SvFLAGS(hv) |= SVf_OOK;
}
+
+ HvARRAY(hv) = array;
}
/*
DEBUG_A(Perl_hv_assert(aTHX_ hv));
xhv = (XPVHV*)SvANY(hv);
hfreeentries(hv);
- Safefree(HvARRAY(hv));
if ((name = HvNAME_get(hv))) {
if(PL_stashcache)
hv_delete(PL_stashcache, name, HvNAMELEN_get(hv), G_DISCARD);
Perl_hv_name_set(aTHX_ hv, 0, 0, 0);
}
+ SvFLAGS(hv) &= ~SVf_OOK;
+ Safefree(HvARRAY(hv));
xhv->xhv_max = 7; /* HvMAX(hv) = 7 (it's a normal hash) */
HvARRAY(hv) = 0;
HvPLACEHOLDERS_set(hv, 0);
}
struct xpvhv_aux*
-S_hv_auxinit(pTHX) {
+S_hv_auxinit(pTHX_ HV *hv) {
struct xpvhv_aux *iter;
+ char *array;
- New(0, iter, 1, struct xpvhv_aux);
+ if (!HvARRAY(hv)) {
+ Newz(0, array, PERL_HV_ARRAY_ALLOC_BYTES(HvMAX(hv) + 1)
+ + sizeof(struct xpvhv_aux), char);
+ } else {
+ array = (char *) HvARRAY(hv);
+ Renew(array, PERL_HV_ARRAY_ALLOC_BYTES(HvMAX(hv) + 1)
+ + sizeof(struct xpvhv_aux), char);
+ }
+ HvARRAY(hv) = (HE**) array;
+ /* SvOOK_on(hv) attacks the IV flags. */
+ SvFLAGS(hv) |= SVf_OOK;
+ iter = HvAUX(hv);
iter->xhv_riter = -1; /* HvRITER(hv) = -1 */
iter->xhv_eiter = Null(HE*); /* HvEITER(hv) = Null(HE*) */
{
register XPVHV* xhv;
HE *entry;
- struct xpvhv_aux *iter;
if (!hv)
Perl_croak(aTHX_ "Bad hash");
xhv = (XPVHV*)SvANY(hv);
- iter = xhv->xhv_aux;
- if (iter) {
+ if (SvOOK(hv)) {
+ struct xpvhv_aux *iter = HvAUX(hv);
entry = iter->xhv_eiter; /* HvEITER(hv) */
if (entry && HvLAZYDEL(hv)) { /* was deleted earlier? */
HvLAZYDEL_off(hv);
iter->xhv_riter = -1; /* HvRITER(hv) = -1 */
iter->xhv_eiter = Null(HE*); /* HvEITER(hv) = Null(HE*) */
} else {
- xhv->xhv_aux = S_hv_auxinit(aTHX);
+ S_hv_auxinit(aTHX_ hv);
}
/* used to be xhv->xhv_fill before 5.004_65 */
- return XHvTOTALKEYS(xhv);
+ return HvTOTALKEYS(hv);
}
I32 *
if (!hv)
Perl_croak(aTHX_ "Bad hash");
- iter = ((XPVHV *)SvANY(hv))->xhv_aux;
- if (!iter) {
- ((XPVHV *)SvANY(hv))->xhv_aux = iter = S_hv_auxinit(aTHX);
- }
+ iter = SvOOK(hv) ? HvAUX(hv) : S_hv_auxinit(aTHX_ hv);
return &(iter->xhv_riter);
}
if (!hv)
Perl_croak(aTHX_ "Bad hash");
- iter = ((XPVHV *)SvANY(hv))->xhv_aux;
- if (!iter) {
- ((XPVHV *)SvANY(hv))->xhv_aux = iter = S_hv_auxinit(aTHX);
- }
+ iter = SvOOK(hv) ? HvAUX(hv) : S_hv_auxinit(aTHX_ hv);
return &(iter->xhv_eiter);
}
if (!hv)
Perl_croak(aTHX_ "Bad hash");
-
- iter = ((XPVHV *)SvANY(hv))->xhv_aux;
- if (!iter) {
+ if (SvOOK(hv)) {
+ iter = HvAUX(hv);
+ } else {
if (riter == -1)
return;
- ((XPVHV *)SvANY(hv))->xhv_aux = iter = S_hv_auxinit(aTHX);
+ iter = S_hv_auxinit(aTHX_ hv);
}
iter->xhv_riter = riter;
}
if (!hv)
Perl_croak(aTHX_ "Bad hash");
- iter = ((XPVHV *)SvANY(hv))->xhv_aux;
- if (!iter) {
+ if (SvOOK(hv)) {
+ iter = HvAUX(hv);
+ } else {
/* 0 is the default so don't go malloc()ing a new structure just to
hold 0. */
if (!eiter)
return;
- ((XPVHV *)SvANY(hv))->xhv_aux = iter = S_hv_auxinit(aTHX);
+ iter = S_hv_auxinit(aTHX_ hv);
}
iter->xhv_eiter = eiter;
}
void
Perl_hv_name_set(pTHX_ HV *hv, const char *name, I32 len, int flags)
{
- struct xpvhv_aux *iter = ((XPVHV *)SvANY(hv))->xhv_aux;
+ struct xpvhv_aux *iter;
U32 hash;
- if (iter) {
+ if (SvOOK(hv)) {
+ iter = HvAUX(hv);
if (iter->xhv_name) {
unshare_hek_or_pvn(iter->xhv_name, 0, 0, 0);
}
if (name == 0)
return;
- ((XPVHV *)SvANY(hv))->xhv_aux = iter = S_hv_auxinit(aTHX);
+ iter = S_hv_auxinit(aTHX_ hv);
}
PERL_HASH(hash, name, len);
iter->xhv_name = name ? share_hek(name, len, hash) : 0;
if (!hv)
Perl_croak(aTHX_ "Bad hash");
xhv = (XPVHV*)SvANY(hv);
- iter = xhv->xhv_aux;
- if (!iter) {
+ if (!SvOOK(hv)) {
/* Too many things (well, pp_each at least) merrily assume that you can
call iv_iternext without calling hv_iterinit, so we'll have to deal
with it. */
hv_iterinit(hv);
- iter = ((XPVHV *)SvANY(hv))->xhv_aux;
}
+ iter = HvAUX(hv);
oldentry = entry = iter->xhv_eiter; /* HvEITER(hv) */
prime_env_iter();
#endif
- if (!HvARRAY(hv)) {
- char *darray;
- Newz(506, darray,
- PERL_HV_ARRAY_ALLOC_BYTES(xhv->xhv_max+1 /* HvMAX(hv)+1 */),
- char);
- HvARRAY(hv) = (HE**) darray;
- }
+ /* hv_iterinit now ensures this. */
+ assert (HvARRAY(hv));
+
/* At start of hash, entry is NULL. */
if (entry)
{
register XPVHV* xhv;
register HE *entry;
register HE **oentry;
- register I32 i = 1;
+ HE **first;
bool found = 0;
bool is_utf8 = FALSE;
int k_flags = 0;
xhv = (XPVHV*)SvANY(PL_strtab);
/* assert(xhv_array != 0) */
LOCK_STRTAB_MUTEX;
- oentry = &(HvARRAY(PL_strtab))[hash & (I32) HvMAX(PL_strtab)];
+ first = oentry = &(HvARRAY(PL_strtab))[hash & (I32) HvMAX(PL_strtab)];
if (hek) {
- for (entry = *oentry; entry; i=0, oentry = &HeNEXT(entry), entry = *oentry) {
+ for (entry = *oentry; entry; oentry = &HeNEXT(entry), entry = *oentry) {
if (HeKEY_hek(entry) != hek)
continue;
found = 1;
}
} else {
const int flags_masked = k_flags & HVhek_MASK;
- for (entry = *oentry; entry; i=0, oentry = &HeNEXT(entry), entry = *oentry) {
+ for (entry = *oentry; entry; oentry = &HeNEXT(entry), entry = *oentry) {
if (HeHASH(entry) != hash) /* strings can't be equal */
continue;
if (HeKLEN(entry) != len)
if (found) {
if (--HeVAL(entry) == Nullsv) {
*oentry = HeNEXT(entry);
- if (i && !*oentry)
+ if (!*first) {
+ /* There are now no entries in our slot. */
xhv->xhv_fill--; /* HvFILL(hv)-- */
+ }
Safefree(HeKEY_hek(entry));
del_HE(entry);
xhv->xhv_keys--; /* HvKEYS(hv)-- */
register XPVHV* xhv;
register HE *entry;
register HE **oentry;
- register I32 i = 1;
I32 found = 0;
const int flags_masked = flags & HVhek_MASK;
/* assert(xhv_array != 0) */
LOCK_STRTAB_MUTEX;
oentry = &(HvARRAY(PL_strtab))[hash & (I32) HvMAX(PL_strtab)];
- for (entry = *oentry; entry; i=0, entry = HeNEXT(entry)) {
+ for (entry = *oentry; entry; entry = HeNEXT(entry)) {
if (HeHASH(entry) != hash) /* strings can't be equal */
continue;
if (HeKLEN(entry) != len)
break;
}
if (!found) {
+ /* What used to be head of the list.
+ If this is NULL, then we're the first entry for this slot, which
+ means we need to increase fill. */
+ const HE *old_first = *oentry;
entry = new_HE();
HeKEY_hek(entry) = save_hek_flags(str, len, hash, flags_masked);
HeVAL(entry) = Nullsv;
HeNEXT(entry) = *oentry;
*oentry = entry;
xhv->xhv_keys++; /* HvKEYS(hv)++ */
- if (i) { /* initial entry? */
+ if (!old_first) { /* initial entry? */
xhv->xhv_fill++; /* HvFILL(hv)++ */
} else if (xhv->xhv_keys > (IV)xhv->xhv_max /* HvKEYS(hv) > HvMAX(hv) */) {
hsplit(PL_strtab);
(int) real, (int) HvUSEDKEYS(hv));
bad = 1;
}
- if (HvPLACEHOLDERS(hv) != placeholders) {
+ if (HvPLACEHOLDERS_get(hv) != placeholders) {
PerlIO_printf(Perl_debug_log,
"Count %d placeholder(s), but hash reports %d\n",
- (int) placeholders, (int) HvPLACEHOLDERS(hv));
+ (int) placeholders, (int) HvPLACEHOLDERS_get(hv));
bad = 1;
}
}