5 #ifndef NO_FANCY_MALLOC
6 # ifndef SMALL_BUCKET_VIA_TABLE
7 # define SMALL_BUCKET_VIA_TABLE
10 # define BUCKETS_ROOT2
12 # ifndef IGNORE_SMALL_BAD_FREE
13 # define IGNORE_SMALL_BAD_FREE
17 #ifndef PLAIN_MALLOC /* Bulk enable features */
21 # ifndef TWO_POT_OPTIMIZE
22 # define TWO_POT_OPTIMIZE
24 # if defined(PERL_CORE) && !defined(PERL_EMERGENCY_SBRK)
25 # define PERL_EMERGENCY_SBRK
27 # if defined(PERL_CORE) && !defined(DEBUGGING_MSTATS)
28 # define DEBUGGING_MSTATS
32 #define MIN_BUC_POW2 (sizeof(void*) > 4 ? 3 : 2) /* Allow for 4-byte arena. */
33 #define MIN_BUCKET (MIN_BUC_POW2 * BUCKETS_PER_POW2)
35 #if !(defined(I286) || defined(atarist))
36 /* take 2k unless the block is bigger than that */
37 # define LOG_OF_MIN_ARENA 11
39 /* take 16k unless the block is bigger than that
40 (80286s like large segments!), probably good on the atari too */
41 # define LOG_OF_MIN_ARENA 14
45 # if defined(DEBUGGING) && !defined(NO_RCHECK)
48 # if defined(RCHECK) && defined(IGNORE_SMALL_BAD_FREE)
49 # undef IGNORE_SMALL_BAD_FREE
52 * malloc.c (Caltech) 2/21/82
53 * Chris Kingsley, kingsley@cit-20.
55 * This is a very fast storage allocator. It allocates blocks of a small
56 * number of different sizes, and keeps free lists of each size. Blocks that
57 * don't exactly fit are passed up to the next larger size. In this
58 * implementation, the available sizes are 2^n-4 (or 2^n-12) bytes long.
59 * If PACK_MALLOC is defined, small blocks are 2^n bytes long.
60 * This is designed for use in a program that uses vast quantities of memory,
61 * but bombs when it runs out.
69 # include "../EXTERN.h"
77 # define Malloc_t void *
80 # define MEM_SIZE unsigned long
83 # define LONG_MAX 0x7FFFFFFF
86 # define UV unsigned long
89 # define caddr_t char *
94 # define Copy(s,d,n,t) (void)memcpy((char*)(d),(char*)(s), (n) * sizeof(t))
95 # define PerlEnv_getenv getenv
96 # define PerlIO_printf fprintf
97 # define PerlIO_stderr() stderr
99 # ifndef croak /* make depend */
100 # define croak(mess, arg) warn((mess), (arg)); exit(1);
103 # define warn(mess, arg) fprintf(stderr, (mess), (arg));
115 # define MUTEX_LOCK(l)
119 # define MUTEX_UNLOCK(l)
124 # define DEBUG_m(a) if (debug & 128) a
127 /* I don't much care whether these are defined in sys/types.h--LAW */
129 #define u_char unsigned char
130 #define u_int unsigned int
133 # define u_bigint UV /* Needs to eat *void. */
135 # define u_bigint unsigned long /* Needs to eat *void. */
138 #define u_short unsigned short
140 /* 286 and atarist like big chunks, which gives too much overhead. */
141 #if (defined(RCHECK) || defined(I286) || defined(atarist)) && defined(PACK_MALLOC)
146 * The description below is applicable if PACK_MALLOC is not defined.
148 * The overhead on a block is at least 4 bytes. When free, this space
149 * contains a pointer to the next free block, and the bottom two bits must
150 * be zero. When in use, the first byte is set to MAGIC, and the second
151 * byte is the size index. The remaining bytes are for alignment.
152 * If range checking is enabled and the size of the block fits
153 * in two bytes, then the top two bytes hold the size of the requested block
154 * plus the range checking words, and the header word MINUS ONE.
157 union overhead *ov_next; /* when free */
158 #if MEM_ALIGNBYTES > 4
159 double strut; /* alignment problems */
162 u_char ovu_magic; /* magic number */
163 u_char ovu_index; /* bucket # */
165 u_short ovu_size; /* actual block size */
166 u_int ovu_rmagic; /* range magic number */
169 #define ov_magic ovu.ovu_magic
170 #define ov_index ovu.ovu_index
171 #define ov_size ovu.ovu_size
172 #define ov_rmagic ovu.ovu_rmagic
176 static void botch _((char *diag, char *s));
178 static void morecore _((int bucket));
179 static int findbucket _((union overhead *freep, int srchlen));
181 #define MAGIC 0xff /* magic # on accounting info */
182 #define RMAGIC 0x55555555 /* magic # on range info */
183 #define RMAGIC_C 0x55 /* magic # on range info */
186 # define RSLOP sizeof (u_int)
187 # ifdef TWO_POT_OPTIMIZE
188 # define MAX_SHORT_BUCKET (12 * BUCKETS_PER_POW2)
190 # define MAX_SHORT_BUCKET (13 * BUCKETS_PER_POW2)
196 #if !defined(PACK_MALLOC) && defined(BUCKETS_ROOT2)
197 # undef BUCKETS_ROOT2
201 # define BUCKET_TABLE_SHIFT 2
202 # define BUCKET_POW2_SHIFT 1
203 # define BUCKETS_PER_POW2 2
205 # define BUCKET_TABLE_SHIFT MIN_BUC_POW2
206 # define BUCKET_POW2_SHIFT 0
207 # define BUCKETS_PER_POW2 1
211 # define MAX_BUCKET_BY_TABLE 13
212 static u_short buck_size[MAX_BUCKET_BY_TABLE + 1] =
214 0, 0, 0, 0, 4, 4, 8, 12, 16, 24, 32, 48, 64, 80,
216 # define BUCKET_SIZE(i) ((i) % 2 ? buck_size[i] : (1 << ((i) >> BUCKET_POW2_SHIFT)))
217 # define BUCKET_SIZE_REAL(i) ((i) <= MAX_BUCKET_BY_TABLE \
219 : ((1 << ((i) >> BUCKET_POW2_SHIFT)) \
221 + POW2_OPTIMIZE_SURPLUS(i)))
223 # define BUCKET_SIZE(i) (1 << ((i) >> BUCKET_POW2_SHIFT))
224 # define BUCKET_SIZE_REAL(i) (BUCKET_SIZE(i) - MEM_OVERHEAD(i) + POW2_OPTIMIZE_SURPLUS(i))
229 /* In this case it is assumed that if we do sbrk() in 2K units, we
230 * will get 2K aligned arenas (at least after some initial
231 * alignment). The bucket number of the given subblock is on the start
232 * of 2K arena which contains the subblock. Several following bytes
233 * contain the magic numbers for the subblocks in the block.
235 * Sizes of chunks are powers of 2 for chunks in buckets <=
236 * MAX_PACKED, after this they are (2^n - sizeof(union overhead)) (to
237 * get alignment right).
239 * Consider an arena for 2^n with n>MAX_PACKED. We suppose that
240 * starts of all the chunks in a 2K arena are in different
241 * 2^n-byte-long chunks. If the top of the last chunk is aligned on a
242 * boundary of 2K block, this means that sizeof(union
243 * overhead)*"number of chunks" < 2^n, or sizeof(union overhead)*2K <
244 * 4^n, or n > 6 + log2(sizeof()/2)/2, since a chunk of size 2^n -
245 * overhead is used. Since this rules out n = 7 for 8 byte alignment,
246 * we specialcase allocation of the first of 16 128-byte-long chunks.
248 * Note that with the above assumption we automatically have enough
249 * place for MAGIC at the start of 2K block. Note also that we
250 * overlay union overhead over the chunk, thus the start of small chunks
251 * is immediately overwritten after freeing. */
252 # define MAX_PACKED_POW2 6
253 # define MAX_PACKED (MAX_PACKED_POW2 * BUCKETS_PER_POW2 + BUCKET_POW2_SHIFT)
254 # define MAX_POW2_ALGO ((1<<(MAX_PACKED_POW2 + 1)) - M_OVERHEAD)
255 # define TWOK_MASK ((1<<LOG_OF_MIN_ARENA) - 1)
256 # define TWOK_MASKED(x) ((u_bigint)(x) & ~TWOK_MASK)
257 # define TWOK_SHIFT(x) ((u_bigint)(x) & TWOK_MASK)
258 # define OV_INDEXp(block) ((u_char*)(TWOK_MASKED(block)))
259 # define OV_INDEX(block) (*OV_INDEXp(block))
260 # define OV_MAGIC(block,bucket) (*(OV_INDEXp(block) + \
261 (TWOK_SHIFT(block)>> \
262 (bucket>>BUCKET_POW2_SHIFT)) + \
263 (bucket >= MIN_NEEDS_SHIFT ? 1 : 0)))
264 /* A bucket can have a shift smaller than it size, we need to
265 shift its magic number so it will not overwrite index: */
266 # ifdef BUCKETS_ROOT2
267 # define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2 - 1) /* Shift 80 greater than chunk 64. */
269 # define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2) /* Shift 128 greater than chunk 32. */
271 # define CHUNK_SHIFT 0
273 /* Number of active buckets of given ordinal. */
274 #ifdef IGNORE_SMALL_BAD_FREE
275 #define FIRST_BUCKET_WITH_CHECK (6 * BUCKETS_PER_POW2) /* 64 */
276 # define N_BLKS(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK \
277 ? ((1<<LOG_OF_MIN_ARENA) - 1)/BUCKET_SIZE(bucket) \
280 # define N_BLKS(bucket) n_blks[bucket]
283 static u_short n_blks[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] =
285 # if BUCKETS_PER_POW2==1
287 (MIN_BUC_POW2==2 ? 384 : 0),
288 224, 120, 62, 31, 16, 8, 4, 2
291 (MIN_BUC_POW2==2 ? 384 : 0), (MIN_BUC_POW2==2 ? 384 : 0), /* 4, 4 */
292 224, 149, 120, 80, 62, 41, 31, 25, 16, 16, 8, 8, 4, 4, 2, 2
296 /* Shift of the first bucket with the given ordinal inside 2K chunk. */
297 #ifdef IGNORE_SMALL_BAD_FREE
298 # define BLK_SHIFT(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK \
299 ? ((1<<LOG_OF_MIN_ARENA) \
300 - BUCKET_SIZE(bucket) * N_BLKS(bucket)) \
303 # define BLK_SHIFT(bucket) blk_shift[bucket]
306 static u_short blk_shift[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] =
308 # if BUCKETS_PER_POW2==1
310 (MIN_BUC_POW2==2 ? 512 : 0),
311 256, 128, 64, 64, /* 8 to 64 */
312 16*sizeof(union overhead),
313 8*sizeof(union overhead),
314 4*sizeof(union overhead),
315 2*sizeof(union overhead),
318 (MIN_BUC_POW2==2 ? 512 : 0), (MIN_BUC_POW2==2 ? 512 : 0),
319 256, 260, 128, 128, 64, 80, 64, 48, /* 8 to 96 */
320 16*sizeof(union overhead), 16*sizeof(union overhead),
321 8*sizeof(union overhead), 8*sizeof(union overhead),
322 4*sizeof(union overhead), 4*sizeof(union overhead),
323 2*sizeof(union overhead), 2*sizeof(union overhead),
327 #else /* !PACK_MALLOC */
329 # define OV_MAGIC(block,bucket) (block)->ov_magic
330 # define OV_INDEX(block) (block)->ov_index
331 # define CHUNK_SHIFT 1
332 # define MAX_PACKED -1
333 #endif /* !PACK_MALLOC */
335 #define M_OVERHEAD (sizeof(union overhead) + RSLOP)
338 # define MEM_OVERHEAD(bucket) \
339 (bucket <= MAX_PACKED ? 0 : M_OVERHEAD)
340 # ifdef SMALL_BUCKET_VIA_TABLE
341 # define START_SHIFTS_BUCKET ((MAX_PACKED_POW2 + 1) * BUCKETS_PER_POW2)
342 # define START_SHIFT MAX_PACKED_POW2
343 # ifdef BUCKETS_ROOT2 /* Chunks of size 3*2^n. */
344 # define SIZE_TABLE_MAX 80
346 # define SIZE_TABLE_MAX 64
348 static char bucket_of[] =
350 # ifdef BUCKETS_ROOT2 /* Chunks of size 3*2^n. */
351 /* 0 to 15 in 4-byte increments. */
352 (sizeof(void*) > 4 ? 6 : 5), /* 4/8, 5-th bucket for better reports */
355 9, 9, 10, 10, /* 24, 32 */
356 11, 11, 11, 11, /* 48 */
357 12, 12, 12, 12, /* 64 */
358 13, 13, 13, 13, /* 80 */
359 13, 13, 13, 13 /* 80 */
360 # else /* !BUCKETS_ROOT2 */
361 /* 0 to 15 in 4-byte increments. */
362 (sizeof(void*) > 4 ? 3 : 2),
368 # endif /* !BUCKETS_ROOT2 */
370 # else /* !SMALL_BUCKET_VIA_TABLE */
371 # define START_SHIFTS_BUCKET MIN_BUCKET
372 # define START_SHIFT (MIN_BUC_POW2 - 1)
373 # endif /* !SMALL_BUCKET_VIA_TABLE */
374 #else /* !PACK_MALLOC */
375 # define MEM_OVERHEAD(bucket) M_OVERHEAD
376 # ifdef SMALL_BUCKET_VIA_TABLE
377 # undef SMALL_BUCKET_VIA_TABLE
379 # define START_SHIFTS_BUCKET MIN_BUCKET
380 # define START_SHIFT (MIN_BUC_POW2 - 1)
381 #endif /* !PACK_MALLOC */
384 * Big allocations are often of the size 2^n bytes. To make them a
385 * little bit better, make blocks of size 2^n+pagesize for big n.
388 #ifdef TWO_POT_OPTIMIZE
390 # ifndef PERL_PAGESIZE
391 # define PERL_PAGESIZE 4096
393 # ifndef FIRST_BIG_POW2
394 # define FIRST_BIG_POW2 15 /* 32K, 16K is used too often. */
396 # define FIRST_BIG_BLOCK (1<<FIRST_BIG_POW2)
397 /* If this value or more, check against bigger blocks. */
398 # define FIRST_BIG_BOUND (FIRST_BIG_BLOCK - M_OVERHEAD)
399 /* If less than this value, goes into 2^n-overhead-block. */
400 # define LAST_SMALL_BOUND ((FIRST_BIG_BLOCK>>1) - M_OVERHEAD)
402 # define POW2_OPTIMIZE_ADJUST(nbytes) \
403 ((nbytes >= FIRST_BIG_BOUND) ? nbytes -= PERL_PAGESIZE : 0)
404 # define POW2_OPTIMIZE_SURPLUS(bucket) \
405 ((bucket >= FIRST_BIG_POW2 * BUCKETS_PER_POW2) ? PERL_PAGESIZE : 0)
407 #else /* !TWO_POT_OPTIMIZE */
408 # define POW2_OPTIMIZE_ADJUST(nbytes)
409 # define POW2_OPTIMIZE_SURPLUS(bucket) 0
410 #endif /* !TWO_POT_OPTIMIZE */
412 #if defined(HAS_64K_LIMIT) && defined(PERL_CORE)
413 # define BARK_64K_LIMIT(what,nbytes,size) \
414 if (nbytes > 0xffff) { \
415 PerlIO_printf(PerlIO_stderr(), \
416 "%s too large: %lx\n", what, size); \
419 #else /* !HAS_64K_LIMIT || !PERL_CORE */
420 # define BARK_64K_LIMIT(what,nbytes,size)
421 #endif /* !HAS_64K_LIMIT || !PERL_CORE */
424 # define MIN_SBRK 2048
428 # define FIRST_SBRK (48*1024)
431 /* Minimal sbrk in percents of what is already alloced. */
432 #ifndef MIN_SBRK_FRAC
433 # define MIN_SBRK_FRAC 3
436 #ifndef SBRK_ALLOW_FAILURES
437 # define SBRK_ALLOW_FAILURES 3
440 #ifndef SBRK_FAILURE_PRICE
441 # define SBRK_FAILURE_PRICE 50
444 #if defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)
447 # define BIG_SIZE (1<<16) /* 64K */
450 static char *emergency_buffer;
451 static MEM_SIZE emergency_buffer_size;
457 if (size >= BIG_SIZE) {
458 /* Give the possibility to recover: */
459 MUTEX_UNLOCK(&malloc_mutex);
460 croak("Out of memory during \"large\" request for %i bytes", size);
463 if (!emergency_buffer) {
465 /* First offense, give a possibility to recover by dieing. */
466 /* No malloc involved here: */
467 GV **gvp = (GV**)hv_fetch(defstash, "^M", 2, 0);
471 if (!gvp) gvp = (GV**)hv_fetch(defstash, "\015", 1, 0);
472 if (!gvp || !(sv = GvSV(*gvp)) || !SvPOK(sv)
473 || (SvLEN(sv) < (1<<LOG_OF_MIN_ARENA) - M_OVERHEAD))
474 return (char *)-1; /* Now die die die... */
476 /* Got it, now detach SvPV: */
478 /* Check alignment: */
479 if (((u_bigint)(pv - M_OVERHEAD)) & ((1<<LOG_OF_MIN_ARENA) - 1)) {
480 PerlIO_puts(PerlIO_stderr(),"Bad alignment of $^M!\n");
481 return (char *)-1; /* die die die */
484 emergency_buffer = pv - M_OVERHEAD;
485 emergency_buffer_size = SvLEN(sv) + M_OVERHEAD;
488 MUTEX_UNLOCK(&malloc_mutex);
489 croak("Out of memory during request for %i bytes", size);
491 else if (emergency_buffer_size >= size) {
492 emergency_buffer_size -= size;
493 return emergency_buffer + emergency_buffer_size;
496 return (char *)-1; /* poor guy... */
499 #else /* !(defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)) */
500 # define emergency_sbrk(size) -1
501 #endif /* !(defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)) */
504 * nextf[i] is the pointer to the next free block of size 2^i. The
505 * smallest allocatable block is 8 bytes. The overhead information
506 * precedes the data area returned to the user.
508 #define NBUCKETS (32*BUCKETS_PER_POW2 + 1)
509 static union overhead *nextf[NBUCKETS];
512 #define sbrk(a) Perl_sbrk(a)
513 Malloc_t Perl_sbrk _((int size));
515 #ifdef DONT_DECLARE_STD
520 extern Malloc_t sbrk(int);
524 #ifdef DEBUGGING_MSTATS
526 * nmalloc[i] is the difference between the number of mallocs and frees
527 * for a given block size.
529 static u_int nmalloc[NBUCKETS];
530 static u_int sbrk_slack;
531 static u_int start_slack;
534 static u_int goodsbrk;
537 #define ASSERT(p,diag) if (!(p)) botch(diag,STRINGIFY(p)); else
539 botch(char *diag, char *s)
541 PerlIO_printf(PerlIO_stderr(), "assertion botched (%s?): %s\n", diag, s);
545 #define ASSERT(p, diag)
/* malloc(nbytes): front end of the Kingsley-style bucket allocator.
 * Picks the smallest bucket whose chunk can hold the request, refills
 * the bucket via morecore() when its free list is empty, and unlinks
 * the head chunk.  NOTE(review): this excerpt elides interior lines of
 * the original function (the embedded upstream line numbers jump), so
 * the comments added below annotate only the visible code.
 */
549 malloc(register size_t nbytes)
551 register union overhead *p;
553 register MEM_SIZE shiftr;
555 #if defined(DEBUGGING) || defined(RCHECK)
/* Remember the caller's exact request so RCHECK can record it later. */
556 MEM_SIZE size = nbytes;
559 BARK_64K_LIMIT("Allocation",nbytes,nbytes);
/* A huge size_t casts to a negative long -- treat it as a caller bug. */
561 if ((long)nbytes < 0)
562 croak("%s", "panic: malloc");
565 MUTEX_LOCK(&malloc_mutex);
567 * Convert amount of memory requested into
568 * closest block size stored in hash buckets
569 * which satisfies request. Account for
570 * space used per block for accounting.
/* Small requests: bucket number comes straight from bucket_of[]. */
573 # ifdef SMALL_BUCKET_VIA_TABLE
576 else if (nbytes <= SIZE_TABLE_MAX) {
577 bucket = bucket_of[(nbytes - 1) >> BUCKET_TABLE_SHIFT];
582 if (nbytes <= MAX_POW2_ALGO) goto do_shifts;
/* General case: add per-chunk overhead, round to a 4-byte multiple,
 * then locate the bucket by repeated shifting. */
587 POW2_OPTIMIZE_ADJUST(nbytes);
588 nbytes += M_OVERHEAD;
589 nbytes = (nbytes + 3) &~ 3;
591 shiftr = (nbytes - 1) >> START_SHIFT;
592 bucket = START_SHIFTS_BUCKET;
593 /* apart from this loop, this is O(1) */
595 bucket += BUCKETS_PER_POW2;
598 * If nothing in hash bucket right now,
599 * request more memory from the system.
601 if (nextf[bucket] == NULL)
/* morecore() may still fail; then report OOM (mutex already dropped). */
603 if ((p = nextf[bucket]) == NULL) {
604 MUTEX_UNLOCK(&malloc_mutex);
607 PerlIO_puts(PerlIO_stderr(),"Out of memory!\n");
615 DEBUG_m(PerlIO_printf(Perl_debug_log,
616 "0x%lx: (%05lu) malloc %ld bytes\n",
617 (unsigned long)(p+1), (unsigned long)(an++),
620 /* remove from linked list */
/* Sanity check: a live free-list link must be overhead-aligned. */
622 if (*((int*)p) & (sizeof(union overhead) - 1))
623 PerlIO_printf(PerlIO_stderr(), "Corrupt malloc ptr 0x%lx at 0x%lx\n",
624 (unsigned long)*((int*)p),(unsigned long)p);
626 nextf[bucket] = p->ov_next;
627 #ifdef IGNORE_SMALL_BAD_FREE
/* Buckets below FIRST_BUCKET_WITH_CHECK carry no magic stamp. */
628 if (bucket >= FIRST_BUCKET_WITH_CHECK)
630 OV_MAGIC(p, bucket) = MAGIC;
632 OV_INDEX(p) = bucket;
636 * Record allocated size of block and
637 * bound space with magic numbers.
639 p->ov_rmagic = RMAGIC;
640 if (bucket <= MAX_SHORT_BUCKET) {
/* RCHECK: store the requested size and plant RMAGIC/RMAGIC_C at the
 * tail so free()/realloc() can detect overruns. */
643 nbytes = size + M_OVERHEAD;
644 p->ov_size = nbytes - 1;
645 if ((i = nbytes & 3)) {
648 *((char *)((caddr_t)p + nbytes - RSLOP + i)) = RMAGIC_C;
650 nbytes = (nbytes + 3) &~ 3;
651 *((u_int *)((caddr_t)p + nbytes - RSLOP)) = RMAGIC;
654 MUTEX_UNLOCK(&malloc_mutex);
/* Step past the overhead header where CHUNK_SHIFT is nonzero. */
655 return ((Malloc_t)(p + CHUNK_SHIFT));
658 static char *last_sbrk_top;
659 static char *last_op; /* This arena can be easily extended. */
660 static int sbrked_remains;
661 static int sbrk_good = SBRK_ALLOW_FAILURES * SBRK_FAILURE_PRICE;
663 #ifdef DEBUGGING_MSTATS
667 struct chunk_chain_s {
668 struct chunk_chain_s *next;
671 static struct chunk_chain_s *chunk_chain;
673 static char max_bucket;
675 /* Cutoff a piece of one of the chunks in the chain. Prefer smaller chunk. */
/* get_from_chain(size): best-fit scan of the chunk_chain list.
 * Remembers the element whose leftover (elt->size - size) is smallest;
 * on a hit it either splits the chunk (leftover stays chained) or
 * unlinks it whole.  Returns NULL when no chained chunk is big enough.
 * NOTE(review): interior lines of the loop are elided in this excerpt. */
677 get_from_chain(MEM_SIZE size)
679 struct chunk_chain_s *elt = chunk_chain, **oldp = &chunk_chain;
680 struct chunk_chain_s **oldgoodp = NULL;
681 long min_remain = LONG_MAX;
/* Track the best (smallest-leftover) candidate seen so far. */
684 if (elt->size >= size) {
685 long remains = elt->size - size;
686 if (remains >= 0 && remains < min_remain) {
688 min_remain = remains;
694 oldp = &( elt->next );
697 if (!oldgoodp) return NULL;
/* Split: the tail of the chosen chunk stays on the chain. */
699 void *ret = *oldgoodp;
700 struct chunk_chain_s *next = (*oldgoodp)->next;
702 *oldgoodp = (struct chunk_chain_s *)((char*)ret + size);
703 (*oldgoodp)->size = min_remain;
704 (*oldgoodp)->next = next;
/* Otherwise: unlink the whole chunk. */
707 void *ret = *oldgoodp;
708 *oldgoodp = (*oldgoodp)->next;
/* add_to_chain(p, size, chip): push block p onto the chunk_chain free
 * list; the visible lines record the usable size as size - chip, i.e.
 * the first `chip' bytes are excluded.  NOTE(review): interior lines
 * (the computation of `cp' -- presumably p + chip) are elided in this
 * excerpt; confirm against the full source. */
715 add_to_chain(void *p, MEM_SIZE size, MEM_SIZE chip)
717 struct chunk_chain_s *next = chunk_chain;
/* New entry becomes the list head. */
721 chunk_chain = (struct chunk_chain_s *)cp;
722 chunk_chain->size = size - chip;
723 chunk_chain->next = next;
/* get_from_bigger_buckets(bucket, size): steal a free chunk from some
 * bucket >= `bucket', but only after that bucket has been passed over
 * enough times (the static bucketprice[] counters throttle theft, per
 * the comment on line 733).  The stolen chunk's surplus beyond `size'
 * is parked on chunk_chain via add_to_chain().
 * NOTE(review): interior lines are elided in this excerpt. */
728 get_from_bigger_buckets(int bucket, MEM_SIZE size)
731 static int bucketprice[NBUCKETS];
732 while (bucket <= max_bucket) {
733 /* We postpone stealing from bigger buckets until we want it
735 if (nextf[bucket] && bucketprice[bucket]++ >= price) {
737 void *ret = (void*)(nextf[bucket] - 1 + CHUNK_SHIFT);
738 bucketprice[bucket] = 0;
/* Stealing the chunk that last_op points at would break realloc's
 * extend-in-place optimization, so invalidate last_op. */
739 if (((char*)nextf[bucket]) - M_OVERHEAD == last_op) {
740 last_op = NULL; /* Disable optimization */
742 nextf[bucket] = nextf[bucket]->ov_next;
743 #ifdef DEBUGGING_MSTATS
745 start_slack -= M_OVERHEAD;
747 add_to_chain(ret, (BUCKET_SIZE(bucket) +
748 POW2_OPTIMIZE_SURPLUS(bucket)),
757 static union overhead *
758 getpages(int needed, int *nblksp, int bucket)
760 /* Need to do (possibly expensive) system call. Try to
761 optimize it for rare calling. */
762 MEM_SIZE require = needed - sbrked_remains;
768 if (!last_sbrk_top && require < FIRST_SBRK)
769 require = FIRST_SBRK;
770 else if (require < MIN_SBRK) require = MIN_SBRK;
772 if (require < goodsbrk * MIN_SBRK_FRAC / 100)
773 require = goodsbrk * MIN_SBRK_FRAC / 100;
774 require = ((require - 1 + MIN_SBRK) / MIN_SBRK) * MIN_SBRK;
781 DEBUG_m(PerlIO_printf(Perl_debug_log,
782 "sbrk(%ld) for %ld-byte-long arena\n",
783 (long)require, (long) needed));
784 cp = (char *)sbrk(require);
785 #ifdef DEBUGGING_MSTATS
788 if (cp == last_sbrk_top) {
789 /* Common case, anything is fine. */
791 ovp = (union overhead *) (cp - sbrked_remains);
792 sbrked_remains = require - (needed - sbrked_remains);
793 } else if (cp == (char *)-1) { /* no more room! */
794 ovp = (union overhead *)emergency_sbrk(needed);
795 if (ovp == (union overhead *)-1)
798 } else { /* Non-continuous or first sbrk(). */
799 long add = sbrked_remains;
802 if (sbrked_remains) { /* Put rest into chain, we
803 cannot use it right now. */
804 add_to_chain((void*)(last_sbrk_top - sbrked_remains),
808 /* Second, check alignment. */
811 #ifndef atarist /* on the atari we dont have to worry about this */
812 # ifndef I286 /* The sbrk(0) call on the I286 always returns the next segment */
814 /* CHUNK_SHIFT is 1 for PACK_MALLOC, 0 otherwise. */
815 if ((UV)cp & (0x7FF >> CHUNK_SHIFT)) { /* Not aligned. */
816 slack = (0x800 >> CHUNK_SHIFT)
817 - ((UV)cp & (0x7FF >> CHUNK_SHIFT));
824 DEBUG_m(PerlIO_printf(Perl_debug_log,
825 "sbrk(%ld) to fix non-continuous/off-page sbrk:\n\t%ld for alignement,\t%ld were assumed to come from the tail of the previous sbrk\n",
826 (long)add, (long) slack,
827 (long) sbrked_remains));
828 newcp = (char *)sbrk(add);
829 #if defined(DEBUGGING_MSTATS)
833 if (newcp != cp + require) {
834 /* Too bad: even rounding sbrk() is not continuous.*/
835 DEBUG_m(PerlIO_printf(Perl_debug_log,
836 "failed to fix bad sbrk()\n"));
839 MUTEX_UNLOCK(&malloc_mutex);
840 croak("%s", "panic: Off-page sbrk");
843 if (sbrked_remains) {
845 #if defined(DEBUGGING_MSTATS)
846 sbrk_slack += require;
849 DEBUG_m(PerlIO_printf(Perl_debug_log,
850 "straight sbrk(%ld)\n",
852 cp = (char *)sbrk(require);
853 #ifdef DEBUGGING_MSTATS
856 if (cp == (char *)-1)
859 sbrk_good = -1; /* Disable optimization!
860 Continue with not-aligned... */
863 require += sbrked_remains;
868 sbrk_good -= SBRK_FAILURE_PRICE;
871 ovp = (union overhead *) cp;
873 * Round up to minimum allocation size boundary
874 * and deduct from block count to reflect.
877 #ifndef I286 /* Again, this should always be ok on an 80286 */
879 ovp = (union overhead *)(((UV)ovp + 8) & ~7);
880 DEBUG_m(PerlIO_printf(Perl_debug_log,
881 "fixing sbrk(): %d bytes off machine alignement\n",
882 (int)((UV)ovp & 7)));
884 # if defined(DEBUGGING_MSTATS)
885 /* This is only approx. if TWO_POT_OPTIMIZE: */
886 sbrk_slack += (1 << bucket);
890 sbrked_remains = require - needed;
892 last_sbrk_top = cp + require;
893 last_op = (char*) cp;
894 #ifdef DEBUGGING_MSTATS
/* getpages_adjacent(require): try to grow the most recent arena in
 * place.  First consume the leftover tail of the last sbrk
 * (sbrked_remains); otherwise sbrk() more and check that the new break
 * is contiguous with last_sbrk_top.  A non-contiguous result is
 * salvaged onto chunk_chain rather than used for the extension.
 * NOTE(review): interior lines are elided in this excerpt. */
901 getpages_adjacent(int require)
/* Fast path: the tail of the previous sbrk already covers the need. */
903 if (require <= sbrked_remains) {
904 sbrked_remains -= require;
908 require -= sbrked_remains;
909 /* We do not try to optimize sbrks here, we go for place. */
910 cp = (char*) sbrk(require);
911 #ifdef DEBUGGING_MSTATS
/* Contiguous: success, just advance the recorded break. */
915 if (cp == last_sbrk_top) {
917 last_sbrk_top = cp + require;
919 /* Report the failure: */
/* Not contiguous: keep both the old tail and the new block usable. */
921 add_to_chain((void*)(last_sbrk_top - sbrked_remains),
923 add_to_chain((void*)cp, require, 0);
924 sbrk_good -= SBRK_FAILURE_PRICE;
936 * Allocate more memory to the indicated bucket.
/* morecore(bucket): refill nextf[bucket]; visible callers invoke it
 * with malloc_mutex held (it unlocks before croak).  Order of attempts
 * visible below: steal an arena-sized chunk from the matching
 * power-of-2 bucket, then from chunk_chain, then from bigger buckets,
 * then from the tail of the last sbrk, and finally getpages().  The
 * memory obtained is carved into nblks chunks linked onto the bucket's
 * free list.  NOTE(review): interior lines are elided in this excerpt;
 * comments annotate only the visible code. */
939 morecore(register int bucket)
941 register union overhead *ovp;
942 register int rnu; /* 2^rnu bytes will be requested */
943 int nblks; /* become nblks blocks of the desired size */
944 register MEM_SIZE siz, needed;
/* A bucket this high would need 2^(bits of MEM_SIZE) bytes: refuse. */
948 if (bucket == sizeof(MEM_SIZE)*8*BUCKETS_PER_POW2) {
949 MUTEX_UNLOCK(&malloc_mutex);
950 croak("%s", "Out of memory during ridiculously large request");
952 if (bucket > max_bucket)
/* Small buckets share a minimum-size arena; big buckets get 2^n. */
955 rnu = ( (bucket <= (LOG_OF_MIN_ARENA << BUCKET_POW2_SHIFT))
957 : (bucket >> BUCKET_POW2_SHIFT) );
958 /* This may be overwritten later: */
959 nblks = 1 << (rnu - (bucket >> BUCKET_POW2_SHIFT)); /* how many blocks to get */
960 needed = ((MEM_SIZE)1 << rnu) + POW2_OPTIMIZE_SURPLUS(bucket);
/* Attempt 1: reuse a whole free chunk of exactly the arena size. */
961 if (nextf[rnu << BUCKET_POW2_SHIFT]) { /* 2048b bucket. */
962 ovp = nextf[rnu << BUCKET_POW2_SHIFT] - 1 + CHUNK_SHIFT;
963 nextf[rnu << BUCKET_POW2_SHIFT]
964 = nextf[rnu << BUCKET_POW2_SHIFT]->ov_next;
965 #ifdef DEBUGGING_MSTATS
966 nmalloc[rnu << BUCKET_POW2_SHIFT]--;
967 start_slack -= M_OVERHEAD;
969 DEBUG_m(PerlIO_printf(Perl_debug_log,
970 "stealing %ld bytes from %ld arena\n",
971 (long) needed, (long) rnu << BUCKET_POW2_SHIFT));
/* Attempt 2: best-fit piece from the chained leftovers. */
972 } else if (chunk_chain
973 && (ovp = (union overhead*) get_from_chain(needed))) {
974 DEBUG_m(PerlIO_printf(Perl_debug_log,
975 "stealing %ld bytes from chain\n",
/* Attempt 3: rob a larger bucket's free list. */
977 } else if ( (ovp = (union overhead*)
978 get_from_bigger_buckets((rnu << BUCKET_POW2_SHIFT) + 1,
980 DEBUG_m(PerlIO_printf(Perl_debug_log,
981 "stealing %ld bytes from bigger buckets\n",
/* Attempt 4: the tail of the last sbrk is already big enough. */
983 } else if (needed <= sbrked_remains) {
984 ovp = (union overhead *)(last_sbrk_top - sbrked_remains);
985 sbrked_remains -= needed;
986 last_op = (char*)ovp;
/* Attempt 5: go to the system. */
988 ovp = getpages(needed, &nblks, bucket);
994 * Add new memory allocated to that on
995 * free list for this hash bucket.
997 siz = BUCKET_SIZE(bucket);
/* PACK_MALLOC: first byte of the arena records the bucket index. */
999 *(u_char*)ovp = bucket; /* Fill index. */
1000 if (bucket <= MAX_PACKED) {
1001 ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket));
1002 nblks = N_BLKS(bucket);
1003 # ifdef DEBUGGING_MSTATS
1004 start_slack += BLK_SHIFT(bucket);
1006 } else if (bucket < LOG_OF_MIN_ARENA * BUCKETS_PER_POW2) {
1007 ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket));
1008 siz -= sizeof(union overhead);
1009 } else ovp++; /* One chunk per block. */
1010 #endif /* PACK_MALLOC */
1011 nextf[bucket] = ovp;
1012 #ifdef DEBUGGING_MSTATS
1013 nmalloc[bucket] += nblks;
1014 if (bucket > MAX_PACKED) {
1015 start_slack += M_OVERHEAD * nblks;
/* Thread the arena into a singly linked list of siz-byte chunks. */
1018 while (--nblks > 0) {
1019 ovp->ov_next = (union overhead *)((caddr_t)ovp + siz);
1020 ovp = (union overhead *)((caddr_t)ovp + siz);
1022 /* Not all sbrks return zeroed memory.*/
1023 ovp->ov_next = (union overhead *)NULL;
/* 128-byte bucket: pull the first chunk back by one overhead union so
 * its top stays inside the 2K arena (see the PACK_MALLOC commentary
 * about specialcasing the first of 16 128-byte chunks). */
1025 if (bucket == 7*BUCKETS_PER_POW2) { /* Special case, explanation is above. */
1026 union overhead *n_op = nextf[7*BUCKETS_PER_POW2]->ov_next;
1027 nextf[7*BUCKETS_PER_POW2] =
1028 (union overhead *)((caddr_t)nextf[7*BUCKETS_PER_POW2]
1029 - sizeof(union overhead));
1030 nextf[7*BUCKETS_PER_POW2]->ov_next = n_op;
1032 #endif /* !PACK_MALLOC */
1038 register MEM_SIZE size;
1039 register union overhead *ovp;
1040 char *cp = (char*)mp;
1045 DEBUG_m(PerlIO_printf(Perl_debug_log,
1046 "0x%lx: (%05lu) free\n",
1047 (unsigned long)cp, (unsigned long)(an++)));
1051 ovp = (union overhead *)((caddr_t)cp
1052 - sizeof (union overhead) * CHUNK_SHIFT);
1054 bucket = OV_INDEX(ovp);
1056 #ifdef IGNORE_SMALL_BAD_FREE
1057 if ((bucket >= FIRST_BUCKET_WITH_CHECK)
1058 && (OV_MAGIC(ovp, bucket) != MAGIC))
1060 if (OV_MAGIC(ovp, bucket) != MAGIC)
1063 static int bad_free_warn = -1;
1064 if (bad_free_warn == -1) {
1065 char *pbf = PerlEnv_getenv("PERL_BADFREE");
1066 bad_free_warn = (pbf) ? atoi(pbf) : 1;
1071 warn("%s free() ignored",
1072 ovp->ov_rmagic == RMAGIC - 1 ? "Duplicate" : "Bad");
1074 warn("%s", "Bad free() ignored");
1076 return; /* sanity */
1078 MUTEX_LOCK(&malloc_mutex);
1080 ASSERT(ovp->ov_rmagic == RMAGIC, "chunk's head overwrite");
1081 if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) {
1083 MEM_SIZE nbytes = ovp->ov_size + 1;
1085 if ((i = nbytes & 3)) {
1088 ASSERT(*((char *)((caddr_t)ovp + nbytes - RSLOP + i))
1089 == RMAGIC_C, "chunk's tail overwrite");
1092 nbytes = (nbytes + 3) &~ 3;
1093 ASSERT(*(u_int *)((caddr_t)ovp + nbytes - RSLOP) == RMAGIC, "chunk's tail overwrite");
1095 ovp->ov_rmagic = RMAGIC - 1;
1097 ASSERT(OV_INDEX(ovp) < NBUCKETS, "chunk's head overwrite");
1098 size = OV_INDEX(ovp);
1099 ovp->ov_next = nextf[size];
1101 MUTEX_UNLOCK(&malloc_mutex);
1105 * When a program attempts "storage compaction" as mentioned in the
1106 * old malloc man page, it realloc's an already freed block. Usually
1107 * this is the last block it freed; occasionally it might be farther
1108 * back. We have to search all the free lists for the block in order
1109 * to determine its bucket: 1st we make one pass thru the lists
1110 * checking only the first block in each; if that fails we search
1111 * ``reall_srchlen'' blocks in each list for a match (the variable
1112 * is extern so the caller can modify it). If that fails we just copy
1113 * however many bytes was given to realloc() and hope it's not huge.
1115 int reall_srchlen = 4; /* 4 should be plenty, -1 =>'s whole list */
1118 realloc(void *mp, size_t nbytes)
1120 register MEM_SIZE onb;
1121 union overhead *ovp;
1124 register int bucket;
1125 int was_alloced = 0, incr;
1126 char *cp = (char*)mp;
1128 #if defined(DEBUGGING) || !defined(PERL_CORE)
1129 MEM_SIZE size = nbytes;
1131 if ((long)nbytes < 0)
1132 croak("%s", "panic: realloc");
1135 BARK_64K_LIMIT("Reallocation",nbytes,size);
1137 return malloc(nbytes);
1139 MUTEX_LOCK(&malloc_mutex);
1140 ovp = (union overhead *)((caddr_t)cp
1141 - sizeof (union overhead) * CHUNK_SHIFT);
1142 bucket = OV_INDEX(ovp);
1143 #ifdef IGNORE_SMALL_BAD_FREE
1144 if ((bucket < FIRST_BUCKET_WITH_CHECK)
1145 || (OV_MAGIC(ovp, bucket) == MAGIC))
1147 if (OV_MAGIC(ovp, bucket) == MAGIC)
1153 * Already free, doing "compaction".
1155 * Search for the old block of memory on the
1156 * free list. First, check the most common
1157 * case (last element free'd), then (this failing)
1158 * the last ``reall_srchlen'' items free'd.
1159 * If all lookups fail, then assume the size of
1160 * the memory block being realloc'd is the
1161 * smallest possible.
1163 if ((bucket = findbucket(ovp, 1)) < 0 &&
1164 (bucket = findbucket(ovp, reall_srchlen)) < 0)
1167 onb = BUCKET_SIZE_REAL(bucket);
1169 * avoid the copy if same size block.
1170 * We are not agressive with boundary cases. Note that it might
1171 * (for a small number of cases) give false negative if
1172 * both new size and old one are in the bucket for
1173 * FIRST_BIG_POW2, but the new one is near the lower end.
1175 * We do not try to go to 1.5 times smaller bucket so far.
1177 if (nbytes > onb) incr = 1;
1179 #ifdef DO_NOT_TRY_HARDER_WHEN_SHRINKING
1180 if ( /* This is a little bit pessimal if PACK_MALLOC: */
1181 nbytes > ( (onb >> 1) - M_OVERHEAD )
1182 # ifdef TWO_POT_OPTIMIZE
1183 || (bucket == FIRST_BIG_POW2 && nbytes >= LAST_SMALL_BOUND )
1186 #else /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */
1187 prev_bucket = ( (bucket > MAX_PACKED + 1)
1188 ? bucket - BUCKETS_PER_POW2
1190 if (nbytes > BUCKET_SIZE_REAL(prev_bucket))
1191 #endif /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */
1196 #ifdef STRESS_REALLOC
1197 || 1 /* always do it the hard way */
1200 else if (incr == 0) {
1204 * Record new allocated size of block and
1205 * bound space with magic numbers.
1207 if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) {
1208 int i, nb = ovp->ov_size + 1;
1213 ASSERT(*((char *)((caddr_t)ovp + nb - RSLOP + i)) == RMAGIC_C, "chunk's tail overwrite");
1217 ASSERT(*(u_int *)((caddr_t)ovp + nb - RSLOP) == RMAGIC, "chunk's tail overwrite");
1219 * Convert amount of memory requested into
1220 * closest block size stored in hash buckets
1221 * which satisfies request. Account for
1222 * space used per block for accounting.
1224 nbytes += M_OVERHEAD;
1225 ovp->ov_size = nbytes - 1;
1226 if ((i = nbytes & 3)) {
1229 *((char *)((caddr_t)ovp + nbytes - RSLOP + i))
1232 nbytes = (nbytes + 3) &~ 3;
1233 *((u_int *)((caddr_t)ovp + nbytes - RSLOP)) = RMAGIC;
1237 MUTEX_UNLOCK(&malloc_mutex);
1238 } else if (incr == 1 && (cp - M_OVERHEAD == last_op)
1239 && (onb > (1 << LOG_OF_MIN_ARENA))) {
1240 MEM_SIZE require, newarena = nbytes, pow;
1243 POW2_OPTIMIZE_ADJUST(newarena);
1244 newarena = newarena + M_OVERHEAD;
1245 /* newarena = (newarena + 3) &~ 3; */
1246 shiftr = (newarena - 1) >> LOG_OF_MIN_ARENA;
1247 pow = LOG_OF_MIN_ARENA + 1;
1248 /* apart from this loop, this is O(1) */
1249 while (shiftr >>= 1)
1251 newarena = (1 << pow) + POW2_OPTIMIZE_SURPLUS(pow * BUCKETS_PER_POW2);
1252 require = newarena - onb - M_OVERHEAD;
1254 if (getpages_adjacent(require)) {
1255 #ifdef DEBUGGING_MSTATS
1257 nmalloc[pow * BUCKETS_PER_POW2]++;
1259 *(cp - M_OVERHEAD) = pow * BUCKETS_PER_POW2; /* Fill index. */
1265 MUTEX_UNLOCK(&malloc_mutex);
1266 if ((res = (char*)malloc(nbytes)) == NULL)
1268 if (cp != res) /* common optimization */
1269 Copy(cp, res, (MEM_SIZE)(nbytes<onb?nbytes:onb), char);
1274 DEBUG_m(PerlIO_printf(Perl_debug_log, "0x%lu: (%05lu) rfree\n",
1275 (unsigned long)res,(unsigned long)(an++)));
1276 DEBUG_m(PerlIO_printf(Perl_debug_log,
1277 "0x%lx: (%05lu) realloc %ld bytes\n",
1278 (unsigned long)res,(unsigned long)(an++),
1280 return ((Malloc_t)res);
1284 * Search ``srchlen'' elements of each free list for a block whose
1285 * header starts at ``freep''. If srchlen is -1 search the whole list.
1286 * Return bucket number, or -1 if not found.
/* findbucket(freep, srchlen): scan each free list for the header
 * `freep'; per the commentary above realloc(), srchlen bounds how many
 * entries of each list are examined (-1 means the whole list).
 * Returns the bucket number, or -1 if not found.  NOTE(review): the
 * initialization of `j', the match test, and the returns are elided in
 * this excerpt. */
1289 findbucket(union overhead *freep, int srchlen)
1291 register union overhead *p;
1294 for (i = 0; i < NBUCKETS; i++) {
1296 for (p = nextf[i]; p && j != srchlen; p = p->ov_next) {
/* calloc(elements, size): allocate and zero an array via malloc().
 * NOTE(review): `elements * size' is multiplied without an overflow
 * check -- a modern calloc rejects elements > SIZE_MAX / size before
 * multiplying; confirm whether upstream relies solely on malloc()'s
 * (long)nbytes < 0 sanity check.  The malloc-failure guard around the
 * memset is elided in this excerpt. */
1306 calloc(register size_t elements, register size_t size)
1308 long sz = elements * size;
1309 Malloc_t p = malloc(sz);
1312 memset((void*)p, 0, sz);
/*
 * malloced_size - report the usable size of an allocation ``p''.
 * Reads the bucket index out of the overhead header that precedes the
 * user block (OV_INDEX) and maps it back to the real per-bucket payload
 * size via BUCKET_SIZE_REAL.
 */
1318 malloced_size(void *p)
1320 int bucket = OV_INDEX((union overhead *)p);
1322 return BUCKET_SIZE_REAL(bucket);
1325 #ifdef DEBUGGING_MSTATS
1327 # ifdef BUCKETS_ROOT2
1328 # define MIN_EVEN_REPORT 6
1330 # define MIN_EVEN_REPORT MIN_BUCKET
1333 * mstats - print out statistics about malloc
1335 * Prints two lines of numbers, one showing the length of the free list
1336 * for each size category, the second showing the number of mallocs -
1337 * frees for each size category.
/*
 * dump_mstats - print allocator statistics to stderr, labelled with ``s''.
 * First pass: count free blocks per bucket (nfree[]), accumulate total
 * free and total allocated byte counts, and remember the highest
 * even/odd bucket in use (odd buckets exist only under BUCKETS_ROOT2).
 * Then print a "free:" line and a "used:" line (used = nmalloc - nfree
 * per bucket), each with a second, indented row for the odd (root-2)
 * buckets when BUCKETS_ROOT2 is defined, followed by sbrk()/chain
 * bookkeeping totals.
 * NOTE(review): several interior lines (loop closers, the nfree[i]
 * assignment, the topbucket update guard, the chain-walk loop header)
 * are elided from this extract — the comments below describe only what
 * the visible lines establish.
 */
1340 dump_mstats(char *s)
1343 register union overhead *p;
1344 int topbucket=0, topbucket_ev=0, topbucket_odd=0, totfree=0, total=0;
1345 u_int nfree[NBUCKETS];
1346 int total_chain = 0;
1347 struct chunk_chain_s* nextchain = chunk_chain;
/* Pass 1: walk each bucket's free list, tally free/used byte totals. */
1349 for (i = MIN_BUCKET ; i < NBUCKETS; i++) {
1350 for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
1353 totfree += nfree[i] * BUCKET_SIZE_REAL(i);
1354 total += nmalloc[i] * BUCKET_SIZE_REAL(i);
/* Track the largest odd and even bucket seen, for report bounds. */
1356 i % 2 ? (topbucket_odd = i) : (topbucket_ev = i);
/* Header line: bucket range covered, real and nominal sizes. */
1361 PerlIO_printf(PerlIO_stderr(),
1362 "Memory allocation statistics %s (buckets %ld(%ld)..%ld(%ld)\n",
1364 (long)BUCKET_SIZE_REAL(MIN_BUCKET),
1365 (long)BUCKET_SIZE(MIN_BUCKET),
1366 (long)BUCKET_SIZE_REAL(topbucket), (long)BUCKET_SIZE(topbucket));
/* Row of free-block counts for the even (power-of-two) buckets;
 * field width varies with bucket index to keep columns aligned. */
1367 PerlIO_printf(PerlIO_stderr(), "%8d free:", totfree);
1368 for (i = MIN_EVEN_REPORT; i <= topbucket; i += BUCKETS_PER_POW2) {
1369 PerlIO_printf(PerlIO_stderr(),
1370 ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
1372 : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
/* Second row for the odd (root-2 intermediate) buckets. */
1375 #ifdef BUCKETS_ROOT2
1376 PerlIO_printf(PerlIO_stderr(), "\n\t ");
1377 for (i = MIN_BUCKET + 1; i <= topbucket_odd; i += BUCKETS_PER_POW2) {
1378 PerlIO_printf(PerlIO_stderr(),
1379 ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
1381 : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
/* "used" = blocks handed out minus blocks currently free, per bucket. */
1385 PerlIO_printf(PerlIO_stderr(), "\n%8d used:", total - totfree);
1386 for (i = MIN_EVEN_REPORT; i <= topbucket; i += BUCKETS_PER_POW2) {
1387 PerlIO_printf(PerlIO_stderr(),
1388 ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
1390 : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
1391 nmalloc[i] - nfree[i]);
1393 #ifdef BUCKETS_ROOT2
1394 PerlIO_printf(PerlIO_stderr(), "\n\t ");
1395 for (i = MIN_BUCKET + 1; i <= topbucket_odd; i += BUCKETS_PER_POW2) {
1396 PerlIO_printf(PerlIO_stderr(),
1397 ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
1399 : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
1400 nmalloc[i] - nfree[i]);
/* Sum the sizes held on the chunk chain (loop header elided above). */
1404 total_chain += nextchain->size;
1405 nextchain = nextchain->next;
/* Final bookkeeping line: sbrk totals plus odd-end accounting. */
1407 PerlIO_printf(PerlIO_stderr(), "\nTotal sbrk(): %d/%d:%d. Odd ends: pad+heads+chain+tail: %d+%d+%d+%d.\n",
1408 goodsbrk + sbrk_slack, sbrks, sbrk_good, sbrk_slack,
1409 start_slack, total_chain, sbrked_remains);
1413 dump_mstats(char *s)
1420 #ifdef USE_PERL_SBRK
1423 # define PERL_SBRK_VIA_MALLOC
1426 # ifdef PERL_SBRK_VIA_MALLOC
1427 # if defined(HIDEMYMALLOC) || defined(EMBEDMYMALLOC)
1430 # include "Error: -DPERL_SBRK_VIA_MALLOC needs -D(HIDE|EMBED)MYMALLOC"
1433 /* It may seem contradictory to use perl's malloc and then let it call */
1434 /* the system malloc; the reason is that version 3.2 of the OS suffered */
1435 /* frequent core dumps inside nxzonefreenolock. This sbrk routine put */
1436 /* an end to those cores. */
1438 # define SYSTEM_ALLOC(a) malloc(a)
1440 # endif /* PERL_SBRK_VIA_MALLOC */
1442 static IV Perl_sbrk_oldchunk;
1443 static long Perl_sbrk_oldsize;
1445 # define PERLSBRK_32_K (1<<15)
1446 # define PERLSBRK_64_K (1<<16)
/*
 * Body of Perl_sbrk (the signature and local declarations fall in elided
 * lines of this extract — confirm parameter/return types there): emulates
 * sbrk() on top of SYSTEM_ALLOC.  Requests are rounded up to a multiple
 * of 0x800 (2 KB); small requests are first served out of the cached
 * leftover chunk (Perl_sbrk_oldchunk/Perl_sbrk_oldsize), otherwise a
 * fresh chunk is obtained from SYSTEM_ALLOC — bumped to 64 KB when the
 * request is >= 32 KB (the intermediate branch is elided) — and any
 * surplus beyond the request is registered for future calls.
 */
1455 if (!size) return 0;
1457 reqsize = size; /* just for the DEBUG_m statement */
/* Round the request up to a 2 KB (0x800) boundary. */
1460 size = (size + 0x7ff) & ~0x7ff;
/* Fast path: carve the request out of the cached leftover chunk. */
1462 if (size <= Perl_sbrk_oldsize) {
1463 got = Perl_sbrk_oldchunk;
1464 Perl_sbrk_oldchunk += size;
1465 Perl_sbrk_oldsize -= size;
/* Slow path: allocate a new chunk; large requests get a 64 KB arena. */
1467 if (size >= PERLSBRK_32_K) {
1470 size = PERLSBRK_64_K;
1473 got = (IV)SYSTEM_ALLOC(size);
/* Align the returned address up to the same 2 KB boundary.
 * NOTE(review): this skips up to 0x7ff bytes of the allocation; the
 * elided lines presumably over-allocate to cover that — confirm. */
1475 got = (got + 0x7ff) & ~0x7ff;
1478 /* Chunk is small, register the rest for future allocs. */
1479 Perl_sbrk_oldchunk = got + reqsize;
1480 Perl_sbrk_oldsize = size - reqsize;
1484 DEBUG_m(PerlIO_printf(Perl_debug_log, "sbrk malloc size %ld (reqsize %ld), left size %ld, give addr 0x%lx\n",
1485 size, reqsize, Perl_sbrk_oldsize, got));
1490 #endif /* ! defined USE_PERL_SBRK */