5 #ifndef NO_FANCY_MALLOC
6 # ifndef SMALL_BUCKET_VIA_TABLE
7 # define SMALL_BUCKET_VIA_TABLE
10 # define BUCKETS_ROOT2
12 # ifndef IGNORE_SMALL_BAD_FREE
13 # define IGNORE_SMALL_BAD_FREE
17 #ifndef PLAIN_MALLOC /* Bulk enable features */
21 # ifndef TWO_POT_OPTIMIZE
22 # define TWO_POT_OPTIMIZE
24 # if defined(PERL_CORE) && !defined(EMERGENCY_SBRK)
25 # define EMERGENCY_SBRK
27 # if defined(PERL_CORE) && !defined(DEBUGGING_MSTATS)
28 # define DEBUGGING_MSTATS
32 #define MIN_BUC_POW2 (sizeof(void*) > 4 ? 3 : 2) /* Allow for 4-byte arena. */
33 #define MIN_BUCKET (MIN_BUC_POW2 * BUCKETS_PER_POW2)
35 #if !(defined(I286) || defined(atarist))
36 /* take 2k unless the block is bigger than that */
37 # define LOG_OF_MIN_ARENA 11
39 /* take 16k unless the block is bigger than that
40 (80286s like large segments!), probably good on the atari too */
41 # define LOG_OF_MIN_ARENA 14
45 # if defined(DEBUGGING) && !defined(NO_RCHECK)
48 # if defined(RCHECK) && defined(IGNORE_SMALL_BAD_FREE)
49 # undef IGNORE_SMALL_BAD_FREE
52 * malloc.c (Caltech) 2/21/82
53 * Chris Kingsley, kingsley@cit-20.
55 * This is a very fast storage allocator. It allocates blocks of a small
56 * number of different sizes, and keeps free lists of each size. Blocks that
57 * don't exactly fit are passed up to the next larger size. In this
58 * implementation, the available sizes are 2^n-4 (or 2^n-12) bytes long.
59 * If PACK_MALLOC is defined, small blocks are 2^n bytes long.
60 * This is designed for use in a program that uses vast quantities of memory,
61 * but bombs when it runs out.
68 # ifndef croak /* make depend */
69 # define croak(mess) fprintf(stderr,mess); exit(1);
81 # define MUTEX_LOCK(l)
85 # define MUTEX_UNLOCK(l)
90 # define DEBUG_m(a) if (debug & 128) a
93 /* I don't much care whether these are defined in sys/types.h--LAW */
95 #define u_char unsigned char
96 #define u_int unsigned int
99 # define u_bigint UV /* Needs to eat *void. */
101 # define u_bigint unsigned long /* Needs to eat *void. */
104 #define u_short unsigned short
106 /* 286 and atarist like big chunks, which gives too much overhead. */
107 #if (defined(RCHECK) || defined(I286) || defined(atarist)) && defined(PACK_MALLOC)
112 * The description below is applicable if PACK_MALLOC is not defined.
114 * The overhead on a block is at least 4 bytes. When free, this space
115 * contains a pointer to the next free block, and the bottom two bits must
116 * be zero. When in use, the first byte is set to MAGIC, and the second
117 * byte is the size index. The remaining bytes are for alignment.
118 * If range checking is enabled and the size of the block fits
119 * in two bytes, then the top two bytes hold the size of the requested block
120 * plus the range checking words, and the header word MINUS ONE.
123 union overhead *ov_next; /* when free */
124 #if MEM_ALIGNBYTES > 4
125 double strut; /* alignment problems */
128 u_char ovu_magic; /* magic number */
129 u_char ovu_index; /* bucket # */
131 u_short ovu_size; /* actual block size */
132 u_int ovu_rmagic; /* range magic number */
135 #define ov_magic ovu.ovu_magic
136 #define ov_index ovu.ovu_index
137 #define ov_size ovu.ovu_size
138 #define ov_rmagic ovu.ovu_rmagic
142 static void botch _((char *s));
144 static void morecore _((int bucket));
145 static int findbucket _((union overhead *freep, int srchlen));
147 #define MAGIC 0xff /* magic # on accounting info */
148 #define RMAGIC 0x55555555 /* magic # on range info */
149 #define RMAGIC_C 0x55 /* magic # on range info */
152 # define RSLOP sizeof (u_int)
153 # ifdef TWO_POT_OPTIMIZE
154 # define MAX_SHORT_BUCKET (12 * BUCKETS_PER_POW2)
156 # define MAX_SHORT_BUCKET (13 * BUCKETS_PER_POW2)
162 #if !defined(PACK_MALLOC) && defined(BUCKETS_ROOT2)
163 # undef BUCKETS_ROOT2
167 # define BUCKET_TABLE_SHIFT 2
168 # define BUCKET_POW2_SHIFT 1
169 # define BUCKETS_PER_POW2 2
171 # define BUCKET_TABLE_SHIFT MIN_BUC_POW2
172 # define BUCKET_POW2_SHIFT 0
173 # define BUCKETS_PER_POW2 1
177 # define MAX_BUCKET_BY_TABLE 13
178 static u_short buck_size[MAX_BUCKET_BY_TABLE + 1] =
180 0, 0, 0, 0, 4, 4, 8, 12, 16, 24, 32, 48, 64, 80,
182 # define BUCKET_SIZE(i) ((i) % 2 ? buck_size[i] : (1 << ((i) >> BUCKET_POW2_SHIFT)))
183 # define BUCKET_SIZE_REAL(i) ((i) <= MAX_BUCKET_BY_TABLE \
185 : ((1 << ((i) >> BUCKET_POW2_SHIFT)) \
187 + POW2_OPTIMIZE_SURPLUS(i)))
189 # define BUCKET_SIZE(i) (1 << ((i) >> BUCKET_POW2_SHIFT))
190 # define BUCKET_SIZE_REAL(i) (BUCKET_SIZE(i) - MEM_OVERHEAD(i) + POW2_OPTIMIZE_SURPLUS(i))
195 /* In this case it is assumed that if we do sbrk() in 2K units, we
196 * will get 2K aligned arenas (at least after some initial
197 * alignment). The bucket number of the given subblock is on the start
198 * of 2K arena which contains the subblock. Several following bytes
199 * contain the magic numbers for the subblocks in the block.
201 * Sizes of chunks are powers of 2 for chunks in buckets <=
202 * MAX_PACKED, after this they are (2^n - sizeof(union overhead)) (to
203 * get alignment right).
205 * Consider an arena for 2^n with n>MAX_PACKED. We suppose that
206 * starts of all the chunks in a 2K arena are in different
207 * 2^n-byte-long chunks. If the top of the last chunk is aligned on a
208 * boundary of 2K block, this means that sizeof(union
209 * overhead)*"number of chunks" < 2^n, or sizeof(union overhead)*2K <
210 * 4^n, or n > 6 + log2(sizeof()/2)/2, since a chunk of size 2^n -
211 * overhead is used. Since this rules out n = 7 for 8 byte alignment,
212 * we specialcase allocation of the first of 16 128-byte-long chunks.
214 * Note that with the above assumption we automatically have enough
215 * place for MAGIC at the start of 2K block. Note also that we
216 * overlay union overhead over the chunk, thus the start of small chunks
217 * is immediately overwritten after freeing. */
218 # define MAX_PACKED_POW2 6
219 # define MAX_PACKED (MAX_PACKED_POW2 * BUCKETS_PER_POW2 + BUCKET_POW2_SHIFT)
220 # define MAX_POW2_ALGO ((1<<(MAX_PACKED_POW2 + 1)) - M_OVERHEAD)
221 # define TWOK_MASK ((1<<LOG_OF_MIN_ARENA) - 1)
222 # define TWOK_MASKED(x) ((u_bigint)(x) & ~TWOK_MASK)
223 # define TWOK_SHIFT(x) ((u_bigint)(x) & TWOK_MASK)
224 # define OV_INDEXp(block) ((u_char*)(TWOK_MASKED(block)))
225 # define OV_INDEX(block) (*OV_INDEXp(block))
226 # define OV_MAGIC(block,bucket) (*(OV_INDEXp(block) + \
227 (TWOK_SHIFT(block)>> \
228 (bucket>>BUCKET_POW2_SHIFT)) + \
229 (bucket >= MIN_NEEDS_SHIFT ? 1 : 0)))
230 /* A bucket can have a shift smaller than it size, we need to
231 shift its magic number so it will not overwrite index: */
232 # ifdef BUCKETS_ROOT2
233 # define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2 - 1) /* Shift 80 greater than chunk 64. */
235 # define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2) /* Shift 128 greater than chunk 32. */
237 # define CHUNK_SHIFT 0
239 /* Number of active buckets of given ordinal. */
240 #ifdef IGNORE_SMALL_BAD_FREE
241 #define FIRST_BUCKET_WITH_CHECK (6 * BUCKETS_PER_POW2) /* 64 */
242 # define N_BLKS(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK \
243 ? ((1<<LOG_OF_MIN_ARENA) - 1)/BUCKET_SIZE(bucket) \
246 # define N_BLKS(bucket) n_blks[bucket]
249 static u_short n_blks[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] =
251 # if BUCKETS_PER_POW2==1
253 (MIN_BUC_POW2==2 ? 384 : 0),
254 224, 120, 62, 31, 16, 8, 4, 2
257 (MIN_BUC_POW2==2 ? 384 : 0), (MIN_BUC_POW2==2 ? 384 : 0), /* 4, 4 */
258 224, 149, 120, 80, 62, 41, 31, 25, 16, 16, 8, 8, 4, 4, 2, 2
262 /* Shift of the first bucket with the given ordinal inside 2K chunk. */
263 #ifdef IGNORE_SMALL_BAD_FREE
264 # define BLK_SHIFT(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK \
265 ? ((1<<LOG_OF_MIN_ARENA) \
266 - BUCKET_SIZE(bucket) * N_BLKS(bucket)) \
269 # define BLK_SHIFT(bucket) blk_shift[bucket]
272 static u_short blk_shift[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] =
274 # if BUCKETS_PER_POW2==1
276 (MIN_BUC_POW2==2 ? 512 : 0),
277 256, 128, 64, 64, /* 8 to 64 */
278 16*sizeof(union overhead),
279 8*sizeof(union overhead),
280 4*sizeof(union overhead),
281 2*sizeof(union overhead),
284 (MIN_BUC_POW2==2 ? 512 : 0), (MIN_BUC_POW2==2 ? 512 : 0),
285 256, 260, 128, 128, 64, 80, 64, 48, /* 8 to 96 */
286 16*sizeof(union overhead), 16*sizeof(union overhead),
287 8*sizeof(union overhead), 8*sizeof(union overhead),
288 4*sizeof(union overhead), 4*sizeof(union overhead),
289 2*sizeof(union overhead), 2*sizeof(union overhead),
293 #else /* !PACK_MALLOC */
295 # define OV_MAGIC(block,bucket) (block)->ov_magic
296 # define OV_INDEX(block) (block)->ov_index
297 # define CHUNK_SHIFT 1
298 # define MAX_PACKED -1
299 #endif /* !PACK_MALLOC */
301 #define M_OVERHEAD (sizeof(union overhead) + RSLOP)
304 # define MEM_OVERHEAD(bucket) \
305 (bucket <= MAX_PACKED ? 0 : M_OVERHEAD)
306 # ifdef SMALL_BUCKET_VIA_TABLE
307 # define START_SHIFTS_BUCKET ((MAX_PACKED_POW2 + 1) * BUCKETS_PER_POW2)
308 # define START_SHIFT MAX_PACKED_POW2
309 # ifdef BUCKETS_ROOT2 /* Chunks of size 3*2^n. */
310 # define SIZE_TABLE_MAX 80
312 # define SIZE_TABLE_MAX 64
314 static char bucket_of[] =
316 # ifdef BUCKETS_ROOT2 /* Chunks of size 3*2^n. */
317 /* 0 to 15 in 4-byte increments. */
318 (sizeof(void*) > 4 ? 6 : 5), /* 4/8, 5-th bucket for better reports */
321 9, 9, 10, 10, /* 24, 32 */
322 11, 11, 11, 11, /* 48 */
323 12, 12, 12, 12, /* 64 */
324 13, 13, 13, 13, /* 80 */
325 13, 13, 13, 13 /* 80 */
326 # else /* !BUCKETS_ROOT2 */
327 /* 0 to 15 in 4-byte increments. */
328 (sizeof(void*) > 4 ? 3 : 2),
334 # endif /* !BUCKETS_ROOT2 */
336 # else /* !SMALL_BUCKET_VIA_TABLE */
337 # define START_SHIFTS_BUCKET MIN_BUCKET
338 # define START_SHIFT (MIN_BUC_POW2 - 1)
339 # endif /* !SMALL_BUCKET_VIA_TABLE */
340 #else /* !PACK_MALLOC */
341 # define MEM_OVERHEAD(bucket) M_OVERHEAD
342 # ifdef SMALL_BUCKET_VIA_TABLE
343 # undef SMALL_BUCKET_VIA_TABLE
345 # define START_SHIFTS_BUCKET MIN_BUCKET
346 # define START_SHIFT (MIN_BUC_POW2 - 1)
347 #endif /* !PACK_MALLOC */
350 * Big allocations are often of the size 2^n bytes. To make them a
351 * little bit better, make blocks of size 2^n+pagesize for big n.
354 #ifdef TWO_POT_OPTIMIZE
356 # ifndef PERL_PAGESIZE
357 # define PERL_PAGESIZE 4096
359 # ifndef FIRST_BIG_POW2
360 # define FIRST_BIG_POW2 15 /* 32K, 16K is used too often. */
362 # define FIRST_BIG_BLOCK (1<<FIRST_BIG_POW2)
363 /* If this value or more, check against bigger blocks. */
364 # define FIRST_BIG_BOUND (FIRST_BIG_BLOCK - M_OVERHEAD)
365 /* If less than this value, goes into 2^n-overhead-block. */
366 # define LAST_SMALL_BOUND ((FIRST_BIG_BLOCK>>1) - M_OVERHEAD)
368 # define POW2_OPTIMIZE_ADJUST(nbytes) \
369 ((nbytes >= FIRST_BIG_BOUND) ? nbytes -= PERL_PAGESIZE : 0)
370 # define POW2_OPTIMIZE_SURPLUS(bucket) \
371 ((bucket >= FIRST_BIG_POW2 * BUCKETS_PER_POW2) ? PERL_PAGESIZE : 0)
373 #else /* !TWO_POT_OPTIMIZE */
374 # define POW2_OPTIMIZE_ADJUST(nbytes)
375 # define POW2_OPTIMIZE_SURPLUS(bucket) 0
376 #endif /* !TWO_POT_OPTIMIZE */
378 #if defined(HAS_64K_LIMIT) && defined(PERL_CORE)
379 # define BARK_64K_LIMIT(what,nbytes,size) \
380 if (nbytes > 0xffff) { \
381 PerlIO_printf(PerlIO_stderr(), \
382 "%s too large: %lx\n", what, size); \
385 #else /* !HAS_64K_LIMIT || !PERL_CORE */
386 # define BARK_64K_LIMIT(what,nbytes,size)
387 #endif /* !HAS_64K_LIMIT || !PERL_CORE */
390 # define MIN_SBRK 2048
394 # define FIRST_SBRK (32*1024)
397 /* Minimal sbrk in percents of what is already alloced. */
398 #ifndef MIN_SBRK_FRAC
399 # define MIN_SBRK_FRAC 3
402 #ifndef SBRK_ALLOW_FAILURES
403 # define SBRK_ALLOW_FAILURES 3
406 #ifndef SBRK_FAILURE_PRICE
407 # define SBRK_FAILURE_PRICE 50
410 #if defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)
413 # define BIG_SIZE (1<<16) /* 64K */
416 static char *emergency_buffer;
417 static MEM_SIZE emergency_buffer_size;
423 if (size >= BIG_SIZE) {
424 /* Give the possibility to recover: */
425 die("Out of memory during request for %i bytes", size);
426 /* croak may eat too much memory. */
429 if (!emergency_buffer) {
431 /* First offense, give a possibility to recover by dieing. */
432 /* No malloc involved here: */
433 GV **gvp = (GV**)hv_fetch(defstash, "^M", 2, 0);
437 if (!gvp) gvp = (GV**)hv_fetch(defstash, "\015", 1, 0);
438 if (!gvp || !(sv = GvSV(*gvp)) || !SvPOK(sv)
439 || (SvLEN(sv) < (1<<LOG_OF_MIN_ARENA) - M_OVERHEAD))
440 return (char *)-1; /* Now die die die... */
442 /* Got it, now detach SvPV: */
444 /* Check alignment: */
445 if (((u_bigint)(pv - M_OVERHEAD)) & ((1<<LOG_OF_MIN_ARENA) - 1)) {
446 PerlIO_puts(PerlIO_stderr(),"Bad alignment of $^M!\n");
447 return (char *)-1; /* die die die */
450 emergency_buffer = pv - M_OVERHEAD;
451 emergency_buffer_size = SvLEN(sv) + M_OVERHEAD;
454 die("Out of memory!"); /* croak may eat too much memory. */
456 else if (emergency_buffer_size >= size) {
457 emergency_buffer_size -= size;
458 return emergency_buffer + emergency_buffer_size;
461 return (char *)-1; /* poor guy... */
464 #else /* !(defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)) */
465 # define emergency_sbrk(size) -1
466 #endif /* !(defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)) */
469 * nextf[i] is the pointer to the next free block of size 2^i. The
470 * smallest allocatable block is 8 bytes. The overhead information
471 * precedes the data area returned to the user.
473 #define NBUCKETS (32*BUCKETS_PER_POW2 + 1)
474 static union overhead *nextf[NBUCKETS];
477 #define sbrk(a) Perl_sbrk(a)
478 Malloc_t Perl_sbrk _((int size));
480 #ifdef DONT_DECLARE_STD
485 extern Malloc_t sbrk(int);
489 #ifdef DEBUGGING_MSTATS
491 * nmalloc[i] is the difference between the number of mallocs and frees
492 * for a given block size.
494 static u_int nmalloc[NBUCKETS];
495 static u_int sbrk_slack;
496 static u_int start_slack;
499 static u_int goodsbrk;
502 #define ASSERT(p) if (!(p)) botch(STRINGIFY(p)); else
506 PerlIO_printf(PerlIO_stderr(), "assertion botched: %s\n", s);
/*
 * malloc(nbytes): entry point of the bucket allocator.  Maps the request
 * onto a size bucket (table lookup for small sizes, shift loop otherwise),
 * refills the bucket from morecore() when its free list is empty, pops the
 * head block, stamps accounting/magic info, and returns the user area.
 * NOTE(review): this extract is missing interior source lines (the numeric
 * prefixes are stale original line numbers); comments describe only the
 * code visible here.
 */
514 malloc(register size_t nbytes)
516 	register union overhead *p;
518 	register MEM_SIZE shiftr;
520 #if defined(DEBUGGING) || defined(RCHECK)
	/* Remember the original request for the RCHECK bookkeeping below. */
521 	MEM_SIZE size = nbytes;
524 	BARK_64K_LIMIT("Allocation",nbytes,nbytes);
	/* A "negative" size means an upstream size computation overflowed. */
526 	if ((long)nbytes < 0)
527 	    croak("panic: malloc");
530 	MUTEX_LOCK(&malloc_mutex);
532 	 * Convert amount of memory requested into
533 	 * closest block size stored in hash buckets
534 	 * which satisfies request.  Account for
535 	 * space used per block for accounting.
538 #  ifdef SMALL_BUCKET_VIA_TABLE
	/* Small requests: direct table lookup instead of the shift loop. */
541 	else if (nbytes <= SIZE_TABLE_MAX) {
542 	    bucket = bucket_of[(nbytes - 1) >> BUCKET_TABLE_SHIFT];
547 	    if (nbytes <= MAX_POW2_ALGO) goto do_shifts;
	/* General case: add overhead, round to 4, then shift up to a bucket. */
552 	POW2_OPTIMIZE_ADJUST(nbytes);
553 	nbytes += M_OVERHEAD;
554 	nbytes = (nbytes + 3) &~ 3;
556 	shiftr = (nbytes - 1) >> START_SHIFT;
557 	bucket = START_SHIFTS_BUCKET;
558 	/* apart from this loop, this is O(1) */
560 	    bucket += BUCKETS_PER_POW2;
563 	 * If nothing in hash bucket right now,
564 	 * request more memory from the system.
566 	if (nextf[bucket] == NULL)
	/* Re-check after morecore(); still NULL means the refill failed. */
568 	if ((p = nextf[bucket]) == NULL) {
569 		MUTEX_UNLOCK(&malloc_mutex);
572 			PerlIO_puts(PerlIO_stderr(),"Out of memory!\n");
580 	DEBUG_m(PerlIO_printf(Perl_debug_log,
581 			      "0x%lx: (%05lu) malloc %ld bytes\n",
582 			      (unsigned long)(p+1), (unsigned long)(an++),
585 	/* remove from linked list */
	/* Sanity check: a free-list pointer must be overhead-aligned. */
587 	if (*((int*)p) & (sizeof(union overhead) - 1))
588 	    PerlIO_printf(PerlIO_stderr(), "Corrupt malloc ptr 0x%lx at 0x%lx\n",
589 		(unsigned long)*((int*)p),(unsigned long)p);
591 	nextf[bucket] = p->ov_next;
592 #ifdef IGNORE_SMALL_BAD_FREE
	/* Only buckets >= FIRST_BUCKET_WITH_CHECK get a magic stamp. */
593 	if (bucket >= FIRST_BUCKET_WITH_CHECK)
595 	    OV_MAGIC(p, bucket) = MAGIC;
597 	OV_INDEX(p) = bucket;
601 	 * Record allocated size of block and
602 	 * bound space with magic numbers.
604 	p->ov_rmagic = RMAGIC;
605 	if (bucket <= MAX_SHORT_BUCKET) {
608 	    nbytes = size + M_OVERHEAD;
609 	    p->ov_size = nbytes - 1;
610 	    if ((i = nbytes & 3)) {
		/* Pad odd tail bytes with the byte-sized range magic. */
613 		*((char *)((caddr_t)p + nbytes - RSLOP + i)) = RMAGIC_C;
615 	    nbytes = (nbytes + 3) &~ 3;
616 	    *((u_int *)((caddr_t)p + nbytes - RSLOP)) = RMAGIC;
619 	MUTEX_UNLOCK(&malloc_mutex);
	/* CHUNK_SHIFT skips the overhead header (0 under PACK_MALLOC). */
620 	return ((Malloc_t)(p + CHUNK_SHIFT));
623 static char *last_sbrk_top;
624 static char *last_op; /* This arena can be easily extended. */
625 static int sbrked_remains;
626 static int sbrk_good = SBRK_ALLOW_FAILURES * SBRK_FAILURE_PRICE;
628 #ifdef DEBUGGING_MSTATS
632 struct chunk_chain_s {
633 struct chunk_chain_s *next;
636 static struct chunk_chain_s *chunk_chain;
638 static char max_bucket;
640 /* Cutoff a piece of one of the chunks in the chain. Prefer smaller chunk. */
/*
 * get_from_chain(size): carve a piece off one of the leftover chunks kept
 * on chunk_chain.  Scans the whole chain for the best (smallest-remainder)
 * fit; on success either shortens that chunk in place or unlinks it
 * entirely.  Returns NULL when no chunk is large enough.
 * NOTE(review): interior source lines are missing from this extract; the
 * numeric prefixes are stale original line numbers.
 */
642 get_from_chain(MEM_SIZE size)
644     struct chunk_chain_s *elt = chunk_chain, **oldp = &chunk_chain;
645     struct chunk_chain_s **oldgoodp = NULL;
646     long min_remain = LONG_MAX;
	/* Best-fit scan: remember the link whose remainder is smallest. */
649 	if (elt->size >= size) {
650 	    long remains = elt->size - size;
651 	    if (remains >= 0 && remains < min_remain) {
653 		min_remain = remains;
659 	oldp = &( elt->next );
662     if (!oldgoodp) return NULL;
	/* Remainder big enough to keep: relink a shrunken chunk header
	   just past the returned piece. */
664 	void *ret = *oldgoodp;
665 	struct chunk_chain_s *next = (*oldgoodp)->next;
667 	*oldgoodp = (struct chunk_chain_s *)((char*)ret + size);
668 	(*oldgoodp)->size = min_remain;
669 	(*oldgoodp)->next = next;
	/* Otherwise hand over the whole chunk and drop it from the chain. */
672 	void *ret = *oldgoodp;
673 	*oldgoodp = (*oldgoodp)->next;
/*
 * add_to_chain(p, size, chip): prepend a leftover memory region to
 * chunk_chain so later morecore() calls can reuse it.  `chip` is the
 * number of leading bytes to skip; the usable size recorded is
 * size - chip.  NOTE(review): lines between the declaration and the
 * relinking (presumably computing `cp` from p + chip) are missing from
 * this extract.
 */
680 add_to_chain(void *p, MEM_SIZE size, MEM_SIZE chip)
682     struct chunk_chain_s *next = chunk_chain;
686     chunk_chain = (struct chunk_chain_s *)cp;
687     chunk_chain->size = size - chip;
688     chunk_chain->next = next;
/*
 * get_from_bigger_buckets(bucket, size): steal a free block from some
 * larger bucket and donate its space to chunk_chain.  Stealing is rate
 * limited: each bucket must accumulate `price` refusals (bucketprice)
 * before it is raided, so big free blocks are not shredded eagerly.
 * NOTE(review): interior source lines (including the `price` computation
 * and the return paths) are missing from this extract.
 */
693 get_from_bigger_buckets(int bucket, MEM_SIZE size)
696     static int bucketprice[NBUCKETS];
697     while (bucket <= max_bucket) {
698 	/* We postpone stealing from bigger buckets until we want it
700 	if (nextf[bucket] && bucketprice[bucket]++ >= price) {
	    /* Back up over the overhead header to the raw block start. */
702 	    void *ret = (void*)(nextf[bucket] - 1 + CHUNK_SHIFT);
703 	    bucketprice[bucket] = 0;
	    /* The raided block may have been the last_op fast-path block. */
704 	    if (((char*)nextf[bucket]) - M_OVERHEAD == last_op) {
705 		last_op = NULL; /* Disable optimization */
707 	    nextf[bucket] = nextf[bucket]->ov_next;
708 #ifdef DEBUGGING_MSTATS
710 	    start_slack -= M_OVERHEAD;
712 	    add_to_chain(ret, (BUCKET_SIZE(bucket) +
713 			       POW2_OPTIMIZE_SURPLUS(bucket)),
723 * Allocate more memory to the indicated bucket.
/*
 * morecore(bucket): refill the free list of `bucket`.  Tries, in order:
 * stealing a whole arena-sized block from the matching power-of-2 bucket,
 * the chunk_chain, bigger buckets, the unconsumed tail of the previous
 * sbrk(), and finally a fresh sbrk() (with continuity/alignment repair and
 * an emergency_sbrk() fallback).  The obtained arena is then split into
 * equal chunks threaded onto nextf[bucket].
 * NOTE(review): this extract is missing many interior source lines
 * (declarations, braces, else-branches); the numeric prefixes are stale
 * original line numbers.  Comments describe only visible code.
 */
726 morecore(register int bucket)
728   	register union overhead *ovp;
729   	register int rnu;       /* 2^rnu bytes will be requested */
730   	register int nblks;     /* become nblks blocks of the desired size */
731 	register MEM_SIZE siz, needed;
	/* A bucket as wide as MEM_SIZE itself cannot be satisfied. */
736 	if (bucket == sizeof(MEM_SIZE)*8*BUCKETS_PER_POW2) {
737 	    croak("Allocation too large");
740 	if (bucket > max_bucket)
	/* Small buckets are carved out of a full minimal arena (2^LOG_OF_MIN_ARENA);
	   large buckets get one chunk per arena. */
743 	    rnu = ( (bucket <= (LOG_OF_MIN_ARENA << BUCKET_POW2_SHIFT))
745 		    : (bucket >> BUCKET_POW2_SHIFT) );
746 	    /* This may be overwritten later: */
747 	    nblks = 1 << (rnu - (bucket >> BUCKET_POW2_SHIFT)); /* how many blocks to get */
748 	    needed = ((MEM_SIZE)1 << rnu) + POW2_OPTIMIZE_SURPLUS(bucket);
	/* Source 1: a free block of exactly arena size in its own bucket. */
749 	    if (nextf[rnu << BUCKET_POW2_SHIFT]) { /* 2048b bucket. */
750 	        ovp = nextf[rnu << BUCKET_POW2_SHIFT] - 1 + CHUNK_SHIFT;
751 		nextf[rnu << BUCKET_POW2_SHIFT]
752 		    = nextf[rnu << BUCKET_POW2_SHIFT]->ov_next;
753 #ifdef DEBUGGING_MSTATS
754 		nmalloc[rnu << BUCKET_POW2_SHIFT]--;
755 		start_slack -= M_OVERHEAD;
757 		DEBUG_m(PerlIO_printf(Perl_debug_log,
758 				      "stealing %ld bytes from %ld arena\n",
759 				      (long) needed, (long) rnu << BUCKET_POW2_SHIFT));
	/* Source 2: leftover chunks recorded on chunk_chain. */
760 	    } else if (chunk_chain
761 		       && (ovp = (union overhead*) get_from_chain(needed))) {
762 		DEBUG_m(PerlIO_printf(Perl_debug_log,
763 				      "stealing %ld bytes from chain\n",
	/* Source 3: raid a bigger bucket (note: assignment in condition is
	   intentional here). */
765 	    } else if (ovp = (union overhead*)
766 	 	                  get_from_bigger_buckets((rnu << BUCKET_POW2_SHIFT) + 1,
768 		DEBUG_m(PerlIO_printf(Perl_debug_log,
769 				      "stealing %ld bytes from bigger buckets\n",
	/* Source 4: unconsumed tail of the previous sbrk(). */
771 	    } else if (needed <= sbrked_remains) {
772 		ovp = (union overhead *)(last_sbrk_top - sbrked_remains);
773 		sbrked_remains -= needed;
774 		last_op = (char*)ovp;
776 		/* Need to do (possibly expensive) system call. Try to
777 		   optimize it for rare calling. */
778 		MEM_SIZE require = needed - sbrked_remains;
	/* Over-ask: grow in MIN_SBRK multiples, proportional to what is
	   already allocated, to keep sbrk() calls rare. */
782 		if (!last_sbrk_top && require < FIRST_SBRK)
783 		    require = FIRST_SBRK;
784 		else if (require < MIN_SBRK) require = MIN_SBRK;
786 		if (require < goodsbrk * MIN_SBRK_FRAC / 100)
787 		    require = goodsbrk * MIN_SBRK_FRAC / 100;
788 		require = ((require - 1 + MIN_SBRK) / MIN_SBRK) * MIN_SBRK;
795 		DEBUG_m(PerlIO_printf(Perl_debug_log,
796 				      "sbrk(%ld) for %ld-byte-long arena\n",
797 				      (long)require, (long) needed));
798 		cp = (char *)sbrk(require);
799 #ifdef DEBUGGING_MSTATS
802 		if (cp == last_sbrk_top) {
803 		    /* Common case, anything is fine. */
805 		    ovp = (union overhead *) (cp - sbrked_remains);
806 		    sbrked_remains = require - (needed - sbrked_remains);
807 		} else if (cp == (char *)-1) { /* no more room! */
808 		    ovp = (union overhead *)emergency_sbrk(needed);
809 		    if (ovp == (union overhead *)-1)
812 		} else { /* Non-continuous or first sbrk(). */
813 		    long add = sbrked_remains;
816 		    if (sbrked_remains) {	/* Put rest into chain, we
817 						   cannot use it right now. */
818 			add_to_chain((void*)(last_sbrk_top - sbrked_remains),
822 		    /* Second, check alignment. */
825 #ifndef atarist /* on the atari we dont have to worry about this */
826 #  ifndef I286 	/* The sbrk(0) call on the I286 always returns the next segment */
828 		    /* CHUNK_SHIFT is 1 for PACK_MALLOC, 0 otherwise. */
829 		    if ((UV)cp & (0x7FF >> CHUNK_SHIFT)) { /* Not aligned. */
830 			slack = (0x800 >> CHUNK_SHIFT)
831 			    - ((UV)cp & (0x7FF >> CHUNK_SHIFT));
838 			DEBUG_m(PerlIO_printf(Perl_debug_log,
839 					      "sbrk(%ld) to fix non-continuous/off-page sbrk:\n\t%ld for alignement,\t%ld were assumed to come from the tail of the previous sbrk\n",
840 					      (long)add, (long) slack,
841 					      (long) sbrked_remains));
	/* Second sbrk() to round the break up to arena alignment. */
842 			newcp = (char *)sbrk(add);
843 #if defined(DEBUGGING_MSTATS)
847 			if (newcp != cp + require) {
848 			    /* Too bad: even rounding sbrk() is not continuous.*/
849 			    DEBUG_m(PerlIO_printf(Perl_debug_log,
850 						  "failed to fix bad sbrk()\n"));
853 			    croak("panic: Off-page sbrk");
855 			    if (sbrked_remains) {
857 #if defined(DEBUGGING_MSTATS)
858 				sbrk_slack += require;
861 			    DEBUG_m(PerlIO_printf(Perl_debug_log,
862 						  "straight sbrk(%ld)\n",
864 			    cp = (char *)sbrk(require);
865 #ifdef DEBUGGING_MSTATS
868 			    if (cp == (char *)-1)
871 			sbrk_good = -1;	/* Disable optimization!
872 				   Continue with not-aligned... */
875 			require += sbrked_remains;
880 		    sbrk_good -= SBRK_FAILURE_PRICE;
883 		    ovp = (union overhead *) cp;
885 		     * Round up to minimum allocation size boundary
886 		     * and deduct from block count to reflect.
889 #ifndef I286	/* Again, this should always be ok on an 80286 */
891 			ovp = (union overhead *)(((UV)ovp + 8) & ~7);
892 			DEBUG_m(PerlIO_printf(Perl_debug_log,
893 					      "fixing sbrk(): %d bytes off machine alignement\n",
894 					      (int)((UV)ovp & 7)));
896 # if defined(DEBUGGING_MSTATS)
897 			/* This is only approx. if TWO_POT_OPTIMIZE: */
898 			sbrk_slack += (1 << bucket);
902 		sbrked_remains = require - needed;
904 	    last_sbrk_top = cp + require;
905 	    last_op = (char*) cp;
906 #ifdef DEBUGGING_MSTATS
913 	 * Add new memory allocated to that on
914 	 * free list for this hash bucket.
916 	siz = BUCKET_SIZE(bucket);
	/* PACK_MALLOC: the bucket index lives at the arena start, and small
	   buckets shift past their per-arena index/magic area. */
918 	*(u_char*)ovp = bucket;	/* Fill index. */
919 	if (bucket <= MAX_PACKED) {
920 	    ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket));
921 	    nblks = N_BLKS(bucket);
922 #  ifdef DEBUGGING_MSTATS
923 	    start_slack += BLK_SHIFT(bucket);
925 	} else if (bucket < LOG_OF_MIN_ARENA * BUCKETS_PER_POW2) {
926 	    ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket));
927 	    siz -= sizeof(union overhead);
928 	} else ovp++;		/* One chunk per block. */
929 #endif /* PACK_MALLOC */
931 #ifdef DEBUGGING_MSTATS
932 	nmalloc[bucket] += nblks;
933 	if (bucket > MAX_PACKED) {
934 	    start_slack += M_OVERHEAD * nblks;
	/* Thread the arena into siz-byte chunks on the free list. */
937 	while (--nblks > 0) {
938 		ovp->ov_next = (union overhead *)((caddr_t)ovp + siz);
939 		ovp = (union overhead *)((caddr_t)ovp + siz);
941 	/* Not all sbrks return zeroed memory.*/
942 	ovp->ov_next = (union overhead *)NULL;
	/* 128-byte bucket special case (see the n>6 alignment discussion in
	   the file header comments): shift the first chunk back by one
	   overhead so its start fits the arena. */
944 	if (bucket == 7*BUCKETS_PER_POW2) { /* Special case, explanation is above. */
945 	    union overhead *n_op = nextf[7*BUCKETS_PER_POW2]->ov_next;
946 	    nextf[7*BUCKETS_PER_POW2] =
947 		(union overhead *)((caddr_t)nextf[7*BUCKETS_PER_POW2]
948 				   - sizeof(union overhead));
949 	    nextf[7*BUCKETS_PER_POW2]->ov_next = n_op;
951 #endif /* !PACK_MALLOC */
957 register MEM_SIZE size;
958 register union overhead *ovp;
959 char *cp = (char*)mp;
964 DEBUG_m(PerlIO_printf(Perl_debug_log,
965 "0x%lx: (%05lu) free\n",
966 (unsigned long)cp, (unsigned long)(an++)));
970 ovp = (union overhead *)((caddr_t)cp
971 - sizeof (union overhead) * CHUNK_SHIFT);
973 bucket = OV_INDEX(ovp);
975 #ifdef IGNORE_SMALL_BAD_FREE
976 if ((bucket >= FIRST_BUCKET_WITH_CHECK)
977 && (OV_MAGIC(ovp, bucket) != MAGIC))
979 if (OV_MAGIC(ovp, bucket) != MAGIC)
982 static int bad_free_warn = -1;
983 if (bad_free_warn == -1) {
984 char *pbf = PerlEnv_getenv("PERL_BADFREE");
985 bad_free_warn = (pbf) ? atoi(pbf) : 1;
990 warn("%s free() ignored",
991 ovp->ov_rmagic == RMAGIC - 1 ? "Duplicate" : "Bad");
993 warn("Bad free() ignored");
997 MUTEX_LOCK(&malloc_mutex);
999 ASSERT(ovp->ov_rmagic == RMAGIC);
1000 if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) {
1002 MEM_SIZE nbytes = ovp->ov_size + 1;
1004 if ((i = nbytes & 3)) {
1007 ASSERT(*((char *)((caddr_t)ovp + nbytes - RSLOP + i))
1011 nbytes = (nbytes + 3) &~ 3;
1012 ASSERT(*(u_int *)((caddr_t)ovp + nbytes - RSLOP) == RMAGIC);
1014 ovp->ov_rmagic = RMAGIC - 1;
1016 ASSERT(OV_INDEX(ovp) < NBUCKETS);
1017 size = OV_INDEX(ovp);
1018 ovp->ov_next = nextf[size];
1020 MUTEX_UNLOCK(&malloc_mutex);
1024 * When a program attempts "storage compaction" as mentioned in the
1025 * old malloc man page, it realloc's an already freed block. Usually
1026 * this is the last block it freed; occasionally it might be farther
1027 * back. We have to search all the free lists for the block in order
1028 * to determine its bucket: 1st we make one pass thru the lists
1029 * checking only the first block in each; if that fails we search
1030 * ``reall_srchlen'' blocks in each list for a match (the variable
1031 * is extern so the caller can modify it). If that fails we just copy
1032 * however many bytes was given to realloc() and hope it's not huge.
1034 int reall_srchlen = 4; /* 4 should be plenty, -1 =>'s whole list */
/*
 * realloc(mp, nbytes): resize a block.  Three strategies are visible:
 * (1) the new size still fits the current bucket (and is not absurdly
 * smaller) -> reuse in place, refreshing RCHECK magic; (2) the block is
 * the most recent sbrk()ed arena (last_op) -> try to grow it in place by
 * extending the break; (3) otherwise malloc a new block and copy.  Blocks
 * that were already free()d ("compaction") are located via findbucket().
 * NOTE(review): many interior source lines are missing from this extract
 * (the numeric prefixes are stale original line numbers); comments
 * describe only visible code.
 */
1037 realloc(void *mp, size_t nbytes)
1039 	register MEM_SIZE onb;
1040 	union overhead *ovp;
1041 	char *res, prev_bucket;
1042 	register int bucket;
1043 	int was_alloced = 0, incr;
1044 	char *cp = (char*)mp;
1046 #if defined(DEBUGGING) || !defined(PERL_CORE)
1047 	MEM_SIZE size = nbytes;
1049 	if ((long)nbytes < 0)
1050 	    croak("panic: realloc");
1053 	BARK_64K_LIMIT("Reallocation",nbytes,size);
	/* NULL input degenerates to plain malloc (presumably guarded by a
	   `if (!cp)` on a missing line — confirm against full source). */
1055 	    return malloc(nbytes);
1057 	MUTEX_LOCK(&malloc_mutex);
1058 	ovp = (union overhead *)((caddr_t)cp
1059 				- sizeof (union overhead) * CHUNK_SHIFT);
1060 	bucket = OV_INDEX(ovp);
1061 #ifdef IGNORE_SMALL_BAD_FREE
	/* Small buckets carry no magic; only larger ones can be validated. */
1062 	if ((bucket < FIRST_BUCKET_WITH_CHECK)
1063 	    || (OV_MAGIC(ovp, bucket) == MAGIC))
1065 	if (OV_MAGIC(ovp, bucket) == MAGIC)
1071 		 * Already free, doing "compaction".
1073 		 * Search for the old block of memory on the
1074 		 * free list.  First, check the most common
1075 		 * case (last element free'd), then (this failing)
1076 		 * the last ``reall_srchlen'' items free'd.
1077 		 * If all lookups fail, then assume the size of
1078 		 * the memory block being realloc'd is the
1079 		 * smallest possible.
1081 		if ((bucket = findbucket(ovp, 1)) < 0 &&
1082 		    (bucket = findbucket(ovp, reall_srchlen)) < 0)
1085 	onb = BUCKET_SIZE_REAL(bucket);
1087 	 *  avoid the copy if same size block.
1088 	 *  We are not agressive with boundary cases. Note that it might
1089 	 *  (for a small number of cases) give false negative if
1090 	 *  both new size and old one are in the bucket for
1091 	 *  FIRST_BIG_POW2, but the new one is near the lower end.
1093 	 *  We do not try to go to 1.5 times smaller bucket so far.
	/* incr: 1 = must grow, 0 = fits current bucket (branches for the
	   shrink decision follow). */
1095 	if (nbytes > onb) incr = 1;
1097 #ifdef DO_NOT_TRY_HARDER_WHEN_SHRINKING
1098 	    if ( /* This is a little bit pessimal if PACK_MALLOC: */
1099 		nbytes > ( (onb >> 1) - M_OVERHEAD )
1100 #  ifdef TWO_POT_OPTIMIZE
1101 		|| (bucket == FIRST_BIG_POW2 && nbytes >= LAST_SMALL_BOUND )
1104 #else  /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */
	/* Shrink only if the request would fit the next-smaller bucket. */
1105 	    prev_bucket = ( (bucket > MAX_PACKED + 1)
1106 			    ? bucket - BUCKETS_PER_POW2
1108 	    if (nbytes > BUCKET_SIZE_REAL(prev_bucket))
1109 #endif /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */
1114 #ifdef STRESS_REALLOC
1115 		|| 1 /* always do it the hard way */
	/* Strategy 1: same bucket — reuse in place. */
1118 	else if (incr == 0) {
1122 		 * Record new allocated size of block and
1123 		 * bound space with magic numbers.
1125 		if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) {
1126 		       int i, nb = ovp->ov_size + 1;
	/* Verify the old trailing magic before overwriting it. */
1131 			   ASSERT(*((char *)((caddr_t)ovp + nb - RSLOP + i)) == RMAGIC_C);
1135 		       ASSERT(*(u_int *)((caddr_t)ovp + nb - RSLOP) == RMAGIC);
1137 			* Convert amount of memory requested into
1138 			* closest block size stored in hash buckets
1139 			* which satisfies request.  Account for
1140 			* space used per block for accounting.
1142 		       nbytes += M_OVERHEAD;
1143 		       ovp->ov_size = nbytes - 1;
1144 		       if ((i = nbytes & 3)) {
1147 			       *((char *)((caddr_t)ovp + nbytes - RSLOP + i))
1150 		       nbytes = (nbytes + 3) &~ 3;
1151 		       *((u_int *)((caddr_t)ovp + nbytes - RSLOP)) = RMAGIC;
1155 		MUTEX_UNLOCK(&malloc_mutex);
	/* Strategy 2: last arena allocated — try to extend the break. */
1156 	} else if (incr == 1 && (cp - M_OVERHEAD == last_op)
1157 		   && (onb > (1 << LOG_OF_MIN_ARENA))) {
1158 	    MEM_SIZE require, newarena = nbytes, pow;
1161 	    POW2_OPTIMIZE_ADJUST(newarena);
1162 	    newarena = newarena + M_OVERHEAD;
1163 	    /* newarena = (newarena + 3) &~ 3; */
1164 	    shiftr = (newarena - 1) >> LOG_OF_MIN_ARENA;
1165 	    pow = LOG_OF_MIN_ARENA + 1;
1166 	    /* apart from this loop, this is O(1) */
1167 	    while (shiftr >>= 1)
1169 	    newarena = (1 << pow) + POW2_OPTIMIZE_SURPLUS(pow * BUCKETS_PER_POW2);
1170 	    require = newarena - onb - M_OVERHEAD;
1172 	    if (require <= sbrked_remains) {
1173 		sbrked_remains -= require;
1177 		require -= sbrked_remains;
1178 		/* We do not try to optimize sbrks here, we go for place. */
1179 		cp = (char*) sbrk(require);
1180 #ifdef DEBUGGING_MSTATS
1182 		goodsbrk += require;
1184 		if (cp == last_sbrk_top) {
1186 		    last_sbrk_top = cp + require;
1188 		    /* Report the failure: */
	/* Non-continuous sbrk: salvage both tails onto chunk_chain. */
1190 			add_to_chain((void*)(last_sbrk_top - sbrked_remains),
1192 		    add_to_chain((void*)cp, require, 0);
1193 		    sbrk_good -= SBRK_FAILURE_PRICE;
1201 #ifdef DEBUGGING_MSTATS
1203 	    nmalloc[pow * BUCKETS_PER_POW2]++;
1205 	    *(cp - M_OVERHEAD) = pow * BUCKETS_PER_POW2; /* Fill index. */
	/* Strategy 3: allocate fresh and copy the smaller of old/new size. */
1209 	    MUTEX_UNLOCK(&malloc_mutex);
1210 	    if ((res = (char*)malloc(nbytes)) == NULL)
1212 	    if (cp != res)			/* common optimization */
1213 		Copy(cp, res, (MEM_SIZE)(nbytes<onb?nbytes:onb), char);
1218 	DEBUG_m(PerlIO_printf(Perl_debug_log, "0x%lu: (%05lu) rfree\n",
1219 			      (unsigned long)res,(unsigned long)(an++)));
1220 	DEBUG_m(PerlIO_printf(Perl_debug_log,
1221 			      "0x%lx: (%05lu) realloc %ld bytes\n",
1222 			      (unsigned long)res,(unsigned long)(an++),
1224 	return ((Malloc_t)res);
1228 * Search ``srchlen'' elements of each free list for a block whose
1229 * header starts at ``freep''. If srchlen is -1 search the whole list.
1230 * Return bucket number, or -1 if not found.
/*
 * findbucket(freep, srchlen): scan up to `srchlen` entries of every
 * bucket's free list looking for the header `freep`; used by realloc()
 * when handed an already-freed block.  srchlen == -1 searches whole lists
 * (j != srchlen never triggers).  Returns the bucket number or -1
 * (the match/return lines are among the lines missing from this extract).
 */
1233 findbucket(union overhead *freep, int srchlen)
1235 	register union overhead *p;
1238 	for (i = 0; i < NBUCKETS; i++) {
1240 		for (p = nextf[i]; p && j != srchlen; p = p->ov_next) {
/*
 * calloc(elements, size): allocate elements*size bytes and zero them.
 * NOTE(review): the visible multiplication `elements * size` has no
 * overflow check, and the guard between malloc() and memset() (presumably
 * `if (p)`) falls on a line missing from this extract — confirm against
 * the full source before assuming either is a defect.
 */
1250 calloc(register size_t elements, register size_t size)
1252     long sz = elements * size;
1253     Malloc_t p = malloc(sz);
1256 	memset((void*)p, 0, sz);
/*
 * malloced_size(p): report the usable size of an allocated block.
 * NOTE(review): the visible code reads OV_INDEX of `p` directly, i.e. it
 * appears to expect the overhead header rather than the user pointer —
 * verify the caller contract (and any adjustment on the missing lines)
 * against the full source.
 */
1262 malloced_size(void *p)
1264     int bucket = OV_INDEX((union overhead *)p);
1266     return BUCKET_SIZE_REAL(bucket);
1269 #ifdef DEBUGGING_MSTATS
1271 # ifdef BUCKETS_ROOT2
1272 # define MIN_EVEN_REPORT 6
1274 # define MIN_EVEN_REPORT MIN_BUCKET
1277 * mstats - print out statistics about malloc
1279 * Prints two lines of numbers, one showing the length of the free list
1280 * for each size category, the second showing the number of mallocs -
1281 * frees for each size category.
/*
 * dump_mstats(s): print allocator statistics to stderr — free-list
 * lengths and outstanding (malloc - free) counts per bucket, plus sbrk
 * totals and chunk_chain leftovers.  Under BUCKETS_ROOT2, even buckets
 * print on the main rows and odd (1.5x) buckets on continuation rows.
 * NOTE(review): interior source lines are missing from this extract; the
 * numeric prefixes are stale original line numbers.
 */
1284 dump_mstats(char *s)
1287   	register union overhead *p;
1288   	int topbucket=0, topbucket_ev=0, topbucket_odd=0, totfree=0, total=0;
1289   	u_int nfree[NBUCKETS];
1290 	int total_chain = 0;
1291 	struct chunk_chain_s* nextchain = chunk_chain;
	/* Tally each bucket's free-list length and byte totals. */
1293   	for (i = MIN_BUCKET ; i < NBUCKETS; i++) {
1294   		for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
1297   		totfree += nfree[i]   * BUCKET_SIZE_REAL(i);
1298   		total += nmalloc[i] * BUCKET_SIZE_REAL(i);
1300 			i % 2 ? (topbucket_odd = i) : (topbucket_ev = i);
1305   		PerlIO_printf(PerlIO_stderr(),
1306 				      "Memory allocation statistics %s (buckets %d(%d)..%d(%d)\n",
1308 			      BUCKET_SIZE_REAL(MIN_BUCKET),
1309 			      BUCKET_SIZE(MIN_BUCKET),
1310 			      BUCKET_SIZE_REAL(topbucket), BUCKET_SIZE(topbucket));
1311   	PerlIO_printf(PerlIO_stderr(), "%8d free:", totfree);
	/* Free counts: narrow %d columns for small buckets, wider later. */
1312   	for (i = MIN_EVEN_REPORT; i <= topbucket; i += BUCKETS_PER_POW2) {
1313   		PerlIO_printf(PerlIO_stderr(),
1314 			      ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
1316 			       : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
1319 #ifdef BUCKETS_ROOT2
1320 	PerlIO_printf(PerlIO_stderr(), "\n\t   ");
1321   	for (i = MIN_BUCKET + 1; i <= topbucket_odd; i += BUCKETS_PER_POW2) {
1322   		PerlIO_printf(PerlIO_stderr(),
1323 			      ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
1325 			       : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
	/* In-use counts (mallocs minus frees), same column layout. */
1329   	PerlIO_printf(PerlIO_stderr(), "\n%8d used:", total - totfree);
1330   	for (i = MIN_EVEN_REPORT; i <= topbucket; i += BUCKETS_PER_POW2) {
1331   		PerlIO_printf(PerlIO_stderr(),
1332 			      ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
1334 			       : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
1335 			      nmalloc[i] - nfree[i]);
1337 #ifdef BUCKETS_ROOT2
1338 	PerlIO_printf(PerlIO_stderr(), "\n\t   ");
1339   	for (i = MIN_BUCKET + 1; i <= topbucket_odd; i += BUCKETS_PER_POW2) {
1340   		PerlIO_printf(PerlIO_stderr(),
1341 			      ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
1343 			       : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
1344 			      nmalloc[i] - nfree[i]);
	/* Sum whatever is still parked on chunk_chain. */
1348 		total_chain += nextchain->size;
1349 		nextchain = nextchain->next;
1351 	PerlIO_printf(PerlIO_stderr(), "\nTotal sbrk(): %d/%d:%d. Odd ends: pad+heads+chain+tail: %d+%d+%d+%d.\n",
1352 		      goodsbrk + sbrk_slack, sbrks, sbrk_good, sbrk_slack,
1353 		      start_slack, total_chain, sbrked_remains);
1357 dump_mstats(char *s)
1364 #ifdef USE_PERL_SBRK
1367 # define PERL_SBRK_VIA_MALLOC
1370 # ifdef PERL_SBRK_VIA_MALLOC
1371 # if defined(HIDEMYMALLOC) || defined(EMBEDMYMALLOC)
1374 # include "Error: -DPERL_SBRK_VIA_MALLOC needs -D(HIDE|EMBED)MYMALLOC"
1377 /* it may seem schizophrenic to use perl's malloc and let it call system */
1378 /* malloc, the reason for that is only the 3.2 version of the OS that had */
1379 /* frequent core dumps within nxzonefreenolock. This sbrk routine put an */
1380 /* end to the cores */
1382 # define SYSTEM_ALLOC(a) malloc(a)
1384 # endif /* PERL_SBRK_VIA_MALLOC */
1386 static IV Perl_sbrk_oldchunk;
1387 static long Perl_sbrk_oldsize;
1389 # define PERLSBRK_32_K (1<<15)
1390 # define PERLSBRK_64_K (1<<16)
1399 if (!size) return 0;
1401 reqsize = size; /* just for the DEBUG_m statement */
1404 size = (size + 0x7ff) & ~0x7ff;
1406 if (size <= Perl_sbrk_oldsize) {
1407 got = Perl_sbrk_oldchunk;
1408 Perl_sbrk_oldchunk += size;
1409 Perl_sbrk_oldsize -= size;
1411 if (size >= PERLSBRK_32_K) {
1414 size = PERLSBRK_64_K;
1417 got = (IV)SYSTEM_ALLOC(size);
1419 got = (got + 0x7ff) & ~0x7ff;
1422 /* Chunk is small, register the rest for future allocs. */
1423 Perl_sbrk_oldchunk = got + reqsize;
1424 Perl_sbrk_oldsize = size - reqsize;
1428 DEBUG_m(PerlIO_printf(Perl_debug_log, "sbrk malloc size %ld (reqsize %ld), left size %ld, give addr 0x%lx\n",
1429 size, reqsize, Perl_sbrk_oldsize, got));
1434 #endif /* ! defined USE_PERL_SBRK */