undo goofed change 1157 (backed out the fix instead of keeping it)
[p5sagit/p5-mst-13.2.git] / malloc.c
CommitLineData
a0d0e21e 1/* malloc.c
8d063cd8 2 *
8d063cd8 3 */
4
e8bc2b5c 5#ifndef NO_FANCY_MALLOC
6# ifndef SMALL_BUCKET_VIA_TABLE
7# define SMALL_BUCKET_VIA_TABLE
8# endif
9# ifndef BUCKETS_ROOT2
10# define BUCKETS_ROOT2
11# endif
12# ifndef IGNORE_SMALL_BAD_FREE
13# define IGNORE_SMALL_BAD_FREE
14# endif
3562ef9b 15#endif
16
e8bc2b5c 17#ifndef PLAIN_MALLOC /* Bulk enable features */
18# ifndef PACK_MALLOC
19# define PACK_MALLOC
20# endif
21# ifndef TWO_POT_OPTIMIZE
22# define TWO_POT_OPTIMIZE
23# endif
24# if defined(PERL_CORE) && !defined(EMERGENCY_SBRK)
25# define EMERGENCY_SBRK
26# endif
27# if defined(PERL_CORE) && !defined(DEBUGGING_MSTATS)
28# define DEBUGGING_MSTATS
29# endif
30#endif
31
32#define MIN_BUC_POW2 (sizeof(void*) > 4 ? 3 : 2) /* Allow for 4-byte arena. */
33#define MIN_BUCKET (MIN_BUC_POW2 * BUCKETS_PER_POW2)
34
35#if !(defined(I286) || defined(atarist))
36 /* take 2k unless the block is bigger than that */
37# define LOG_OF_MIN_ARENA 11
38#else
39 /* take 16k unless the block is bigger than that
40 (80286s like large segments!), probably good on the atari too */
41# define LOG_OF_MIN_ARENA 14
42#endif
43
8d063cd8 44#ifndef lint
1944739a 45# if defined(DEBUGGING) && !defined(NO_RCHECK)
46# define RCHECK
47# endif
e8bc2b5c 48# if defined(RCHECK) && defined(IGNORE_SMALL_BAD_FREE)
49# undef IGNORE_SMALL_BAD_FREE
50# endif
8d063cd8 51/*
52 * malloc.c (Caltech) 2/21/82
53 * Chris Kingsley, kingsley@cit-20.
54 *
55 * This is a very fast storage allocator. It allocates blocks of a small
56 * number of different sizes, and keeps free lists of each size. Blocks that
57 * don't exactly fit are passed up to the next larger size. In this
58 * implementation, the available sizes are 2^n-4 (or 2^n-12) bytes long.
cf5c4ad8 59 * If PACK_MALLOC is defined, small blocks are 2^n bytes long.
8d063cd8 60 * This is designed for use in a program that uses vast quantities of memory,
61 * but bombs when it runs out.
62 */
63
135863df 64#include "EXTERN.h"
135863df 65#include "perl.h"
66
e8bc2b5c 67#ifndef PERL_CORE
68# ifndef croak /* make depend */
69# define croak(mess) fprintf(stderr,mess); exit(1);
70# endif
71# ifdef DEBUG_m
72# undef DEBUG_m
73# endif
74# define DEBUG_m(a)
75# ifdef DEBUGGING
76# undef DEBUGGING
77# endif
78#endif
79
80#ifndef MUTEX_LOCK
81# define MUTEX_LOCK(l)
82#endif
83
84#ifndef MUTEX_UNLOCK
85# define MUTEX_UNLOCK(l)
86#endif
87
760ac839 88#ifdef DEBUGGING
e8bc2b5c 89# undef DEBUG_m
90# define DEBUG_m(a) if (debug & 128) a
760ac839 91#endif
92
135863df 93/* I don't much care whether these are defined in sys/types.h--LAW */
94
95#define u_char unsigned char
96#define u_int unsigned int
e8bc2b5c 97
98#ifdef HAS_QUAD
99# define u_bigint UV /* Needs to eat *void. */
100#else /* needed? */
101# define u_bigint unsigned long /* Needs to eat *void. */
102#endif
103
135863df 104#define u_short unsigned short
8d063cd8 105
cf5c4ad8 106/* 286 and atarist like big chunks, which gives too much overhead. */
107#if (defined(RCHECK) || defined(I286) || defined(atarist)) && defined(PACK_MALLOC)
e8bc2b5c 108# undef PACK_MALLOC
cf5c4ad8 109#endif
110
8d063cd8 111/*
cf5c4ad8 112 * The description below is applicable if PACK_MALLOC is not defined.
113 *
8d063cd8 114 * The overhead on a block is at least 4 bytes. When free, this space
115 * contains a pointer to the next free block, and the bottom two bits must
116 * be zero. When in use, the first byte is set to MAGIC, and the second
117 * byte is the size index. The remaining bytes are for alignment.
118 * If range checking is enabled and the size of the block fits
119 * in two bytes, then the top two bytes hold the size of the requested block
120 * plus the range checking words, and the header word MINUS ONE.
121 */
/*
 * Per-block bookkeeping header.  While a block sits on a free list the
 * first word is the free-list link; while allocated (and !PACK_MALLOC)
 * the first bytes hold the magic tag and bucket index (plus RCHECK
 * range-checking fields when enabled).  The union overlays these uses.
 */
union	overhead {
	union	overhead *ov_next;	/* when free */
#if MEM_ALIGNBYTES > 4
	double	strut;			/* alignment problems */
#endif
	struct {
		u_char	ovu_magic;	/* magic number */
		u_char	ovu_index;	/* bucket # */
#ifdef RCHECK
		u_short	ovu_size;	/* actual block size */
		u_int	ovu_rmagic;	/* range magic number */
#endif
	} ovu;
/* Convenience accessors for the overlaid struct members. */
#define	ov_magic	ovu.ovu_magic
#define	ov_index	ovu.ovu_index
#define	ov_size		ovu.ovu_size
#define	ov_rmagic	ovu.ovu_rmagic
};
140
760ac839 141#ifdef DEBUGGING
a0d0e21e 142static void botch _((char *s));
143#endif
144static void morecore _((int bucket));
145static int findbucket _((union overhead *freep, int srchlen));
146
8d063cd8 147#define MAGIC 0xff /* magic # on accounting info */
148#define RMAGIC 0x55555555 /* magic # on range info */
e8bc2b5c 149#define RMAGIC_C 0x55 /* magic # on range info */
150
8d063cd8 151#ifdef RCHECK
c2a5c2d2 152# define RSLOP sizeof (u_int)
153# ifdef TWO_POT_OPTIMIZE
e8bc2b5c 154# define MAX_SHORT_BUCKET (12 * BUCKETS_PER_POW2)
c2a5c2d2 155# else
e8bc2b5c 156# define MAX_SHORT_BUCKET (13 * BUCKETS_PER_POW2)
c2a5c2d2 157# endif
8d063cd8 158#else
c2a5c2d2 159# define RSLOP 0
8d063cd8 160#endif
161
e8bc2b5c 162#if !defined(PACK_MALLOC) && defined(BUCKETS_ROOT2)
163# undef BUCKETS_ROOT2
164#endif
165
166#ifdef BUCKETS_ROOT2
167# define BUCKET_TABLE_SHIFT 2
168# define BUCKET_POW2_SHIFT 1
169# define BUCKETS_PER_POW2 2
170#else
171# define BUCKET_TABLE_SHIFT MIN_BUC_POW2
172# define BUCKET_POW2_SHIFT 0
173# define BUCKETS_PER_POW2 1
174#endif
175
176#ifdef BUCKETS_ROOT2
177# define MAX_BUCKET_BY_TABLE 13
/* Byte size of each table-driven bucket, indexed by bucket ordinal.
 * Entries 0-3 are unused placeholders.  Only compiled under BUCKETS_ROOT2,
 * where odd ordinals are the intermediate (3*2^n/2) sizes. */
static u_short buck_size[MAX_BUCKET_BY_TABLE + 1] =
  {
      0, 0, 0, 0, 4, 4, 8, 12, 16, 24, 32, 48, 64, 80,
  };
182# define BUCKET_SIZE(i) ((i) % 2 ? buck_size[i] : (1 << ((i) >> BUCKET_POW2_SHIFT)))
183# define BUCKET_SIZE_REAL(i) ((i) <= MAX_BUCKET_BY_TABLE \
184 ? buck_size[i] \
185 : ((1 << ((i) >> BUCKET_POW2_SHIFT)) \
186 - MEM_OVERHEAD(i) \
187 + POW2_OPTIMIZE_SURPLUS(i)))
188#else
189# define BUCKET_SIZE(i) (1 << ((i) >> BUCKET_POW2_SHIFT))
190# define BUCKET_SIZE_REAL(i) (BUCKET_SIZE(i) - MEM_OVERHEAD(i) + POW2_OPTIMIZE_SURPLUS(i))
191#endif
192
193
cf5c4ad8 194#ifdef PACK_MALLOC
e8bc2b5c 195/* In this case it is assumed that if we do sbrk() in 2K units, we
196 * will get 2K aligned arenas (at least after some initial
197 * alignment). The bucket number of the given subblock is on the start
198 * of 2K arena which contains the subblock. Several following bytes
199 * contain the magic numbers for the subblocks in the block.
cf5c4ad8 200 *
201 * Sizes of chunks are powers of 2 for chunks in buckets <=
202 * MAX_PACKED, after this they are (2^n - sizeof(union overhead)) (to
203 * get alignment right).
204 *
e8bc2b5c 205 * Consider an arena for 2^n with n>MAX_PACKED. We suppose that
206 * starts of all the chunks in a 2K arena are in different
207 * 2^n-byte-long chunks. If the top of the last chunk is aligned on a
208 * boundary of 2K block, this means that sizeof(union
209 * overhead)*"number of chunks" < 2^n, or sizeof(union overhead)*2K <
210 * 4^n, or n > 6 + log2(sizeof()/2)/2, since a chunk of size 2^n -
211 * overhead is used. Since this rules out n = 7 for 8 byte alignment,
212 * we specialcase allocation of the first of 16 128-byte-long chunks.
cf5c4ad8 213 *
214 * Note that with the above assumption we automatically have enough
215 * place for MAGIC at the start of 2K block. Note also that we
e8bc2b5c 216 * overlay union overhead over the chunk, thus the start of small chunks
217 * is immediately overwritten after freeing. */
218# define MAX_PACKED_POW2 6
219# define MAX_PACKED (MAX_PACKED_POW2 * BUCKETS_PER_POW2 + BUCKET_POW2_SHIFT)
220# define MAX_POW2_ALGO ((1<<(MAX_PACKED_POW2 + 1)) - M_OVERHEAD)
221# define TWOK_MASK ((1<<LOG_OF_MIN_ARENA) - 1)
222# define TWOK_MASKED(x) ((u_bigint)(x) & ~TWOK_MASK)
223# define TWOK_SHIFT(x) ((u_bigint)(x) & TWOK_MASK)
cf5c4ad8 224# define OV_INDEXp(block) ((u_char*)(TWOK_MASKED(block)))
225# define OV_INDEX(block) (*OV_INDEXp(block))
226# define OV_MAGIC(block,bucket) (*(OV_INDEXp(block) + \
e8bc2b5c 227 (TWOK_SHIFT(block)>> \
228 (bucket>>BUCKET_POW2_SHIFT)) + \
229 (bucket >= MIN_NEEDS_SHIFT ? 1 : 0)))
230 /* A bucket can have a shift smaller than it size, we need to
231 shift its magic number so it will not overwrite index: */
232# ifdef BUCKETS_ROOT2
233# define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2 - 1) /* Shift 80 greater than chunk 64. */
234# else
235# define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2) /* Shift 128 greater than chunk 32. */
236# endif
cf5c4ad8 237# define CHUNK_SHIFT 0
238
e8bc2b5c 239/* Number of active buckets of given ordinal. */
240#ifdef IGNORE_SMALL_BAD_FREE
241#define FIRST_BUCKET_WITH_CHECK (6 * BUCKETS_PER_POW2) /* 64 */
242# define N_BLKS(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK \
243 ? ((1<<LOG_OF_MIN_ARENA) - 1)/BUCKET_SIZE(bucket) \
244 : n_blks[bucket] )
245#else
246# define N_BLKS(bucket) n_blks[bucket]
247#endif
248
/* Number of chunks of a given bucket ordinal that are packed into one
 * minimal arena (2^LOG_OF_MIN_ARENA bytes).  Zero entries are buckets
 * that are never packed.  Two layouts depending on whether intermediate
 * root-2 buckets exist (BUCKETS_PER_POW2 == 2). */
static u_short n_blks[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] =
  {
#  if BUCKETS_PER_POW2==1
      0, 0,
      (MIN_BUC_POW2==2 ? 384 : 0),
      224, 120, 62, 31, 16, 8, 4, 2
#  else
      0, 0, 0, 0,
      (MIN_BUC_POW2==2 ? 384 : 0), (MIN_BUC_POW2==2 ? 384 : 0),	/* 4, 4 */
      224, 149, 120, 80, 62, 41, 31, 25, 16, 16, 8, 8, 4, 4, 2, 2
#  endif
  };
261
262/* Shift of the first bucket with the given ordinal inside 2K chunk. */
263#ifdef IGNORE_SMALL_BAD_FREE
264# define BLK_SHIFT(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK \
265 ? ((1<<LOG_OF_MIN_ARENA) \
266 - BUCKET_SIZE(bucket) * N_BLKS(bucket)) \
267 : blk_shift[bucket])
268#else
269# define BLK_SHIFT(bucket) blk_shift[bucket]
270#endif
271
/* Byte offset (within the minimal arena) of the first chunk of each
 * packed bucket ordinal; the space before it holds the arena's index
 * byte and per-chunk magic bytes.  Larger buckets reserve multiples of
 * sizeof(union overhead) instead of fixed byte counts. */
static u_short blk_shift[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] =
  {
#  if BUCKETS_PER_POW2==1
      0, 0,
      (MIN_BUC_POW2==2 ? 512 : 0),
      256, 128, 64, 64,			/* 8 to 64 */
      16*sizeof(union overhead),
      8*sizeof(union overhead),
      4*sizeof(union overhead),
      2*sizeof(union overhead),
#  else
      0, 0, 0, 0,
      (MIN_BUC_POW2==2 ? 512 : 0), (MIN_BUC_POW2==2 ? 512 : 0),
      256, 260, 128, 128, 64, 80, 64, 48, /* 8 to 96 */
      16*sizeof(union overhead), 16*sizeof(union overhead),
      8*sizeof(union overhead), 8*sizeof(union overhead),
      4*sizeof(union overhead), 4*sizeof(union overhead),
      2*sizeof(union overhead), 2*sizeof(union overhead),
#  endif
  };
cf5c4ad8 292
cf5c4ad8 293#else /* !PACK_MALLOC */
294
295# define OV_MAGIC(block,bucket) (block)->ov_magic
296# define OV_INDEX(block) (block)->ov_index
297# define CHUNK_SHIFT 1
e8bc2b5c 298# define MAX_PACKED -1
cf5c4ad8 299#endif /* !PACK_MALLOC */
300
e8bc2b5c 301#define M_OVERHEAD (sizeof(union overhead) + RSLOP)
302
303#ifdef PACK_MALLOC
304# define MEM_OVERHEAD(bucket) \
305 (bucket <= MAX_PACKED ? 0 : M_OVERHEAD)
306# ifdef SMALL_BUCKET_VIA_TABLE
307# define START_SHIFTS_BUCKET ((MAX_PACKED_POW2 + 1) * BUCKETS_PER_POW2)
308# define START_SHIFT MAX_PACKED_POW2
309# ifdef BUCKETS_ROOT2 /* Chunks of size 3*2^n. */
310# define SIZE_TABLE_MAX 80
311# else
312# define SIZE_TABLE_MAX 64
313# endif
/* Request-size -> bucket ordinal lookup for small requests.  Indexed by
 * (nbytes - 1) >> BUCKET_TABLE_SHIFT, i.e. one entry per 4-byte step up
 * to SIZE_TABLE_MAX.  Lets malloc() skip the shift loop for the common
 * small sizes. */
static char bucket_of[] =
  {
#  ifdef BUCKETS_ROOT2		/* Chunks of size 3*2^n. */
      /* 0 to 15 in 4-byte increments. */
      (sizeof(void*) > 4 ? 6 : 5),	/* 4/8, 5-th bucket for better reports */
      6,				/* 8 */
      7, 8,				/* 12, 16 */
      9, 9, 10, 10,			/* 24, 32 */
      11, 11, 11, 11,			/* 48 */
      12, 12, 12, 12,			/* 64 */
      13, 13, 13, 13,			/* 80 */
      13, 13, 13, 13			/* 80 */
#  else /* !BUCKETS_ROOT2 */
      /* 0 to 15 in 4-byte increments. */
      (sizeof(void*) > 4 ? 3 : 2),
      3,
      4, 4,
      5, 5, 5, 5,
      6, 6, 6, 6,
      6, 6, 6, 6
#  endif /* !BUCKETS_ROOT2 */
  };
336# else /* !SMALL_BUCKET_VIA_TABLE */
337# define START_SHIFTS_BUCKET MIN_BUCKET
338# define START_SHIFT (MIN_BUC_POW2 - 1)
339# endif /* !SMALL_BUCKET_VIA_TABLE */
340#else /* !PACK_MALLOC */
341# define MEM_OVERHEAD(bucket) M_OVERHEAD
342# ifdef SMALL_BUCKET_VIA_TABLE
343# undef SMALL_BUCKET_VIA_TABLE
344# endif
345# define START_SHIFTS_BUCKET MIN_BUCKET
346# define START_SHIFT (MIN_BUC_POW2 - 1)
347#endif /* !PACK_MALLOC */
cf5c4ad8 348
8d063cd8 349/*
55497cff 350 * Big allocations are often of the size 2^n bytes. To make them a
351 * little bit better, make blocks of size 2^n+pagesize for big n.
352 */
353
354#ifdef TWO_POT_OPTIMIZE
355
5f05dabc 356# ifndef PERL_PAGESIZE
357# define PERL_PAGESIZE 4096
358# endif
e8bc2b5c 359# ifndef FIRST_BIG_POW2
360# define FIRST_BIG_POW2 15 /* 32K, 16K is used too often. */
5f05dabc 361# endif
e8bc2b5c 362# define FIRST_BIG_BLOCK (1<<FIRST_BIG_POW2)
55497cff 363/* If this value or more, check against bigger blocks. */
364# define FIRST_BIG_BOUND (FIRST_BIG_BLOCK - M_OVERHEAD)
365/* If less than this value, goes into 2^n-overhead-block. */
366# define LAST_SMALL_BOUND ((FIRST_BIG_BLOCK>>1) - M_OVERHEAD)
367
e8bc2b5c 368# define POW2_OPTIMIZE_ADJUST(nbytes) \
369 ((nbytes >= FIRST_BIG_BOUND) ? nbytes -= PERL_PAGESIZE : 0)
370# define POW2_OPTIMIZE_SURPLUS(bucket) \
371 ((bucket >= FIRST_BIG_POW2 * BUCKETS_PER_POW2) ? PERL_PAGESIZE : 0)
372
373#else /* !TWO_POT_OPTIMIZE */
374# define POW2_OPTIMIZE_ADJUST(nbytes)
375# define POW2_OPTIMIZE_SURPLUS(bucket) 0
376#endif /* !TWO_POT_OPTIMIZE */
377
378#if defined(HAS_64K_LIMIT) && defined(PERL_CORE)
379# define BARK_64K_LIMIT(what,nbytes,size) \
380 if (nbytes > 0xffff) { \
381 PerlIO_printf(PerlIO_stderr(), \
382 "%s too large: %lx\n", what, size); \
383 my_exit(1); \
384 }
385#else /* !HAS_64K_LIMIT || !PERL_CORE */
386# define BARK_64K_LIMIT(what,nbytes,size)
387#endif /* !HAS_64K_LIMIT || !PERL_CORE */
55497cff 388
e8bc2b5c 389#ifndef MIN_SBRK
390# define MIN_SBRK 2048
391#endif
392
393#ifndef FIRST_SBRK
394# define FIRST_SBRK (32*1024)
395#endif
396
397/* Minimal sbrk in percents of what is already alloced. */
398#ifndef MIN_SBRK_FRAC
399# define MIN_SBRK_FRAC 3
400#endif
401
402#ifndef SBRK_ALLOW_FAILURES
403# define SBRK_ALLOW_FAILURES 3
404#endif
55497cff 405
e8bc2b5c 406#ifndef SBRK_FAILURE_PRICE
407# define SBRK_FAILURE_PRICE 50
55497cff 408#endif
409
/* BUG FIX: this region was guarded by PERL_EMERGENCY_SBRK, but the
 * bulk-enable section at the top of the file defines EMERGENCY_SBRK,
 * so the emergency-recovery code below could never be compiled in and
 * the no-op "-1" fallback was always used.  Test the macro that is
 * actually defined. */
#if defined(EMERGENCY_SBRK) && defined(PERL_CORE)

#  ifndef BIG_SIZE
#    define BIG_SIZE (1<<16)		/* 64K */
#  endif

/* Reserve memory detached from $^M, consumed only after sbrk() fails. */
static char *emergency_buffer;
static MEM_SIZE emergency_buffer_size;

/*
 * Allocator of last resort: called from morecore() when the real sbrk()
 * has failed.  On the first failure it detaches the string buffer of the
 * $^M variable and dies, leaving the buffer available so that the die()
 * can be processed; later calls carve pieces off that buffer.  Returns
 * (char*)-1, like sbrk(), when no recovery is possible.
 */
static Malloc_t
emergency_sbrk(size)
     MEM_SIZE size;
{
    if (size >= BIG_SIZE) {
	/* Give the possibility to recover: */
	die("Out of memory during request for %i bytes", size);
	/* croak may eat too much memory. */
    }

    if (!emergency_buffer) {
	dTHR;
	/* First offense, give a possibility to recover by dieing. */
	/* No malloc involved here: */
	GV **gvp = (GV**)hv_fetch(defstash, "^M", 2, 0);
	SV *sv;
	char *pv;

	if (!gvp) gvp = (GV**)hv_fetch(defstash, "\015", 1, 0);
	if (!gvp || !(sv = GvSV(*gvp)) || !SvPOK(sv)
	    || (SvLEN(sv) < (1<<LOG_OF_MIN_ARENA) - M_OVERHEAD))
	    return (char *)-1;		/* Now die die die... */

	/* Got it, now detach SvPV: */
	pv = SvPV(sv, na);
	/* Check alignment: the buffer must sit on a minimal-arena
	   boundary (after backing up over its own overhead) or the
	   packed-malloc index tricks would misfire. */
	if (((u_bigint)(pv - M_OVERHEAD)) & ((1<<LOG_OF_MIN_ARENA) - 1)) {
	    PerlIO_puts(PerlIO_stderr(),"Bad alignment of $^M!\n");
	    return (char *)-1;		/* die die die */
	}

	emergency_buffer = pv - M_OVERHEAD;
	emergency_buffer_size = SvLEN(sv) + M_OVERHEAD;
	SvPOK_off(sv);
	SvREADONLY_on(sv);
	die("Out of memory!");		/* croak may eat too much memory. */
    }
    else if (emergency_buffer_size >= size) {
	/* Hand out from the top of the reserve. */
	emergency_buffer_size -= size;
	return emergency_buffer + emergency_buffer_size;
    }

    return (char *)-1;			/* poor guy... */
}

#else /* !(defined(EMERGENCY_SBRK) && defined(PERL_CORE)) */
#  define emergency_sbrk(size)	-1
#endif /* !(defined(EMERGENCY_SBRK) && defined(PERL_CORE)) */
55497cff 467
468/*
e8bc2b5c 469 * nextf[i] is the pointer to the next free block of size 2^i. The
8d063cd8 470 * smallest allocatable block is 8 bytes. The overhead information
471 * precedes the data area returned to the user.
472 */
e8bc2b5c 473#define NBUCKETS (32*BUCKETS_PER_POW2 + 1)
8d063cd8 474static union overhead *nextf[NBUCKETS];
cf5c4ad8 475
476#ifdef USE_PERL_SBRK
477#define sbrk(a) Perl_sbrk(a)
52082926 478Malloc_t Perl_sbrk _((int size));
8ac85365 479#else
480#ifdef DONT_DECLARE_STD
481#ifdef I_UNISTD
482#include <unistd.h>
483#endif
cf5c4ad8 484#else
52082926 485extern Malloc_t sbrk(int);
8ac85365 486#endif
cf5c4ad8 487#endif
8d063cd8 488
c07a80fd 489#ifdef DEBUGGING_MSTATS
8d063cd8 490/*
491 * nmalloc[i] is the difference between the number of mallocs and frees
492 * for a given block size.
493 */
494static u_int nmalloc[NBUCKETS];
5f05dabc 495static u_int sbrk_slack;
496static u_int start_slack;
8d063cd8 497#endif
498
e8bc2b5c 499static u_int goodsbrk;
500
#ifdef DEBUGGING
/* ASSERT(p) aborts with the stringified condition when p is false.
 * The trailing `else' swallows a following semicolon safely. */
#define	ASSERT(p)   if (!(p)) botch(STRINGIFY(p));  else
/* Report a failed internal consistency check and abort the process. */
static void
botch(char *s)
{
    PerlIO_printf(PerlIO_stderr(), "assertion botched: %s\n", s);
    PerlProc_abort();
}
#else
#define	ASSERT(p)
#endif
512
/*
 * Allocate nbytes of memory from the bucketed free lists.
 *
 * The request is rounded up (including per-block overhead) to the
 * smallest bucket that fits, the free list for that bucket is refilled
 * via morecore() if empty, and the head block is unlinked and tagged.
 * Returns a pointer past the overhead header, or NULL on exhaustion
 * (non-PERL_CORE builds only; PERL_CORE exits via my_exit).
 */
Malloc_t
malloc(register size_t nbytes)
{
  	register union overhead *p;
  	register int bucket;
  	register MEM_SIZE shiftr;

#if defined(DEBUGGING) || defined(RCHECK)
	MEM_SIZE size = nbytes;
#endif

	BARK_64K_LIMIT("Allocation",nbytes,nbytes);
#ifdef DEBUGGING
	/* A "negative" size means the caller overflowed somewhere. */
	if ((long)nbytes < 0)
	    croak("panic: malloc");
#endif

	MUTEX_LOCK(&malloc_mutex);
	/*
	 * Convert amount of memory requested into
	 * closest block size stored in hash buckets
	 * which satisfies request.  Account for
	 * space used per block for accounting.
	 */
#ifdef PACK_MALLOC
#  ifdef SMALL_BUCKET_VIA_TABLE
	/* Small requests: direct table lookup, no overhead added
	   (packed buckets keep their bookkeeping in the arena head). */
	if (nbytes == 0)
	    bucket = MIN_BUCKET;
	else if (nbytes <= SIZE_TABLE_MAX) {
	    bucket = bucket_of[(nbytes - 1) >> BUCKET_TABLE_SHIFT];
	} else
#  else
	if (nbytes == 0)
	    nbytes = 1;
	if (nbytes <= MAX_POW2_ALGO) goto do_shifts;
	else
#  endif
#endif
	{
	    /* General case: fold in overhead, round to a 4-byte multiple,
	       then find the power-of-2 bucket by shifting. */
	    POW2_OPTIMIZE_ADJUST(nbytes);
	    nbytes += M_OVERHEAD;
	    nbytes = (nbytes + 3) &~ 3;
	  do_shifts:
	    shiftr = (nbytes - 1) >> START_SHIFT;
	    bucket = START_SHIFTS_BUCKET;
	    /* apart from this loop, this is O(1) */
	    while (shiftr >>= 1)
  		bucket += BUCKETS_PER_POW2;
	}
	/*
	 * If nothing in hash bucket right now,
	 * request more memory from the system.
	 */
  	if (nextf[bucket] == NULL)
  		morecore(bucket);
  	if ((p = nextf[bucket]) == NULL) {
		MUTEX_UNLOCK(&malloc_mutex);
#ifdef PERL_CORE
		if (!nomemok) {
		    PerlIO_puts(PerlIO_stderr(),"Out of memory!\n");
		    my_exit(1);
		}
#else
  		return (NULL);
#endif
	}

	DEBUG_m(PerlIO_printf(Perl_debug_log,
			      "0x%lx: (%05lu) malloc %ld bytes\n",
			      (unsigned long)(p+1), (unsigned long)(an++),
			      (long)size));

	/* remove from linked list */
#ifdef RCHECK
	/* A misaligned free-list link means the list was stomped on. */
	if (*((int*)p) & (sizeof(union overhead) - 1))
	    PerlIO_printf(PerlIO_stderr(), "Corrupt malloc ptr 0x%lx at 0x%lx\n",
		(unsigned long)*((int*)p),(unsigned long)p);
#endif
  	nextf[bucket] = p->ov_next;
#ifdef IGNORE_SMALL_BAD_FREE
	/* Small packed buckets carry no per-block magic to set. */
	if (bucket >= FIRST_BUCKET_WITH_CHECK)
#endif
	    OV_MAGIC(p, bucket) = MAGIC;
#ifndef PACK_MALLOC
	OV_INDEX(p) = bucket;
#endif
#ifdef RCHECK
  	/*
  	 * Record allocated size of block and
  	 * bound space with magic numbers.
  	 */
  	p->ov_rmagic = RMAGIC;
	if (bucket <= MAX_SHORT_BUCKET) {
	    int i;

	    /* Fill the pad bytes up to the next 4-byte boundary with
	       RMAGIC_C, then plant the full RMAGIC word at the end. */
	    nbytes = size + M_OVERHEAD;
	    p->ov_size = nbytes - 1;
	    if ((i = nbytes & 3)) {
		i = 4 - i;
		while (i--)
		    *((char *)((caddr_t)p + nbytes - RSLOP + i)) = RMAGIC_C;
	    }
	    nbytes = (nbytes + 3) &~ 3;
	    *((u_int *)((caddr_t)p + nbytes - RSLOP)) = RMAGIC;
	}
#endif
	MUTEX_UNLOCK(&malloc_mutex);
	/* CHUNK_SHIFT is 1 (skip the header) unless PACK_MALLOC. */
  	return ((Malloc_t)(p + CHUNK_SHIFT));
}
622
/* sbrk() bookkeeping: where the last sbrk ended, how much of it is
 * still unused, and a health score that throttles the rounding-up
 * optimization after repeated discontiguous/failed sbrk()s. */
static char *last_sbrk_top;
static char *last_op;			/* This arena can be easily extended. */
static int sbrked_remains;
static int sbrk_good = SBRK_ALLOW_FAILURES * SBRK_FAILURE_PRICE;

#ifdef DEBUGGING_MSTATS
static int sbrks;
#endif

/* Singly-linked list of leftover memory regions (headers are overlaid
 * directly on the free memory itself). */
struct chunk_chain_s {
    struct chunk_chain_s *next;
    MEM_SIZE size;
};
static struct chunk_chain_s *chunk_chain;
static int n_chunks;
static char max_bucket;			/* Largest bucket ever populated. */
639
640/* Cutoff a piece of one of the chunks in the chain. Prefer smaller chunk. */
641static void *
642get_from_chain(MEM_SIZE size)
643{
644 struct chunk_chain_s *elt = chunk_chain, **oldp = &chunk_chain;
645 struct chunk_chain_s **oldgoodp = NULL;
646 long min_remain = LONG_MAX;
647
648 while (elt) {
649 if (elt->size >= size) {
650 long remains = elt->size - size;
651 if (remains >= 0 && remains < min_remain) {
652 oldgoodp = oldp;
653 min_remain = remains;
654 }
655 if (remains == 0) {
656 break;
657 }
658 }
659 oldp = &( elt->next );
660 elt = elt->next;
661 }
662 if (!oldgoodp) return NULL;
663 if (min_remain) {
664 void *ret = *oldgoodp;
665 struct chunk_chain_s *next = (*oldgoodp)->next;
666
667 *oldgoodp = (struct chunk_chain_s *)((char*)ret + size);
668 (*oldgoodp)->size = min_remain;
669 (*oldgoodp)->next = next;
670 return ret;
671 } else {
672 void *ret = *oldgoodp;
673 *oldgoodp = (*oldgoodp)->next;
674 n_chunks--;
675 return ret;
676 }
677}
678
679static void
680add_to_chain(void *p, MEM_SIZE size, MEM_SIZE chip)
681{
682 struct chunk_chain_s *next = chunk_chain;
683 char *cp = (char*)p;
684
685 cp += chip;
686 chunk_chain = (struct chunk_chain_s *)cp;
687 chunk_chain->size = size - chip;
688 chunk_chain->next = next;
689 n_chunks++;
690}
691
/*
 * Try to satisfy a request of `size' bytes by stealing a free block
 * from some bucket >= `bucket'.  Each candidate bucket must be asked
 * `price' times before it gives up a block (so rarely-needed steals
 * don't deplete bigger buckets).  The stolen block is pushed onto the
 * chunk chain minus the first `size' bytes, which are returned.
 * Returns NULL when nothing suitable is free.
 */
static void *
get_from_bigger_buckets(int bucket, MEM_SIZE size)
{
    int price = 1;
    static int bucketprice[NBUCKETS];
    while (bucket <= max_bucket) {
	/* We postpone stealing from bigger buckets until we want it
	   often enough. */
	if (nextf[bucket] && bucketprice[bucket]++ >= price) {
	    /* Steal it! */
	    void *ret = (void*)(nextf[bucket] - 1 + CHUNK_SHIFT);
	    bucketprice[bucket] = 0;
	    /* If this block was the most recent sbrk tail, the
	       "extend last arena" shortcut is no longer valid. */
	    if (((char*)nextf[bucket]) - M_OVERHEAD == last_op) {
		last_op = NULL;		/* Disable optimization */
	    }
	    nextf[bucket] = nextf[bucket]->ov_next;
#ifdef DEBUGGING_MSTATS
	    nmalloc[bucket]--;
	    start_slack -= M_OVERHEAD;
#endif
	    add_to_chain(ret, (BUCKET_SIZE(bucket) +
			       POW2_OPTIMIZE_SURPLUS(bucket)),
			 size);
	    return ret;
	}
	bucket++;
    }
    return NULL;
}
721
/*
 * Allocate more memory to the indicated bucket.
 *
 * Tries, in order: stealing a whole arena-sized block from the matching
 * power-of-2 bucket; the leftover chunk chain; bigger buckets; the
 * unused tail of the previous sbrk(); and finally a fresh sbrk() (with
 * emergency_sbrk() as last resort).  The acquired arena is then carved
 * into free-list blocks for `bucket'.  Caller holds malloc_mutex.
 */
static void
morecore(register int bucket)
{
  	register union overhead *ovp;
  	register int rnu;       /* 2^rnu bytes will be requested */
  	register int nblks;     /* become nblks blocks of the desired size */
	register MEM_SIZE siz, needed;
	int slack = 0;

	if (nextf[bucket])
		return;
	/* Bucket index equal to the word width in bits would mean a
	   request of 2^wordsize bytes — impossible. */
	if (bucket == sizeof(MEM_SIZE)*8*BUCKETS_PER_POW2) {
	    croak("Allocation too large");
	}

	if (bucket > max_bucket) {
	    max_bucket = bucket;
	}
	/* Small buckets share a minimal arena; big buckets get an arena
	   of exactly their own size. */
	rnu = ( (bucket <= (LOG_OF_MIN_ARENA << BUCKET_POW2_SHIFT))
	    ? LOG_OF_MIN_ARENA
	    : (bucket >> BUCKET_POW2_SHIFT) );
	/* This may be overwritten later: */
	nblks = 1 << (rnu - (bucket >> BUCKET_POW2_SHIFT)); /* how many blocks to get */
	needed = ((MEM_SIZE)1 << rnu) + POW2_OPTIMIZE_SURPLUS(bucket);
	if (nextf[rnu << BUCKET_POW2_SHIFT]) { /* 2048b bucket. */
	    /* Steal one arena-sized block from the matching pow-2 bucket. */
	    ovp = nextf[rnu << BUCKET_POW2_SHIFT] - 1 + CHUNK_SHIFT;
	    nextf[rnu << BUCKET_POW2_SHIFT]
		= nextf[rnu << BUCKET_POW2_SHIFT]->ov_next;
#ifdef DEBUGGING_MSTATS
	    nmalloc[rnu << BUCKET_POW2_SHIFT]--;
	    start_slack -= M_OVERHEAD;
#endif
	    DEBUG_m(PerlIO_printf(Perl_debug_log,
				  "stealing %ld bytes from %ld arena\n",
				  (long) needed, (long) rnu << BUCKET_POW2_SHIFT));
	} else if (chunk_chain
		   && (ovp = (union overhead*) get_from_chain(needed))) {
	    DEBUG_m(PerlIO_printf(Perl_debug_log,
				  "stealing %ld bytes from chain\n",
				  (long) needed));
	} else if (ovp = (union overhead*)
		   get_from_bigger_buckets((rnu << BUCKET_POW2_SHIFT) + 1,
					    needed)) {
	    DEBUG_m(PerlIO_printf(Perl_debug_log,
				  "stealing %ld bytes from bigger buckets\n",
				  (long) needed));
	} else if (needed <= sbrked_remains) {
	    /* Fits in the unused tail of the previous sbrk(). */
	    ovp = (union overhead *)(last_sbrk_top - sbrked_remains);
	    sbrked_remains -= needed;
	    last_op = (char*)ovp;
	} else {
	    /* Need to do (possibly expensive) system call. Try to
	       optimize it for rare calling. */
	    MEM_SIZE require = needed - sbrked_remains;
	    char *cp;

	    if (sbrk_good > 0) {
		/* Round the request up (first-call floor, absolute
		   minimum, then a fraction of what we already have) so
		   future calls can be served from the tail. */
		if (!last_sbrk_top && require < FIRST_SBRK)
		    require = FIRST_SBRK;
		else if (require < MIN_SBRK) require = MIN_SBRK;

		if (require < goodsbrk * MIN_SBRK_FRAC / 100)
		    require = goodsbrk * MIN_SBRK_FRAC / 100;
		require = ((require - 1 + MIN_SBRK) / MIN_SBRK) * MIN_SBRK;
		/* NOTE(review): goodsbrk is only incremented below under
		   DEBUGGING_MSTATS, yet it is read here unconditionally —
		   without DEBUGGING_MSTATS the fraction is always 0.
		   Looks unintended; confirm against later malloc.c. */
	    } else {
		require = needed;
		last_sbrk_top = 0;
		sbrked_remains = 0;
	    }

	    DEBUG_m(PerlIO_printf(Perl_debug_log,
				  "sbrk(%ld) for %ld-byte-long arena\n",
				  (long)require, (long) needed));
	    cp = (char *)sbrk(require);
#ifdef DEBUGGING_MSTATS
	    sbrks++;
#endif
	    if (cp == last_sbrk_top) {
		/* Common case, anything is fine. */
		sbrk_good++;
		ovp = (union overhead *) (cp - sbrked_remains);
		sbrked_remains = require - (needed - sbrked_remains);
	    } else if (cp == (char *)-1) { /* no more room! */
		ovp = (union overhead *)emergency_sbrk(needed);
		if (ovp == (union overhead *)-1)
		    return;
		goto gotit;
	    } else { /* Non-continuous or first sbrk(). */
		long add = sbrked_remains;
		char *newcp;

		if (sbrked_remains) {	/* Put rest into chain, we
					   cannot use it right now. */
		    add_to_chain((void*)(last_sbrk_top - sbrked_remains),
				 sbrked_remains, 0);
		}

		/* Second, check alignment. */
		slack = 0;

#ifndef atarist /* on the atari we dont have to worry about this */
#  ifndef I286 	/* The sbrk(0) call on the I286 always returns the next segment */

		/* CHUNK_SHIFT is 1 for PACK_MALLOC, 0 otherwise. */
		if ((UV)cp & (0x7FF >> CHUNK_SHIFT)) { /* Not aligned. */
		    slack = (0x800 >> CHUNK_SHIFT)
			- ((UV)cp & (0x7FF >> CHUNK_SHIFT));
		    add += slack;
		}
#  endif
#endif /* atarist */

		if (add) {
		    /* Extra sbrk() to restore 2K alignment and/or absorb
		       the abandoned tail of the previous arena. */
		    DEBUG_m(PerlIO_printf(Perl_debug_log,
"sbrk(%ld) to fix non-continuous/off-page sbrk:\n\t%ld for alignement,\t%ld were assumed to come from the tail of the previous sbrk\n",
					  (long)add, (long) slack,
					  (long) sbrked_remains));
		    newcp = (char *)sbrk(add);
#if defined(DEBUGGING_MSTATS)
		    sbrks++;
		    sbrk_slack += add;
#endif
		    if (newcp != cp + require) {
			/* Too bad: even rounding sbrk() is not continuous.*/
			DEBUG_m(PerlIO_printf(Perl_debug_log,
					      "failed to fix bad sbrk()\n"));
#ifdef PACK_MALLOC
			if (slack)
			    croak("panic: Off-page sbrk");
#endif
			if (sbrked_remains) {
			    /* Try again. */
#if defined(DEBUGGING_MSTATS)
			    sbrk_slack += require;
#endif
			    require = needed;
			    DEBUG_m(PerlIO_printf(Perl_debug_log,
						  "straight sbrk(%ld)\n",
						  (long)require));
			    cp = (char *)sbrk(require);
#ifdef DEBUGGING_MSTATS
			    sbrks++;
#endif
			    if (cp == (char *)-1)
				return;
			}
			sbrk_good = -1;	/* Disable optimization!
					   Continue with not-aligned... */
		    } else {
			cp += slack;
			require += sbrked_remains;
		    }
		}

		if (last_sbrk_top) {
		    sbrk_good -= SBRK_FAILURE_PRICE;
		}

		ovp = (union overhead *) cp;
		/*
		 * Round up to minimum allocation size boundary
		 * and deduct from block count to reflect.
		 */

#ifndef I286	/* Again, this should always be ok on an 80286 */
		if ((UV)ovp & 7) {
		    ovp = (union overhead *)(((UV)ovp + 8) & ~7);
		    DEBUG_m(PerlIO_printf(Perl_debug_log,
					  "fixing sbrk(): %d bytes off machine alignement\n",
					  (int)((UV)ovp & 7)));
		    nblks--;
# if defined(DEBUGGING_MSTATS)
		    /* This is only approx. if TWO_POT_OPTIMIZE: */
		    sbrk_slack += (1 << bucket);
# endif
		}
#endif
		sbrked_remains = require - needed;
	    }
	    last_sbrk_top = cp + require;
	    last_op = (char*) cp;
#ifdef DEBUGGING_MSTATS
	    goodsbrk += require;
#endif
	}

  gotit:
	/*
	 * Add new memory allocated to that on
	 * free list for this hash bucket.
	 */
	siz = BUCKET_SIZE(bucket);
#ifdef PACK_MALLOC
	*(u_char*)ovp = bucket;	/* Fill index. */
	if (bucket <= MAX_PACKED) {
	    /* Packed arena: first chunk starts past the index/magic
	       bytes, nblks comes from the table. */
	    ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket));
	    nblks = N_BLKS(bucket);
#  ifdef DEBUGGING_MSTATS
	    start_slack += BLK_SHIFT(bucket);
#  endif
	} else if (bucket < LOG_OF_MIN_ARENA * BUCKETS_PER_POW2) {
	    ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket));
	    siz -= sizeof(union overhead);
	} else ovp++;		/* One chunk per block. */
#endif /* PACK_MALLOC */
	nextf[bucket] = ovp;
#ifdef DEBUGGING_MSTATS
	nmalloc[bucket] += nblks;
	if (bucket > MAX_PACKED) {
	    start_slack += M_OVERHEAD * nblks;
	}
#endif
	/* Thread the arena into a free list of siz-byte blocks. */
  	while (--nblks > 0) {
		ovp->ov_next = (union overhead *)((caddr_t)ovp + siz);
		ovp = (union overhead *)((caddr_t)ovp + siz);
  	}
	/* Not all sbrks return zeroed memory.*/
	ovp->ov_next = (union overhead *)NULL;
#ifdef PACK_MALLOC
	if (bucket == 7*BUCKETS_PER_POW2) { /* Special case, explanation is above. */
	    /* Back the first 128-byte chunk up by one header so its top
	       lands on the arena boundary (see the PACK_MALLOC notes). */
	    union overhead *n_op = nextf[7*BUCKETS_PER_POW2]->ov_next;
	    nextf[7*BUCKETS_PER_POW2] =
		(union overhead *)((caddr_t)nextf[7*BUCKETS_PER_POW2]
				   - sizeof(union overhead));
	    nextf[7*BUCKETS_PER_POW2]->ov_next = n_op;
	}
#endif /* !PACK_MALLOC */
}
953
/*
 * Return a block obtained from malloc() to its bucket's free list.
 * Blocks failing the magic check are ignored with a (suppressible via
 * PERL_BADFREE=0) warning rather than corrupting the lists; under
 * RCHECK the trailing range-magic bytes are verified as well.
 */
Free_t
free(void *mp)
{
	register MEM_SIZE size;
	register union overhead *ovp;
	char *cp = (char*)mp;
#ifdef PACK_MALLOC
	u_char bucket;
#endif

	DEBUG_m(PerlIO_printf(Perl_debug_log,
			      "0x%lx: (%05lu) free\n",
			      (unsigned long)cp, (unsigned long)(an++)));

	if (cp == NULL)
		return;
	/* Back up over the overhead header (CHUNK_SHIFT is 0 when
	   PACK_MALLOC keeps bookkeeping in the arena head). */
	ovp = (union overhead *)((caddr_t)cp
				- sizeof (union overhead) * CHUNK_SHIFT);
#ifdef PACK_MALLOC
	bucket = OV_INDEX(ovp);
#endif
	/* NOTE(review): `bucket' is declared/assigned only under
	   PACK_MALLOC, yet referenced below; presumably the macro
	   configuration guarantees consistent combinations — confirm. */
#ifdef IGNORE_SMALL_BAD_FREE
	if ((bucket >= FIRST_BUCKET_WITH_CHECK)
	    && (OV_MAGIC(ovp, bucket) != MAGIC))
#else
	if (OV_MAGIC(ovp, bucket) != MAGIC)
#endif
	    {
		/* Bad or duplicate free: warn (once per call) unless
		   suppressed, and leave the lists untouched. */
		static int bad_free_warn = -1;
		if (bad_free_warn == -1) {
		    char *pbf = PerlEnv_getenv("PERL_BADFREE");
		    bad_free_warn = (pbf) ? atoi(pbf) : 1;
		}
		if (!bad_free_warn)
		    return;
#ifdef RCHECK
		warn("%s free() ignored",
		    ovp->ov_rmagic == RMAGIC - 1 ? "Duplicate" : "Bad");
#else
		warn("Bad free() ignored");
#endif
		return;				/* sanity */
	    }
	MUTEX_LOCK(&malloc_mutex);
#ifdef RCHECK
  	ASSERT(ovp->ov_rmagic == RMAGIC);
	if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) {
	    int i;
	    MEM_SIZE nbytes = ovp->ov_size + 1;

	    /* Verify the pad bytes and the trailing magic word that
	       malloc() planted after the user data. */
	    if ((i = nbytes & 3)) {
		i = 4 - i;
		while (i--) {
		    ASSERT(*((char *)((caddr_t)ovp + nbytes - RSLOP + i))
			   == RMAGIC_C);
		}
	    }
	    nbytes = (nbytes + 3) &~ 3;
	    ASSERT(*(u_int *)((caddr_t)ovp + nbytes - RSLOP) == RMAGIC);
	}
	/* Mark as freed so a second free() is reported as "Duplicate". */
	ovp->ov_rmagic = RMAGIC - 1;
#endif
  	ASSERT(OV_INDEX(ovp) < NBUCKETS);
  	size = OV_INDEX(ovp);
	ovp->ov_next = nextf[size];
  	nextf[size] = ovp;
	MUTEX_UNLOCK(&malloc_mutex);
}
1022
1023/*
1024 * When a program attempts "storage compaction" as mentioned in the
1025 * old malloc man page, it realloc's an already freed block. Usually
1026 * this is the last block it freed; occasionally it might be farther
1027 * back. We have to search all the free lists for the block in order
1028 * to determine its bucket: 1st we make one pass thru the lists
1029 * checking only the first block in each; if that fails we search
378cc40b 1030 * ``reall_srchlen'' blocks in each list for a match (the variable
8d063cd8 1031 * is extern so the caller can modify it). If that fails we just copy
1032 * however many bytes was given to realloc() and hope it's not huge.
1033 */
378cc40b 1034int reall_srchlen = 4; /* 4 should be plenty, -1 =>'s whole list */
8d063cd8 1035
2304df62 1036Malloc_t
8ac85365 1037realloc(void *mp, size_t nbytes)
8d063cd8 1038{
ee0007ab 1039 register MEM_SIZE onb;
72aaf631 1040 union overhead *ovp;
e8bc2b5c 1041 char *res, prev_bucket;
1042 register int bucket;
1043 int was_alloced = 0, incr;
352d5a3a 1044 char *cp = (char*)mp;
8d063cd8 1045
e8bc2b5c 1046#if defined(DEBUGGING) || !defined(PERL_CORE)
ee0007ab 1047 MEM_SIZE size = nbytes;
45d8adaa 1048
45d8adaa 1049 if ((long)nbytes < 0)
463ee0b2 1050 croak("panic: realloc");
45d8adaa 1051#endif
e8bc2b5c 1052
1053 BARK_64K_LIMIT("Reallocation",nbytes,size);
1054 if (!cp)
1055 return malloc(nbytes);
45d8adaa 1056
11343788 1057 MUTEX_LOCK(&malloc_mutex);
72aaf631 1058 ovp = (union overhead *)((caddr_t)cp
e8bc2b5c 1059 - sizeof (union overhead) * CHUNK_SHIFT);
1060 bucket = OV_INDEX(ovp);
1061#ifdef IGNORE_SMALL_BAD_FREE
1062 if ((bucket < FIRST_BUCKET_WITH_CHECK)
1063 || (OV_MAGIC(ovp, bucket) == MAGIC))
1064#else
1065 if (OV_MAGIC(ovp, bucket) == MAGIC)
1066#endif
1067 {
55497cff 1068 was_alloced = 1;
8d063cd8 1069 } else {
1070 /*
1071 * Already free, doing "compaction".
1072 *
1073 * Search for the old block of memory on the
1074 * free list. First, check the most common
1075 * case (last element free'd), then (this failing)
378cc40b 1076 * the last ``reall_srchlen'' items free'd.
8d063cd8 1077 * If all lookups fail, then assume the size of
1078 * the memory block being realloc'd is the
1079 * smallest possible.
1080 */
e8bc2b5c 1081 if ((bucket = findbucket(ovp, 1)) < 0 &&
1082 (bucket = findbucket(ovp, reall_srchlen)) < 0)
1083 bucket = 0;
8d063cd8 1084 }
e8bc2b5c 1085 onb = BUCKET_SIZE_REAL(bucket);
55497cff 1086 /*
1087 * avoid the copy if same size block.
e8bc2b5c 1088 * We are not agressive with boundary cases. Note that it might
1089 * (for a small number of cases) give false negative if
55497cff 1090 * both new size and old one are in the bucket for
e8bc2b5c 1091 * FIRST_BIG_POW2, but the new one is near the lower end.
1092 *
1093 * We do not try to go to 1.5 times smaller bucket so far.
55497cff 1094 */
e8bc2b5c 1095 if (nbytes > onb) incr = 1;
1096 else {
1097#ifdef DO_NOT_TRY_HARDER_WHEN_SHRINKING
1098 if ( /* This is a little bit pessimal if PACK_MALLOC: */
1099 nbytes > ( (onb >> 1) - M_OVERHEAD )
1100# ifdef TWO_POT_OPTIMIZE
1101 || (bucket == FIRST_BIG_POW2 && nbytes >= LAST_SMALL_BOUND )
1102# endif
1103 )
1104#else /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */
1105 prev_bucket = ( (bucket > MAX_PACKED + 1)
1106 ? bucket - BUCKETS_PER_POW2
1107 : bucket - 1);
1108 if (nbytes > BUCKET_SIZE_REAL(prev_bucket))
1109#endif /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */
1110 incr = 0;
1111 else incr = -1;
1112 }
1113 if (!was_alloced
2ce36478 1114#ifdef STRESS_REALLOC
e8bc2b5c 1115 || 1 /* always do it the hard way */
2ce36478 1116#endif
e8bc2b5c 1117 ) goto hard_way;
1118 else if (incr == 0) {
852c2e52 1119 inplace_label:
a687059c 1120#ifdef RCHECK
1121 /*
1122 * Record new allocated size of block and
1123 * bound space with magic numbers.
1124 */
72aaf631 1125 if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) {
e8bc2b5c 1126 int i, nb = ovp->ov_size + 1;
1127
1128 if ((i = nb & 3)) {
1129 i = 4 - i;
1130 while (i--) {
1131 ASSERT(*((char *)((caddr_t)ovp + nb - RSLOP + i)) == RMAGIC_C);
1132 }
1133 }
1134 nb = (nb + 3) &~ 3;
1135 ASSERT(*(u_int *)((caddr_t)ovp + nb - RSLOP) == RMAGIC);
a687059c 1136 /*
1137 * Convert amount of memory requested into
1138 * closest block size stored in hash buckets
1139 * which satisfies request. Account for
1140 * space used per block for accounting.
1141 */
cf5c4ad8 1142 nbytes += M_OVERHEAD;
72aaf631 1143 ovp->ov_size = nbytes - 1;
e8bc2b5c 1144 if ((i = nbytes & 3)) {
1145 i = 4 - i;
1146 while (i--)
1147 *((char *)((caddr_t)ovp + nbytes - RSLOP + i))
1148 = RMAGIC_C;
1149 }
1150 nbytes = (nbytes + 3) &~ 3;
72aaf631 1151 *((u_int *)((caddr_t)ovp + nbytes - RSLOP)) = RMAGIC;
a687059c 1152 }
1153#endif
45d8adaa 1154 res = cp;
11343788 1155 MUTEX_UNLOCK(&malloc_mutex);
e8bc2b5c 1156 } else if (incr == 1 && (cp - M_OVERHEAD == last_op)
1157 && (onb > (1 << LOG_OF_MIN_ARENA))) {
1158 MEM_SIZE require, newarena = nbytes, pow;
1159 int shiftr;
1160
1161 POW2_OPTIMIZE_ADJUST(newarena);
1162 newarena = newarena + M_OVERHEAD;
1163 /* newarena = (newarena + 3) &~ 3; */
1164 shiftr = (newarena - 1) >> LOG_OF_MIN_ARENA;
1165 pow = LOG_OF_MIN_ARENA + 1;
1166 /* apart from this loop, this is O(1) */
1167 while (shiftr >>= 1)
1168 pow++;
1169 newarena = (1 << pow) + POW2_OPTIMIZE_SURPLUS(pow * BUCKETS_PER_POW2);
1170 require = newarena - onb - M_OVERHEAD;
1171
1172 if (require <= sbrked_remains) {
1173 sbrked_remains -= require;
1174 } else {
1175 char *cp;
1176
1177 require -= sbrked_remains;
1178 /* We do not try to optimize sbrks here, we go for place. */
1179 cp = (char*) sbrk(require);
1180#ifdef DEBUGGING_MSTATS
1181 sbrks++;
1182 goodsbrk += require;
1183#endif
1184 if (cp == last_sbrk_top) {
1185 sbrked_remains = 0;
1186 last_sbrk_top = cp + require;
1187 } else {
1188 /* Report the failure: */
1189 if (sbrked_remains)
1190 add_to_chain((void*)(last_sbrk_top - sbrked_remains),
1191 sbrked_remains, 0);
1192 add_to_chain((void*)cp, require, 0);
1193 sbrk_good -= SBRK_FAILURE_PRICE;
1194 sbrked_remains = 0;
1195 last_sbrk_top = 0;
1196 last_op = 0;
1197 goto hard_way;
1198 }
1199 }
1200
1201#ifdef DEBUGGING_MSTATS
1202 nmalloc[bucket]--;
1203 nmalloc[pow * BUCKETS_PER_POW2]++;
1204#endif
1205 *(cp - M_OVERHEAD) = pow * BUCKETS_PER_POW2; /* Fill index. */
852c2e52 1206 goto inplace_label;
e8bc2b5c 1207 } else {
1208 hard_way:
1209 MUTEX_UNLOCK(&malloc_mutex);
1210 if ((res = (char*)malloc(nbytes)) == NULL)
1211 return (NULL);
1212 if (cp != res) /* common optimization */
1213 Copy(cp, res, (MEM_SIZE)(nbytes<onb?nbytes:onb), char);
1214 if (was_alloced)
1215 free(cp);
45d8adaa 1216 }
1217
e8bc2b5c 1218 DEBUG_m(PerlIO_printf(Perl_debug_log, "0x%lu: (%05lu) rfree\n",
1219 (unsigned long)res,(unsigned long)(an++)));
1220 DEBUG_m(PerlIO_printf(Perl_debug_log,
1221 "0x%lx: (%05lu) realloc %ld bytes\n",
1222 (unsigned long)res,(unsigned long)(an++),
1223 (long)size));
2304df62 1224 return ((Malloc_t)res);
8d063cd8 1225}
1226
1227/*
1228 * Search ``srchlen'' elements of each free list for a block whose
1229 * header starts at ``freep''. If srchlen is -1 search the whole list.
1230 * Return bucket number, or -1 if not found.
1231 */
ee0007ab 1232static int
8ac85365 1233findbucket(union overhead *freep, int srchlen)
8d063cd8 1234{
1235 register union overhead *p;
1236 register int i, j;
1237
1238 for (i = 0; i < NBUCKETS; i++) {
1239 j = 0;
1240 for (p = nextf[i]; p && j != srchlen; p = p->ov_next) {
1241 if (p == freep)
1242 return (i);
1243 j++;
1244 }
1245 }
1246 return (-1);
1247}
1248
cf5c4ad8 1249Malloc_t
8ac85365 1250calloc(register size_t elements, register size_t size)
cf5c4ad8 1251{
1252 long sz = elements * size;
1253 Malloc_t p = malloc(sz);
1254
1255 if (p) {
1256 memset((void*)p, 0, sz);
1257 }
1258 return p;
1259}
1260
e8bc2b5c 1261MEM_SIZE
1262malloced_size(void *p)
1263{
1264 int bucket = OV_INDEX((union overhead *)p);
1265
1266 return BUCKET_SIZE_REAL(bucket);
1267}
1268
c07a80fd 1269#ifdef DEBUGGING_MSTATS
e8bc2b5c 1270
1271# ifdef BUCKETS_ROOT2
1272# define MIN_EVEN_REPORT 6
1273# else
1274# define MIN_EVEN_REPORT MIN_BUCKET
1275# endif
8d063cd8 1276/*
1277 * mstats - print out statistics about malloc
1278 *
1279 * Prints two lines of numbers, one showing the length of the free list
1280 * for each size category, the second showing the number of mallocs -
1281 * frees for each size category.
1282 */
/*
 * dump_mstats - print malloc statistics to stderr, prefixed by ``s''
 * (a NULL/empty ``s'' suppresses the header line).
 *
 * Output: one row of free-list lengths and one row of outstanding
 * (malloc'd minus freed) counts per even-power bucket; under
 * BUCKETS_ROOT2 an extra indented row covers the odd (sqrt(2)-step)
 * buckets.  A final line totals the sbrk() traffic and the odd ends
 * (padding, headers, chained fragments, arena tail).
 */
void
dump_mstats(char *s)
{
  	register int i, j;
  	register union overhead *p;
	int topbucket=0, topbucket_ev=0, topbucket_odd=0, totfree=0, total=0;
	u_int nfree[NBUCKETS];		/* free-list length per bucket */
	int total_chain = 0;
	struct chunk_chain_s* nextchain = chunk_chain;

	/* Walk every free list, counting entries and accumulating the
	 * free/total byte figures; remember the highest non-empty
	 * bucket (split by parity for the ROOT2 report). */
  	for (i = MIN_BUCKET ; i < NBUCKETS; i++) {
  		for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
  			;
  		nfree[i] = j;
		totfree += nfree[i] * BUCKET_SIZE_REAL(i);
		total += nmalloc[i] * BUCKET_SIZE_REAL(i);
		if (nmalloc[i]) {
		    i % 2 ? (topbucket_odd = i) : (topbucket_ev = i);
		    topbucket = i;
		}
  	}
  	if (s)
	    /* NOTE(review): the "(buckets" parenthesis is never closed
	     * in this message -- cosmetic only. */
	    PerlIO_printf(PerlIO_stderr(),
			  "Memory allocation statistics %s (buckets %d(%d)..%d(%d)\n",
			  s,
			  BUCKET_SIZE_REAL(MIN_BUCKET),
			  BUCKET_SIZE(MIN_BUCKET),
			  BUCKET_SIZE_REAL(topbucket), BUCKET_SIZE(topbucket));
  	PerlIO_printf(PerlIO_stderr(), "%8d free:", totfree);
	/* Free-list lengths, one column per even-power bucket; column
	 * width shrinks for the bigger (rarer) buckets. */
  	for (i = MIN_EVEN_REPORT; i <= topbucket; i += BUCKETS_PER_POW2) {
  		PerlIO_printf(PerlIO_stderr(),
			      ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
			       ? " %5d"
			       : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
			      nfree[i]);
  	}
#ifdef BUCKETS_ROOT2
	/* Second, indented row for the odd (sqrt(2)-step) buckets. */
	PerlIO_printf(PerlIO_stderr(), "\n\t   ");
  	for (i = MIN_BUCKET + 1; i <= topbucket_odd; i += BUCKETS_PER_POW2) {
  		PerlIO_printf(PerlIO_stderr(),
			      ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
			       ? " %5d"
			       : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
			      nfree[i]);
  	}
#endif
	/* Outstanding allocations: mallocs minus frees per bucket. */
  	PerlIO_printf(PerlIO_stderr(), "\n%8d used:", total - totfree);
  	for (i = MIN_EVEN_REPORT; i <= topbucket; i += BUCKETS_PER_POW2) {
  		PerlIO_printf(PerlIO_stderr(),
			      ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
			       ? " %5d"
			       : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
			      nmalloc[i] - nfree[i]);
  	}
#ifdef BUCKETS_ROOT2
	PerlIO_printf(PerlIO_stderr(), "\n\t   ");
  	for (i = MIN_BUCKET + 1; i <= topbucket_odd; i += BUCKETS_PER_POW2) {
  		PerlIO_printf(PerlIO_stderr(),
			      ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
			       ? " %5d"
			       : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
			      nmalloc[i] - nfree[i]);
  	}
#endif
	/* Sum the sizes of chunks parked on the reuse chain. */
	while (nextchain) {
	    total_chain += nextchain->size;
	    nextchain = nextchain->next;
	}
	PerlIO_printf(PerlIO_stderr(), "\nTotal sbrk(): %d/%d:%d. Odd ends: pad+heads+chain+tail: %d+%d+%d+%d.\n",
		      goodsbrk + sbrk_slack, sbrks, sbrk_good, sbrk_slack,
		      start_slack, total_chain, sbrked_remains);
}
1355#else
/*
 * dump_mstats - no-op stub: malloc statistics are only collected when
 * DEBUGGING_MSTATS is defined, so there is nothing to report here.
 */
void
dump_mstats(char *s)
{
}
1360#endif
a687059c 1361#endif /* lint */
cf5c4ad8 1362
1363
1364#ifdef USE_PERL_SBRK
1365
760ac839 1366# ifdef NeXT
1367# define PERL_SBRK_VIA_MALLOC
1368# endif
1369
1370# ifdef PERL_SBRK_VIA_MALLOC
72e5b9db 1371# if defined(HIDEMYMALLOC) || defined(EMBEDMYMALLOC)
760ac839 1372# undef malloc
1373# else
72e5b9db 1374# include "Error: -DPERL_SBRK_VIA_MALLOC needs -D(HIDE|EMBED)MYMALLOC"
760ac839 1375# endif
cf5c4ad8 1376
1377/* it may seem schizophrenic to use perl's malloc and let it call system */
1378/* malloc, the reason for that is only the 3.2 version of the OS that had */
1379/* frequent core dumps within nxzonefreenolock. This sbrk routine put an */
1380/* end to the cores */
1381
760ac839 1382# define SYSTEM_ALLOC(a) malloc(a)
cf5c4ad8 1383
760ac839 1384# endif /* PERL_SBRK_VIA_MALLOC */
cf5c4ad8 1385
1386static IV Perl_sbrk_oldchunk;
1387static long Perl_sbrk_oldsize;
1388
760ac839 1389# define PERLSBRK_32_K (1<<15)
1390# define PERLSBRK_64_K (1<<16)
cf5c4ad8 1391
b63effbb 1392Malloc_t
cf5c4ad8 1393Perl_sbrk(size)
1394int size;
1395{
1396 IV got;
1397 int small, reqsize;
1398
1399 if (!size) return 0;
55497cff 1400#ifdef PERL_CORE
cf5c4ad8 1401 reqsize = size; /* just for the DEBUG_m statement */
1402#endif
57569e04 1403#ifdef PACK_MALLOC
1404 size = (size + 0x7ff) & ~0x7ff;
1405#endif
cf5c4ad8 1406 if (size <= Perl_sbrk_oldsize) {
1407 got = Perl_sbrk_oldchunk;
1408 Perl_sbrk_oldchunk += size;
1409 Perl_sbrk_oldsize -= size;
1410 } else {
1411 if (size >= PERLSBRK_32_K) {
1412 small = 0;
1413 } else {
cf5c4ad8 1414 size = PERLSBRK_64_K;
1415 small = 1;
1416 }
1417 got = (IV)SYSTEM_ALLOC(size);
57569e04 1418#ifdef PACK_MALLOC
1419 got = (got + 0x7ff) & ~0x7ff;
1420#endif
cf5c4ad8 1421 if (small) {
1422 /* Chunk is small, register the rest for future allocs. */
1423 Perl_sbrk_oldchunk = got + reqsize;
1424 Perl_sbrk_oldsize = size - reqsize;
1425 }
1426 }
1427
fb73857a 1428 DEBUG_m(PerlIO_printf(Perl_debug_log, "sbrk malloc size %ld (reqsize %ld), left size %ld, give addr 0x%lx\n",
cf5c4ad8 1429 size, reqsize, Perl_sbrk_oldsize, got));
cf5c4ad8 1430
1431 return (void *)got;
1432}
1433
1434#endif /* ! defined USE_PERL_SBRK */