applied patch, regen headers
[p5sagit/p5-mst-13.2.git] / malloc.c
CommitLineData
a0d0e21e 1/* malloc.c
8d063cd8 2 *
8d063cd8 3 */
4
87c6202a 5/*
6 Here are some notes on configuring Perl's malloc.
7
8 There are two macros which serve as bulk disablers of advanced
9 features of this malloc: NO_FANCY_MALLOC, PLAIN_MALLOC (undef by
10 default). Look in the list of default values below to understand
11 their exact effect. Defining NO_FANCY_MALLOC returns malloc.c to the
12 state of the malloc in Perl 5.004. Additionally defining PLAIN_MALLOC
13 returns it to the state as of Perl 5.000.
14
15 Note that some of the settings below may be ignored in the code based
16 on values of other macros. The PERL_CORE symbol is only defined when
17 perl itself is being compiled (so malloc can make some assumptions
18 about perl's facilities being available to it).
19
20 Each config option has a short description, followed by its name,
21 default value, and a comment about the default (if applicable). Some
22 options take a precise value, while the others are just boolean.
23 The boolean ones are listed first.
24
25 # Enable code for an emergency memory pool in $^M. See perlvar.pod
26 # for a description of $^M.
27 PERL_EMERGENCY_SBRK (!PLAIN_MALLOC && PERL_CORE)
28
29 # Enable code for printing memory statistics.
30 DEBUGGING_MSTATS (!PLAIN_MALLOC && PERL_CORE)
31
32 # Move allocation info for small buckets into separate areas.
33 # Memory optimization (especially for small allocations, of the
34 # less than 64 bytes). Since perl usually makes a large number
35 # of small allocations, this is usually a win.
36 PACK_MALLOC (!PLAIN_MALLOC && !RCHECK)
37
38 # Add one page to big powers of two when calculating bucket size.
39 # This is targeted at big allocations, as are common in image
40 # processing.
41 TWO_POT_OPTIMIZE !PLAIN_MALLOC
42
43 # Use intermediate bucket sizes between powers-of-two. This is
44 # generally a memory optimization, and a (small) speed pessimization.
45 BUCKETS_ROOT2 !NO_FANCY_MALLOC
46
47 # Do not check small deallocations for bad free(). Memory
48 # and speed optimization, error reporting pessimization.
49 IGNORE_SMALL_BAD_FREE (!NO_FANCY_MALLOC && !RCHECK)
50
51 # Use table lookup to decide in which bucket a given allocation will go.
52 SMALL_BUCKET_VIA_TABLE !NO_FANCY_MALLOC
53
54 # Use system-malloc() to emulate sbrk(). Normally only used with broken
55 # sbrk()s.
56 PERL_SBRK_VIA_MALLOC undef
57
58 # Disable memory overwrite checking with DEBUGGING. Memory and speed
59 # optimization, error reporting pessimization.
60 NO_RCHECK undef
61
62 # Enable memory overwrite checking with DEBUGGING. Memory and speed
63 # pessimization, error reporting optimization
64 RCHECK (DEBUGGING && !NO_RCHECK)
65
66 # Failed allocations bigger than this size croak (if
67 # PERL_EMERGENCY_SBRK is enabled) without touching $^M. See
68 # perlvar.pod for a description of $^M.
69 BIG_SIZE (1<<16) # 64K
70
71 # Starting from this power of two, add an extra page to the
72 # size of the bucket. This enables optimized allocations of sizes
73 # close to powers of 2. Note that the value is indexed at 0.
74 FIRST_BIG_POW2 15 # 32K, 16K is used too often
75
76 # Estimate of minimal memory footprint. malloc uses this value to
77 # request the most reasonable largest blocks of memory from the system.
78 FIRST_SBRK (48*1024)
79
80 # Round up sbrk()s to multiples of this.
81 MIN_SBRK 2048
82
83 # Round up sbrk()s to multiples of this percent of footprint.
84 MIN_SBRK_FRAC 3
85
86 # Add this much memory to big powers of two to get the bucket size.
87 PERL_PAGESIZE 4096
88
89 # This many sbrk() discontinuities should be tolerated even
90 # from the start without deciding that sbrk() is usually
91 # discontinuous.
92 SBRK_ALLOW_FAILURES 3
93
94 # This many continuous sbrk()s compensate for one discontinuous one.
95 SBRK_FAILURE_PRICE 50
96
97 # Which allocator to use if PERL_SBRK_VIA_MALLOC
98 SYSTEM_ALLOC(a) malloc(a)
99
100 This implementation assumes that calling PerlIO_printf() does not
101 result in any memory allocation calls (used during a panic).
102
103 */
104
e8bc2b5c 105#ifndef NO_FANCY_MALLOC
106# ifndef SMALL_BUCKET_VIA_TABLE
107# define SMALL_BUCKET_VIA_TABLE
108# endif
109# ifndef BUCKETS_ROOT2
110# define BUCKETS_ROOT2
111# endif
112# ifndef IGNORE_SMALL_BAD_FREE
113# define IGNORE_SMALL_BAD_FREE
114# endif
3562ef9b 115#endif
116
e8bc2b5c 117#ifndef PLAIN_MALLOC /* Bulk enable features */
118# ifndef PACK_MALLOC
119# define PACK_MALLOC
120# endif
121# ifndef TWO_POT_OPTIMIZE
122# define TWO_POT_OPTIMIZE
123# endif
d720c441 124# if defined(PERL_CORE) && !defined(PERL_EMERGENCY_SBRK)
125# define PERL_EMERGENCY_SBRK
e8bc2b5c 126# endif
127# if defined(PERL_CORE) && !defined(DEBUGGING_MSTATS)
128# define DEBUGGING_MSTATS
129# endif
130#endif
131
132#define MIN_BUC_POW2 (sizeof(void*) > 4 ? 3 : 2) /* Allow for 4-byte arena. */
133#define MIN_BUCKET (MIN_BUC_POW2 * BUCKETS_PER_POW2)
134
135#if !(defined(I286) || defined(atarist))
136 /* take 2k unless the block is bigger than that */
137# define LOG_OF_MIN_ARENA 11
138#else
139 /* take 16k unless the block is bigger than that
140 (80286s like large segments!), probably good on the atari too */
141# define LOG_OF_MIN_ARENA 14
142#endif
143
8d063cd8 144#ifndef lint
1944739a 145# if defined(DEBUGGING) && !defined(NO_RCHECK)
146# define RCHECK
147# endif
e8bc2b5c 148# if defined(RCHECK) && defined(IGNORE_SMALL_BAD_FREE)
149# undef IGNORE_SMALL_BAD_FREE
150# endif
8d063cd8 151/*
152 * malloc.c (Caltech) 2/21/82
153 * Chris Kingsley, kingsley@cit-20.
154 *
155 * This is a very fast storage allocator. It allocates blocks of a small
156 * number of different sizes, and keeps free lists of each size. Blocks that
157 * don't exactly fit are passed up to the next larger size. In this
158 * implementation, the available sizes are 2^n-4 (or 2^n-12) bytes long.
cf5c4ad8 159 * If PACK_MALLOC is defined, small blocks are 2^n bytes long.
8d063cd8 160 * This is designed for use in a program that uses vast quantities of memory,
161 * but bombs when it runs out.
162 */
163
d720c441 164#ifdef PERL_CORE
165# include "EXTERN.h"
166# include "perl.h"
167#else
168# ifdef PERL_FOR_X2P
169# include "../EXTERN.h"
170# include "../perl.h"
171# else
172# include <stdlib.h>
173# include <stdio.h>
174# include <memory.h>
175# define _(arg) arg
176# ifndef Malloc_t
177# define Malloc_t void *
178# endif
179# ifndef MEM_SIZE
180# define MEM_SIZE unsigned long
181# endif
182# ifndef LONG_MAX
183# define LONG_MAX 0x7FFFFFFF
184# endif
185# ifndef UV
186# define UV unsigned long
187# endif
188# ifndef caddr_t
189# define caddr_t char *
190# endif
191# ifndef Free_t
192# define Free_t void
193# endif
194# define Copy(s,d,n,t) (void)memcpy((char*)(d),(char*)(s), (n) * sizeof(t))
195# define PerlEnv_getenv getenv
196# define PerlIO_printf fprintf
197# define PerlIO_stderr() stderr
198# endif
#  ifndef croak	/* make depend */
     /* croak() fallback for standalone (non-perl) builds: report the
      * message and terminate.  Wrapped in do { } while (0) so that the
      * two statements behave as a single one -- the previous bare
      * "warn(...); exit(1);" form would leave exit(1) OUTSIDE an
      * unbraced `if', exiting unconditionally. */
#    define croak(mess, arg) do { warn((mess), (arg)); exit(1); } while (0)
#  endif
#  ifndef warn
     /* warn() fallback: print the formatted message to stderr.  A single
      * expression (no trailing semicolon), so it is safe in any
      * statement context; callers supply their own `;'. */
#    define warn(mess, arg) fprintf(stderr, (mess), (arg))
#  endif
205# ifdef DEBUG_m
206# undef DEBUG_m
207# endif
208# define DEBUG_m(a)
209# ifdef DEBUGGING
210# undef DEBUGGING
211# endif
212#endif
213
214#ifndef MUTEX_LOCK
215# define MUTEX_LOCK(l)
216#endif
217
218#ifndef MUTEX_UNLOCK
219# define MUTEX_UNLOCK(l)
220#endif
221
760ac839 222#ifdef DEBUGGING
e8bc2b5c 223# undef DEBUG_m
224# define DEBUG_m(a) if (debug & 128) a
760ac839 225#endif
226
135863df 227/* I don't much care whether these are defined in sys/types.h--LAW */
228
229#define u_char unsigned char
230#define u_int unsigned int
e8bc2b5c 231
232#ifdef HAS_QUAD
233# define u_bigint UV /* Needs to eat *void. */
234#else /* needed? */
235# define u_bigint unsigned long /* Needs to eat *void. */
236#endif
237
135863df 238#define u_short unsigned short
8d063cd8 239
cf5c4ad8 240/* 286 and atarist like big chunks, which gives too much overhead. */
241#if (defined(RCHECK) || defined(I286) || defined(atarist)) && defined(PACK_MALLOC)
e8bc2b5c 242# undef PACK_MALLOC
cf5c4ad8 243#endif
244
8d063cd8 245/*
cf5c4ad8 246 * The description below is applicable if PACK_MALLOC is not defined.
247 *
8d063cd8 248 * The overhead on a block is at least 4 bytes. When free, this space
249 * contains a pointer to the next free block, and the bottom two bits must
250 * be zero. When in use, the first byte is set to MAGIC, and the second
251 * byte is the size index. The remaining bytes are for alignment.
252 * If range checking is enabled and the size of the block fits
253 * in two bytes, then the top two bytes hold the size of the requested block
254 * plus the range checking words, and the header word MINUS ONE.
255 */
256union overhead {
257 union overhead *ov_next; /* when free */
85e6fe83 258#if MEM_ALIGNBYTES > 4
c623bd54 259 double strut; /* alignment problems */
a687059c 260#endif
8d063cd8 261 struct {
262 u_char ovu_magic; /* magic number */
263 u_char ovu_index; /* bucket # */
264#ifdef RCHECK
265 u_short ovu_size; /* actual block size */
266 u_int ovu_rmagic; /* range magic number */
267#endif
268 } ovu;
269#define ov_magic ovu.ovu_magic
270#define ov_index ovu.ovu_index
271#define ov_size ovu.ovu_size
272#define ov_rmagic ovu.ovu_rmagic
273};
274
760ac839 275#ifdef DEBUGGING
d720c441 276static void botch _((char *diag, char *s));
a0d0e21e 277#endif
278static void morecore _((int bucket));
279static int findbucket _((union overhead *freep, int srchlen));
280
8d063cd8 281#define MAGIC 0xff /* magic # on accounting info */
282#define RMAGIC 0x55555555 /* magic # on range info */
e8bc2b5c 283#define RMAGIC_C 0x55 /* magic # on range info */
284
8d063cd8 285#ifdef RCHECK
c2a5c2d2 286# define RSLOP sizeof (u_int)
287# ifdef TWO_POT_OPTIMIZE
e8bc2b5c 288# define MAX_SHORT_BUCKET (12 * BUCKETS_PER_POW2)
c2a5c2d2 289# else
e8bc2b5c 290# define MAX_SHORT_BUCKET (13 * BUCKETS_PER_POW2)
c2a5c2d2 291# endif
8d063cd8 292#else
c2a5c2d2 293# define RSLOP 0
8d063cd8 294#endif
295
e8bc2b5c 296#if !defined(PACK_MALLOC) && defined(BUCKETS_ROOT2)
297# undef BUCKETS_ROOT2
298#endif
299
300#ifdef BUCKETS_ROOT2
301# define BUCKET_TABLE_SHIFT 2
302# define BUCKET_POW2_SHIFT 1
303# define BUCKETS_PER_POW2 2
304#else
305# define BUCKET_TABLE_SHIFT MIN_BUC_POW2
306# define BUCKET_POW2_SHIFT 0
307# define BUCKETS_PER_POW2 1
308#endif
309
274c7500 310#if !defined(MEM_ALIGNBYTES) || ((MEM_ALIGNBYTES > 4) && !defined(STRICT_ALIGNMENT))
311/* Figure out the alignment of void*. */
312struct aligner {
313 char c;
314 void *p;
315};
316# define ALIGN_SMALL ((int)((caddr_t)&(((struct aligner*)0)->p)))
317#else
318# define ALIGN_SMALL MEM_ALIGNBYTES
319#endif
320
321#define IF_ALIGN_8(yes,no) ((ALIGN_SMALL>4) ? (yes) : (no))
322
e8bc2b5c 323#ifdef BUCKETS_ROOT2
324# define MAX_BUCKET_BY_TABLE 13
325static u_short buck_size[MAX_BUCKET_BY_TABLE + 1] =
326 {
327 0, 0, 0, 0, 4, 4, 8, 12, 16, 24, 32, 48, 64, 80,
328 };
329# define BUCKET_SIZE(i) ((i) % 2 ? buck_size[i] : (1 << ((i) >> BUCKET_POW2_SHIFT)))
330# define BUCKET_SIZE_REAL(i) ((i) <= MAX_BUCKET_BY_TABLE \
331 ? buck_size[i] \
332 : ((1 << ((i) >> BUCKET_POW2_SHIFT)) \
333 - MEM_OVERHEAD(i) \
334 + POW2_OPTIMIZE_SURPLUS(i)))
335#else
336# define BUCKET_SIZE(i) (1 << ((i) >> BUCKET_POW2_SHIFT))
337# define BUCKET_SIZE_REAL(i) (BUCKET_SIZE(i) - MEM_OVERHEAD(i) + POW2_OPTIMIZE_SURPLUS(i))
338#endif
339
340
cf5c4ad8 341#ifdef PACK_MALLOC
e8bc2b5c 342/* In this case it is assumed that if we do sbrk() in 2K units, we
343 * will get 2K aligned arenas (at least after some initial
344 * alignment). The bucket number of the given subblock is on the start
345 * of 2K arena which contains the subblock. Several following bytes
346 * contain the magic numbers for the subblocks in the block.
cf5c4ad8 347 *
348 * Sizes of chunks are powers of 2 for chunks in buckets <=
349 * MAX_PACKED, after this they are (2^n - sizeof(union overhead)) (to
350 * get alignment right).
351 *
e8bc2b5c 352 * Consider an arena for 2^n with n>MAX_PACKED. We suppose that
353 * starts of all the chunks in a 2K arena are in different
354 * 2^n-byte-long chunks. If the top of the last chunk is aligned on a
355 * boundary of 2K block, this means that sizeof(union
356 * overhead)*"number of chunks" < 2^n, or sizeof(union overhead)*2K <
357 * 4^n, or n > 6 + log2(sizeof()/2)/2, since a chunk of size 2^n -
358 * overhead is used. Since this rules out n = 7 for 8 byte alignment,
359 * we specialcase allocation of the first of 16 128-byte-long chunks.
cf5c4ad8 360 *
361 * Note that with the above assumption we automatically have enough
362 * place for MAGIC at the start of 2K block. Note also that we
e8bc2b5c 363 * overlay union overhead over the chunk, thus the start of small chunks
364 * is immediately overwritten after freeing. */
365# define MAX_PACKED_POW2 6
366# define MAX_PACKED (MAX_PACKED_POW2 * BUCKETS_PER_POW2 + BUCKET_POW2_SHIFT)
367# define MAX_POW2_ALGO ((1<<(MAX_PACKED_POW2 + 1)) - M_OVERHEAD)
368# define TWOK_MASK ((1<<LOG_OF_MIN_ARENA) - 1)
369# define TWOK_MASKED(x) ((u_bigint)(x) & ~TWOK_MASK)
370# define TWOK_SHIFT(x) ((u_bigint)(x) & TWOK_MASK)
cf5c4ad8 371# define OV_INDEXp(block) ((u_char*)(TWOK_MASKED(block)))
372# define OV_INDEX(block) (*OV_INDEXp(block))
373# define OV_MAGIC(block,bucket) (*(OV_INDEXp(block) + \
e8bc2b5c 374 (TWOK_SHIFT(block)>> \
375 (bucket>>BUCKET_POW2_SHIFT)) + \
376 (bucket >= MIN_NEEDS_SHIFT ? 1 : 0)))
	/* A bucket can have a shift smaller than its size; we need to
	   shift its magic number so it will not overwrite the index: */
379# ifdef BUCKETS_ROOT2
380# define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2 - 1) /* Shift 80 greater than chunk 64. */
381# else
382# define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2) /* Shift 128 greater than chunk 32. */
383# endif
cf5c4ad8 384# define CHUNK_SHIFT 0
385
e8bc2b5c 386/* Number of active buckets of given ordinal. */
387#ifdef IGNORE_SMALL_BAD_FREE
388#define FIRST_BUCKET_WITH_CHECK (6 * BUCKETS_PER_POW2) /* 64 */
389# define N_BLKS(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK \
390 ? ((1<<LOG_OF_MIN_ARENA) - 1)/BUCKET_SIZE(bucket) \
391 : n_blks[bucket] )
392#else
393# define N_BLKS(bucket) n_blks[bucket]
394#endif
395
396static u_short n_blks[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] =
397 {
398# if BUCKETS_PER_POW2==1
399 0, 0,
400 (MIN_BUC_POW2==2 ? 384 : 0),
401 224, 120, 62, 31, 16, 8, 4, 2
402# else
403 0, 0, 0, 0,
404 (MIN_BUC_POW2==2 ? 384 : 0), (MIN_BUC_POW2==2 ? 384 : 0), /* 4, 4 */
405 224, 149, 120, 80, 62, 41, 31, 25, 16, 16, 8, 8, 4, 4, 2, 2
406# endif
407 };
408
409/* Shift of the first bucket with the given ordinal inside 2K chunk. */
410#ifdef IGNORE_SMALL_BAD_FREE
411# define BLK_SHIFT(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK \
412 ? ((1<<LOG_OF_MIN_ARENA) \
413 - BUCKET_SIZE(bucket) * N_BLKS(bucket)) \
414 : blk_shift[bucket])
415#else
416# define BLK_SHIFT(bucket) blk_shift[bucket]
417#endif
418
419static u_short blk_shift[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] =
420 {
421# if BUCKETS_PER_POW2==1
422 0, 0,
423 (MIN_BUC_POW2==2 ? 512 : 0),
424 256, 128, 64, 64, /* 8 to 64 */
425 16*sizeof(union overhead),
426 8*sizeof(union overhead),
427 4*sizeof(union overhead),
428 2*sizeof(union overhead),
429# else
430 0, 0, 0, 0,
431 (MIN_BUC_POW2==2 ? 512 : 0), (MIN_BUC_POW2==2 ? 512 : 0),
432 256, 260, 128, 128, 64, 80, 64, 48, /* 8 to 96 */
433 16*sizeof(union overhead), 16*sizeof(union overhead),
434 8*sizeof(union overhead), 8*sizeof(union overhead),
435 4*sizeof(union overhead), 4*sizeof(union overhead),
436 2*sizeof(union overhead), 2*sizeof(union overhead),
437# endif
438 };
cf5c4ad8 439
cf5c4ad8 440#else /* !PACK_MALLOC */
441
442# define OV_MAGIC(block,bucket) (block)->ov_magic
443# define OV_INDEX(block) (block)->ov_index
444# define CHUNK_SHIFT 1
e8bc2b5c 445# define MAX_PACKED -1
cf5c4ad8 446#endif /* !PACK_MALLOC */
447
e8bc2b5c 448#define M_OVERHEAD (sizeof(union overhead) + RSLOP)
449
450#ifdef PACK_MALLOC
451# define MEM_OVERHEAD(bucket) \
452 (bucket <= MAX_PACKED ? 0 : M_OVERHEAD)
453# ifdef SMALL_BUCKET_VIA_TABLE
454# define START_SHIFTS_BUCKET ((MAX_PACKED_POW2 + 1) * BUCKETS_PER_POW2)
455# define START_SHIFT MAX_PACKED_POW2
456# ifdef BUCKETS_ROOT2 /* Chunks of size 3*2^n. */
457# define SIZE_TABLE_MAX 80
458# else
459# define SIZE_TABLE_MAX 64
460# endif
461static char bucket_of[] =
462 {
463# ifdef BUCKETS_ROOT2 /* Chunks of size 3*2^n. */
464 /* 0 to 15 in 4-byte increments. */
465 (sizeof(void*) > 4 ? 6 : 5), /* 4/8, 5-th bucket for better reports */
466 6, /* 8 */
274c7500 467 IF_ALIGN_8(8,7), 8, /* 16/12, 16 */
e8bc2b5c 468 9, 9, 10, 10, /* 24, 32 */
469 11, 11, 11, 11, /* 48 */
470 12, 12, 12, 12, /* 64 */
471 13, 13, 13, 13, /* 80 */
472 13, 13, 13, 13 /* 80 */
473# else /* !BUCKETS_ROOT2 */
474 /* 0 to 15 in 4-byte increments. */
475 (sizeof(void*) > 4 ? 3 : 2),
476 3,
477 4, 4,
478 5, 5, 5, 5,
479 6, 6, 6, 6,
480 6, 6, 6, 6
481# endif /* !BUCKETS_ROOT2 */
482 };
483# else /* !SMALL_BUCKET_VIA_TABLE */
484# define START_SHIFTS_BUCKET MIN_BUCKET
485# define START_SHIFT (MIN_BUC_POW2 - 1)
486# endif /* !SMALL_BUCKET_VIA_TABLE */
487#else /* !PACK_MALLOC */
488# define MEM_OVERHEAD(bucket) M_OVERHEAD
489# ifdef SMALL_BUCKET_VIA_TABLE
490# undef SMALL_BUCKET_VIA_TABLE
491# endif
492# define START_SHIFTS_BUCKET MIN_BUCKET
493# define START_SHIFT (MIN_BUC_POW2 - 1)
494#endif /* !PACK_MALLOC */
cf5c4ad8 495
8d063cd8 496/*
55497cff 497 * Big allocations are often of the size 2^n bytes. To make them a
498 * little bit better, make blocks of size 2^n+pagesize for big n.
499 */
500
501#ifdef TWO_POT_OPTIMIZE
502
5f05dabc 503# ifndef PERL_PAGESIZE
504# define PERL_PAGESIZE 4096
505# endif
e8bc2b5c 506# ifndef FIRST_BIG_POW2
507# define FIRST_BIG_POW2 15 /* 32K, 16K is used too often. */
5f05dabc 508# endif
e8bc2b5c 509# define FIRST_BIG_BLOCK (1<<FIRST_BIG_POW2)
55497cff 510/* If this value or more, check against bigger blocks. */
511# define FIRST_BIG_BOUND (FIRST_BIG_BLOCK - M_OVERHEAD)
512/* If less than this value, goes into 2^n-overhead-block. */
513# define LAST_SMALL_BOUND ((FIRST_BIG_BLOCK>>1) - M_OVERHEAD)
514
e8bc2b5c 515# define POW2_OPTIMIZE_ADJUST(nbytes) \
516 ((nbytes >= FIRST_BIG_BOUND) ? nbytes -= PERL_PAGESIZE : 0)
517# define POW2_OPTIMIZE_SURPLUS(bucket) \
518 ((bucket >= FIRST_BIG_POW2 * BUCKETS_PER_POW2) ? PERL_PAGESIZE : 0)
519
520#else /* !TWO_POT_OPTIMIZE */
521# define POW2_OPTIMIZE_ADJUST(nbytes)
522# define POW2_OPTIMIZE_SURPLUS(bucket) 0
523#endif /* !TWO_POT_OPTIMIZE */
524
525#if defined(HAS_64K_LIMIT) && defined(PERL_CORE)
526# define BARK_64K_LIMIT(what,nbytes,size) \
527 if (nbytes > 0xffff) { \
528 PerlIO_printf(PerlIO_stderr(), \
529 "%s too large: %lx\n", what, size); \
530 my_exit(1); \
531 }
532#else /* !HAS_64K_LIMIT || !PERL_CORE */
533# define BARK_64K_LIMIT(what,nbytes,size)
534#endif /* !HAS_64K_LIMIT || !PERL_CORE */
55497cff 535
e8bc2b5c 536#ifndef MIN_SBRK
537# define MIN_SBRK 2048
538#endif
539
540#ifndef FIRST_SBRK
d720c441 541# define FIRST_SBRK (48*1024)
e8bc2b5c 542#endif
543
544/* Minimal sbrk in percents of what is already alloced. */
545#ifndef MIN_SBRK_FRAC
546# define MIN_SBRK_FRAC 3
547#endif
548
549#ifndef SBRK_ALLOW_FAILURES
550# define SBRK_ALLOW_FAILURES 3
551#endif
55497cff 552
e8bc2b5c 553#ifndef SBRK_FAILURE_PRICE
554# define SBRK_FAILURE_PRICE 50
55497cff 555#endif
556
e8bc2b5c 557#if defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)
558
559# ifndef BIG_SIZE
560# define BIG_SIZE (1<<16) /* 64K */
561# endif
562
55497cff 563static char *emergency_buffer;
564static MEM_SIZE emergency_buffer_size;
565
/*
 * Called when sbrk() has failed: try to satisfy the request from the
 * emergency pool the user may have preallocated in $^M.
 *
 *  - Requests of BIG_SIZE or more are refused outright via croak(), so
 *    the program can still die gracefully without touching $^M.
 *  - On the first smaller failure the string buffer of $^M is detached
 *    and becomes the emergency pool; we then croak() to give the
 *    program a chance to recover by dying.
 *  - Subsequent failures are carved from the top of the remaining pool.
 *
 * Returns the start of a `size'-byte region, or (char *)-1 (the
 * sbrk()-style failure value) when nothing can be done.
 */
static Malloc_t
emergency_sbrk(size)
     MEM_SIZE size;
{
    if (size >= BIG_SIZE) {
	/* Give the possibility to recover: */
	MUTEX_UNLOCK(&malloc_mutex);
	croak("Out of memory during \"large\" request for %i bytes", size);
    }

    if (!emergency_buffer) {
	dTHR;
	/* First offense, give a possibility to recover by dieing. */
	/* No malloc involved here: */
	GV **gvp = (GV**)hv_fetch(defstash, "^M", 2, 0);
	SV *sv;
	char *pv;

	/* $^M may also be stored under the raw control character \015. */
	if (!gvp) gvp = (GV**)hv_fetch(defstash, "\015", 1, 0);
	if (!gvp || !(sv = GvSV(*gvp)) || !SvPOK(sv)
	    || (SvLEN(sv) < (1<<LOG_OF_MIN_ARENA) - M_OVERHEAD))
	    return (char *)-1;		/* Now die die die... */

	/* Got it, now detach SvPV: */
	pv = SvPV(sv, na);
	/* Check alignment: the pool (with overhead prepended) must sit
	   on a (1<<LOG_OF_MIN_ARENA)-byte boundary like a real arena. */
	if (((u_bigint)(pv - M_OVERHEAD)) & ((1<<LOG_OF_MIN_ARENA) - 1)) {
	    PerlIO_puts(PerlIO_stderr(),"Bad alignment of $^M!\n");
	    return (char *)-1;		/* die die die */
	}

	emergency_buffer = pv - M_OVERHEAD;
	emergency_buffer_size = SvLEN(sv) + M_OVERHEAD;
	SvPOK_off(sv);
	/* NOTE(review): presumably readonly so perl-level code cannot
	   resize $^M out from under us -- confirm. */
	SvREADONLY_on(sv);
	MUTEX_UNLOCK(&malloc_mutex);
	croak("Out of memory during request for %i bytes", size);
    }
    else if (emergency_buffer_size >= size) {
	/* Carve the request off the top of the remaining pool. */
	emergency_buffer_size -= size;
	return emergency_buffer + emergency_buffer_size;
    }

    return (char *)-1;		/* poor guy... */
}
611
e8bc2b5c 612#else /* !(defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)) */
55497cff 613# define emergency_sbrk(size) -1
e8bc2b5c 614#endif /* !(defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)) */
55497cff 615
616/*
e8bc2b5c 617 * nextf[i] is the pointer to the next free block of size 2^i. The
8d063cd8 618 * smallest allocatable block is 8 bytes. The overhead information
619 * precedes the data area returned to the user.
620 */
e8bc2b5c 621#define NBUCKETS (32*BUCKETS_PER_POW2 + 1)
8d063cd8 622static union overhead *nextf[NBUCKETS];
cf5c4ad8 623
624#ifdef USE_PERL_SBRK
625#define sbrk(a) Perl_sbrk(a)
52082926 626Malloc_t Perl_sbrk _((int size));
8ac85365 627#else
628#ifdef DONT_DECLARE_STD
629#ifdef I_UNISTD
630#include <unistd.h>
631#endif
cf5c4ad8 632#else
52082926 633extern Malloc_t sbrk(int);
8ac85365 634#endif
cf5c4ad8 635#endif
8d063cd8 636
c07a80fd 637#ifdef DEBUGGING_MSTATS
8d063cd8 638/*
639 * nmalloc[i] is the difference between the number of mallocs and frees
640 * for a given block size.
641 */
642static u_int nmalloc[NBUCKETS];
5f05dabc 643static u_int sbrk_slack;
644static u_int start_slack;
8d063cd8 645#endif
646
e8bc2b5c 647static u_int goodsbrk;
648
760ac839 649#ifdef DEBUGGING
d720c441 650#define ASSERT(p,diag) if (!(p)) botch(diag,STRINGIFY(p)); else
ee0007ab 651static void
d720c441 652botch(char *diag, char *s)
8d063cd8 653{
d720c441 654 PerlIO_printf(PerlIO_stderr(), "assertion botched (%s?): %s\n", diag, s);
3028581b 655 PerlProc_abort();
8d063cd8 656}
657#else
d720c441 658#define ASSERT(p, diag)
8d063cd8 659#endif
660
/*
 * malloc: return at least nbytes of usable memory.
 *
 * The request (plus per-block accounting overhead) is mapped to the
 * smallest bucket that can hold it; blocks come off that bucket's free
 * list, with morecore() refilling an empty bucket from the system.
 *
 * On exhaustion: returns NULL outside PERL_CORE; inside PERL_CORE it
 * exits via my_exit(1) unless nomemok is set.
 */
Malloc_t
malloc(register size_t nbytes)
{
  	register union overhead *p;
  	register int bucket;
  	register MEM_SIZE shiftr;

#if defined(DEBUGGING) || defined(RCHECK)
	MEM_SIZE size = nbytes;	/* keep the original request for RCHECK/DEBUG */
#endif

	BARK_64K_LIMIT("Allocation",nbytes,nbytes);
#ifdef DEBUGGING
	/* Catch huge requests that look like a negative number was cast
	   to size_t. */
	if ((long)nbytes < 0)
	    croak("%s", "panic: malloc");
#endif

	MUTEX_LOCK(&malloc_mutex);
	/*
	 * Convert amount of memory requested into
	 * closest block size stored in hash buckets
	 * which satisfies request.  Account for
	 * space used per block for accounting.
	 */
#ifdef PACK_MALLOC
# ifdef SMALL_BUCKET_VIA_TABLE
	/* Small requests: O(1) bucket lookup via bucket_of[]. */
	if (nbytes == 0)
	    bucket = MIN_BUCKET;
	else if (nbytes <= SIZE_TABLE_MAX) {
	    bucket = bucket_of[(nbytes - 1) >> BUCKET_TABLE_SHIFT];
	} else
# else
	if (nbytes == 0)
	    nbytes = 1;
	if (nbytes <= MAX_POW2_ALGO) goto do_shifts;
	else
# endif
#endif
	{
	    POW2_OPTIMIZE_ADJUST(nbytes);
	    nbytes += M_OVERHEAD;
	    nbytes = (nbytes + 3) &~ 3;
	  do_shifts:
	    shiftr = (nbytes - 1) >> START_SHIFT;
	    bucket = START_SHIFTS_BUCKET;
	    /* apart from this loop, this is O(1) */
	    while (shiftr >>= 1)
  		bucket += BUCKETS_PER_POW2;
	}
	/*
	 * If nothing in hash bucket right now,
	 * request more memory from the system.
	 */
  	if (nextf[bucket] == NULL)
  		morecore(bucket);
  	if ((p = nextf[bucket]) == NULL) {
		MUTEX_UNLOCK(&malloc_mutex);
#ifdef PERL_CORE
		if (!nomemok) {
		    PerlIO_puts(PerlIO_stderr(),"Out of memory!\n");
		    my_exit(1);
		}
#else
  		return (NULL);
#endif
	}

	DEBUG_m(PerlIO_printf(Perl_debug_log,
			      "0x%lx: (%05lu) malloc %ld bytes\n",
			      (unsigned long)(p+1), (unsigned long)(an++),
			      (long)size));

	/* remove from linked list */
#ifdef RCHECK
	/* A free-list pointer must be overhead-aligned; anything else
	   means the list was stomped on. */
	if (*((int*)p) & (sizeof(union overhead) - 1))
	    PerlIO_printf(PerlIO_stderr(), "Corrupt malloc ptr 0x%lx at 0x%lx\n",
		(unsigned long)*((int*)p),(unsigned long)p);
#endif
  	nextf[bucket] = p->ov_next;
#ifdef IGNORE_SMALL_BAD_FREE
	/* Small buckets carry no magic when bad-free checks are skipped. */
	if (bucket >= FIRST_BUCKET_WITH_CHECK)
#endif
	    OV_MAGIC(p, bucket) = MAGIC;
#ifndef PACK_MALLOC
	OV_INDEX(p) = bucket;
#endif
#ifdef RCHECK
	/*
	 * Record allocated size of block and
	 * bound space with magic numbers.
	 */
	p->ov_rmagic = RMAGIC;
	if (bucket <= MAX_SHORT_BUCKET) {
	    int i;

	    nbytes = size + M_OVERHEAD;
	    p->ov_size = nbytes - 1;
	    /* Fill the padding up to the next 4-byte boundary with
	       RMAGIC_C bytes, then plant the full RMAGIC word. */
	    if ((i = nbytes & 3)) {
		i = 4 - i;
		while (i--)
		    *((char *)((caddr_t)p + nbytes - RSLOP + i)) = RMAGIC_C;
	    }
	    nbytes = (nbytes + 3) &~ 3;
	    *((u_int *)((caddr_t)p + nbytes - RSLOP)) = RMAGIC;
	}
#endif
	MUTEX_UNLOCK(&malloc_mutex);
	/* CHUNK_SHIFT is 1 without PACK_MALLOC: step past the overhead
	   union so the caller gets the data area. */
	return ((Malloc_t)(p + CHUNK_SHIFT));
}
770
e8bc2b5c 771static char *last_sbrk_top;
772static char *last_op; /* This arena can be easily extended. */
773static int sbrked_remains;
774static int sbrk_good = SBRK_ALLOW_FAILURES * SBRK_FAILURE_PRICE;
775
776#ifdef DEBUGGING_MSTATS
777static int sbrks;
778#endif
779
780struct chunk_chain_s {
781 struct chunk_chain_s *next;
782 MEM_SIZE size;
783};
784static struct chunk_chain_s *chunk_chain;
785static int n_chunks;
786static char max_bucket;
787
788/* Cutoff a piece of one of the chunks in the chain. Prefer smaller chunk. */
789static void *
790get_from_chain(MEM_SIZE size)
791{
792 struct chunk_chain_s *elt = chunk_chain, **oldp = &chunk_chain;
793 struct chunk_chain_s **oldgoodp = NULL;
794 long min_remain = LONG_MAX;
795
796 while (elt) {
797 if (elt->size >= size) {
798 long remains = elt->size - size;
799 if (remains >= 0 && remains < min_remain) {
800 oldgoodp = oldp;
801 min_remain = remains;
802 }
803 if (remains == 0) {
804 break;
805 }
806 }
807 oldp = &( elt->next );
808 elt = elt->next;
809 }
810 if (!oldgoodp) return NULL;
811 if (min_remain) {
812 void *ret = *oldgoodp;
813 struct chunk_chain_s *next = (*oldgoodp)->next;
814
815 *oldgoodp = (struct chunk_chain_s *)((char*)ret + size);
816 (*oldgoodp)->size = min_remain;
817 (*oldgoodp)->next = next;
818 return ret;
819 } else {
820 void *ret = *oldgoodp;
821 *oldgoodp = (*oldgoodp)->next;
822 n_chunks--;
823 return ret;
824 }
825}
826
827static void
828add_to_chain(void *p, MEM_SIZE size, MEM_SIZE chip)
829{
830 struct chunk_chain_s *next = chunk_chain;
831 char *cp = (char*)p;
832
833 cp += chip;
834 chunk_chain = (struct chunk_chain_s *)cp;
835 chunk_chain->size = size - chip;
836 chunk_chain->next = next;
837 n_chunks++;
838}
839
/*
 * Steal a free block from some bucket >= `bucket' (up to max_bucket)
 * and park it in the chunk chain so that a `size'-byte request can be
 * carved from it.  Returns the stolen raw chunk, or NULL.
 *
 * To avoid cannibalizing bigger buckets too eagerly, a bucket is only
 * robbed once bucketprice[] shows repeated demand (price is fixed at 1
 * here, so in effect the second time a non-empty bucket is asked).
 */
static void *
get_from_bigger_buckets(int bucket, MEM_SIZE size)
{
    int price = 1;
    static int bucketprice[NBUCKETS];
    while (bucket <= max_bucket) {
	/* We postpone stealing from bigger buckets until we want it
	   often enough. */
	if (nextf[bucket] && bucketprice[bucket]++ >= price) {
	    /* Steal it! */
	    void *ret = (void*)(nextf[bucket] - 1 + CHUNK_SHIFT);
	    bucketprice[bucket] = 0;
	    /* The stolen block may have been the easily-extendable tail
	       of the last sbrk() arena; stop trusting that shortcut. */
	    if (((char*)nextf[bucket]) - M_OVERHEAD == last_op) {
		last_op = NULL; /* Disable optimization */
	    }
	    nextf[bucket] = nextf[bucket]->ov_next;
#ifdef DEBUGGING_MSTATS
	    nmalloc[bucket]--;
	    start_slack -= M_OVERHEAD;
#endif
	    add_to_chain(ret, (BUCKET_SIZE(bucket) +
			       POW2_OPTIMIZE_SURPLUS(bucket)),
			 size);
	    return ret;
	}
	bucket++;
    }
    return NULL;
}
869
/*
 * getpages: obtain at least `needed' bytes from the system via sbrk()
 * for an arena of *nblksp blocks of the given bucket.
 *
 * While sbrk() keeps returning contiguous memory (sbrk_good > 0) the
 * request is over-allocated -- FIRST_SBRK for the first call, at least
 * MIN_SBRK and MIN_SBRK_FRAC percent of goodsbrk thereafter, rounded to
 * a multiple of MIN_SBRK -- and the surplus is remembered in
 * sbrked_remains for the next call.  Falls back to emergency_sbrk()
 * when sbrk() fails outright, and downgrades to straight unaligned
 * requests when sbrk() proves discontinuous.  Returns the arena start,
 * or 0 on failure.  *nblksp may be decremented if alignment fixups eat
 * into the arena.
 */
static union overhead *
getpages(int needed, int *nblksp, int bucket)
{
    /* Need to do (possibly expensive) system call. Try to
       optimize it for rare calling. */
    MEM_SIZE require = needed - sbrked_remains;
    char *cp;
    union overhead *ovp;
    int slack = 0;

    if (sbrk_good > 0) {
	if (!last_sbrk_top && require < FIRST_SBRK)
	    require = FIRST_SBRK;
	else if (require < MIN_SBRK) require = MIN_SBRK;

	/* NOTE(review): goodsbrk feeds this heuristic unconditionally,
	   yet it is only ever incremented under DEBUGGING_MSTATS (see
	   end of this function) -- confirm that is intended. */
	if (require < goodsbrk * MIN_SBRK_FRAC / 100)
	    require = goodsbrk * MIN_SBRK_FRAC / 100;
	require = ((require - 1 + MIN_SBRK) / MIN_SBRK) * MIN_SBRK;
    } else {
	/* sbrk() proved unreliable: ask for exactly what is needed. */
	require = needed;
	last_sbrk_top = 0;
	sbrked_remains = 0;
    }

    DEBUG_m(PerlIO_printf(Perl_debug_log,
			  "sbrk(%ld) for %ld-byte-long arena\n",
			  (long)require, (long) needed));
    cp = (char *)sbrk(require);
#ifdef DEBUGGING_MSTATS
    sbrks++;
#endif
    if (cp == last_sbrk_top) {
	/* Common case, anything is fine. */
	sbrk_good++;
	ovp = (union overhead *) (cp - sbrked_remains);
	sbrked_remains = require - (needed - sbrked_remains);
    } else if (cp == (char *)-1) { /* no more room! */
	ovp = (union overhead *)emergency_sbrk(needed);
	if (ovp == (union overhead *)-1)
	    return 0;
	return ovp;
    } else {			/* Non-continuous or first sbrk(). */
	long add = sbrked_remains;
	char *newcp;

	if (sbrked_remains) {	/* Put rest into chain, we
				   cannot use it right now. */
	    add_to_chain((void*)(last_sbrk_top - sbrked_remains),
			 sbrked_remains, 0);
	}

	/* Second, check alignment. */
	slack = 0;

#ifndef atarist /* on the atari we dont have to worry about this */
#  ifndef I286	/* The sbrk(0) call on the I286 always returns the next segment */

	/* CHUNK_SHIFT is 1 for PACK_MALLOC, 0 otherwise. */
	if ((UV)cp & (0x7FF >> CHUNK_SHIFT)) { /* Not aligned. */
	    slack = (0x800 >> CHUNK_SHIFT)
		- ((UV)cp & (0x7FF >> CHUNK_SHIFT));
	    add += slack;
	}
#  endif
#endif /* atarist */

	if (add) {
	    /* Ask for the alignment slack (plus any tail we assumed we
	       had) in the hope the extra sbrk() is contiguous. */
	    DEBUG_m(PerlIO_printf(Perl_debug_log,
"sbrk(%ld) to fix non-continuous/off-page sbrk:\n\t%ld for alignement,\t%ld were assumed to come from the tail of the previous sbrk\n",
			  (long)add, (long) slack,
			  (long) sbrked_remains));
	    newcp = (char *)sbrk(add);
#if defined(DEBUGGING_MSTATS)
	    sbrks++;
	    sbrk_slack += add;
#endif
	    if (newcp != cp + require) {
		/* Too bad: even rounding sbrk() is not continuous.*/
		DEBUG_m(PerlIO_printf(Perl_debug_log,
			      "failed to fix bad sbrk()\n"));
#ifdef PACK_MALLOC
		if (slack) {
		    MUTEX_UNLOCK(&malloc_mutex);
		    croak("%s", "panic: Off-page sbrk");
		}
#endif
		if (sbrked_remains) {
		    /* Try again. */
#if defined(DEBUGGING_MSTATS)
		    sbrk_slack += require;
#endif
		    require = needed;
		    DEBUG_m(PerlIO_printf(Perl_debug_log,
					  "straight sbrk(%ld)\n",
					  (long)require));
		    cp = (char *)sbrk(require);
#ifdef DEBUGGING_MSTATS
		    sbrks++;
#endif
		    if (cp == (char *)-1)
			return 0;
		}
		sbrk_good = -1;	/* Disable optimization!
				   Continue with not-aligned... */
	    } else {
		cp += slack;
		require += sbrked_remains;
	    }
	}

	if (last_sbrk_top) {
	    sbrk_good -= SBRK_FAILURE_PRICE;
	}

	ovp = (union overhead *) cp;
	/*
	 * Round up to minimum allocation size boundary
	 * and deduct from block count to reflect.
	 */

#ifndef I286	/* Again, this should always be ok on an 80286 */
	if ((UV)ovp & 7) {
	    ovp = (union overhead *)(((UV)ovp + 8) & ~7);
	    DEBUG_m(PerlIO_printf(Perl_debug_log,
				  "fixing sbrk(): %d bytes off machine alignement\n",
				  (int)((UV)ovp & 7)));
	    (*nblksp)--;
# if defined(DEBUGGING_MSTATS)
	    /* This is only approx. if TWO_POT_OPTIMIZE: */
	    sbrk_slack += (1 << bucket);
# endif
	}
#endif
	sbrked_remains = require - needed;
    }
    last_sbrk_top = cp + require;
    last_op = (char*) cp;
#ifdef DEBUGGING_MSTATS
    goodsbrk += require;
#endif
    return ovp;
}
1012
1013static int
1014getpages_adjacent(int require)
1015{
1016 if (require <= sbrked_remains) {
1017 sbrked_remains -= require;
1018 } else {
1019 char *cp;
1020
1021 require -= sbrked_remains;
1022 /* We do not try to optimize sbrks here, we go for place. */
1023 cp = (char*) sbrk(require);
1024#ifdef DEBUGGING_MSTATS
1025 sbrks++;
1026 goodsbrk += require;
1027#endif
1028 if (cp == last_sbrk_top) {
1029 sbrked_remains = 0;
1030 last_sbrk_top = cp + require;
1031 } else {
1032 /* Report the failure: */
1033 if (sbrked_remains)
1034 add_to_chain((void*)(last_sbrk_top - sbrked_remains),
1035 sbrked_remains, 0);
1036 add_to_chain((void*)cp, require, 0);
1037 sbrk_good -= SBRK_FAILURE_PRICE;
1038 sbrked_remains = 0;
1039 last_sbrk_top = 0;
1040 last_op = 0;
1041 return 0;
1042 }
1043 }
1044
1045 return 1;
1046}
1047
/*
 * Allocate more memory to the indicated bucket.
 *
 * Refills nextf[bucket] by carving an arena into blocks of the bucket's
 * size and chaining them into the bucket's free list.  The arena is
 * obtained, in order of preference, from: a free block of the
 * arena-sized bucket itself, the chunk chain, a bigger bucket, the
 * unused tail of the last sbrk(), or a fresh getpages() call.
 */
static void
morecore(register int bucket)
{
	register union overhead *ovp;
	register int rnu;       /* 2^rnu bytes will be requested */
	int nblks;              /* become nblks blocks of the desired size */
	register MEM_SIZE siz, needed;

	if (nextf[bucket])
		return;
	if (bucket == sizeof(MEM_SIZE)*8*BUCKETS_PER_POW2) {
	    MUTEX_UNLOCK(&malloc_mutex);
	    croak("%s", "Out of memory during ridiculously large request");
	}
	if (bucket > max_bucket)
	    max_bucket = bucket;

	/* Never request an arena smaller than 2^LOG_OF_MIN_ARENA bytes. */
	rnu = ( (bucket <= (LOG_OF_MIN_ARENA << BUCKET_POW2_SHIFT))
	       ? LOG_OF_MIN_ARENA
	       : (bucket >> BUCKET_POW2_SHIFT) );
	/* This may be overwritten later: */
	nblks = 1 << (rnu - (bucket >> BUCKET_POW2_SHIFT)); /* how many blocks to get */
	needed = ((MEM_SIZE)1 << rnu) + POW2_OPTIMIZE_SURPLUS(bucket);
	if (nextf[rnu << BUCKET_POW2_SHIFT]) { /* 2048b bucket. */
	    /* Steal one whole free block of the arena-sized bucket.
	     * (ovp points at the raw start; CHUNK_SHIFT compensates for
	     * the header layout under PACK_MALLOC.) */
	    ovp = nextf[rnu << BUCKET_POW2_SHIFT] - 1 + CHUNK_SHIFT;
	    nextf[rnu << BUCKET_POW2_SHIFT]
		= nextf[rnu << BUCKET_POW2_SHIFT]->ov_next;
#ifdef DEBUGGING_MSTATS
	    nmalloc[rnu << BUCKET_POW2_SHIFT]--;
	    start_slack -= M_OVERHEAD;
#endif
	    DEBUG_m(PerlIO_printf(Perl_debug_log,
				  "stealing %ld bytes from %ld arena\n",
				  (long) needed, (long) rnu << BUCKET_POW2_SHIFT));
	} else if (chunk_chain
		   && (ovp = (union overhead*) get_from_chain(needed))) {
	    DEBUG_m(PerlIO_printf(Perl_debug_log,
				  "stealing %ld bytes from chain\n",
				  (long) needed));
	} else if ( (ovp = (union overhead*)
		     get_from_bigger_buckets((rnu << BUCKET_POW2_SHIFT) + 1,
					     needed)) ) {
	    DEBUG_m(PerlIO_printf(Perl_debug_log,
				  "stealing %ld bytes from bigger buckets\n",
				  (long) needed));
	} else if (needed <= sbrked_remains) {
	    /* The tail of the previous sbrk() is big enough. */
	    ovp = (union overhead *)(last_sbrk_top - sbrked_remains);
	    sbrked_remains -= needed;
	    last_op = (char*)ovp;
	} else
	    ovp = getpages(needed, &nblks, bucket);

	if (!ovp)
	    return;			/* getpages() failed: leave list empty */

	/*
	 * Add new memory allocated to that on
	 * free list for this hash bucket.
	 */
	siz = BUCKET_SIZE(bucket);
#ifdef PACK_MALLOC
	*(u_char*)ovp = bucket;	/* Fill index. */
	if (bucket <= MAX_PACKED) {
	    /* Packed buckets: indices live at the arena start, blocks
	     * begin after the per-bucket shift. */
	    ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket));
	    nblks = N_BLKS(bucket);
#  ifdef DEBUGGING_MSTATS
	    start_slack += BLK_SHIFT(bucket);
#  endif
	} else if (bucket < LOG_OF_MIN_ARENA * BUCKETS_PER_POW2) {
	    ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket));
	    siz -= sizeof(union overhead);
	} else ovp++;		/* One chunk per block. */
#endif /* PACK_MALLOC */
	nextf[bucket] = ovp;
#ifdef DEBUGGING_MSTATS
	nmalloc[bucket] += nblks;
	if (bucket > MAX_PACKED) {
	    start_slack += M_OVERHEAD * nblks;
	}
#endif
	/* Thread the blocks of the arena into a singly linked free list. */
	while (--nblks > 0) {
		ovp->ov_next = (union overhead *)((caddr_t)ovp + siz);
		ovp = (union overhead *)((caddr_t)ovp + siz);
	}
	/* Not all sbrks return zeroed memory.*/
	ovp->ov_next = (union overhead *)NULL;
#ifdef PACK_MALLOC
	if (bucket == 7*BUCKETS_PER_POW2) { /* Special case, explanation is above. */
	    union overhead *n_op = nextf[7*BUCKETS_PER_POW2]->ov_next;
	    nextf[7*BUCKETS_PER_POW2] =
		(union overhead *)((caddr_t)nextf[7*BUCKETS_PER_POW2]
				   - sizeof(union overhead));
	    nextf[7*BUCKETS_PER_POW2]->ov_next = n_op;
	}
#endif /* !PACK_MALLOC */
}
1147
/*
 * free - return a chunk obtained from malloc() to its size bucket.
 *
 * NULL is ignored.  A pointer whose header lacks the MAGIC signature is
 * warned about (tunable via the PERL_BADFREE environment variable) and
 * ignored rather than being allowed to corrupt the free lists.
 */
Free_t
free(void *mp)
{
	register MEM_SIZE size;
	register union overhead *ovp;
	char *cp = (char*)mp;
#ifdef PACK_MALLOC
	u_char bucket;
#endif

	DEBUG_m(PerlIO_printf(Perl_debug_log,
			      "0x%lx: (%05lu) free\n",
			      (unsigned long)cp, (unsigned long)(an++)));

	if (cp == NULL)
		return;
	/* Step back over the bookkeeping header to the overhead union
	 * (CHUNK_SHIFT is 1 under PACK_MALLOC, 0 otherwise). */
	ovp = (union overhead *)((caddr_t)cp
				- sizeof (union overhead) * CHUNK_SHIFT);
#ifdef PACK_MALLOC
	bucket = OV_INDEX(ovp);
#endif
#ifdef IGNORE_SMALL_BAD_FREE
	/* Small packed buckets carry no magic; only check bigger ones. */
	if ((bucket >= FIRST_BUCKET_WITH_CHECK)
	    && (OV_MAGIC(ovp, bucket) != MAGIC))
#else
	if (OV_MAGIC(ovp, bucket) != MAGIC)
#endif
	    {
		/* Bad or duplicate free: warn unless suppressed via
		 * PERL_BADFREE, then ignore the pointer entirely. */
		static int bad_free_warn = -1;
		if (bad_free_warn == -1) {
		    char *pbf = PerlEnv_getenv("PERL_BADFREE");
		    bad_free_warn = (pbf) ? atoi(pbf) : 1;
		}
		if (!bad_free_warn)
		    return;
#ifdef RCHECK
		warn("%s free() ignored",
		    ovp->ov_rmagic == RMAGIC - 1 ? "Duplicate" : "Bad");
#else
		warn("%s", "Bad free() ignored");
#endif
		return;				/* sanity */
	    }
	MUTEX_LOCK(&malloc_mutex);
#ifdef RCHECK
	/* Verify the red-zone filler at both ends of the chunk, then
	 * mark it freed (RMAGIC-1) so a second free is detectable. */
	ASSERT(ovp->ov_rmagic == RMAGIC, "chunk's head overwrite");
	if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) {
	    int i;
	    MEM_SIZE nbytes = ovp->ov_size + 1;

	    if ((i = nbytes & 3)) {
		/* Per-byte filler between the real end and the next
		 * 4-byte boundary. */
		i = 4 - i;
		while (i--) {
		    ASSERT(*((char *)((caddr_t)ovp + nbytes - RSLOP + i))
			   == RMAGIC_C, "chunk's tail overwrite");
		}
	    }
	    nbytes = (nbytes + 3) &~ 3;
	    ASSERT(*(u_int *)((caddr_t)ovp + nbytes - RSLOP) == RMAGIC, "chunk's tail overwrite");
	}
	ovp->ov_rmagic = RMAGIC - 1;
#endif
	ASSERT(OV_INDEX(ovp) < NBUCKETS, "chunk's head overwrite");
	size = OV_INDEX(ovp);		/* bucket number, not a byte count */
	ovp->ov_next = nextf[size];	/* push onto the bucket's free list */
	nextf[size] = ovp;
	MUTEX_UNLOCK(&malloc_mutex);
}
1216
/*
 * When a program attempts "storage compaction" as mentioned in the
 * old malloc man page, it realloc's an already freed block.  Usually
 * this is the last block it freed; occasionally it might be farther
 * back.  We have to search all the free lists for the block in order
 * to determine its bucket: 1st we make one pass thru the lists
 * checking only the first block in each; if that fails we search
 * ``reall_srchlen'' blocks in each list for a match (the variable
 * is extern so the caller can modify it).  If that fails we just copy
 * however many bytes was given to realloc() and hope it's not huge.
 */
/* Depth limit for realloc()'s freed-block search (see findbucket()). */
int reall_srchlen = 4;	/* 4 should be plenty, -1 =>'s whole list */
8d063cd8 1229
/*
 * realloc - resize a malloc()ed chunk, preserving its contents.
 *
 * Three strategies, in order of preference:
 *   1. incr == 0: the new size fits the current bucket - adjust the
 *      RCHECK bookkeeping in place and return the same pointer.
 *   2. incr == 1 and the chunk is the most recent allocation at the
 *      sbrk() frontier: try to grow it in place via getpages_adjacent().
 *   3. Otherwise ("hard way"): malloc a new chunk, copy, free the old.
 * Also tolerates realloc of an already-freed block ("compaction") by
 * searching the free lists for it; see the comment above reall_srchlen.
 */
Malloc_t
realloc(void *mp, size_t nbytes)
{
	register MEM_SIZE onb;
	union overhead *ovp;
	char *res;
	int prev_bucket;
	register int bucket;
	int was_alloced = 0, incr;
	char *cp = (char*)mp;

#if defined(DEBUGGING) || !defined(PERL_CORE)
	MEM_SIZE size = nbytes;

	if ((long)nbytes < 0)
	    croak("%s", "panic: realloc");
#endif

	BARK_64K_LIMIT("Reallocation",nbytes,size);
	if (!cp)
	    return malloc(nbytes);	/* realloc(NULL, n) == malloc(n) */

	MUTEX_LOCK(&malloc_mutex);
	/* Step back to the overhead header preceding the user pointer. */
	ovp = (union overhead *)((caddr_t)cp
				- sizeof (union overhead) * CHUNK_SHIFT);
	bucket = OV_INDEX(ovp);
#ifdef IGNORE_SMALL_BAD_FREE
	if ((bucket < FIRST_BUCKET_WITH_CHECK)
	    || (OV_MAGIC(ovp, bucket) == MAGIC))
#else
	if (OV_MAGIC(ovp, bucket) == MAGIC)
#endif
	{
		was_alloced = 1;
	} else {
		/*
		 * Already free, doing "compaction".
		 *
		 * Search for the old block of memory on the
		 * free list.  First, check the most common
		 * case (last element free'd), then (this failing)
		 * the last ``reall_srchlen'' items free'd.
		 * If all lookups fail, then assume the size of
		 * the memory block being realloc'd is the
		 * smallest possible.
		 */
		if ((bucket = findbucket(ovp, 1)) < 0 &&
		    (bucket = findbucket(ovp, reall_srchlen)) < 0)
		    bucket = 0;
	}
	onb = BUCKET_SIZE_REAL(bucket);
	/*
	 *  avoid the copy if same size block.
	 *  We are not agressive with boundary cases. Note that it might
	 *  (for a small number of cases) give false negative if
	 *  both new size and old one are in the bucket for
	 *  FIRST_BIG_POW2, but the new one is near the lower end.
	 *
	 *  We do not try to go to 1.5 times smaller bucket so far.
	 */
	if (nbytes > onb) incr = 1;
	else {
	    /* Shrinking: incr becomes 0 (keep the block) or -1 (move
	     * to a smaller bucket the hard way). */
#ifdef DO_NOT_TRY_HARDER_WHEN_SHRINKING
	    if ( /* This is a little bit pessimal if PACK_MALLOC: */
		nbytes > ( (onb >> 1) - M_OVERHEAD )
#  ifdef TWO_POT_OPTIMIZE
		|| (bucket == FIRST_BIG_POW2 && nbytes >= LAST_SMALL_BOUND )
#  endif
		)
#else  /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */
		prev_bucket = ( (bucket > MAX_PACKED + 1)
				? bucket - BUCKETS_PER_POW2
				: bucket - 1);
	     if (nbytes > BUCKET_SIZE_REAL(prev_bucket))
#endif /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */
		 incr = 0;
	     else incr = -1;
	}
	if (!was_alloced
#ifdef STRESS_REALLOC
	    || 1 /* always do it the hard way */
#endif
	    ) goto hard_way;
	else if (incr == 0) {
	  inplace_label:
#ifdef RCHECK
		/*
		 * Record new allocated size of block and
		 * bound space with magic numbers.
		 */
		if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) {
		       int i, nb = ovp->ov_size + 1;

			/* First verify the existing tail red zone... */
			if ((i = nb & 3)) {
			    i = 4 - i;
			    while (i--) {
				ASSERT(*((char *)((caddr_t)ovp + nb - RSLOP + i)) == RMAGIC_C, "chunk's tail overwrite");
			    }
			}
			nb = (nb + 3) &~ 3;
			ASSERT(*(u_int *)((caddr_t)ovp + nb - RSLOP) == RMAGIC, "chunk's tail overwrite");
			/*
			 * Convert amount of memory requested into
			 * closest block size stored in hash buckets
			 * which satisfies request.  Account for
			 * space used per block for accounting.
			 */
			/* ...then lay down a fresh red zone at the new size. */
			nbytes += M_OVERHEAD;
			ovp->ov_size = nbytes - 1;
			if ((i = nbytes & 3)) {
			    i = 4 - i;
			    while (i--)
				*((char *)((caddr_t)ovp + nbytes - RSLOP + i))
				    = RMAGIC_C;
			}
			nbytes = (nbytes + 3) &~ 3;
			*((u_int *)((caddr_t)ovp + nbytes - RSLOP)) = RMAGIC;
		}
#endif
		res = cp;
		MUTEX_UNLOCK(&malloc_mutex);
	} else if (incr == 1 && (cp - M_OVERHEAD == last_op)
		   && (onb > (1 << LOG_OF_MIN_ARENA))) {
	    /* Growing the most recent allocation: try to extend it in
	     * place to the next power-of-two arena. */
	    MEM_SIZE require, newarena = nbytes, pow;
	    int shiftr;

	    POW2_OPTIMIZE_ADJUST(newarena);
	    newarena = newarena + M_OVERHEAD;
	    /* newarena = (newarena + 3) &~ 3; */
	    shiftr = (newarena - 1) >> LOG_OF_MIN_ARENA;
	    pow = LOG_OF_MIN_ARENA + 1;
	    /* apart from this loop, this is O(1) */
	    while (shiftr >>= 1)
		pow++;
	    newarena = (1 << pow) + POW2_OPTIMIZE_SURPLUS(pow * BUCKETS_PER_POW2);
	    require = newarena - onb - M_OVERHEAD;

	    if (getpages_adjacent(require)) {
#ifdef DEBUGGING_MSTATS
		nmalloc[bucket]--;
		nmalloc[pow * BUCKETS_PER_POW2]++;
#endif
		*(cp - M_OVERHEAD) = pow * BUCKETS_PER_POW2; /* Fill index. */
		goto inplace_label;
	    } else
		goto hard_way;
	} else {
	  hard_way:
	    MUTEX_UNLOCK(&malloc_mutex);
	    if ((res = (char*)malloc(nbytes)) == NULL)
		return (NULL);
	    if (cp != res)			/* common optimization */
		Copy(cp, res, (MEM_SIZE)(nbytes<onb?nbytes:onb), char);
	    if (was_alloced)
		free(cp);
	}

	DEBUG_m(PerlIO_printf(Perl_debug_log, "0x%lu: (%05lu) rfree\n",
			      (unsigned long)res,(unsigned long)(an++)));
	DEBUG_m(PerlIO_printf(Perl_debug_log,
			      "0x%lx: (%05lu) realloc %ld bytes\n",
			      (unsigned long)res,(unsigned long)(an++),
			      (long)size));
	return ((Malloc_t)res);
}
1395
1396/*
1397 * Search ``srchlen'' elements of each free list for a block whose
1398 * header starts at ``freep''. If srchlen is -1 search the whole list.
1399 * Return bucket number, or -1 if not found.
1400 */
ee0007ab 1401static int
8ac85365 1402findbucket(union overhead *freep, int srchlen)
8d063cd8 1403{
1404 register union overhead *p;
1405 register int i, j;
1406
1407 for (i = 0; i < NBUCKETS; i++) {
1408 j = 0;
1409 for (p = nextf[i]; p && j != srchlen; p = p->ov_next) {
1410 if (p == freep)
1411 return (i);
1412 j++;
1413 }
1414 }
1415 return (-1);
1416}
1417
cf5c4ad8 1418Malloc_t
8ac85365 1419calloc(register size_t elements, register size_t size)
cf5c4ad8 1420{
1421 long sz = elements * size;
1422 Malloc_t p = malloc(sz);
1423
1424 if (p) {
1425 memset((void*)p, 0, sz);
1426 }
1427 return p;
1428}
1429
/*
 * malloced_size - report the full usable size of a malloc()ed chunk
 * (the real capacity of its bucket, which may exceed what was asked
 * for).
 */
MEM_SIZE
malloced_size(void *p)
{
	/* Step back to the overhead header preceding the user pointer. */
	union overhead *ovp = (union overhead *)
	    ((caddr_t)p - sizeof (union overhead) * CHUNK_SHIFT);
	int bucket = OV_INDEX(ovp);
#ifdef RCHECK
	/* The caller wants to have a complete control over the chunk,
	   disable the memory checking inside the chunk.  (The red-zone
	   bookkeeping is moved out to the bucket's true end so the whole
	   reported size may legitimately be written.) */
	if (bucket <= MAX_SHORT_BUCKET) {
	    MEM_SIZE size = BUCKET_SIZE_REAL(bucket);
	    ovp->ov_size = size + M_OVERHEAD - 1;
	    *((u_int *)((caddr_t)ovp + size + M_OVERHEAD - RSLOP)) = RMAGIC;
	}
#endif
	return BUCKET_SIZE_REAL(bucket);
}
1447
c07a80fd 1448#ifdef DEBUGGING_MSTATS
e8bc2b5c 1449
1450# ifdef BUCKETS_ROOT2
1451# define MIN_EVEN_REPORT 6
1452# else
1453# define MIN_EVEN_REPORT MIN_BUCKET
1454# endif
/*
 * mstats - print out statistics about malloc
 *
 * Prints two lines of numbers, one showing the length of the free list
 * for each size category, the second showing the number of mallocs -
 * frees for each size category.
 */
void
dump_mstats(char *s)
{
	register int i, j;
	register union overhead *p;
	int topbucket=0, topbucket_ev=0, topbucket_odd=0, totfree=0, total=0;
	u_int nfree[NBUCKETS];
	int total_chain = 0;
	struct chunk_chain_s* nextchain = chunk_chain;

	/* Walk every free list, counting entries and totalling bytes;
	 * remember the highest bucket ever used (even/odd tracked
	 * separately for the BUCKETS_ROOT2 report layout). */
	for (i = MIN_BUCKET ; i < NBUCKETS; i++) {
		for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
			;
		nfree[i] = j;
		totfree += nfree[i] * BUCKET_SIZE_REAL(i);
		total += nmalloc[i] * BUCKET_SIZE_REAL(i);
		if (nmalloc[i]) {
		    i % 2 ? (topbucket_odd = i) : (topbucket_ev = i);
		    topbucket = i;
		}
	}
	if (s)
	    PerlIO_printf(PerlIO_stderr(),
			  "Memory allocation statistics %s (buckets %ld(%ld)..%ld(%ld)\n",
			  s,
			  (long)BUCKET_SIZE_REAL(MIN_BUCKET),
			  (long)BUCKET_SIZE(MIN_BUCKET),
			  (long)BUCKET_SIZE_REAL(topbucket), (long)BUCKET_SIZE(topbucket));
	/* Row 1: free-list lengths per (even) bucket. */
	PerlIO_printf(PerlIO_stderr(), "%8d free:", totfree);
	for (i = MIN_EVEN_REPORT; i <= topbucket; i += BUCKETS_PER_POW2) {
		PerlIO_printf(PerlIO_stderr(),
			      ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
			       ? " %5d"
			       : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
			      nfree[i]);
	}
#ifdef BUCKETS_ROOT2
	/* Second line for the intermediate (odd, sqrt(2)-spaced) buckets. */
	PerlIO_printf(PerlIO_stderr(), "\n\t   ");
	for (i = MIN_BUCKET + 1; i <= topbucket_odd; i += BUCKETS_PER_POW2) {
		PerlIO_printf(PerlIO_stderr(),
			      ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
			       ? " %5d"
			       : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
			      nfree[i]);
	}
#endif
	/* Row 2: outstanding allocations (mallocs minus frees) per bucket. */
	PerlIO_printf(PerlIO_stderr(), "\n%8d used:", total - totfree);
	for (i = MIN_EVEN_REPORT; i <= topbucket; i += BUCKETS_PER_POW2) {
		PerlIO_printf(PerlIO_stderr(),
			      ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
			       ? " %5d"
			       : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
			      nmalloc[i] - nfree[i]);
	}
#ifdef BUCKETS_ROOT2
	PerlIO_printf(PerlIO_stderr(), "\n\t   ");
	for (i = MIN_BUCKET + 1; i <= topbucket_odd; i += BUCKETS_PER_POW2) {
		PerlIO_printf(PerlIO_stderr(),
			      ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
			       ? " %5d"
			       : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
			      nmalloc[i] - nfree[i]);
	}
#endif
	/* Total bytes parked on the chunk chain. */
	while (nextchain) {
	    total_chain += nextchain->size;
	    nextchain = nextchain->next;
	}
	PerlIO_printf(PerlIO_stderr(), "\nTotal sbrk(): %d/%d:%d. Odd ends: pad+heads+chain+tail: %d+%d+%d+%d.\n",
		      goodsbrk + sbrk_slack, sbrks, sbrk_good, sbrk_slack,
		      start_slack, total_chain, sbrked_remains);
}
1534#else
/* No-op stub used when malloc statistics are compiled out
 * (!DEBUGGING_MSTATS); keeps callers link-compatible. */
void
dump_mstats(char *s)
{
}
1539#endif
a687059c 1540#endif /* lint */
cf5c4ad8 1541
1542
1543#ifdef USE_PERL_SBRK
1544
760ac839 1545# ifdef NeXT
1546# define PERL_SBRK_VIA_MALLOC
1547# endif
1548
1549# ifdef PERL_SBRK_VIA_MALLOC
72e5b9db 1550# if defined(HIDEMYMALLOC) || defined(EMBEDMYMALLOC)
760ac839 1551# undef malloc
1552# else
72e5b9db 1553# include "Error: -DPERL_SBRK_VIA_MALLOC needs -D(HIDE|EMBED)MYMALLOC"
760ac839 1554# endif
cf5c4ad8 1555
1556/* it may seem schizophrenic to use perl's malloc and let it call system */
1557/* malloc, the reason for that is only the 3.2 version of the OS that had */
1558/* frequent core dumps within nxzonefreenolock. This sbrk routine put an */
1559/* end to the cores */
1560
760ac839 1561# define SYSTEM_ALLOC(a) malloc(a)
cf5c4ad8 1562
760ac839 1563# endif /* PERL_SBRK_VIA_MALLOC */
cf5c4ad8 1564
/* Leftover tail of the last chunk obtained from SYSTEM_ALLOC(): its
 * next unused address and how many bytes remain there. */
static IV Perl_sbrk_oldchunk;
static long Perl_sbrk_oldsize;

/* Requests below 32K are rounded up to a 64K chunk by Perl_sbrk(); the
 * surplus is saved in Perl_sbrk_oldchunk/oldsize for later calls. */
#  define PERLSBRK_32_K (1<<15)
#  define PERLSBRK_64_K (1<<16)
cf5c4ad8 1570
b63effbb 1571Malloc_t
cf5c4ad8 1572Perl_sbrk(size)
1573int size;
1574{
1575 IV got;
1576 int small, reqsize;
1577
1578 if (!size) return 0;
55497cff 1579#ifdef PERL_CORE
cf5c4ad8 1580 reqsize = size; /* just for the DEBUG_m statement */
1581#endif
57569e04 1582#ifdef PACK_MALLOC
1583 size = (size + 0x7ff) & ~0x7ff;
1584#endif
cf5c4ad8 1585 if (size <= Perl_sbrk_oldsize) {
1586 got = Perl_sbrk_oldchunk;
1587 Perl_sbrk_oldchunk += size;
1588 Perl_sbrk_oldsize -= size;
1589 } else {
1590 if (size >= PERLSBRK_32_K) {
1591 small = 0;
1592 } else {
cf5c4ad8 1593 size = PERLSBRK_64_K;
1594 small = 1;
1595 }
1596 got = (IV)SYSTEM_ALLOC(size);
57569e04 1597#ifdef PACK_MALLOC
1598 got = (got + 0x7ff) & ~0x7ff;
1599#endif
cf5c4ad8 1600 if (small) {
1601 /* Chunk is small, register the rest for future allocs. */
1602 Perl_sbrk_oldchunk = got + reqsize;
1603 Perl_sbrk_oldsize = size - reqsize;
1604 }
1605 }
1606
fb73857a 1607 DEBUG_m(PerlIO_printf(Perl_debug_log, "sbrk malloc size %ld (reqsize %ld), left size %ld, give addr 0x%lx\n",
cf5c4ad8 1608 size, reqsize, Perl_sbrk_oldsize, got));
cf5c4ad8 1609
1610 return (void *)got;
1611}
1612
1613#endif /* ! defined USE_PERL_SBRK */