a0d0e21e 1/* malloc.c
8d063cd8 2 *
8d063cd8 3 */
4
87c6202a 5/*
741df71a 6 Here are some notes on configuring Perl's malloc. (For non-perl
7 usage see below.)
87c6202a 8
9 There are two macros which serve as bulk disablers of advanced
10 features of this malloc: NO_FANCY_MALLOC, PLAIN_MALLOC (undef by
11 default). Look in the list of default values below to understand
12 their exact effect. Defining NO_FANCY_MALLOC returns malloc.c to the
13 state of the malloc in Perl 5.004. Additionally defining PLAIN_MALLOC
14 returns it to the state as of Perl 5.000.
15
16 Note that some of the settings below may be ignored in the code based
17 on values of other macros. The PERL_CORE symbol is only defined when
18 perl itself is being compiled (so malloc can make some assumptions
19 about perl's facilities being available to it).
20
21 Each config option has a short description, followed by its name,
22 default value, and a comment about the default (if applicable). Some
23 options take a precise value, while the others are just boolean.
24 The boolean ones are listed first.
25
26 # Enable code for an emergency memory pool in $^M. See perlvar.pod
27 # for a description of $^M.
28 PERL_EMERGENCY_SBRK (!PLAIN_MALLOC && PERL_CORE)
29
30 # Enable code for printing memory statistics.
31 DEBUGGING_MSTATS (!PLAIN_MALLOC && PERL_CORE)
32
33 # Move allocation info for small buckets into separate areas.
 34 # Memory optimization (especially for small allocations, those
35 # less than 64 bytes). Since perl usually makes a large number
36 # of small allocations, this is usually a win.
37 PACK_MALLOC (!PLAIN_MALLOC && !RCHECK)
38
39 # Add one page to big powers of two when calculating bucket size.
40 # This is targeted at big allocations, as are common in image
41 # processing.
42 TWO_POT_OPTIMIZE !PLAIN_MALLOC
43
44 # Use intermediate bucket sizes between powers-of-two. This is
45 # generally a memory optimization, and a (small) speed pessimization.
46 BUCKETS_ROOT2 !NO_FANCY_MALLOC
47
48 # Do not check small deallocations for bad free(). Memory
49 # and speed optimization, error reporting pessimization.
50 IGNORE_SMALL_BAD_FREE (!NO_FANCY_MALLOC && !RCHECK)
51
52 # Use table lookup to decide in which bucket a given allocation will go.
53 SMALL_BUCKET_VIA_TABLE !NO_FANCY_MALLOC
54
38ac2dc8 55 # Use a perl-defined sbrk() instead of the (presumably broken or
56 # missing) system-supplied sbrk().
57 USE_PERL_SBRK undef
58
59 # Use system malloc() (or calloc() etc.) to emulate sbrk(). Normally
60 # only used with broken sbrk()s.
87c6202a 61 PERL_SBRK_VIA_MALLOC undef
62
38ac2dc8 63 # Which allocator to use if PERL_SBRK_VIA_MALLOC
64 SYSTEM_ALLOC(a) malloc(a)
65
9ee81ef6 66 # Minimal alignment (in bytes, should be a power of 2) of SYSTEM_ALLOC
5bbd1ef5 67 SYSTEM_ALLOC_ALIGNMENT MEM_ALIGNBYTES
68
87c6202a 69 # Disable memory overwrite checking with DEBUGGING. Memory and speed
70 # optimization, error reporting pessimization.
71 NO_RCHECK undef
72
73 # Enable memory overwrite checking with DEBUGGING. Memory and speed
74 # pessimization, error reporting optimization
75 RCHECK (DEBUGGING && !NO_RCHECK)
76
77 # Failed allocations bigger than this size croak (if
78 # PERL_EMERGENCY_SBRK is enabled) without touching $^M. See
79 # perlvar.pod for a description of $^M.
80 BIG_SIZE (1<<16) # 64K
81
82 # Starting from this power of two, add an extra page to the
83 # size of the bucket. This enables optimized allocations of sizes
84 # close to powers of 2. Note that the value is indexed at 0.
85 FIRST_BIG_POW2 15 # 32K, 16K is used too often
86
87 # Estimate of minimal memory footprint. malloc uses this value to
 88 # request reasonably large initial blocks of memory from the system.
89 FIRST_SBRK (48*1024)
90
91 # Round up sbrk()s to multiples of this.
92 MIN_SBRK 2048
93
94 # Round up sbrk()s to multiples of this percent of footprint.
95 MIN_SBRK_FRAC 3
96
97 # Add this much memory to big powers of two to get the bucket size.
98 PERL_PAGESIZE 4096
99
100 # This many sbrk() discontinuities should be tolerated even
101 # from the start without deciding that sbrk() is usually
102 # discontinuous.
103 SBRK_ALLOW_FAILURES 3
104
105 # This many continuous sbrk()s compensate for one discontinuous one.
106 SBRK_FAILURE_PRICE 50
107
28ac10b1 108 # Some configurations may ask for 12-byte-or-so allocations which
 109 # require 8-byte alignment (?!). In such a situation one needs to
 110 # define this to disable the 12-byte bucket (this will increase the memory footprint).
111 STRICT_ALIGNMENT undef
112
87c6202a 113 This implementation assumes that calling PerlIO_printf() does not
114 result in any memory allocation calls (used during a panic).
115
116 */
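/*
 * Illustrative sketch (not from the original notes): these switches are
 * normally given on the compiler command line rather than edited here.
 * Assuming a plain cc invocation, for example:
 *
 *	cc -c malloc.c					# default "fancy" malloc
 *	cc -c -DNO_FANCY_MALLOC malloc.c		# behave like the 5.004 malloc
 *	cc -c -DNO_FANCY_MALLOC -DPLAIN_MALLOC malloc.c	# behave like the 5.000 malloc
 *
 * The exact effect of each symbol, and how they interact, is as described
 * in the list above.
 */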
117
741df71a 118/*
 119 If used outside of the Perl environment, it may be useful to redefine
120 the following macros (listed below with defaults):
121
122 # Type of address returned by allocation functions
123 Malloc_t void *
124
125 # Type of size argument for allocation functions
126 MEM_SIZE unsigned long
127
128 # Maximal value in LONG
129 LONG_MAX 0x7FFFFFFF
130
131 # Unsigned integer type big enough to keep a pointer
132 UV unsigned long
133
134 # Type of pointer with 1-byte granularity
135 caddr_t char *
136
137 # Type returned by free()
138 Free_t void
139
5bbd1ef5 140 # Very fatal condition reporting function (cannot call anything that allocates)
141 fatalcroak(arg) write(2,arg,strlen(arg)) + exit(2)
142
741df71a 143 # Fatal error reporting function
144 croak(format, arg) warn(idem) + exit(1)
145
146 # Error reporting function
147 warn(format, arg) fprintf(stderr, idem)
148
149 # Locking/unlocking for MT operation
cea2e8a9 150 MALLOC_LOCK MUTEX_LOCK_NOCONTEXT(&PL_malloc_mutex)
151 MALLOC_UNLOCK MUTEX_UNLOCK_NOCONTEXT(&PL_malloc_mutex)
741df71a 152
153 # Locking/unlocking mutex for MT operation
154 MUTEX_LOCK(l) void
155 MUTEX_UNLOCK(l) void
156 */
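/*
 * Hypothetical sketch (not part of this file's configuration): when
 * embedding this allocator outside perl in a multi-threaded program, the
 * locking hooks above could be routed through a pthread mutex.  The name
 * my_malloc_mutex is illustrative only; the block is kept inert.
 */
#if 0
#  include <pthread.h>
static pthread_mutex_t my_malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
#  define MALLOC_LOCK	pthread_mutex_lock(&my_malloc_mutex)
#  define MALLOC_UNLOCK	pthread_mutex_unlock(&my_malloc_mutex)
#endif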
157
e8bc2b5c 158#ifndef NO_FANCY_MALLOC
159# ifndef SMALL_BUCKET_VIA_TABLE
160# define SMALL_BUCKET_VIA_TABLE
161# endif
162# ifndef BUCKETS_ROOT2
163# define BUCKETS_ROOT2
164# endif
165# ifndef IGNORE_SMALL_BAD_FREE
166# define IGNORE_SMALL_BAD_FREE
167# endif
3562ef9b 168#endif
169
e8bc2b5c 170#ifndef PLAIN_MALLOC /* Bulk enable features */
171# ifndef PACK_MALLOC
172# define PACK_MALLOC
173# endif
174# ifndef TWO_POT_OPTIMIZE
175# define TWO_POT_OPTIMIZE
176# endif
d720c441 177# if defined(PERL_CORE) && !defined(PERL_EMERGENCY_SBRK)
178# define PERL_EMERGENCY_SBRK
e8bc2b5c 179# endif
180# if defined(PERL_CORE) && !defined(DEBUGGING_MSTATS)
181# define DEBUGGING_MSTATS
182# endif
183#endif
184
185#define MIN_BUC_POW2 (sizeof(void*) > 4 ? 3 : 2) /* Allow for 4-byte arena. */
186#define MIN_BUCKET (MIN_BUC_POW2 * BUCKETS_PER_POW2)
187
61ae2fbf 188#if !(defined(I286) || defined(atarist) || defined(__MINT__))
e8bc2b5c 189 /* take 2k unless the block is bigger than that */
190# define LOG_OF_MIN_ARENA 11
191#else
192 /* take 16k unless the block is bigger than that
193 (80286s like large segments!), probably good on the atari too */
194# define LOG_OF_MIN_ARENA 14
195#endif
196
8d063cd8 197#ifndef lint
1944739a 198# if defined(DEBUGGING) && !defined(NO_RCHECK)
199# define RCHECK
200# endif
e8bc2b5c 201# if defined(RCHECK) && defined(IGNORE_SMALL_BAD_FREE)
202# undef IGNORE_SMALL_BAD_FREE
203# endif
8d063cd8 204/*
205 * malloc.c (Caltech) 2/21/82
206 * Chris Kingsley, kingsley@cit-20.
207 *
208 * This is a very fast storage allocator. It allocates blocks of a small
209 * number of different sizes, and keeps free lists of each size. Blocks that
210 * don't exactly fit are passed up to the next larger size. In this
211 * implementation, the available sizes are 2^n-4 (or 2^n-12) bytes long.
cf5c4ad8 212 * If PACK_MALLOC is defined, small blocks are 2^n bytes long.
8d063cd8 213 * This is designed for use in a program that uses vast quantities of memory,
741df71a 214 * but bombs when it runs out.
215 *
4eb8286e 216 * Modifications Copyright Ilya Zakharevich 1996-99.
741df71a 217 *
218 * Still very quick, but much more thrifty. (Std config is 10% slower
219 * than it was, and takes 67% of old heap size for typical usage.)
220 *
221 * Allocations of small blocks are now table-driven to many different
 222 * buckets. Sizes of really big buckets are increased to accommodate
223 * common size=power-of-2 blocks. Running-out-of-memory is made into
224 * an exception. Deeply configurable and thread-safe.
225 *
8d063cd8 226 */
227
d720c441 228#ifdef PERL_CORE
229# include "EXTERN.h"
4ad56ec9 230# define PERL_IN_MALLOC_C
d720c441 231# include "perl.h"
cea2e8a9 232# if defined(PERL_IMPLICIT_CONTEXT)
233# define croak Perl_croak_nocontext
234# define warn Perl_warn_nocontext
235# endif
d720c441 236#else
237# ifdef PERL_FOR_X2P
238# include "../EXTERN.h"
239# include "../perl.h"
240# else
241# include <stdlib.h>
242# include <stdio.h>
243# include <memory.h>
244# define _(arg) arg
245# ifndef Malloc_t
246# define Malloc_t void *
247# endif
248# ifndef MEM_SIZE
249# define MEM_SIZE unsigned long
250# endif
251# ifndef LONG_MAX
252# define LONG_MAX 0x7FFFFFFF
253# endif
254# ifndef UV
255# define UV unsigned long
256# endif
257# ifndef caddr_t
258# define caddr_t char *
259# endif
260# ifndef Free_t
261# define Free_t void
262# endif
263# define Copy(s,d,n,t) (void)memcpy((char*)(d),(char*)(s), (n) * sizeof(t))
264# define PerlEnv_getenv getenv
265# define PerlIO_printf fprintf
266# define PerlIO_stderr() stderr
267# endif
e8bc2b5c 268# ifndef croak /* make depend */
741df71a 269# define croak(mess, arg) (warn((mess), (arg)), exit(1))
d720c441 270# endif
271# ifndef warn
741df71a 272# define warn(mess, arg) fprintf(stderr, (mess), (arg))
e8bc2b5c 273# endif
274# ifdef DEBUG_m
275# undef DEBUG_m
276# endif
277# define DEBUG_m(a)
278# ifdef DEBUGGING
279# undef DEBUGGING
280# endif
cea2e8a9 281# ifndef pTHX
282# define pTHX void
283# define pTHX_
284# define dTHX extern int Perl___notused
285# define WITH_THX(s) s
286# endif
c5be433b 287# ifndef PERL_GET_INTERP
288# define PERL_GET_INTERP PL_curinterp
289# endif
4ad56ec9 290# ifndef Perl_malloc
291# define Perl_malloc malloc
292# endif
293# ifndef Perl_mfree
294# define Perl_mfree free
295# endif
296# ifndef Perl_realloc
297# define Perl_realloc realloc
298# endif
299# ifndef Perl_calloc
300# define Perl_calloc calloc
301# endif
302# ifndef Perl_strdup
303# define Perl_strdup strdup
304# endif
e8bc2b5c 305#endif
306
307#ifndef MUTEX_LOCK
308# define MUTEX_LOCK(l)
309#endif
310
311#ifndef MUTEX_UNLOCK
312# define MUTEX_UNLOCK(l)
313#endif
314
741df71a 315#ifndef MALLOC_LOCK
cea2e8a9 316# define MALLOC_LOCK MUTEX_LOCK_NOCONTEXT(&PL_malloc_mutex)
741df71a 317#endif
318
319#ifndef MALLOC_UNLOCK
cea2e8a9 320# define MALLOC_UNLOCK MUTEX_UNLOCK_NOCONTEXT(&PL_malloc_mutex)
741df71a 321#endif
322
5bbd1ef5 323# ifndef fatalcroak /* make depend */
324# define fatalcroak(mess) (write(2, (mess), strlen(mess)), exit(2))
325# endif
326
760ac839 327#ifdef DEBUGGING
e8bc2b5c 328# undef DEBUG_m
0b250b9e 329# define DEBUG_m(a) \
330 STMT_START { \
331 if (PERL_GET_INTERP) { dTHX; if (PL_debug & 128) { a; } } \
332 } STMT_END
760ac839 333#endif
334
e476b1b5 335#ifdef PERL_IMPLICIT_CONTEXT
336# define PERL_IS_ALIVE aTHX
337#else
338# define PERL_IS_ALIVE TRUE
339#endif
340
341
e9397286 342/*
343 * Layout of memory:
344 * ~~~~~~~~~~~~~~~~
345 * The memory is broken into "blocks" which occupy multiples of 2K (and
346 * generally speaking, have size "close" to a power of 2). The addresses
347 * of such *unused* blocks are kept in nextf[i] with big enough i. (nextf
348 * is an array of linked lists.) (Addresses of used blocks are not known.)
349 *
4ad56ec9 350 * Moreover, since the algorithm may try to "bite" smaller blocks out
e9397286 351 * of unused bigger ones, there are also regions of "irregular" size,
352 * managed separately, by a linked list chunk_chain.
353 *
354 * The third type of storage is the sbrk()ed-but-not-yet-used space, its
355 * end and size are kept in last_sbrk_top and sbrked_remains.
356 *
357 * Growing blocks "in place":
358 * ~~~~~~~~~~~~~~~~~~~~~~~~~
359 * The address of the block with the greatest address is kept in last_op
360 * (if not known, last_op is 0). If it is known that the memory above
361 * last_op is not continuous, or contains a chunk from chunk_chain,
362 * last_op is set to 0.
363 *
364 * The chunk with address last_op may be grown by expanding into
365 * sbrk()ed-but-not-yet-used space, or trying to sbrk() more continuous
366 * memory.
367 *
368 * Management of last_op:
369 * ~~~~~~~~~~~~~~~~~~~~~
370 *
371 * free() never changes the boundaries of blocks, so is not relevant.
372 *
373 * The only way realloc() may change the boundaries of blocks is if it
374 * grows a block "in place". However, in the case of success such a
375 * chunk is automatically last_op, and it remains last_op. In the case
376 * of failure getpages_adjacent() clears last_op.
377 *
378 * malloc() may change blocks by calling morecore() only.
379 *
380 * morecore() may create new blocks by:
381 * a) biting pieces from chunk_chain (cannot create one above last_op);
382 * b) biting a piece from an unused block (if block was last_op, this
383 * may create a chunk from chain above last_op, thus last_op is
384 * invalidated in such a case).
385 * c) biting of sbrk()ed-but-not-yet-used space. This creates
386 * a block which is last_op.
387 * d) Allocating new pages by calling getpages();
388 *
389 * getpages() creates a new block. It marks last_op at the bottom of
390 * the chunk of memory it returns.
391 *
392 * Active pages footprint:
393 * ~~~~~~~~~~~~~~~~~~~~~~
394 * Note that we do not need to traverse the lists in nextf[i], just take
395 * the first element of this list. However, we *need* to traverse the
 396 * list in chunk_chain, but most of the time it should be a very short one,
397 * so we do not step on a lot of pages we are not going to use.
398 *
399 * Flaws:
400 * ~~~~~
 401 * get_from_bigger_buckets(): forgets to increment price => Quite
402 * aggressive.
403 */
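/*
 * Illustrative sketch (hypothetical helper, kept disabled): the "irregular"
 * storage described above can be summed by walking chunk_chain, which is
 * declared further down in this file.
 */
#if 0
static MEM_SIZE
total_chain_bytes(void)
{
    struct chunk_chain_s *elt = chunk_chain;	/* head of the irregular list */
    MEM_SIZE total = 0;

    while (elt) {
	total += elt->size;		/* size of this leftover piece */
	elt = elt->next;
    }
    return total;
}
#endif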
404
135863df 405/* I don't much care whether these are defined in sys/types.h--LAW */
406
407#define u_char unsigned char
408#define u_int unsigned int
56431972 409/*
410 * I removed the definition of u_bigint which appeared to be u_bigint = UV
411 * u_bigint was only used in TWOK_MASKED and TWOK_SHIFT
412 * where I have used PTR2UV. RMB
413 */
135863df 414#define u_short unsigned short
8d063cd8 415
cf5c4ad8 416/* 286 and atarist like big chunks, which gives too much overhead. */
61ae2fbf 417#if (defined(RCHECK) || defined(I286) || defined(atarist) || defined(__MINT__)) && defined(PACK_MALLOC)
e8bc2b5c 418# undef PACK_MALLOC
cf5c4ad8 419#endif
420
8d063cd8 421/*
cf5c4ad8 422 * The description below is applicable if PACK_MALLOC is not defined.
423 *
8d063cd8 424 * The overhead on a block is at least 4 bytes. When free, this space
425 * contains a pointer to the next free block, and the bottom two bits must
426 * be zero. When in use, the first byte is set to MAGIC, and the second
427 * byte is the size index. The remaining bytes are for alignment.
428 * If range checking is enabled and the size of the block fits
429 * in two bytes, then the top two bytes hold the size of the requested block
430 * plus the range checking words, and the header word MINUS ONE.
431 */
432union overhead {
433 union overhead *ov_next; /* when free */
85e6fe83 434#if MEM_ALIGNBYTES > 4
c623bd54 435 double strut; /* alignment problems */
a687059c 436#endif
8d063cd8 437 struct {
438 u_char ovu_magic; /* magic number */
439 u_char ovu_index; /* bucket # */
440#ifdef RCHECK
441 u_short ovu_size; /* actual block size */
442 u_int ovu_rmagic; /* range magic number */
443#endif
444 } ovu;
445#define ov_magic ovu.ovu_magic
446#define ov_index ovu.ovu_index
447#define ov_size ovu.ovu_size
448#define ov_rmagic ovu.ovu_rmagic
449};
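/*
 * Size note (illustrative, not exhaustive): without RCHECK this union is
 * essentially just the free-list pointer (plus the double strut where
 * MEM_ALIGNBYTES > 4), i.e. 4 or 8 bytes on common builds; with RCHECK the
 * ovu struct keeps it at 8 bytes and RSLOP adds one more word of per-block
 * overhead (see M_OVERHEAD further down).
 */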
450
451#define MAGIC 0xff /* magic # on accounting info */
452#define RMAGIC 0x55555555 /* magic # on range info */
e8bc2b5c 453#define RMAGIC_C 0x55 /* magic # on range info */
454
8d063cd8 455#ifdef RCHECK
c2a5c2d2 456# define RSLOP sizeof (u_int)
457# ifdef TWO_POT_OPTIMIZE
e8bc2b5c 458# define MAX_SHORT_BUCKET (12 * BUCKETS_PER_POW2)
c2a5c2d2 459# else
e8bc2b5c 460# define MAX_SHORT_BUCKET (13 * BUCKETS_PER_POW2)
c2a5c2d2 461# endif
8d063cd8 462#else
c2a5c2d2 463# define RSLOP 0
8d063cd8 464#endif
465
e8bc2b5c 466#if !defined(PACK_MALLOC) && defined(BUCKETS_ROOT2)
467# undef BUCKETS_ROOT2
468#endif
469
470#ifdef BUCKETS_ROOT2
471# define BUCKET_TABLE_SHIFT 2
472# define BUCKET_POW2_SHIFT 1
473# define BUCKETS_PER_POW2 2
474#else
475# define BUCKET_TABLE_SHIFT MIN_BUC_POW2
476# define BUCKET_POW2_SHIFT 0
477# define BUCKETS_PER_POW2 1
478#endif
479
274c7500 480#if !defined(MEM_ALIGNBYTES) || ((MEM_ALIGNBYTES > 4) && !defined(STRICT_ALIGNMENT))
481/* Figure out the alignment of void*. */
482struct aligner {
483 char c;
484 void *p;
485};
486# define ALIGN_SMALL ((int)((caddr_t)&(((struct aligner*)0)->p)))
487#else
488# define ALIGN_SMALL MEM_ALIGNBYTES
489#endif
490
491#define IF_ALIGN_8(yes,no) ((ALIGN_SMALL>4) ? (yes) : (no))
492
e8bc2b5c 493#ifdef BUCKETS_ROOT2
494# define MAX_BUCKET_BY_TABLE 13
495static u_short buck_size[MAX_BUCKET_BY_TABLE + 1] =
496 {
497 0, 0, 0, 0, 4, 4, 8, 12, 16, 24, 32, 48, 64, 80,
498 };
499# define BUCKET_SIZE(i) ((i) % 2 ? buck_size[i] : (1 << ((i) >> BUCKET_POW2_SHIFT)))
500# define BUCKET_SIZE_REAL(i) ((i) <= MAX_BUCKET_BY_TABLE \
501 ? buck_size[i] \
502 : ((1 << ((i) >> BUCKET_POW2_SHIFT)) \
503 - MEM_OVERHEAD(i) \
504 + POW2_OPTIMIZE_SURPLUS(i)))
505#else
506# define BUCKET_SIZE(i) (1 << ((i) >> BUCKET_POW2_SHIFT))
507# define BUCKET_SIZE_REAL(i) (BUCKET_SIZE(i) - MEM_OVERHEAD(i) + POW2_OPTIMIZE_SURPLUS(i))
508#endif
509
510
cf5c4ad8 511#ifdef PACK_MALLOC
4ad56ec9 512/* In this case there are several possible layouts of arenas depending
513 * on the size. Arenas are of sizes multiple to 2K, 2K-aligned, and
514 * have a size close to a power of 2.
515 *
 516 * Arenas of size >= 4K keep one chunk only. Arenas of size 2K
517 * may keep one chunk or multiple chunks. Here are the possible
518 * layouts of arenas:
519 *
520 * # One chunk only, chunksize 2^k + SOMETHING - ALIGN, k >= 11
521 *
522 * INDEX MAGIC1 UNUSED CHUNK1
523 *
524 * # Multichunk with sanity checking and chunksize 2^k-ALIGN, k>7
525 *
526 * INDEX MAGIC1 MAGIC2 MAGIC3 UNUSED CHUNK1 CHUNK2 CHUNK3 ...
527 *
528 * # Multichunk with sanity checking and size 2^k-ALIGN, k=7
529 *
530 * INDEX MAGIC1 MAGIC2 MAGIC3 UNUSED CHUNK1 UNUSED CHUNK2 CHUNK3 ...
531 *
532 * # Multichunk with sanity checking and size up to 80
533 *
534 * INDEX UNUSED MAGIC1 UNUSED MAGIC2 UNUSED ... CHUNK1 CHUNK2 CHUNK3 ...
535 *
 536 * # No sanity check (usually up to 48-byte-long buckets)
537 * INDEX UNUSED CHUNK1 CHUNK2 ...
538 *
539 * Above INDEX and MAGIC are one-byte-long. Sizes of UNUSED are
540 * appropriate to keep algorithms simple and memory aligned. INDEX
541 * encodes the size of the chunk, while MAGICn encodes state (used,
542 * free or non-managed-by-us-so-it-indicates-a-bug) of CHUNKn. MAGIC
543 * is used for sanity checking purposes only. SOMETHING is 0 or 4K
 544 * (to make size of big CHUNK accommodate allocations for powers of two
545 * better).
546 *
 547 * [There is no need for alignment between chunks, since C rules ensure
 548 * that structs which need 2^k alignment have a sizeof divisible by
 549 * 2^k. Thus as long as the last chunk is aligned at the end of the
 550 * arena, and 2K-alignment does not contradict things, everything is
 551 * going to be OK for chunk sizes of 2^n and 2^n + 2^k. Say, 80-byte
 552 * buckets will be 16-byte aligned, and as long as we put allocations
 553 * for requests in the 65..80 range there, all is fine.
554 *
 555 * Note, however, that standard malloc() puts stricter requirements
 556 * on alignment than the above C rules. Moreover, our algorithms for
 557 * realloc() may break this idyll, but we suppose that realloc() need
 558 * not change alignment.]
559 *
 560 * It is very important to make the calculation of the offset of MAGICm
 561 * as quick as possible, since it is done on each malloc()/free(). In
 562 * fact it is so quick that it has very little effect on the speed of
 563 * doing malloc()/free(). [By default] we forgo such calculations
 564 * for small chunks, but only to save an extra 3% of memory, not
 565 * because of speed considerations.
566 *
567 * Here is the algorithm [which is the same for all the allocations
568 * schemes above], see OV_MAGIC(block,bucket). Let OFFSETm be the
569 * offset of the CHUNKm from the start of ARENA. Then offset of
 570 * MAGICm is (OFFSETm >> SHIFT) + ADDOFFSET. Here SHIFT and ADDOFFSET
571 * are numbers which depend on the size of the chunks only.
572 *
 573 * Let us check some sanity conditions. Numbers OFFSETm>>SHIFT are
574 * different for all the chunks in the arena if 2^SHIFT is not greater
575 * than size of the chunks in the arena. MAGIC1 will not overwrite
576 * INDEX provided ADDOFFSET is >0 if OFFSET1 < 2^SHIFT. MAGIClast
577 * will not overwrite CHUNK1 if OFFSET1 > (OFFSETlast >> SHIFT) +
578 * ADDOFFSET.
579 *
580 * Make SHIFT the maximal possible (there is no point in making it
581 * smaller). Since OFFSETlast is 2K - CHUNKSIZE, above restrictions
582 * give restrictions on OFFSET1 and on ADDOFFSET.
583 *
584 * In particular, for chunks of size 2^k with k>=6 we can put
585 * ADDOFFSET to be from 0 to 2^k - 2^(11-k), and have
586 * OFFSET1==chunksize. For chunks of size 80 OFFSET1 of 2K%80=48 is
587 * large enough to have ADDOFFSET between 1 and 16 (similarly for 96,
588 * when ADDOFFSET should be 1). In particular, keeping MAGICs for
589 * these sizes gives no additional size penalty.
590 *
591 * However, for chunks of size 2^k with k<=5 this gives OFFSET1 >=
 592 * ADDOFFSET + 2^(11-k). Keeping ADDOFFSET 0 allows for 2^(11-k)-2^(11-2k)
593 * chunks per arena. This is smaller than 2^(11-k) - 1 which are
594 * needed if no MAGIC is kept. [In fact, having a negative ADDOFFSET
595 * would allow for slightly more buckets per arena for k=2,3.]
596 *
597 * Similarly, for chunks of size 3/2*2^k with k<=5 MAGICs would span
 598 * the area up to 2^(11-k)+ADDOFFSET. For k=4 this gives optimal
599 * ADDOFFSET as -7..0. For k=3 ADDOFFSET can go up to 4 (with tiny
600 * savings for negative ADDOFFSET). For k=5 ADDOFFSET can go -1..16
601 * (with no savings for negative values).
cf5c4ad8 602 *
4ad56ec9 603 * In particular, keeping ADDOFFSET 0 for sizes of chunks up to 2^6
604 * leads to tiny pessimizations in case of sizes 4, 8, 12, 24, and
605 * leads to no contradictions except for size=80 (or 96.)
cf5c4ad8 606 *
4ad56ec9 607 * However, it also makes sense to keep no magic for sizes 48 or less.
608 * This is what we do. In this case one needs ADDOFFSET>=1 also for
609 * chunksizes 12, 24, and 48, unless one gets one less chunk per
610 * arena.
611 *
612 * The algo of OV_MAGIC(block,bucket) keeps ADDOFFSET 0 until
613 * chunksize of 64, then makes it 1.
cf5c4ad8 614 *
4ad56ec9 615 * This allows for an additional optimization: the above scheme leads
616 * to giant overheads for sizes 128 or more (one whole chunk needs to
 617 * be sacrificed to keep INDEX). Instead we use chunks not of size
618 * 2^k, but of size 2^k-ALIGN. If we pack these chunks at the end of
619 * the arena, then the beginnings are still in different 2^k-long
620 * sections of the arena if k>=7 for ALIGN==4, and k>=8 if ALIGN=8.
621 * Thus for k>7 the above algo of calculating the offset of the magic
622 * will still give different answers for different chunks. And to
623 * avoid the overrun of MAGIC1 into INDEX, one needs ADDOFFSET of >=1.
624 * In the case k=7 we just move the first chunk an extra ALIGN
625 * backward inside the ARENA (this is done once per arena lifetime,
626 * thus is not a big overhead). */
e8bc2b5c 627# define MAX_PACKED_POW2 6
628# define MAX_PACKED (MAX_PACKED_POW2 * BUCKETS_PER_POW2 + BUCKET_POW2_SHIFT)
629# define MAX_POW2_ALGO ((1<<(MAX_PACKED_POW2 + 1)) - M_OVERHEAD)
630# define TWOK_MASK ((1<<LOG_OF_MIN_ARENA) - 1)
56431972 631# define TWOK_MASKED(x) (PTR2UV(x) & ~TWOK_MASK)
632# define TWOK_SHIFT(x) (PTR2UV(x) & TWOK_MASK)
633# define OV_INDEXp(block) (INT2PTR(u_char*,TWOK_MASKED(block)))
cf5c4ad8 634# define OV_INDEX(block) (*OV_INDEXp(block))
635# define OV_MAGIC(block,bucket) (*(OV_INDEXp(block) + \
e8bc2b5c 636 (TWOK_SHIFT(block)>> \
637 (bucket>>BUCKET_POW2_SHIFT)) + \
638 (bucket >= MIN_NEEDS_SHIFT ? 1 : 0)))
 639 /* A bucket can have a shift smaller than its size; we need to
 640 shift its magic number so it will not overwrite the index: */
641# ifdef BUCKETS_ROOT2
642# define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2 - 1) /* Shift 80 greater than chunk 64. */
643# else
644# define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2) /* Shift 128 greater than chunk 32. */
645# endif
cf5c4ad8 646# define CHUNK_SHIFT 0
647
e8bc2b5c 648/* Number of chunks per 2K arena for a bucket of the given ordinal. */
649#ifdef IGNORE_SMALL_BAD_FREE
650#define FIRST_BUCKET_WITH_CHECK (6 * BUCKETS_PER_POW2) /* 64 */
651# define N_BLKS(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK \
652 ? ((1<<LOG_OF_MIN_ARENA) - 1)/BUCKET_SIZE(bucket) \
653 : n_blks[bucket] )
654#else
655# define N_BLKS(bucket) n_blks[bucket]
656#endif
657
658static u_short n_blks[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] =
659 {
660# if BUCKETS_PER_POW2==1
661 0, 0,
662 (MIN_BUC_POW2==2 ? 384 : 0),
663 224, 120, 62, 31, 16, 8, 4, 2
664# else
665 0, 0, 0, 0,
666 (MIN_BUC_POW2==2 ? 384 : 0), (MIN_BUC_POW2==2 ? 384 : 0), /* 4, 4 */
667 224, 149, 120, 80, 62, 41, 31, 25, 16, 16, 8, 8, 4, 4, 2, 2
668# endif
669 };
670
 671/* Shift (offset) of the first chunk of a bucket with the given ordinal inside a 2K arena. */
672#ifdef IGNORE_SMALL_BAD_FREE
673# define BLK_SHIFT(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK \
674 ? ((1<<LOG_OF_MIN_ARENA) \
675 - BUCKET_SIZE(bucket) * N_BLKS(bucket)) \
676 : blk_shift[bucket])
677#else
678# define BLK_SHIFT(bucket) blk_shift[bucket]
679#endif
680
681static u_short blk_shift[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] =
682 {
683# if BUCKETS_PER_POW2==1
684 0, 0,
685 (MIN_BUC_POW2==2 ? 512 : 0),
686 256, 128, 64, 64, /* 8 to 64 */
687 16*sizeof(union overhead),
688 8*sizeof(union overhead),
689 4*sizeof(union overhead),
690 2*sizeof(union overhead),
691# else
692 0, 0, 0, 0,
693 (MIN_BUC_POW2==2 ? 512 : 0), (MIN_BUC_POW2==2 ? 512 : 0),
694 256, 260, 128, 128, 64, 80, 64, 48, /* 8 to 96 */
695 16*sizeof(union overhead), 16*sizeof(union overhead),
696 8*sizeof(union overhead), 8*sizeof(union overhead),
697 4*sizeof(union overhead), 4*sizeof(union overhead),
698 2*sizeof(union overhead), 2*sizeof(union overhead),
699# endif
700 };
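/*
 * Worked example (illustrative): for the 64-byte bucket the tables above
 * give 31 chunks per 2K arena with the first chunk at offset 64.  The chunk
 * at offset 64*m then keeps its MAGIC at arena byte (64*m)>>6 = m, so the
 * MAGICs occupy bytes 1..31 of the otherwise unused first 64 bytes while
 * INDEX sits at byte 0 -- no collisions, matching the layout sketches above.
 */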
cf5c4ad8 701
5bbd1ef5 702# define NEEDED_ALIGNMENT 0x800 /* 2k boundaries */
703# define WANTED_ALIGNMENT 0x800 /* 2k boundaries */
704
cf5c4ad8 705#else /* !PACK_MALLOC */
706
707# define OV_MAGIC(block,bucket) (block)->ov_magic
708# define OV_INDEX(block) (block)->ov_index
709# define CHUNK_SHIFT 1
e8bc2b5c 710# define MAX_PACKED -1
5bbd1ef5 711# define NEEDED_ALIGNMENT MEM_ALIGNBYTES
712# define WANTED_ALIGNMENT 0x400 /* 1k boundaries */
713
cf5c4ad8 714#endif /* !PACK_MALLOC */
715
e8bc2b5c 716#define M_OVERHEAD (sizeof(union overhead) + RSLOP)
717
718#ifdef PACK_MALLOC
719# define MEM_OVERHEAD(bucket) \
720 (bucket <= MAX_PACKED ? 0 : M_OVERHEAD)
721# ifdef SMALL_BUCKET_VIA_TABLE
722# define START_SHIFTS_BUCKET ((MAX_PACKED_POW2 + 1) * BUCKETS_PER_POW2)
723# define START_SHIFT MAX_PACKED_POW2
724# ifdef BUCKETS_ROOT2 /* Chunks of size 3*2^n. */
725# define SIZE_TABLE_MAX 80
726# else
727# define SIZE_TABLE_MAX 64
728# endif
729static char bucket_of[] =
730 {
731# ifdef BUCKETS_ROOT2 /* Chunks of size 3*2^n. */
732 /* 0 to 15 in 4-byte increments. */
733 (sizeof(void*) > 4 ? 6 : 5), /* 4/8, 5-th bucket for better reports */
734 6, /* 8 */
274c7500 735 IF_ALIGN_8(8,7), 8, /* 16/12, 16 */
e8bc2b5c 736 9, 9, 10, 10, /* 24, 32 */
737 11, 11, 11, 11, /* 48 */
738 12, 12, 12, 12, /* 64 */
739 13, 13, 13, 13, /* 80 */
740 13, 13, 13, 13 /* 80 */
741# else /* !BUCKETS_ROOT2 */
742 /* 0 to 15 in 4-byte increments. */
743 (sizeof(void*) > 4 ? 3 : 2),
744 3,
745 4, 4,
746 5, 5, 5, 5,
747 6, 6, 6, 6,
748 6, 6, 6, 6
749# endif /* !BUCKETS_ROOT2 */
750 };
751# else /* !SMALL_BUCKET_VIA_TABLE */
752# define START_SHIFTS_BUCKET MIN_BUCKET
753# define START_SHIFT (MIN_BUC_POW2 - 1)
754# endif /* !SMALL_BUCKET_VIA_TABLE */
755#else /* !PACK_MALLOC */
756# define MEM_OVERHEAD(bucket) M_OVERHEAD
757# ifdef SMALL_BUCKET_VIA_TABLE
758# undef SMALL_BUCKET_VIA_TABLE
759# endif
760# define START_SHIFTS_BUCKET MIN_BUCKET
761# define START_SHIFT (MIN_BUC_POW2 - 1)
762#endif /* !PACK_MALLOC */
cf5c4ad8 763
8d063cd8 764/*
55497cff 765 * Big allocations are often of the size 2^n bytes. To make them a
766 * little bit better, make blocks of size 2^n+pagesize for big n.
767 */
768
769#ifdef TWO_POT_OPTIMIZE
770
5f05dabc 771# ifndef PERL_PAGESIZE
772# define PERL_PAGESIZE 4096
773# endif
e8bc2b5c 774# ifndef FIRST_BIG_POW2
775# define FIRST_BIG_POW2 15 /* 32K, 16K is used too often. */
5f05dabc 776# endif
e8bc2b5c 777# define FIRST_BIG_BLOCK (1<<FIRST_BIG_POW2)
55497cff 778/* If this value or more, check against bigger blocks. */
779# define FIRST_BIG_BOUND (FIRST_BIG_BLOCK - M_OVERHEAD)
780/* If less than this value, goes into 2^n-overhead-block. */
781# define LAST_SMALL_BOUND ((FIRST_BIG_BLOCK>>1) - M_OVERHEAD)
782
e8bc2b5c 783# define POW2_OPTIMIZE_ADJUST(nbytes) \
784 ((nbytes >= FIRST_BIG_BOUND) ? nbytes -= PERL_PAGESIZE : 0)
785# define POW2_OPTIMIZE_SURPLUS(bucket) \
786 ((bucket >= FIRST_BIG_POW2 * BUCKETS_PER_POW2) ? PERL_PAGESIZE : 0)
787
788#else /* !TWO_POT_OPTIMIZE */
789# define POW2_OPTIMIZE_ADJUST(nbytes)
790# define POW2_OPTIMIZE_SURPLUS(bucket) 0
791#endif /* !TWO_POT_OPTIMIZE */
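/*
 * Worked example (illustrative, assuming 8 bytes of per-block overhead and
 * 4K pages): without TWO_POT_OPTIMIZE a request for exactly 32768 bytes
 * needs 32768 + overhead and is pushed into the 64K bucket; with it,
 * POW2_OPTIMIZE_ADJUST() shrinks the request by PERL_PAGESIZE before the
 * bucket is chosen and POW2_OPTIMIZE_SURPLUS() gives that page back to the
 * arena, so the request is served from a 32K+4K block instead.
 */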
792
793#if defined(HAS_64K_LIMIT) && defined(PERL_CORE)
794# define BARK_64K_LIMIT(what,nbytes,size) \
795 if (nbytes > 0xffff) { \
796 PerlIO_printf(PerlIO_stderr(), \
797 "%s too large: %lx\n", what, size); \
798 my_exit(1); \
799 }
800#else /* !HAS_64K_LIMIT || !PERL_CORE */
801# define BARK_64K_LIMIT(what,nbytes,size)
802#endif /* !HAS_64K_LIMIT || !PERL_CORE */
55497cff 803
e8bc2b5c 804#ifndef MIN_SBRK
805# define MIN_SBRK 2048
806#endif
807
808#ifndef FIRST_SBRK
d720c441 809# define FIRST_SBRK (48*1024)
e8bc2b5c 810#endif
811
812/* Minimal sbrk in percents of what is already alloced. */
813#ifndef MIN_SBRK_FRAC
814# define MIN_SBRK_FRAC 3
815#endif
816
817#ifndef SBRK_ALLOW_FAILURES
818# define SBRK_ALLOW_FAILURES 3
819#endif
55497cff 820
e8bc2b5c 821#ifndef SBRK_FAILURE_PRICE
822# define SBRK_FAILURE_PRICE 50
55497cff 823#endif
824
e8bc2b5c 825#if defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)
826
827# ifndef BIG_SIZE
828# define BIG_SIZE (1<<16) /* 64K */
829# endif
830
3541dd58 831#ifdef I_MACH_CTHREADS
772fe5b3 832# undef MUTEX_LOCK
833# define MUTEX_LOCK(m) STMT_START { if (*m) mutex_lock(*m); } STMT_END
834# undef MUTEX_UNLOCK
835# define MUTEX_UNLOCK(m) STMT_START { if (*m) mutex_unlock(*m); } STMT_END
3541dd58 836#endif
837
55497cff 838static char *emergency_buffer;
839static MEM_SIZE emergency_buffer_size;
840
cea2e8a9 841static int findbucket (union overhead *freep, int srchlen);
842static void morecore (register int bucket);
843# if defined(DEBUGGING)
844static void botch (char *diag, char *s);
845# endif
846static void add_to_chain (void *p, MEM_SIZE size, MEM_SIZE chip);
847static Malloc_t emergency_sbrk (MEM_SIZE size);
848static void* get_from_chain (MEM_SIZE size);
849static void* get_from_bigger_buckets(int bucket, MEM_SIZE size);
850static union overhead *getpages (int needed, int *nblksp, int bucket);
851static int getpages_adjacent(int require);
852
853static Malloc_t
854emergency_sbrk(MEM_SIZE size)
55497cff 855{
28ac10b1 856 MEM_SIZE rsize = (((size - 1)>>LOG_OF_MIN_ARENA) + 1)<<LOG_OF_MIN_ARENA;
857
55497cff 858 if (size >= BIG_SIZE) {
859 /* Give the possibility to recover: */
741df71a 860 MALLOC_UNLOCK;
1b979e0a 861 croak("Out of memory during \"large\" request for %i bytes", size);
55497cff 862 }
863
28ac10b1 864 if (emergency_buffer_size >= rsize) {
865 char *old = emergency_buffer;
866
867 emergency_buffer_size -= rsize;
868 emergency_buffer += rsize;
869 return old;
870 } else {
cea2e8a9 871 dTHX;
55497cff 872 /* First offense, give a possibility to recover by dying. */
873 /* No malloc involved here: */
4a33f861 874 GV **gvp = (GV**)hv_fetch(PL_defstash, "^M", 2, 0);
55497cff 875 SV *sv;
876 char *pv;
28ac10b1 877 int have = 0;
2d8e6c8d 878 STRLEN n_a;
55497cff 879
28ac10b1 880 if (emergency_buffer_size) {
881 add_to_chain(emergency_buffer, emergency_buffer_size, 0);
882 emergency_buffer_size = 0;
883 emergency_buffer = Nullch;
884 have = 1;
885 }
4a33f861 886 if (!gvp) gvp = (GV**)hv_fetch(PL_defstash, "\015", 1, 0);
55497cff 887 if (!gvp || !(sv = GvSV(*gvp)) || !SvPOK(sv)
28ac10b1 888 || (SvLEN(sv) < (1<<LOG_OF_MIN_ARENA) - M_OVERHEAD)) {
889 if (have)
890 goto do_croak;
55497cff 891 return (char *)-1; /* Now die die die... */
28ac10b1 892 }
55497cff 893 /* Got it, now detach SvPV: */
2d8e6c8d 894 pv = SvPV(sv, n_a);
55497cff 895 /* Check alignment: */
56431972 896 if ((PTR2UV(pv) - sizeof(union overhead)) & (NEEDED_ALIGNMENT - 1)) {
55497cff 897 PerlIO_puts(PerlIO_stderr(),"Bad alignment of $^M!\n");
bbce6d69 898 return (char *)-1; /* die die die */
55497cff 899 }
900
28ac10b1 901 emergency_buffer = pv - sizeof(union overhead);
902 emergency_buffer_size = malloced_size(pv) + M_OVERHEAD;
55497cff 903 SvPOK_off(sv);
28ac10b1 904 SvPVX(sv) = Nullch;
905 SvCUR(sv) = SvLEN(sv) = 0;
55497cff 906 }
28ac10b1 907 do_croak:
741df71a 908 MALLOC_UNLOCK;
28ac10b1 909 croak("Out of memory during request for %i bytes", size);
ce70748c 910 /* NOTREACHED */
911 return Nullch;
55497cff 912}
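/*
 * Usage note (illustrative): the emergency pool is pre-allocated from Perl
 * code, e.g. something like "$^M = 'a' x (1<<16);" as described in
 * perlvar.pod; emergency_sbrk() above then detaches that string's PV and
 * parcels it out in 2K-rounded slices once real sbrk() starts failing.
 */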
913
e8bc2b5c 914#else /* !(defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)) */
55497cff 915# define emergency_sbrk(size) -1
e8bc2b5c 916#endif /* !(defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)) */
55497cff 917
918/*
e8bc2b5c 919 * nextf[i] is the pointer to the next free block of size 2^i. The
8d063cd8 920 * smallest allocatable block is 8 bytes. The overhead information
921 * precedes the data area returned to the user.
922 */
e8bc2b5c 923#define NBUCKETS (32*BUCKETS_PER_POW2 + 1)
8d063cd8 924static union overhead *nextf[NBUCKETS];
cf5c4ad8 925
e3663bad 926#if defined(PURIFY) && !defined(USE_PERL_SBRK)
927# define USE_PERL_SBRK
928#endif
929
cf5c4ad8 930#ifdef USE_PERL_SBRK
931#define sbrk(a) Perl_sbrk(a)
20ce7b12 932Malloc_t Perl_sbrk (int size);
8ac85365 933#else
934#ifdef DONT_DECLARE_STD
935#ifdef I_UNISTD
936#include <unistd.h>
937#endif
cf5c4ad8 938#else
52082926 939extern Malloc_t sbrk(int);
8ac85365 940#endif
cf5c4ad8 941#endif
8d063cd8 942
c07a80fd 943#ifdef DEBUGGING_MSTATS
8d063cd8 944/*
945 * nmalloc[i] is the difference between the number of mallocs and frees
946 * for a given block size.
947 */
948static u_int nmalloc[NBUCKETS];
5f05dabc 949static u_int sbrk_slack;
950static u_int start_slack;
8d063cd8 951#endif
952
e8bc2b5c 953static u_int goodsbrk;
954
760ac839 955#ifdef DEBUGGING
3541dd58 956#undef ASSERT
957#define ASSERT(p,diag) if (!(p)) botch(diag,STRINGIFY(p)); else
cea2e8a9 958static void
959botch(char *diag, char *s)
8d063cd8 960{
e8cd8248 961 dTHX;
d720c441 962 PerlIO_printf(PerlIO_stderr(), "assertion botched (%s?): %s\n", diag, s);
3028581b 963 PerlProc_abort();
8d063cd8 964}
965#else
3541dd58 966#define ASSERT(p, diag)
8d063cd8 967#endif
968
2304df62 969Malloc_t
86058a2d 970Perl_malloc(register size_t nbytes)
8d063cd8 971{
972 register union overhead *p;
e8bc2b5c 973 register int bucket;
ee0007ab 974 register MEM_SIZE shiftr;
8d063cd8 975
c2a5c2d2 976#if defined(DEBUGGING) || defined(RCHECK)
ee0007ab 977 MEM_SIZE size = nbytes;
45d8adaa 978#endif
979
e8bc2b5c 980 BARK_64K_LIMIT("Allocation",nbytes,nbytes);
45d8adaa 981#ifdef DEBUGGING
982 if ((long)nbytes < 0)
cea2e8a9 983 croak("%s", "panic: malloc");
45d8adaa 984#endif
45d8adaa 985
8d063cd8 986 /*
987 * Convert amount of memory requested into
988 * closest block size stored in hash buckets
989 * which satisfies request. Account for
990 * space used per block for accounting.
991 */
cf5c4ad8 992#ifdef PACK_MALLOC
e8bc2b5c 993# ifdef SMALL_BUCKET_VIA_TABLE
994 if (nbytes == 0)
995 bucket = MIN_BUCKET;
996 else if (nbytes <= SIZE_TABLE_MAX) {
997 bucket = bucket_of[(nbytes - 1) >> BUCKET_TABLE_SHIFT];
998 } else
999# else
043bf814 1000 if (nbytes == 0)
1001 nbytes = 1;
e8bc2b5c 1002 if (nbytes <= MAX_POW2_ALGO) goto do_shifts;
1003 else
1004# endif
55497cff 1005#endif
e8bc2b5c 1006 {
1007 POW2_OPTIMIZE_ADJUST(nbytes);
1008 nbytes += M_OVERHEAD;
1009 nbytes = (nbytes + 3) &~ 3;
1010 do_shifts:
1011 shiftr = (nbytes - 1) >> START_SHIFT;
1012 bucket = START_SHIFTS_BUCKET;
1013 /* apart from this loop, this is O(1) */
1014 while (shiftr >>= 1)
1015 bucket += BUCKETS_PER_POW2;
cf5c4ad8 1016 }
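	/* Illustrative example (default config, no RCHECK, 8-byte overhead):
	 * a 100-byte request becomes 108 bytes here, and the shift loop above
	 * picks the 128-byte bucket, whose BUCKET_SIZE_REAL() of 120 is the
	 * smallest usable size >= 100. */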
4ad56ec9 1017 MALLOC_LOCK;
8d063cd8 1018 /*
1019 * If nothing in hash bucket right now,
1020 * request more memory from the system.
1021 */
1022 if (nextf[bucket] == NULL)
1023 morecore(bucket);
e8bc2b5c 1024 if ((p = nextf[bucket]) == NULL) {
741df71a 1025 MALLOC_UNLOCK;
55497cff 1026#ifdef PERL_CORE
0b250b9e 1027 {
1028 dTHX;
1029 if (!PL_nomemok) {
1030 PerlIO_puts(PerlIO_stderr(),"Out of memory!\n");
1031 my_exit(1);
1032 }
ee0007ab 1033 }
45d8adaa 1034#endif
4ad56ec9 1035 return (NULL);
45d8adaa 1036 }
1037
e8bc2b5c 1038 DEBUG_m(PerlIO_printf(Perl_debug_log,
b900a521 1039 "0x%"UVxf": (%05lu) malloc %ld bytes\n",
1040 PTR2UV(p+1), (unsigned long)(PL_an++),
e8bc2b5c 1041 (long)size));
45d8adaa 1042
8d063cd8 1043 /* remove from linked list */
802004fa 1044#if defined(RCHECK)
32e30700 1045 if ((PTR2UV(p)) & (MEM_ALIGNBYTES - 1)) {
e8cd8248 1046 dTHX;
b900a521 1047 PerlIO_printf(PerlIO_stderr(),
7fa2f4f1 1048 "Unaligned pointer in the free chain 0x%"UVxf"\n",
1049 PTR2UV(p));
1050 }
1051 if ((PTR2UV(p->ov_next)) & (MEM_ALIGNBYTES - 1)) {
e8cd8248 1052 dTHX;
7fa2f4f1 1053 PerlIO_printf(PerlIO_stderr(),
1054 "Unaligned `next' pointer in the free "
1055 "chain 0x"UVxf" at 0x%"UVxf"\n",
1056 PTR2UV(p->ov_next), PTR2UV(p));
32e30700 1057 }
bf38876a 1058#endif
1059 nextf[bucket] = p->ov_next;
4ad56ec9 1060
1061 MALLOC_UNLOCK;
1062
e8bc2b5c 1063#ifdef IGNORE_SMALL_BAD_FREE
1064 if (bucket >= FIRST_BUCKET_WITH_CHECK)
1065#endif
1066 OV_MAGIC(p, bucket) = MAGIC;
cf5c4ad8 1067#ifndef PACK_MALLOC
1068 OV_INDEX(p) = bucket;
1069#endif
8d063cd8 1070#ifdef RCHECK
1071 /*
1072 * Record allocated size of block and
1073 * bound space with magic numbers.
1074 */
8d063cd8 1075 p->ov_rmagic = RMAGIC;
e8bc2b5c 1076 if (bucket <= MAX_SHORT_BUCKET) {
1077 int i;
1078
1079 nbytes = size + M_OVERHEAD;
1080 p->ov_size = nbytes - 1;
1081 if ((i = nbytes & 3)) {
1082 i = 4 - i;
1083 while (i--)
1084 *((char *)((caddr_t)p + nbytes - RSLOP + i)) = RMAGIC_C;
1085 }
1086 nbytes = (nbytes + 3) &~ 3;
1087 *((u_int *)((caddr_t)p + nbytes - RSLOP)) = RMAGIC;
1088 }
8d063cd8 1089#endif
cf5c4ad8 1090 return ((Malloc_t)(p + CHUNK_SHIFT));
8d063cd8 1091}
1092
e8bc2b5c 1093static char *last_sbrk_top;
1094static char *last_op; /* This arena can be easily extended. */
1095static int sbrked_remains;
1096static int sbrk_good = SBRK_ALLOW_FAILURES * SBRK_FAILURE_PRICE;
1097
1098#ifdef DEBUGGING_MSTATS
1099static int sbrks;
1100#endif
1101
1102struct chunk_chain_s {
1103 struct chunk_chain_s *next;
1104 MEM_SIZE size;
1105};
1106static struct chunk_chain_s *chunk_chain;
1107static int n_chunks;
1108static char max_bucket;
1109
 1110/* Cut off a piece of one of the chunks in the chain. Prefer the smallest fitting chunk. */
cea2e8a9 1111static void *
1112get_from_chain(MEM_SIZE size)
e8bc2b5c 1113{
1114 struct chunk_chain_s *elt = chunk_chain, **oldp = &chunk_chain;
1115 struct chunk_chain_s **oldgoodp = NULL;
1116 long min_remain = LONG_MAX;
1117
1118 while (elt) {
1119 if (elt->size >= size) {
1120 long remains = elt->size - size;
1121 if (remains >= 0 && remains < min_remain) {
1122 oldgoodp = oldp;
1123 min_remain = remains;
1124 }
1125 if (remains == 0) {
1126 break;
1127 }
1128 }
1129 oldp = &( elt->next );
1130 elt = elt->next;
1131 }
1132 if (!oldgoodp) return NULL;
1133 if (min_remain) {
1134 void *ret = *oldgoodp;
1135 struct chunk_chain_s *next = (*oldgoodp)->next;
1136
1137 *oldgoodp = (struct chunk_chain_s *)((char*)ret + size);
1138 (*oldgoodp)->size = min_remain;
1139 (*oldgoodp)->next = next;
1140 return ret;
1141 } else {
1142 void *ret = *oldgoodp;
1143 *oldgoodp = (*oldgoodp)->next;
1144 n_chunks--;
1145 return ret;
1146 }
1147}
1148
cea2e8a9 1149static void
1150add_to_chain(void *p, MEM_SIZE size, MEM_SIZE chip)
e8bc2b5c 1151{
1152 struct chunk_chain_s *next = chunk_chain;
1153 char *cp = (char*)p;
1154
1155 cp += chip;
1156 chunk_chain = (struct chunk_chain_s *)cp;
1157 chunk_chain->size = size - chip;
1158 chunk_chain->next = next;
1159 n_chunks++;
1160}
1161
cea2e8a9 1162static void *
1163get_from_bigger_buckets(int bucket, MEM_SIZE size)
e8bc2b5c 1164{
1165 int price = 1;
1166 static int bucketprice[NBUCKETS];
1167 while (bucket <= max_bucket) {
1168 /* We postpone stealing from bigger buckets until we want it
1169 often enough. */
1170 if (nextf[bucket] && bucketprice[bucket]++ >= price) {
1171 /* Steal it! */
1172 void *ret = (void*)(nextf[bucket] - 1 + CHUNK_SHIFT);
1173 bucketprice[bucket] = 0;
1174 if (((char*)nextf[bucket]) - M_OVERHEAD == last_op) {
1175 last_op = NULL; /* Disable optimization */
1176 }
1177 nextf[bucket] = nextf[bucket]->ov_next;
1178#ifdef DEBUGGING_MSTATS
1179 nmalloc[bucket]--;
1180 start_slack -= M_OVERHEAD;
1181#endif
1182 add_to_chain(ret, (BUCKET_SIZE(bucket) +
1183 POW2_OPTIMIZE_SURPLUS(bucket)),
1184 size);
1185 return ret;
1186 }
1187 bucket++;
1188 }
1189 return NULL;
1190}
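/* Design note (cf. the "Flaws" comment near the top): `price' above is meant
 * to grow so that stealing from a bigger bucket happens only after repeated
 * demand, but it is never incremented, so the second time a non-empty bigger
 * bucket is probed it is already raided -- hence "quite aggressive". */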
1191
cea2e8a9 1192static union overhead *
1193getpages(int needed, int *nblksp, int bucket)
fa423c5b 1194{
1195 /* Need to do (possibly expensive) system call. Try to
1196 optimize it for rare calling. */
1197 MEM_SIZE require = needed - sbrked_remains;
1198 char *cp;
1199 union overhead *ovp;
1200 int slack = 0;
1201
1202 if (sbrk_good > 0) {
1203 if (!last_sbrk_top && require < FIRST_SBRK)
1204 require = FIRST_SBRK;
1205 else if (require < MIN_SBRK) require = MIN_SBRK;
1206
1207 if (require < goodsbrk * MIN_SBRK_FRAC / 100)
1208 require = goodsbrk * MIN_SBRK_FRAC / 100;
1209 require = ((require - 1 + MIN_SBRK) / MIN_SBRK) * MIN_SBRK;
1210 } else {
1211 require = needed;
1212 last_sbrk_top = 0;
1213 sbrked_remains = 0;
1214 }
1215
1216 DEBUG_m(PerlIO_printf(Perl_debug_log,
1217 "sbrk(%ld) for %ld-byte-long arena\n",
1218 (long)require, (long) needed));
1219 cp = (char *)sbrk(require);
1220#ifdef DEBUGGING_MSTATS
1221 sbrks++;
1222#endif
1223 if (cp == last_sbrk_top) {
1224 /* Common case, anything is fine. */
1225 sbrk_good++;
1226 ovp = (union overhead *) (cp - sbrked_remains);
e9397286 1227 last_op = cp - sbrked_remains;
fa423c5b 1228 sbrked_remains = require - (needed - sbrked_remains);
1229 } else if (cp == (char *)-1) { /* no more room! */
1230 ovp = (union overhead *)emergency_sbrk(needed);
1231 if (ovp == (union overhead *)-1)
1232 return 0;
e9397286 1233 if (((char*)ovp) > last_op) { /* Cannot happen with current emergency_sbrk() */
1234 last_op = 0;
1235 }
fa423c5b 1236 return ovp;
1237 } else { /* Non-continuous or first sbrk(). */
1238 long add = sbrked_remains;
1239 char *newcp;
1240
1241 if (sbrked_remains) { /* Put rest into chain, we
1242 cannot use it right now. */
1243 add_to_chain((void*)(last_sbrk_top - sbrked_remains),
1244 sbrked_remains, 0);
1245 }
1246
1247 /* Second, check alignment. */
1248 slack = 0;
1249
61ae2fbf 1250#if !defined(atarist) && !defined(__MINT__) /* on the atari we don't have to worry about this */
fa423c5b 1251# ifndef I286 /* The sbrk(0) call on the I286 always returns the next segment */
5bbd1ef5 1252 /* WANTED_ALIGNMENT may be more than NEEDED_ALIGNMENT, but this may
1253 improve performance of memory access. */
56431972 1254 if (PTR2UV(cp) & (WANTED_ALIGNMENT - 1)) { /* Not aligned. */
1255 slack = WANTED_ALIGNMENT - (PTR2UV(cp) & (WANTED_ALIGNMENT - 1));
fa423c5b 1256 add += slack;
1257 }
1258# endif
61ae2fbf 1259#endif /* !atarist && !MINT */
fa423c5b 1260
1261 if (add) {
1262 DEBUG_m(PerlIO_printf(Perl_debug_log,
1263 "sbrk(%ld) to fix non-continuous/off-page sbrk:\n\t%ld for alignement,\t%ld were assumed to come from the tail of the previous sbrk\n",
1264 (long)add, (long) slack,
1265 (long) sbrked_remains));
1266 newcp = (char *)sbrk(add);
1267#if defined(DEBUGGING_MSTATS)
1268 sbrks++;
1269 sbrk_slack += add;
1270#endif
1271 if (newcp != cp + require) {
1272 /* Too bad: even rounding sbrk() is not continuous.*/
1273 DEBUG_m(PerlIO_printf(Perl_debug_log,
1274 "failed to fix bad sbrk()\n"));
1275#ifdef PACK_MALLOC
1276 if (slack) {
741df71a 1277 MALLOC_UNLOCK;
5bbd1ef5 1278 fatalcroak("panic: Off-page sbrk\n");
fa423c5b 1279 }
1280#endif
1281 if (sbrked_remains) {
1282 /* Try again. */
1283#if defined(DEBUGGING_MSTATS)
1284 sbrk_slack += require;
1285#endif
1286 require = needed;
1287 DEBUG_m(PerlIO_printf(Perl_debug_log,
1288 "straight sbrk(%ld)\n",
1289 (long)require));
1290 cp = (char *)sbrk(require);
1291#ifdef DEBUGGING_MSTATS
1292 sbrks++;
1293#endif
1294 if (cp == (char *)-1)
1295 return 0;
1296 }
1297 sbrk_good = -1; /* Disable optimization!
1298 Continue with not-aligned... */
1299 } else {
1300 cp += slack;
1301 require += sbrked_remains;
1302 }
1303 }
1304
1305 if (last_sbrk_top) {
1306 sbrk_good -= SBRK_FAILURE_PRICE;
1307 }
1308
1309 ovp = (union overhead *) cp;
1310 /*
1311 * Round up to minimum allocation size boundary
1312 * and deduct from block count to reflect.
1313 */
1314
5bbd1ef5 1315# if NEEDED_ALIGNMENT > MEM_ALIGNBYTES
56431972 1316 if (PTR2UV(ovp) & (NEEDED_ALIGNMENT - 1))
5bbd1ef5 1317 fatalcroak("Misalignment of sbrk()\n");
1318 else
1319# endif
fa423c5b 1320#ifndef I286 /* Again, this should always be ok on an 80286 */
56431972 1321 if (PTR2UV(ovp) & (MEM_ALIGNBYTES - 1)) {
fa423c5b 1322 DEBUG_m(PerlIO_printf(Perl_debug_log,
1323 "fixing sbrk(): %d bytes off machine alignement\n",
56431972 1324 (int)(PTR2UV(ovp) & (MEM_ALIGNBYTES - 1))));
 1325 ovp = INT2PTR(union overhead *,(PTR2UV(ovp) + MEM_ALIGNBYTES - 1) &
5bbd1ef5 1326 ~(MEM_ALIGNBYTES - 1));
fa423c5b 1327 (*nblksp)--;
1328# if defined(DEBUGGING_MSTATS)
1329 /* This is only approx. if TWO_POT_OPTIMIZE: */
5bbd1ef5 1330 sbrk_slack += (1 << (bucket >> BUCKET_POW2_SHIFT));
fa423c5b 1331# endif
1332 }
1333#endif
5bbd1ef5 1334 ; /* Finish `else' */
fa423c5b 1335 sbrked_remains = require - needed;
e9397286 1336 last_op = cp;
fa423c5b 1337 }
1338 last_sbrk_top = cp + require;
fa423c5b 1339#ifdef DEBUGGING_MSTATS
1340 goodsbrk += require;
1341#endif
1342 return ovp;
1343}
1344
cea2e8a9 1345static int
1346getpages_adjacent(int require)
fa423c5b 1347{
1348 if (require <= sbrked_remains) {
1349 sbrked_remains -= require;
1350 } else {
1351 char *cp;
1352
1353 require -= sbrked_remains;
1354 /* We do not try to optimize sbrks here, we go for place. */
1355 cp = (char*) sbrk(require);
1356#ifdef DEBUGGING_MSTATS
1357 sbrks++;
1358 goodsbrk += require;
1359#endif
1360 if (cp == last_sbrk_top) {
1361 sbrked_remains = 0;
1362 last_sbrk_top = cp + require;
1363 } else {
28ac10b1 1364 if (cp == (char*)-1) { /* Out of memory */
1365#ifdef DEBUGGING_MSTATS
1366 goodsbrk -= require;
1367#endif
1368 return 0;
1369 }
fa423c5b 1370 /* Report the failure: */
1371 if (sbrked_remains)
1372 add_to_chain((void*)(last_sbrk_top - sbrked_remains),
1373 sbrked_remains, 0);
1374 add_to_chain((void*)cp, require, 0);
1375 sbrk_good -= SBRK_FAILURE_PRICE;
1376 sbrked_remains = 0;
1377 last_sbrk_top = 0;
1378 last_op = 0;
1379 return 0;
1380 }
1381 }
1382
1383 return 1;
1384}
1385
8d063cd8 1386/*
1387 * Allocate more memory to the indicated bucket.
1388 */
cea2e8a9 1389static void
1390morecore(register int bucket)
8d063cd8 1391{
72aaf631 1392 register union overhead *ovp;
8d063cd8 1393 register int rnu; /* 2^rnu bytes will be requested */
fa423c5b 1394 int nblks; /* become nblks blocks of the desired size */
bbce6d69 1395 register MEM_SIZE siz, needed;
8d063cd8 1396
1397 if (nextf[bucket])
1398 return;
e8bc2b5c 1399 if (bucket == sizeof(MEM_SIZE)*8*BUCKETS_PER_POW2) {
741df71a 1400 MALLOC_UNLOCK;
d720c441 1401 croak("%s", "Out of memory during ridiculously large request");
55497cff 1402 }
d720c441 1403 if (bucket > max_bucket)
e8bc2b5c 1404 max_bucket = bucket;
d720c441 1405
e8bc2b5c 1406 rnu = ( (bucket <= (LOG_OF_MIN_ARENA << BUCKET_POW2_SHIFT))
1407 ? LOG_OF_MIN_ARENA
1408 : (bucket >> BUCKET_POW2_SHIFT) );
1409 /* This may be overwritten later: */
1410 nblks = 1 << (rnu - (bucket >> BUCKET_POW2_SHIFT)); /* how many blocks to get */
1411 needed = ((MEM_SIZE)1 << rnu) + POW2_OPTIMIZE_SURPLUS(bucket);
1412 if (nextf[rnu << BUCKET_POW2_SHIFT]) { /* 2048b bucket. */
1413 ovp = nextf[rnu << BUCKET_POW2_SHIFT] - 1 + CHUNK_SHIFT;
1414 nextf[rnu << BUCKET_POW2_SHIFT]
1415 = nextf[rnu << BUCKET_POW2_SHIFT]->ov_next;
1416#ifdef DEBUGGING_MSTATS
1417 nmalloc[rnu << BUCKET_POW2_SHIFT]--;
1418 start_slack -= M_OVERHEAD;
1419#endif
1420 DEBUG_m(PerlIO_printf(Perl_debug_log,
1421 "stealing %ld bytes from %ld arena\n",
1422 (long) needed, (long) rnu << BUCKET_POW2_SHIFT));
1423 } else if (chunk_chain
1424 && (ovp = (union overhead*) get_from_chain(needed))) {
1425 DEBUG_m(PerlIO_printf(Perl_debug_log,
1426 "stealing %ld bytes from chain\n",
1427 (long) needed));
d720c441 1428 } else if ( (ovp = (union overhead*)
1429 get_from_bigger_buckets((rnu << BUCKET_POW2_SHIFT) + 1,
1430 needed)) ) {
e8bc2b5c 1431 DEBUG_m(PerlIO_printf(Perl_debug_log,
1432 "stealing %ld bytes from bigger buckets\n",
1433 (long) needed));
1434 } else if (needed <= sbrked_remains) {
1435 ovp = (union overhead *)(last_sbrk_top - sbrked_remains);
1436 sbrked_remains -= needed;
1437 last_op = (char*)ovp;
fa423c5b 1438 } else
1439 ovp = getpages(needed, &nblks, bucket);
e8bc2b5c 1440
fa423c5b 1441 if (!ovp)
1442 return;
e8bc2b5c 1443
8d063cd8 1444 /*
1445 * Add new memory allocated to that on
1446 * free list for this hash bucket.
1447 */
e8bc2b5c 1448 siz = BUCKET_SIZE(bucket);
cf5c4ad8 1449#ifdef PACK_MALLOC
72aaf631 1450 *(u_char*)ovp = bucket; /* Fill index. */
e8bc2b5c 1451 if (bucket <= MAX_PACKED) {
1452 ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket));
1453 nblks = N_BLKS(bucket);
cf5c4ad8 1454# ifdef DEBUGGING_MSTATS
e8bc2b5c 1455 start_slack += BLK_SHIFT(bucket);
cf5c4ad8 1456# endif
e8bc2b5c 1457 } else if (bucket < LOG_OF_MIN_ARENA * BUCKETS_PER_POW2) {
1458 ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket));
cf5c4ad8 1459 siz -= sizeof(union overhead);
72aaf631 1460 } else ovp++; /* One chunk per block. */
e8bc2b5c 1461#endif /* PACK_MALLOC */
72aaf631 1462 nextf[bucket] = ovp;
5f05dabc 1463#ifdef DEBUGGING_MSTATS
1464 nmalloc[bucket] += nblks;
e8bc2b5c 1465 if (bucket > MAX_PACKED) {
1466 start_slack += M_OVERHEAD * nblks;
1467 }
5f05dabc 1468#endif
8d063cd8 1469 while (--nblks > 0) {
72aaf631 1470 ovp->ov_next = (union overhead *)((caddr_t)ovp + siz);
1471 ovp = (union overhead *)((caddr_t)ovp + siz);
8d063cd8 1472 }
8595d6f1 1473 /* Not all sbrks return zeroed memory.*/
72aaf631 1474 ovp->ov_next = (union overhead *)NULL;
cf5c4ad8 1475#ifdef PACK_MALLOC
e8bc2b5c 1476 if (bucket == 7*BUCKETS_PER_POW2) { /* Special case, explanation is above. */
1477 union overhead *n_op = nextf[7*BUCKETS_PER_POW2]->ov_next;
1478 nextf[7*BUCKETS_PER_POW2] =
1479 (union overhead *)((caddr_t)nextf[7*BUCKETS_PER_POW2]
1480 - sizeof(union overhead));
1481 nextf[7*BUCKETS_PER_POW2]->ov_next = n_op;
cf5c4ad8 1482 }
 1483#endif /* PACK_MALLOC */
8d063cd8 1484}
1485
94b6baf5 1486Free_t
86058a2d 1487Perl_mfree(void *mp)
cea2e8a9 1488{
ee0007ab 1489 register MEM_SIZE size;
72aaf631 1490 register union overhead *ovp;
352d5a3a 1491 char *cp = (char*)mp;
cf5c4ad8 1492#ifdef PACK_MALLOC
1493 u_char bucket;
1494#endif
8d063cd8 1495
e8bc2b5c 1496 DEBUG_m(PerlIO_printf(Perl_debug_log,
b900a521 1497 "0x%"UVxf": (%05lu) free\n",
1498 PTR2UV(cp), (unsigned long)(PL_an++)));
45d8adaa 1499
cf5c4ad8 1500 if (cp == NULL)
1501 return;
72aaf631 1502 ovp = (union overhead *)((caddr_t)cp
e8bc2b5c 1503 - sizeof (union overhead) * CHUNK_SHIFT);
cf5c4ad8 1504#ifdef PACK_MALLOC
72aaf631 1505 bucket = OV_INDEX(ovp);
cf5c4ad8 1506#endif
e8bc2b5c 1507#ifdef IGNORE_SMALL_BAD_FREE
1508 if ((bucket >= FIRST_BUCKET_WITH_CHECK)
1509 && (OV_MAGIC(ovp, bucket) != MAGIC))
1510#else
1511 if (OV_MAGIC(ovp, bucket) != MAGIC)
1512#endif
1513 {
68dc0745 1514 static int bad_free_warn = -1;
cf5c4ad8 1515 if (bad_free_warn == -1) {
e8cd8248 1516 dTHX;
5fd9e9a4 1517 char *pbf = PerlEnv_getenv("PERL_BADFREE");
cf5c4ad8 1518 bad_free_warn = (pbf) ? atoi(pbf) : 1;
1519 }
1520 if (!bad_free_warn)
1521 return;
8990e307 1522#ifdef RCHECK
2ba999ec 1523#ifdef PERL_CORE
e8cd8248 1524 {
1525 dTHX;
1526 if (!PERL_IS_ALIVE || !PL_curcop || ckWARN_d(WARN_MALLOC))
1d860e85 1527 Perl_warner(aTHX_ WARN_MALLOC, "%s free() ignored",
e8cd8248 1528 ovp->ov_rmagic == RMAGIC - 1 ?
1529 "Duplicate" : "Bad");
2ba999ec 1530 }
e476b1b5 1531#else
2ba999ec 1532 warn("%s free() ignored",
1533 ovp->ov_rmagic == RMAGIC - 1 ? "Duplicate" : "Bad");
e476b1b5 1534#endif
1535#else
1536#ifdef PERL_CORE
2ba999ec 1537 {
1538 dTHX;
1d860e85 1539 if (!PERL_IS_ALIVE || !PL_curcop || ckWARN_d(WARN_MALLOC))
1540 Perl_warner(aTHX_ WARN_MALLOC, "%s", "Bad free() ignored");
2ba999ec 1541 }
8990e307 1542#else
2ba999ec 1543 warn("%s", "Bad free() ignored");
8990e307 1544#endif
e476b1b5 1545#endif
8d063cd8 1546 return; /* sanity */
e8bc2b5c 1547 }
8d063cd8 1548#ifdef RCHECK
3541dd58 1549 ASSERT(ovp->ov_rmagic == RMAGIC, "chunk's head overwrite");
e8bc2b5c 1550 if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) {
1551 int i;
1552 MEM_SIZE nbytes = ovp->ov_size + 1;
1553
1554 if ((i = nbytes & 3)) {
1555 i = 4 - i;
1556 while (i--) {
3541dd58 1557 ASSERT(*((char *)((caddr_t)ovp + nbytes - RSLOP + i))
d720c441 1558 == RMAGIC_C, "chunk's tail overwrite");
e8bc2b5c 1559 }
1560 }
1561 nbytes = (nbytes + 3) &~ 3;
3541dd58 1562 ASSERT(*(u_int *)((caddr_t)ovp + nbytes - RSLOP) == RMAGIC, "chunk's tail overwrite");
e8bc2b5c 1563 }
72aaf631 1564 ovp->ov_rmagic = RMAGIC - 1;
8d063cd8 1565#endif
3541dd58 1566 ASSERT(OV_INDEX(ovp) < NBUCKETS, "chunk's head overwrite");
72aaf631 1567 size = OV_INDEX(ovp);
4ad56ec9 1568
1569 MALLOC_LOCK;
72aaf631 1570 ovp->ov_next = nextf[size];
1571 nextf[size] = ovp;
741df71a 1572 MALLOC_UNLOCK;
8d063cd8 1573}
1574
4ad56ec9 1575/* There is no need to do any locking in realloc (with the exception of
1576 trying to grow in place if we are at the end of the chain).
1577 If somebody calls us from a different thread with the same address,
1578 we are sole anyway. */
8d063cd8 1579
2304df62 1580Malloc_t
86058a2d 1581Perl_realloc(void *mp, size_t nbytes)
cea2e8a9 1582{
ee0007ab 1583 register MEM_SIZE onb;
72aaf631 1584 union overhead *ovp;
d720c441 1585 char *res;
1586 int prev_bucket;
e8bc2b5c 1587 register int bucket;
4ad56ec9 1588 int incr; /* 1 if does not fit, -1 if "easily" fits in a
1589 smaller bucket, otherwise 0. */
352d5a3a 1590 char *cp = (char*)mp;
8d063cd8 1591
e8bc2b5c 1592#if defined(DEBUGGING) || !defined(PERL_CORE)
ee0007ab 1593 MEM_SIZE size = nbytes;
45d8adaa 1594
45d8adaa 1595 if ((long)nbytes < 0)
cea2e8a9 1596 croak("%s", "panic: realloc");
45d8adaa 1597#endif
e8bc2b5c 1598
1599 BARK_64K_LIMIT("Reallocation",nbytes,size);
1600 if (!cp)
86058a2d 1601 return Perl_malloc(nbytes);
45d8adaa 1602
72aaf631 1603 ovp = (union overhead *)((caddr_t)cp
e8bc2b5c 1604 - sizeof (union overhead) * CHUNK_SHIFT);
1605 bucket = OV_INDEX(ovp);
4ad56ec9 1606
e8bc2b5c 1607#ifdef IGNORE_SMALL_BAD_FREE
4ad56ec9 1608 if ((bucket >= FIRST_BUCKET_WITH_CHECK)
1609 && (OV_MAGIC(ovp, bucket) != MAGIC))
e8bc2b5c 1610#else
4ad56ec9 1611 if (OV_MAGIC(ovp, bucket) != MAGIC)
e8bc2b5c 1612#endif
4ad56ec9 1613 {
1614 static int bad_free_warn = -1;
1615 if (bad_free_warn == -1) {
e8cd8248 1616 dTHX;
4ad56ec9 1617 char *pbf = PerlEnv_getenv("PERL_BADFREE");
1618 bad_free_warn = (pbf) ? atoi(pbf) : 1;
1619 }
1620 if (!bad_free_warn)
ce70748c 1621 return Nullch;
4ad56ec9 1622#ifdef RCHECK
2ba999ec 1623#ifdef PERL_CORE
e8cd8248 1624 {
1625 dTHX;
1626 if (!PERL_IS_ALIVE || !PL_curcop || ckWARN_d(WARN_MALLOC))
1d860e85 1627 Perl_warner(aTHX_ WARN_MALLOC, "%srealloc() %signored",
e8cd8248 1628 (ovp->ov_rmagic == RMAGIC - 1 ? "" : "Bad "),
1629 ovp->ov_rmagic == RMAGIC - 1
1630 ? "of freed memory " : "");
2ba999ec 1631 }
e476b1b5 1632#else
2ba999ec 1633 warn("%srealloc() %signored",
1634 (ovp->ov_rmagic == RMAGIC - 1 ? "" : "Bad "),
1635 ovp->ov_rmagic == RMAGIC - 1 ? "of freed memory " : "");
e476b1b5 1636#endif
1637#else
1638#ifdef PERL_CORE
2ba999ec 1639 {
1640 dTHX;
1d860e85 1641 if (!PERL_IS_ALIVE || !PL_curcop || ckWARN_d(WARN_MALLOC))
1642 Perl_warner(aTHX_ WARN_MALLOC, "%s",
1643 "Bad realloc() ignored");
2ba999ec 1644 }
4ad56ec9 1645#else
2ba999ec 1646 warn("%s", "Bad realloc() ignored");
4ad56ec9 1647#endif
e476b1b5 1648#endif
ce70748c 1649 return Nullch; /* sanity */
4ad56ec9 1650 }
1651
e8bc2b5c 1652 onb = BUCKET_SIZE_REAL(bucket);
55497cff 1653	/*
 1654	 * Avoid the copy if the same size block will do.
e8bc2b5c 1655	 * We are not aggressive with boundary cases.  Note that this might
 1656	 * (for a small number of cases) give a false negative if both the
55497cff 1657	 * new size and the old one are in the bucket for
e8bc2b5c 1658	 * FIRST_BIG_POW2, but the new one is near the lower end.
 1659	 *
 1660	 * So far we do not try to move to a bucket 1.5 times smaller.
55497cff 1661	 */
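	/*
	 * A rough worked example, with made-up bucket sizes: suppose the
	 * chunk's bucket holds 2040 usable bytes and the next smaller
	 * bucket holds 1016.  Then
	 *
	 *	nbytes == 3000  ->  incr ==  1  (does not fit, copy needed)
	 *	nbytes == 1500  ->  incr ==  0  (same bucket still best)
	 *	nbytes ==  800  ->  incr == -1  (fits "easily" one bucket down)
	 */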
e8bc2b5c 1662 if (nbytes > onb) incr = 1;
1663 else {
1664#ifdef DO_NOT_TRY_HARDER_WHEN_SHRINKING
1665 if ( /* This is a little bit pessimal if PACK_MALLOC: */
1666 nbytes > ( (onb >> 1) - M_OVERHEAD )
1667# ifdef TWO_POT_OPTIMIZE
1668 || (bucket == FIRST_BIG_POW2 && nbytes >= LAST_SMALL_BOUND )
1669# endif
1670 )
1671#else /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */
1672 prev_bucket = ( (bucket > MAX_PACKED + 1)
1673 ? bucket - BUCKETS_PER_POW2
1674 : bucket - 1);
1675 if (nbytes > BUCKET_SIZE_REAL(prev_bucket))
1676#endif /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */
1677 incr = 0;
1678 else incr = -1;
1679 }
2ce36478 1680#ifdef STRESS_REALLOC
4ad56ec9 1681 goto hard_way;
2ce36478 1682#endif
4ad56ec9 1683 if (incr == 0) {
852c2e52 1684 inplace_label:
a687059c 1685#ifdef RCHECK
1686 /*
1687 * Record new allocated size of block and
1688 * bound space with magic numbers.
1689 */
72aaf631 1690 if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) {
e8bc2b5c 1691 int i, nb = ovp->ov_size + 1;
1692
1693 if ((i = nb & 3)) {
1694 i = 4 - i;
1695 while (i--) {
3541dd58 1696 ASSERT(*((char *)((caddr_t)ovp + nb - RSLOP + i)) == RMAGIC_C, "chunk's tail overwrite");
e8bc2b5c 1697 }
1698 }
1699 nb = (nb + 3) &~ 3;
3541dd58 1700 ASSERT(*(u_int *)((caddr_t)ovp + nb - RSLOP) == RMAGIC, "chunk's tail overwrite");
a687059c 1701	    /*
 1702	     * Convert the amount of memory requested into the closest
 1703	     * block size stored in the hash buckets which satisfies the
 1704	     * request, accounting for the bookkeeping space used per block.
 1705	     */
cf5c4ad8 1707 nbytes += M_OVERHEAD;
72aaf631 1708 ovp->ov_size = nbytes - 1;
e8bc2b5c 1709 if ((i = nbytes & 3)) {
1710 i = 4 - i;
1711 while (i--)
1712 *((char *)((caddr_t)ovp + nbytes - RSLOP + i))
1713 = RMAGIC_C;
1714 }
1715 nbytes = (nbytes + 3) &~ 3;
72aaf631 1716 *((u_int *)((caddr_t)ovp + nbytes - RSLOP)) = RMAGIC;
a687059c 1717 }
1718#endif
45d8adaa 1719 res = cp;
42ac124e 1720 DEBUG_m(PerlIO_printf(Perl_debug_log,
b900a521 1721 "0x%"UVxf": (%05lu) realloc %ld bytes inplace\n",
1722 PTR2UV(res),(unsigned long)(PL_an++),
42ac124e 1723 (long)size));
e8bc2b5c 1724 } else if (incr == 1 && (cp - M_OVERHEAD == last_op)
1725 && (onb > (1 << LOG_OF_MIN_ARENA))) {
1726 MEM_SIZE require, newarena = nbytes, pow;
1727 int shiftr;
1728
1729 POW2_OPTIMIZE_ADJUST(newarena);
1730 newarena = newarena + M_OVERHEAD;
1731 /* newarena = (newarena + 3) &~ 3; */
1732 shiftr = (newarena - 1) >> LOG_OF_MIN_ARENA;
1733 pow = LOG_OF_MIN_ARENA + 1;
1734 /* apart from this loop, this is O(1) */
1735 while (shiftr >>= 1)
1736 pow++;
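	    /* For example, suppose newarena has come out as 5000 after the
	     * adjustments above, and LOG_OF_MIN_ARENA is 11: shiftr starts
	     * as 4999 >> 11 == 2, the loop bumps pow from 12 to 13, and the
	     * next line rounds newarena up to 1 << 13 == 8192 (plus any
	     * POW2_OPTIMIZE surplus) -- a power of two big enough to hold
	     * the request. */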
1737 newarena = (1 << pow) + POW2_OPTIMIZE_SURPLUS(pow * BUCKETS_PER_POW2);
1738 require = newarena - onb - M_OVERHEAD;
1739
4ad56ec9 1740 MALLOC_LOCK;
1741 if (cp - M_OVERHEAD == last_op /* We *still* are the last chunk */
1742 && getpages_adjacent(require)) {
e8bc2b5c 1743#ifdef DEBUGGING_MSTATS
fa423c5b 1744 nmalloc[bucket]--;
1745 nmalloc[pow * BUCKETS_PER_POW2]++;
e8bc2b5c 1746#endif
fa423c5b 1747 *(cp - M_OVERHEAD) = pow * BUCKETS_PER_POW2; /* Fill index. */
4ad56ec9 1748 MALLOC_UNLOCK;
fa423c5b 1749 goto inplace_label;
4ad56ec9 1750 } else {
1751 MALLOC_UNLOCK;
fa423c5b 1752 goto hard_way;
4ad56ec9 1753 }
e8bc2b5c 1754 } else {
1755 hard_way:
42ac124e 1756 DEBUG_m(PerlIO_printf(Perl_debug_log,
b900a521 1757 "0x%"UVxf": (%05lu) realloc %ld bytes the hard way\n",
1758 PTR2UV(cp),(unsigned long)(PL_an++),
42ac124e 1759 (long)size));
86058a2d 1760 if ((res = (char*)Perl_malloc(nbytes)) == NULL)
e8bc2b5c 1761 return (NULL);
1762 if (cp != res) /* common optimization */
1763 Copy(cp, res, (MEM_SIZE)(nbytes<onb?nbytes:onb), char);
4ad56ec9 1764 Perl_mfree(cp);
45d8adaa 1765 }
2304df62 1766 return ((Malloc_t)res);
8d063cd8 1767}
1768
 1769/*
 1770 * Search ``srchlen'' elements of each free list for a block whose
 1771 * header starts at ``freep''.  If srchlen is -1, search each list in
 1772 * full.  Return the bucket number, or -1 if not found.
 1773 */
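/*
 * A sketch of a possible (purely illustrative) use from within this file:
 * to check whether a chunk is already sitting on some free list -- i.e. a
 * suspected double free -- one could do
 *
 *	union overhead *ovp = (union overhead *)((caddr_t)cp
 *			- sizeof (union overhead) * CHUNK_SHIFT);
 *	if (findbucket(ovp, -1) != -1)
 *		... cp was already freed ...
 *
 * where cp is the pointer that was handed out by Perl_malloc().  A full
 * scan (srchlen == -1) walks every free chunk, so this is debugging-only.
 */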
ee0007ab 1774static int
8ac85365 1775findbucket(union overhead *freep, int srchlen)
8d063cd8 1776{
1777 register union overhead *p;
1778 register int i, j;
1779
1780 for (i = 0; i < NBUCKETS; i++) {
1781 j = 0;
1782 for (p = nextf[i]; p && j != srchlen; p = p->ov_next) {
1783 if (p == freep)
1784 return (i);
1785 j++;
1786 }
1787 }
1788 return (-1);
1789}
1790
cf5c4ad8 1791Malloc_t
86058a2d 1792Perl_calloc(register size_t elements, register size_t size)
cf5c4ad8 1793{
1794 long sz = elements * size;
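    /* Note that the multiplication above is not checked for overflow;
     * the caller must ensure elements * size fits in a long. */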
86058a2d 1795 Malloc_t p = Perl_malloc(sz);
cf5c4ad8 1796
1797 if (p) {
1798 memset((void*)p, 0, sz);
1799 }
1800 return p;
1801}
1802
4ad56ec9 1803char *
1804Perl_strdup(const char *s)
1805{
1806 MEM_SIZE l = strlen(s);
b48f1ba5 1807 char *s1 = (char *)Perl_malloc(l+1);
4ad56ec9 1808
b48f1ba5 1809 Copy(s, s1, (MEM_SIZE)(l+1), char);
4ad56ec9 1810 return s1;
1811}
1812
1813#ifdef PERL_CORE
1814int
1815Perl_putenv(char *a)
1816{
 1817    /* Sometimes the system's putenv() conflicts with my_setenv() - the
 1818       clash is the system's malloc() against Perl's free(). */
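    /* For example, a call such as
     *
     *		Perl_putenv("PERL_BADFREE=0");
     *
     * splits the string at the first '=' and hands the two halves to
     * my_setenv(), so the environment is changed via my_setenv() rather
     * than the system's putenv(). */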
1819 dTHX;
1820 char *var;
1821 char *val = a;
1822 MEM_SIZE l;
1823 char buf[80];
1824
1825 while (*val && *val != '=')
1826 val++;
1827 if (!*val)
1828 return -1;
1829 l = val - a;
1830 if (l < sizeof(buf))
1831 var = buf;
1832 else
1833 var = Perl_malloc(l + 1);
1834 Copy(a, var, l, char);
b48f1ba5 1835    var[l] = 0;
1836 my_setenv(var, val+1);
4ad56ec9 1837 if (var != buf)
1838 Perl_mfree(var);
1839 return 0;
1840}
1841# endif
1842
e8bc2b5c 1843MEM_SIZE
cea2e8a9 1844Perl_malloced_size(void *p)
e8bc2b5c 1845{
8d6dde3e 1846 union overhead *ovp = (union overhead *)
1847 ((caddr_t)p - sizeof (union overhead) * CHUNK_SHIFT);
1848 int bucket = OV_INDEX(ovp);
1849#ifdef RCHECK
 1850    /* The caller wants complete control over the chunk, so disable
 1851       the memory checking inside the chunk. */
1852 if (bucket <= MAX_SHORT_BUCKET) {
1853 MEM_SIZE size = BUCKET_SIZE_REAL(bucket);
1854 ovp->ov_size = size + M_OVERHEAD - 1;
1855 *((u_int *)((caddr_t)ovp + size + M_OVERHEAD - RSLOP)) = RMAGIC;
1856 }
1857#endif
e8bc2b5c 1858 return BUCKET_SIZE_REAL(bucket);
1859}
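/*
 * A sketch of the intended use (the call site here is only illustrative):
 * a caller that wants to exploit the whole chunk, not just the size it
 * originally asked for, can do
 *
 *	char *p = (char*)Perl_malloc(100);
 *	MEM_SIZE usable = Perl_malloced_size(p);
 *	... p[0] through p[usable - 1] may now be used ...
 *
 * With RCHECK the bookkeeping above is adjusted first, so filling the
 * full "usable" length is not reported as an overwrite when the chunk
 * is eventually freed.
 */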
1860
e8bc2b5c 1861# ifdef BUCKETS_ROOT2
1862# define MIN_EVEN_REPORT 6
1863# else
1864# define MIN_EVEN_REPORT MIN_BUCKET
1865# endif
827e134a 1866
1867int
1868Perl_get_mstats(pTHX_ perl_mstats_t *buf, int buflen, int level)
8d063cd8 1869{
df31f264 1870#ifdef DEBUGGING_MSTATS
8d063cd8 1871 register int i, j;
1872 register union overhead *p;
4ad56ec9 1873 struct chunk_chain_s* nextchain;
8d063cd8 1874
827e134a 1875 buf->topbucket = buf->topbucket_ev = buf->topbucket_odd
1876 = buf->totfree = buf->total = buf->total_chain = 0;
1877
1878 buf->minbucket = MIN_BUCKET;
4ad56ec9 1879 MALLOC_LOCK;
e8bc2b5c 1880 for (i = MIN_BUCKET ; i < NBUCKETS; i++) {
8d063cd8 1881 for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
1882 ;
827e134a 1883 if (i < buflen) {
1884 buf->nfree[i] = j;
1885 buf->ntotal[i] = nmalloc[i];
1886 }
1887 buf->totfree += j * BUCKET_SIZE_REAL(i);
1888 buf->total += nmalloc[i] * BUCKET_SIZE_REAL(i);
e8bc2b5c 1889 if (nmalloc[i]) {
827e134a 1890 i % 2 ? (buf->topbucket_odd = i) : (buf->topbucket_ev = i);
1891 buf->topbucket = i;
e8bc2b5c 1892 }
c07a80fd 1893 }
4ad56ec9 1894 nextchain = chunk_chain;
1895 while (nextchain) {
827e134a 1896 buf->total_chain += nextchain->size;
4ad56ec9 1897 nextchain = nextchain->next;
1898 }
827e134a 1899 buf->total_sbrk = goodsbrk + sbrk_slack;
1900 buf->sbrks = sbrks;
1901 buf->sbrk_good = sbrk_good;
1902 buf->sbrk_slack = sbrk_slack;
1903 buf->start_slack = start_slack;
1904 buf->sbrked_remains = sbrked_remains;
4ad56ec9 1905 MALLOC_UNLOCK;
827e134a 1906 if (level) {
1907 for (i = MIN_BUCKET ; i < NBUCKETS; i++) {
1908 if (i >= buflen)
1909 break;
1910 buf->bucket_mem_size[i] = BUCKET_SIZE(i);
1911 buf->bucket_available_size[i] = BUCKET_SIZE_REAL(i);
1912 }
1913 }
1914#endif /* defined DEBUGGING_MSTATS */
fe52b3b7 1915 return 0; /* XXX unused */
827e134a 1916}
1917/*
1918 * mstats - print out statistics about malloc
1919 *
1920 * Prints two lines of numbers, one showing the length of the free list
1921 * for each size category, the second showing the number of mallocs -
1922 * frees for each size category.
1923 */
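/*
 * For instance (a purely hypothetical call site), a DEBUGGING_MSTATS
 * build could report its arenas at shutdown with something like
 *
 *	dTHX;
 *	Perl_dump_mstats(aTHX_ "at exit:");
 *
 * which writes the per-bucket free/used lines described above, plus
 * the sbrk() totals, to Perl_error_log.
 */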
1924void
1925Perl_dump_mstats(pTHX_ char *s)
1926{
1927#ifdef DEBUGGING_MSTATS
1928 register int i, j;
1929 register union overhead *p;
1930 perl_mstats_t buffer;
1931 unsigned long nf[NBUCKETS];
1932 unsigned long nt[NBUCKETS];
1933 struct chunk_chain_s* nextchain;
1934
1935 buffer.nfree = nf;
1936 buffer.ntotal = nt;
1937 get_mstats(&buffer, NBUCKETS, 0);
1938
c07a80fd 1939 if (s)
bf49b057 1940 PerlIO_printf(Perl_error_log,
d720c441 1941		      "Memory allocation statistics %s (buckets %ld(%ld)..%ld(%ld))\n",
e8bc2b5c 1942 s,
d720c441 1943 (long)BUCKET_SIZE_REAL(MIN_BUCKET),
1944 (long)BUCKET_SIZE(MIN_BUCKET),
827e134a 1945 (long)BUCKET_SIZE_REAL(buffer.topbucket),
1946 (long)BUCKET_SIZE(buffer.topbucket));
76cfd9aa 1947 PerlIO_printf(Perl_error_log, "%8ld free:", buffer.totfree);
827e134a 1948 for (i = MIN_EVEN_REPORT; i <= buffer.topbucket; i += BUCKETS_PER_POW2) {
bf49b057 1949 PerlIO_printf(Perl_error_log,
e8bc2b5c 1950 ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
1951 ? " %5d"
1952 : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
827e134a 1953 buffer.nfree[i]);
e8bc2b5c 1954 }
1955#ifdef BUCKETS_ROOT2
bf49b057 1956 PerlIO_printf(Perl_error_log, "\n\t ");
827e134a 1957 for (i = MIN_BUCKET + 1; i <= buffer.topbucket_odd; i += BUCKETS_PER_POW2) {
bf49b057 1958 PerlIO_printf(Perl_error_log,
e8bc2b5c 1959 ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
1960 ? " %5d"
1961 : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
827e134a 1962 buffer.nfree[i]);
8d063cd8 1963 }
e8bc2b5c 1964#endif
76cfd9aa 1965 PerlIO_printf(Perl_error_log, "\n%8ld used:", buffer.total - buffer.totfree);
827e134a 1966 for (i = MIN_EVEN_REPORT; i <= buffer.topbucket; i += BUCKETS_PER_POW2) {
bf49b057 1967 PerlIO_printf(Perl_error_log,
e8bc2b5c 1968 ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
1969 ? " %5d"
1970 : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
827e134a 1971 buffer.ntotal[i] - buffer.nfree[i]);
c07a80fd 1972 }
e8bc2b5c 1973#ifdef BUCKETS_ROOT2
bf49b057 1974 PerlIO_printf(Perl_error_log, "\n\t ");
827e134a 1975 for (i = MIN_BUCKET + 1; i <= buffer.topbucket_odd; i += BUCKETS_PER_POW2) {
bf49b057 1976 PerlIO_printf(Perl_error_log,
e8bc2b5c 1977 ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
1978 ? " %5d"
1979 : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
827e134a 1980 buffer.ntotal[i] - buffer.nfree[i]);
e8bc2b5c 1981 }
1982#endif
76cfd9aa 1983 PerlIO_printf(Perl_error_log, "\nTotal sbrk(): %ld/%ld:%ld. Odd ends: pad+heads+chain+tail: %ld+%ld+%ld+%ld.\n",
827e134a 1984 buffer.total_sbrk, buffer.sbrks, buffer.sbrk_good,
1985 buffer.sbrk_slack, buffer.start_slack,
1986 buffer.total_chain, buffer.sbrked_remains);
df31f264 1987#endif /* DEBUGGING_MSTATS */
c07a80fd 1988}
a687059c 1989#endif /* lint */
cf5c4ad8 1990
cf5c4ad8 1991#ifdef USE_PERL_SBRK
1992
e3663bad 1993# if defined(__MACHTEN_PPC__) || defined(NeXT) || defined(__NeXT__) || defined(PURIFY)
38ac2dc8 1994# define PERL_SBRK_VIA_MALLOC
1995/*
1996 * MachTen's malloc() returns a buffer aligned on a two-byte boundary.
1997 * While this is adequate, it may slow down access to longer data
1998 * types by forcing multiple memory accesses. It also causes
1999 * complaints when RCHECK is in force. So we allocate six bytes
2000 * more than we need to, and return an address rounded up to an
2001 * eight-byte boundary.
2002 *
2003 * 980701 Dominic Dunlop <domo@computer.org>
2004 */
5bbd1ef5 2005# define SYSTEM_ALLOC_ALIGNMENT 2
38ac2dc8 2006# endif
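/*
 * The arithmetic behind the comment above, assuming NEEDED_ALIGNMENT is
 * 8 on such a platform: Perl_sbrk() below grows each fresh SYSTEM_ALLOC()
 * request by 8 - 2 == 6 bytes, and if SYSTEM_ALLOC() returns, say, 0x1002
 * (only 2-byte aligned), then
 *
 *	(0x1002 + 8 - 1) & ~(8 - 1)  ==  0x1008
 *
 * skips at most those 6 spare bytes, so the aligned pointer still has the
 * full requested size available behind it.
 */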
2007
760ac839 2008# ifdef PERL_SBRK_VIA_MALLOC
cf5c4ad8 2009
 2010/* It may seem odd to use perl's malloc and let it call the system      */
 2011/* malloc; the only reason is that version 3.2 of the OS had frequent   */
 2012/* core dumps within nxzonefreenolock.  This sbrk routine put an end    */
 2013/* to the core dumps.                                                   */
2014
38ac2dc8 2015# ifndef SYSTEM_ALLOC
2016# define SYSTEM_ALLOC(a) malloc(a)
2017# endif
5bbd1ef5 2018# ifndef SYSTEM_ALLOC_ALIGNMENT
2019# define SYSTEM_ALLOC_ALIGNMENT MEM_ALIGNBYTES
2020# endif
cf5c4ad8 2021
760ac839 2022# endif /* PERL_SBRK_VIA_MALLOC */
cf5c4ad8 2023
2024static IV Perl_sbrk_oldchunk;
2025static long Perl_sbrk_oldsize;
2026
760ac839 2027# define PERLSBRK_32_K (1<<15)
2028# define PERLSBRK_64_K (1<<16)
cf5c4ad8 2029
b63effbb 2030Malloc_t
df0003d4 2031Perl_sbrk(int size)
cf5c4ad8 2032{
2033 IV got;
2034 int small, reqsize;
2035
2036 if (!size) return 0;
cf5c4ad8 2038    reqsize = size;	/* used by DEBUG_m and when carving up a small chunk below */
57569e04 2040#ifdef PACK_MALLOC
2041 size = (size + 0x7ff) & ~0x7ff;
2042#endif
cf5c4ad8 2043 if (size <= Perl_sbrk_oldsize) {
2044 got = Perl_sbrk_oldchunk;
2045 Perl_sbrk_oldchunk += size;
2046 Perl_sbrk_oldsize -= size;
2047 } else {
2048 if (size >= PERLSBRK_32_K) {
2049 small = 0;
2050 } else {
cf5c4ad8 2051 size = PERLSBRK_64_K;
2052 small = 1;
2053 }
5bbd1ef5 2054# if NEEDED_ALIGNMENT > SYSTEM_ALLOC_ALIGNMENT
2055 size += NEEDED_ALIGNMENT - SYSTEM_ALLOC_ALIGNMENT;
2056# endif
cf5c4ad8 2057 got = (IV)SYSTEM_ALLOC(size);
5bbd1ef5 2058# if NEEDED_ALIGNMENT > SYSTEM_ALLOC_ALIGNMENT
5a7d6335 2059 got = (got + NEEDED_ALIGNMENT - 1) & ~(NEEDED_ALIGNMENT - 1);
5bbd1ef5 2060# endif
cf5c4ad8 2061 if (small) {
2062 /* Chunk is small, register the rest for future allocs. */
2063 Perl_sbrk_oldchunk = got + reqsize;
2064 Perl_sbrk_oldsize = size - reqsize;
2065 }
2066 }
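	/* To illustrate the bookkeeping (the sizes assume PACK_MALLOC's 2K
	 * rounding and ignore any extra alignment padding): a first
	 * Perl_sbrk(2048) is "small", so 64K is fetched from SYSTEM_ALLOC();
	 * the caller uses the first 2048 bytes and the remaining 63488 are
	 * remembered in Perl_sbrk_oldchunk/Perl_sbrk_oldsize.  A later
	 * Perl_sbrk(4096) is then carved out of that remainder with no
	 * further SYSTEM_ALLOC() call. */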
2067
b900a521 2068 DEBUG_m(PerlIO_printf(Perl_debug_log, "sbrk malloc size %ld (reqsize %ld), left size %ld, give addr 0x%"UVxf"\n",
2069 size, reqsize, Perl_sbrk_oldsize, PTR2UV(got)));
cf5c4ad8 2070
2071 return (void *)got;
2072}
2073
 2074#endif /* USE_PERL_SBRK */