a0d0e21e 1/* malloc.c
8d063cd8 2 *
8d063cd8 3 */
4
87c6202a 5/*
741df71a 6 Here are some notes on configuring Perl's malloc. (For non-perl
7 usage see below.)
87c6202a 8
9 There are two macros which serve as bulk disablers of advanced
10 features of this malloc: NO_FANCY_MALLOC, PLAIN_MALLOC (undef by
11 default). Look in the list of default values below to understand
12 their exact effect. Defining NO_FANCY_MALLOC returns malloc.c to the
13 state of the malloc in Perl 5.004. Additionally defining PLAIN_MALLOC
14 returns it to the state as of Perl 5.000.
15
16 Note that some of the settings below may be ignored in the code based
17 on values of other macros. The PERL_CORE symbol is only defined when
18 perl itself is being compiled (so malloc can make some assumptions
19 about perl's facilities being available to it).
20
21 Each config option has a short description, followed by its name,
22 default value, and a comment about the default (if applicable). Some
23 options take a precise value, while the others are just boolean.
24 The boolean ones are listed first.
25
26 # Enable code for an emergency memory pool in $^M. See perlvar.pod
27 # for a description of $^M.
28 PERL_EMERGENCY_SBRK (!PLAIN_MALLOC && PERL_CORE)
29
30 # Enable code for printing memory statistics.
31 DEBUGGING_MSTATS (!PLAIN_MALLOC && PERL_CORE)
32
33 # Move allocation info for small buckets into separate areas.
 34 # Memory optimization (especially for small allocations of
 35 # less than 64 bytes). Since perl usually makes a large number
36 # of small allocations, this is usually a win.
37 PACK_MALLOC (!PLAIN_MALLOC && !RCHECK)
38
39 # Add one page to big powers of two when calculating bucket size.
40 # This is targeted at big allocations, as are common in image
41 # processing.
42 TWO_POT_OPTIMIZE !PLAIN_MALLOC
43
44 # Use intermediate bucket sizes between powers-of-two. This is
45 # generally a memory optimization, and a (small) speed pessimization.
46 BUCKETS_ROOT2 !NO_FANCY_MALLOC
47
48 # Do not check small deallocations for bad free(). Memory
49 # and speed optimization, error reporting pessimization.
50 IGNORE_SMALL_BAD_FREE (!NO_FANCY_MALLOC && !RCHECK)
51
52 # Use table lookup to decide in which bucket a given allocation will go.
53 SMALL_BUCKET_VIA_TABLE !NO_FANCY_MALLOC
54
38ac2dc8 55 # Use a perl-defined sbrk() instead of the (presumably broken or
56 # missing) system-supplied sbrk().
57 USE_PERL_SBRK undef
58
59 # Use system malloc() (or calloc() etc.) to emulate sbrk(). Normally
60 # only used with broken sbrk()s.
87c6202a 61 PERL_SBRK_VIA_MALLOC undef
62
38ac2dc8 63 # Which allocator to use if PERL_SBRK_VIA_MALLOC
64 SYSTEM_ALLOC(a) malloc(a)
65
9ee81ef6 66 # Minimal alignment (in bytes, should be a power of 2) of SYSTEM_ALLOC
5bbd1ef5 67 SYSTEM_ALLOC_ALIGNMENT MEM_ALIGNBYTES
68
87c6202a 69 # Disable memory overwrite checking with DEBUGGING. Memory and speed
70 # optimization, error reporting pessimization.
71 NO_RCHECK undef
72
73 # Enable memory overwrite checking with DEBUGGING. Memory and speed
74 # pessimization, error reporting optimization
75 RCHECK (DEBUGGING && !NO_RCHECK)
76
77 # Failed allocations bigger than this size croak (if
78 # PERL_EMERGENCY_SBRK is enabled) without touching $^M. See
79 # perlvar.pod for a description of $^M.
80 BIG_SIZE (1<<16) # 64K
81
82 # Starting from this power of two, add an extra page to the
83 # size of the bucket. This enables optimized allocations of sizes
84 # close to powers of 2. Note that the value is indexed at 0.
85 FIRST_BIG_POW2 15 # 32K, 16K is used too often
86
87 # Estimate of minimal memory footprint. malloc uses this value to
 88 # request the largest reasonable blocks of memory from the system.
89 FIRST_SBRK (48*1024)
90
91 # Round up sbrk()s to multiples of this.
92 MIN_SBRK 2048
93
94 # Round up sbrk()s to multiples of this percent of footprint.
95 MIN_SBRK_FRAC 3
96
97 # Add this much memory to big powers of two to get the bucket size.
98 PERL_PAGESIZE 4096
99
100 # This many sbrk() discontinuities should be tolerated even
101 # from the start without deciding that sbrk() is usually
102 # discontinuous.
103 SBRK_ALLOW_FAILURES 3
104
105 # This many continuous sbrk()s compensate for one discontinuous one.
106 SBRK_FAILURE_PRICE 50
107
28ac10b1 108 # Some configurations may ask for 12-byte-or-so allocations which
 109 # require 8-byte alignment (?!). In such a situation one needs to
 110 # define this to disable the 12-byte bucket (will increase memory footprint)
111 STRICT_ALIGNMENT undef
112
87c6202a 113 This implementation assumes that calling PerlIO_printf() does not
114 result in any memory allocation calls (used during a panic).
115
116 */
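/*
 * A quick build sketch (hedged; not a documented invocation, just an
 * illustration that the switches above are ordinary cpp symbols): one
 * might revert to the 5.004-era behaviour for an experiment with
 *
 *	cc -DNO_FANCY_MALLOC -c malloc.c
 *
 * or add such -D flags to the compiler flags of a normal perl build.
 */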
117
741df71a 118/*
 119 If used outside of the Perl environment, it may be useful to redefine
120 the following macros (listed below with defaults):
121
122 # Type of address returned by allocation functions
123 Malloc_t void *
124
125 # Type of size argument for allocation functions
126 MEM_SIZE unsigned long
127
128 # Maximal value in LONG
129 LONG_MAX 0x7FFFFFFF
130
131 # Unsigned integer type big enough to keep a pointer
132 UV unsigned long
133
134 # Type of pointer with 1-byte granularity
135 caddr_t char *
136
137 # Type returned by free()
138 Free_t void
139
5bbd1ef5 140 # Very fatal condition reporting function (cannot call any of the functions below)
141 fatalcroak(arg) write(2,arg,strlen(arg)) + exit(2)
142
741df71a 143 # Fatal error reporting function
144 croak(format, arg) warn(idem) + exit(1)
145
146 # Error reporting function
147 warn(format, arg) fprintf(stderr, idem)
148
149 # Locking/unlocking for MT operation
cea2e8a9 150 MALLOC_LOCK MUTEX_LOCK_NOCONTEXT(&PL_malloc_mutex)
151 MALLOC_UNLOCK MUTEX_UNLOCK_NOCONTEXT(&PL_malloc_mutex)
741df71a 152
153 # Locking/unlocking mutex for MT operation
154 MUTEX_LOCK(l) void
155 MUTEX_UNLOCK(l) void
156 */
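/*
 * Minimal sketch of a standalone (non-perl) configuration, assuming the
 * defaults listed above are acceptable except for error reporting and
 * locking; the definitions below are illustrative only, not shipped
 * anywhere:
 *
 *	#define Malloc_t	void *
 *	#define MEM_SIZE	unsigned long
 *	#define warn(fmt, arg)	fprintf(stderr, (fmt), (arg))
 *	#define croak(fmt, arg)	(warn((fmt), (arg)), exit(1))
 *	#define MALLOC_LOCK
 *	#define MALLOC_UNLOCK
 */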
157
e8bc2b5c 158#ifndef NO_FANCY_MALLOC
159# ifndef SMALL_BUCKET_VIA_TABLE
160# define SMALL_BUCKET_VIA_TABLE
161# endif
162# ifndef BUCKETS_ROOT2
163# define BUCKETS_ROOT2
164# endif
165# ifndef IGNORE_SMALL_BAD_FREE
166# define IGNORE_SMALL_BAD_FREE
167# endif
3562ef9b 168#endif
169
e8bc2b5c 170#ifndef PLAIN_MALLOC /* Bulk enable features */
171# ifndef PACK_MALLOC
172# define PACK_MALLOC
173# endif
174# ifndef TWO_POT_OPTIMIZE
175# define TWO_POT_OPTIMIZE
176# endif
d720c441 177# if defined(PERL_CORE) && !defined(PERL_EMERGENCY_SBRK)
178# define PERL_EMERGENCY_SBRK
e8bc2b5c 179# endif
180# if defined(PERL_CORE) && !defined(DEBUGGING_MSTATS)
181# define DEBUGGING_MSTATS
182# endif
183#endif
184
185#define MIN_BUC_POW2 (sizeof(void*) > 4 ? 3 : 2) /* Allow for 4-byte arena. */
186#define MIN_BUCKET (MIN_BUC_POW2 * BUCKETS_PER_POW2)
187
61ae2fbf 188#if !(defined(I286) || defined(atarist) || defined(__MINT__))
e8bc2b5c 189 /* take 2k unless the block is bigger than that */
190# define LOG_OF_MIN_ARENA 11
191#else
192 /* take 16k unless the block is bigger than that
193 (80286s like large segments!), probably good on the atari too */
194# define LOG_OF_MIN_ARENA 14
195#endif
196
8d063cd8 197#ifndef lint
1944739a 198# if defined(DEBUGGING) && !defined(NO_RCHECK)
199# define RCHECK
200# endif
e8bc2b5c 201# if defined(RCHECK) && defined(IGNORE_SMALL_BAD_FREE)
202# undef IGNORE_SMALL_BAD_FREE
203# endif
8d063cd8 204/*
205 * malloc.c (Caltech) 2/21/82
206 * Chris Kingsley, kingsley@cit-20.
207 *
208 * This is a very fast storage allocator. It allocates blocks of a small
209 * number of different sizes, and keeps free lists of each size. Blocks that
210 * don't exactly fit are passed up to the next larger size. In this
211 * implementation, the available sizes are 2^n-4 (or 2^n-12) bytes long.
cf5c4ad8 212 * If PACK_MALLOC is defined, small blocks are 2^n bytes long.
8d063cd8 213 * This is designed for use in a program that uses vast quantities of memory,
741df71a 214 * but bombs when it runs out.
215 *
4eb8286e 216 * Modifications Copyright Ilya Zakharevich 1996-99.
741df71a 217 *
218 * Still very quick, but much more thrifty. (Std config is 10% slower
219 * than it was, and takes 67% of old heap size for typical usage.)
220 *
221 * Allocations of small blocks are now table-driven to many different
222 * buckets. Sizes of really big buckets are increased to accomodata
223 * common size=power-of-2 blocks. Running-out-of-memory is made into
224 * an exception. Deeply configurable and thread-safe.
225 *
8d063cd8 226 */
227
d720c441 228#ifdef PERL_CORE
229# include "EXTERN.h"
864dbfa3 230#define PERL_IN_MALLOC_C
d720c441 231# include "perl.h"
cea2e8a9 232# if defined(PERL_IMPLICIT_CONTEXT)
233# define croak Perl_croak_nocontext
234# define warn Perl_warn_nocontext
235# endif
d720c441 236#else
237# ifdef PERL_FOR_X2P
238# include "../EXTERN.h"
239# include "../perl.h"
240# else
241# include <stdlib.h>
242# include <stdio.h>
243# include <memory.h>
244# define _(arg) arg
245# ifndef Malloc_t
246# define Malloc_t void *
247# endif
248# ifndef MEM_SIZE
249# define MEM_SIZE unsigned long
250# endif
251# ifndef LONG_MAX
252# define LONG_MAX 0x7FFFFFFF
253# endif
254# ifndef UV
255# define UV unsigned long
256# endif
257# ifndef caddr_t
258# define caddr_t char *
259# endif
260# ifndef Free_t
261# define Free_t void
262# endif
263# define Copy(s,d,n,t) (void)memcpy((char*)(d),(char*)(s), (n) * sizeof(t))
264# define PerlEnv_getenv getenv
265# define PerlIO_printf fprintf
266# define PerlIO_stderr() stderr
267# endif
e8bc2b5c 268# ifndef croak /* make depend */
741df71a 269# define croak(mess, arg) (warn((mess), (arg)), exit(1))
d720c441 270# endif
271# ifndef warn
741df71a 272# define warn(mess, arg) fprintf(stderr, (mess), (arg))
e8bc2b5c 273# endif
274# ifdef DEBUG_m
275# undef DEBUG_m
276# endif
277# define DEBUG_m(a)
278# ifdef DEBUGGING
279# undef DEBUGGING
280# endif
cea2e8a9 281# ifndef pTHX
282# define pTHX void
283# define pTHX_
284# define dTHX extern int Perl___notused
285# define WITH_THX(s) s
286# endif
e8bc2b5c 287#endif
288
289#ifndef MUTEX_LOCK
290# define MUTEX_LOCK(l)
291#endif
292
293#ifndef MUTEX_UNLOCK
294# define MUTEX_UNLOCK(l)
295#endif
296
741df71a 297#ifndef MALLOC_LOCK
cea2e8a9 298# define MALLOC_LOCK MUTEX_LOCK_NOCONTEXT(&PL_malloc_mutex)
741df71a 299#endif
300
301#ifndef MALLOC_UNLOCK
cea2e8a9 302# define MALLOC_UNLOCK MUTEX_UNLOCK_NOCONTEXT(&PL_malloc_mutex)
741df71a 303#endif
304
5bbd1ef5 305# ifndef fatalcroak /* make depend */
306# define fatalcroak(mess) (write(2, (mess), strlen(mess)), exit(2))
307# endif
308
760ac839 309#ifdef DEBUGGING
e8bc2b5c 310# undef DEBUG_m
0672f40e 311# define DEBUG_m(a) if (PL_curinterp && PL_debug & 128) a
760ac839 312#endif
313
e9397286 314/*
315 * Layout of memory:
316 * ~~~~~~~~~~~~~~~~
317 * The memory is broken into "blocks" which occupy multiples of 2K (and
318 * generally speaking, have size "close" to a power of 2). The addresses
319 * of such *unused* blocks are kept in nextf[i] with big enough i. (nextf
320 * is an array of linked lists.) (Addresses of used blocks are not known.)
321 *
 322 * Moreover, since the algorithm may try to "bite" smaller blocks out
323 * of unused bigger ones, there are also regions of "irregular" size,
324 * managed separately, by a linked list chunk_chain.
325 *
326 * The third type of storage is the sbrk()ed-but-not-yet-used space, its
327 * end and size are kept in last_sbrk_top and sbrked_remains.
328 *
329 * Growing blocks "in place":
330 * ~~~~~~~~~~~~~~~~~~~~~~~~~
331 * The address of the block with the greatest address is kept in last_op
332 * (if not known, last_op is 0). If it is known that the memory above
333 * last_op is not continuous, or contains a chunk from chunk_chain,
334 * last_op is set to 0.
335 *
336 * The chunk with address last_op may be grown by expanding into
337 * sbrk()ed-but-not-yet-used space, or trying to sbrk() more continuous
338 * memory.
339 *
340 * Management of last_op:
341 * ~~~~~~~~~~~~~~~~~~~~~
342 *
343 * free() never changes the boundaries of blocks, so is not relevant.
344 *
345 * The only way realloc() may change the boundaries of blocks is if it
346 * grows a block "in place". However, in the case of success such a
347 * chunk is automatically last_op, and it remains last_op. In the case
348 * of failure getpages_adjacent() clears last_op.
349 *
350 * malloc() may change blocks by calling morecore() only.
351 *
352 * morecore() may create new blocks by:
353 * a) biting pieces from chunk_chain (cannot create one above last_op);
354 * b) biting a piece from an unused block (if block was last_op, this
355 * may create a chunk from chain above last_op, thus last_op is
356 * invalidated in such a case).
357 * c) biting of sbrk()ed-but-not-yet-used space. This creates
358 * a block which is last_op.
359 * d) Allocating new pages by calling getpages();
360 *
361 * getpages() creates a new block. It marks last_op at the bottom of
362 * the chunk of memory it returns.
363 *
364 * Active pages footprint:
365 * ~~~~~~~~~~~~~~~~~~~~~~
366 * Note that we do not need to traverse the lists in nextf[i], just take
367 * the first element of this list. However, we *need* to traverse the
 368 * list in chunk_chain, but most of the time it should be a very short one,
369 * so we do not step on a lot of pages we are not going to use.
370 *
371 * Flaws:
372 * ~~~~~
 373 * get_from_bigger_buckets(): forgets to increment price => quite
 374 * aggressive.
375 */
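/*
 * Rough illustration of the flow implied above (a sketch, not extra
 * machinery): a request is served from nextf[bucket] when possible;
 * otherwise morecore() builds a fresh supply, either by stealing from a
 * bigger unused block, biting a piece from chunk_chain, using the
 * sbrk()ed-but-not-yet-used tail, or asking the OS via getpages()/sbrk().
 */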
376
135863df 377/* I don't much care whether these are defined in sys/types.h--LAW */
378
379#define u_char unsigned char
380#define u_int unsigned int
e8bc2b5c 381
382#ifdef HAS_QUAD
 383# define u_bigint UV /* Needs to be able to hold a void*. */
 384#else /* needed? */
 385# define u_bigint unsigned long /* Needs to be able to hold a void*. */
386#endif
387
135863df 388#define u_short unsigned short
8d063cd8 389
cf5c4ad8 390/* 286 and atarist like big chunks, which gives too much overhead. */
61ae2fbf 391#if (defined(RCHECK) || defined(I286) || defined(atarist) || defined(__MINT__)) && defined(PACK_MALLOC)
e8bc2b5c 392# undef PACK_MALLOC
cf5c4ad8 393#endif
394
8d063cd8 395/*
cf5c4ad8 396 * The description below is applicable if PACK_MALLOC is not defined.
397 *
8d063cd8 398 * The overhead on a block is at least 4 bytes. When free, this space
399 * contains a pointer to the next free block, and the bottom two bits must
400 * be zero. When in use, the first byte is set to MAGIC, and the second
401 * byte is the size index. The remaining bytes are for alignment.
402 * If range checking is enabled and the size of the block fits
403 * in two bytes, then the top two bytes hold the size of the requested block
404 * plus the range checking words, and the header word MINUS ONE.
405 */
406union overhead {
407 union overhead *ov_next; /* when free */
85e6fe83 408#if MEM_ALIGNBYTES > 4
c623bd54 409 double strut; /* alignment problems */
a687059c 410#endif
8d063cd8 411 struct {
412 u_char ovu_magic; /* magic number */
413 u_char ovu_index; /* bucket # */
414#ifdef RCHECK
415 u_short ovu_size; /* actual block size */
416 u_int ovu_rmagic; /* range magic number */
417#endif
418 } ovu;
419#define ov_magic ovu.ovu_magic
420#define ov_index ovu.ovu_index
421#define ov_size ovu.ovu_size
422#define ov_rmagic ovu.ovu_rmagic
423};
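/*
 * Hedged illustration of the header above (the !PACK_MALLOC, !RCHECK
 * case, assuming MEM_ALIGNBYTES > 4 so the double strut pads the union
 * to 8 bytes): a used chunk begins with
 *
 *	byte 0      ov_magic  (MAGIC)
 *	byte 1      ov_index  (bucket number)
 *	bytes 2..7  padding from the strut
 *
 * and the pointer handed to the caller is (p + CHUNK_SHIFT), i.e. one
 * union overhead past the chunk start when PACK_MALLOC is off.
 */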
424
425#define MAGIC 0xff /* magic # on accounting info */
426#define RMAGIC 0x55555555 /* magic # on range info */
e8bc2b5c 427#define RMAGIC_C 0x55 /* magic # on range info */
428
8d063cd8 429#ifdef RCHECK
c2a5c2d2 430# define RSLOP sizeof (u_int)
431# ifdef TWO_POT_OPTIMIZE
e8bc2b5c 432# define MAX_SHORT_BUCKET (12 * BUCKETS_PER_POW2)
c2a5c2d2 433# else
e8bc2b5c 434# define MAX_SHORT_BUCKET (13 * BUCKETS_PER_POW2)
c2a5c2d2 435# endif
8d063cd8 436#else
c2a5c2d2 437# define RSLOP 0
8d063cd8 438#endif
439
e8bc2b5c 440#if !defined(PACK_MALLOC) && defined(BUCKETS_ROOT2)
441# undef BUCKETS_ROOT2
442#endif
443
444#ifdef BUCKETS_ROOT2
445# define BUCKET_TABLE_SHIFT 2
446# define BUCKET_POW2_SHIFT 1
447# define BUCKETS_PER_POW2 2
448#else
449# define BUCKET_TABLE_SHIFT MIN_BUC_POW2
450# define BUCKET_POW2_SHIFT 0
451# define BUCKETS_PER_POW2 1
452#endif
453
274c7500 454#if !defined(MEM_ALIGNBYTES) || ((MEM_ALIGNBYTES > 4) && !defined(STRICT_ALIGNMENT))
455/* Figure out the alignment of void*. */
456struct aligner {
457 char c;
458 void *p;
459};
460# define ALIGN_SMALL ((int)((caddr_t)&(((struct aligner*)0)->p)))
461#else
462# define ALIGN_SMALL MEM_ALIGNBYTES
463#endif
464
465#define IF_ALIGN_8(yes,no) ((ALIGN_SMALL>4) ? (yes) : (no))
466
e8bc2b5c 467#ifdef BUCKETS_ROOT2
468# define MAX_BUCKET_BY_TABLE 13
469static u_short buck_size[MAX_BUCKET_BY_TABLE + 1] =
470 {
471 0, 0, 0, 0, 4, 4, 8, 12, 16, 24, 32, 48, 64, 80,
472 };
473# define BUCKET_SIZE(i) ((i) % 2 ? buck_size[i] : (1 << ((i) >> BUCKET_POW2_SHIFT)))
474# define BUCKET_SIZE_REAL(i) ((i) <= MAX_BUCKET_BY_TABLE \
475 ? buck_size[i] \
476 : ((1 << ((i) >> BUCKET_POW2_SHIFT)) \
477 - MEM_OVERHEAD(i) \
478 + POW2_OPTIMIZE_SURPLUS(i)))
479#else
480# define BUCKET_SIZE(i) (1 << ((i) >> BUCKET_POW2_SHIFT))
481# define BUCKET_SIZE_REAL(i) (BUCKET_SIZE(i) - MEM_OVERHEAD(i) + POW2_OPTIMIZE_SURPLUS(i))
482#endif
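/*
 * Worked example (a sketch for the !BUCKETS_ROOT2, !PACK_MALLOC, !RCHECK
 * case, assuming an 8-byte union overhead): BUCKET_SIZE(i) is 2^i, so
 * bucket 6 holds 64-byte chunks of which BUCKET_SIZE_REAL(6) = 64 - 8 =
 * 56 bytes are usable; the page-sized surplus only appears for buckets
 * at or above FIRST_BIG_POW2 when TWO_POT_OPTIMIZE is in effect.
 */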
483
484
cf5c4ad8 485#ifdef PACK_MALLOC
e8bc2b5c 486/* In this case it is assumed that if we do sbrk() in 2K units, we
487 * will get 2K aligned arenas (at least after some initial
488 * alignment). The bucket number of the given subblock is on the start
489 * of 2K arena which contains the subblock. Several following bytes
490 * contain the magic numbers for the subblocks in the block.
cf5c4ad8 491 *
492 * Sizes of chunks are powers of 2 for chunks in buckets <=
493 * MAX_PACKED, after this they are (2^n - sizeof(union overhead)) (to
494 * get alignment right).
495 *
e8bc2b5c 496 * Consider an arena for 2^n with n>MAX_PACKED. We suppose that
497 * starts of all the chunks in a 2K arena are in different
498 * 2^n-byte-long chunks. If the top of the last chunk is aligned on a
499 * boundary of 2K block, this means that sizeof(union
500 * overhead)*"number of chunks" < 2^n, or sizeof(union overhead)*2K <
501 * 4^n, or n > 6 + log2(sizeof()/2)/2, since a chunk of size 2^n -
 502 * overhead is used. Since this rules out n = 7 for 8-byte alignment,
 503 * we special-case allocation of the first of 16 128-byte-long chunks.
cf5c4ad8 504 *
505 * Note that with the above assumption we automatically have enough
 506 * room for MAGIC at the start of the 2K block. Note also that we
e8bc2b5c 507 * overlay union overhead over the chunk, thus the start of small chunks
508 * is immediately overwritten after freeing. */
509# define MAX_PACKED_POW2 6
510# define MAX_PACKED (MAX_PACKED_POW2 * BUCKETS_PER_POW2 + BUCKET_POW2_SHIFT)
511# define MAX_POW2_ALGO ((1<<(MAX_PACKED_POW2 + 1)) - M_OVERHEAD)
512# define TWOK_MASK ((1<<LOG_OF_MIN_ARENA) - 1)
513# define TWOK_MASKED(x) ((u_bigint)(x) & ~TWOK_MASK)
514# define TWOK_SHIFT(x) ((u_bigint)(x) & TWOK_MASK)
cf5c4ad8 515# define OV_INDEXp(block) ((u_char*)(TWOK_MASKED(block)))
516# define OV_INDEX(block) (*OV_INDEXp(block))
517# define OV_MAGIC(block,bucket) (*(OV_INDEXp(block) + \
e8bc2b5c 518 (TWOK_SHIFT(block)>> \
519 (bucket>>BUCKET_POW2_SHIFT)) + \
520 (bucket >= MIN_NEEDS_SHIFT ? 1 : 0)))
 521 /* A bucket can have a shift smaller than its size, so we need to
522 shift its magic number so it will not overwrite index: */
523# ifdef BUCKETS_ROOT2
524# define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2 - 1) /* Shift 80 greater than chunk 64. */
525# else
526# define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2) /* Shift 128 greater than chunk 32. */
527# endif
cf5c4ad8 528# define CHUNK_SHIFT 0
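/* Worked sketch of the macros above, under the same assumptions as the
 * comment that precedes them: for a pointer p in a packed 2K arena,
 * TWOK_MASKED(p) is the arena start, whose first byte holds the bucket
 * index shared by every chunk in the arena; TWOK_SHIFT(p) is p's offset
 * in the arena, and shifting it down by the bucket's power of two gives
 * the chunk ordinal, which OV_MAGIC() uses to pick that chunk's magic
 * byte from the bytes following the index (skipping the index byte
 * itself for buckets >= MIN_NEEDS_SHIFT). */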
529
e8bc2b5c 530/* Number of chunks in a 2K arena for a bucket of the given ordinal. */
531#ifdef IGNORE_SMALL_BAD_FREE
532#define FIRST_BUCKET_WITH_CHECK (6 * BUCKETS_PER_POW2) /* 64 */
533# define N_BLKS(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK \
534 ? ((1<<LOG_OF_MIN_ARENA) - 1)/BUCKET_SIZE(bucket) \
535 : n_blks[bucket] )
536#else
537# define N_BLKS(bucket) n_blks[bucket]
538#endif
539
540static u_short n_blks[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] =
541 {
542# if BUCKETS_PER_POW2==1
543 0, 0,
544 (MIN_BUC_POW2==2 ? 384 : 0),
545 224, 120, 62, 31, 16, 8, 4, 2
546# else
547 0, 0, 0, 0,
548 (MIN_BUC_POW2==2 ? 384 : 0), (MIN_BUC_POW2==2 ? 384 : 0), /* 4, 4 */
549 224, 149, 120, 80, 62, 41, 31, 25, 16, 16, 8, 8, 4, 4, 2, 2
550# endif
551 };
552
 553/* Offset of the first chunk of the given bucket inside a 2K arena. */
554#ifdef IGNORE_SMALL_BAD_FREE
555# define BLK_SHIFT(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK \
556 ? ((1<<LOG_OF_MIN_ARENA) \
557 - BUCKET_SIZE(bucket) * N_BLKS(bucket)) \
558 : blk_shift[bucket])
559#else
560# define BLK_SHIFT(bucket) blk_shift[bucket]
561#endif
562
563static u_short blk_shift[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] =
564 {
565# if BUCKETS_PER_POW2==1
566 0, 0,
567 (MIN_BUC_POW2==2 ? 512 : 0),
568 256, 128, 64, 64, /* 8 to 64 */
569 16*sizeof(union overhead),
570 8*sizeof(union overhead),
571 4*sizeof(union overhead),
572 2*sizeof(union overhead),
573# else
574 0, 0, 0, 0,
575 (MIN_BUC_POW2==2 ? 512 : 0), (MIN_BUC_POW2==2 ? 512 : 0),
576 256, 260, 128, 128, 64, 80, 64, 48, /* 8 to 96 */
577 16*sizeof(union overhead), 16*sizeof(union overhead),
578 8*sizeof(union overhead), 8*sizeof(union overhead),
579 4*sizeof(union overhead), 4*sizeof(union overhead),
580 2*sizeof(union overhead), 2*sizeof(union overhead),
581# endif
582 };
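/*
 * Sanity sketch of the two tables (an observation, not an invariant
 * enforced anywhere): for the packed buckets (chunk size <= 64 with
 * BUCKETS_PER_POW2 == 1) the entries satisfy
 *
 *	N_BLKS(b) * BUCKET_SIZE(b) + BLK_SHIFT(b) == 2048
 *
 * e.g. 224 * 8 + 256 == 2048 for 8-byte chunks; for the larger buckets
 * each chunk also carries a union overhead, which morecore() subtracts
 * from the stride, so the same 2K arena still adds up.
 */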
cf5c4ad8 583
5bbd1ef5 584# define NEEDED_ALIGNMENT 0x800 /* 2k boundaries */
585# define WANTED_ALIGNMENT 0x800 /* 2k boundaries */
586
cf5c4ad8 587#else /* !PACK_MALLOC */
588
589# define OV_MAGIC(block,bucket) (block)->ov_magic
590# define OV_INDEX(block) (block)->ov_index
591# define CHUNK_SHIFT 1
e8bc2b5c 592# define MAX_PACKED -1
5bbd1ef5 593# define NEEDED_ALIGNMENT MEM_ALIGNBYTES
594# define WANTED_ALIGNMENT 0x400 /* 1k boundaries */
595
cf5c4ad8 596#endif /* !PACK_MALLOC */
597
e8bc2b5c 598#define M_OVERHEAD (sizeof(union overhead) + RSLOP)
599
600#ifdef PACK_MALLOC
601# define MEM_OVERHEAD(bucket) \
602 (bucket <= MAX_PACKED ? 0 : M_OVERHEAD)
603# ifdef SMALL_BUCKET_VIA_TABLE
604# define START_SHIFTS_BUCKET ((MAX_PACKED_POW2 + 1) * BUCKETS_PER_POW2)
605# define START_SHIFT MAX_PACKED_POW2
606# ifdef BUCKETS_ROOT2 /* Chunks of size 3*2^n. */
607# define SIZE_TABLE_MAX 80
608# else
609# define SIZE_TABLE_MAX 64
610# endif
611static char bucket_of[] =
612 {
613# ifdef BUCKETS_ROOT2 /* Chunks of size 3*2^n. */
614 /* 0 to 15 in 4-byte increments. */
615 (sizeof(void*) > 4 ? 6 : 5), /* 4/8, 5-th bucket for better reports */
616 6, /* 8 */
274c7500 617 IF_ALIGN_8(8,7), 8, /* 16/12, 16 */
e8bc2b5c 618 9, 9, 10, 10, /* 24, 32 */
619 11, 11, 11, 11, /* 48 */
620 12, 12, 12, 12, /* 64 */
621 13, 13, 13, 13, /* 80 */
622 13, 13, 13, 13 /* 80 */
623# else /* !BUCKETS_ROOT2 */
624 /* 0 to 15 in 4-byte increments. */
625 (sizeof(void*) > 4 ? 3 : 2),
626 3,
627 4, 4,
628 5, 5, 5, 5,
629 6, 6, 6, 6,
630 6, 6, 6, 6
631# endif /* !BUCKETS_ROOT2 */
632 };
633# else /* !SMALL_BUCKET_VIA_TABLE */
634# define START_SHIFTS_BUCKET MIN_BUCKET
635# define START_SHIFT (MIN_BUC_POW2 - 1)
636# endif /* !SMALL_BUCKET_VIA_TABLE */
637#else /* !PACK_MALLOC */
638# define MEM_OVERHEAD(bucket) M_OVERHEAD
639# ifdef SMALL_BUCKET_VIA_TABLE
640# undef SMALL_BUCKET_VIA_TABLE
641# endif
642# define START_SHIFTS_BUCKET MIN_BUCKET
643# define START_SHIFT (MIN_BUC_POW2 - 1)
644#endif /* !PACK_MALLOC */
cf5c4ad8 645
8d063cd8 646/*
55497cff 647 * Big allocations are often of the size 2^n bytes. To make them a
648 * little bit better, make blocks of size 2^n+pagesize for big n.
649 */
650
651#ifdef TWO_POT_OPTIMIZE
652
5f05dabc 653# ifndef PERL_PAGESIZE
654# define PERL_PAGESIZE 4096
655# endif
e8bc2b5c 656# ifndef FIRST_BIG_POW2
657# define FIRST_BIG_POW2 15 /* 32K, 16K is used too often. */
5f05dabc 658# endif
e8bc2b5c 659# define FIRST_BIG_BLOCK (1<<FIRST_BIG_POW2)
55497cff 660/* If this value or more, check against bigger blocks. */
661# define FIRST_BIG_BOUND (FIRST_BIG_BLOCK - M_OVERHEAD)
662/* If less than this value, goes into 2^n-overhead-block. */
663# define LAST_SMALL_BOUND ((FIRST_BIG_BLOCK>>1) - M_OVERHEAD)
664
e8bc2b5c 665# define POW2_OPTIMIZE_ADJUST(nbytes) \
666 ((nbytes >= FIRST_BIG_BOUND) ? nbytes -= PERL_PAGESIZE : 0)
667# define POW2_OPTIMIZE_SURPLUS(bucket) \
668 ((bucket >= FIRST_BIG_POW2 * BUCKETS_PER_POW2) ? PERL_PAGESIZE : 0)
669
670#else /* !TWO_POT_OPTIMIZE */
671# define POW2_OPTIMIZE_ADJUST(nbytes)
672# define POW2_OPTIMIZE_SURPLUS(bucket) 0
673#endif /* !TWO_POT_OPTIMIZE */
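/*
 * Illustration of the effect (a sketch): with TWO_POT_OPTIMIZE, a
 * request of exactly 1<<16 bytes (>= FIRST_BIG_BOUND) has PERL_PAGESIZE
 * subtracted before bucket selection, so it lands in the 64K bucket
 * instead of spilling into the 128K one; POW2_OPTIMIZE_SURPLUS() then
 * gives that bucket's arenas the extra page back.
 */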
674
675#if defined(HAS_64K_LIMIT) && defined(PERL_CORE)
676# define BARK_64K_LIMIT(what,nbytes,size) \
677 if (nbytes > 0xffff) { \
678 PerlIO_printf(PerlIO_stderr(), \
679 "%s too large: %lx\n", what, size); \
680 my_exit(1); \
681 }
682#else /* !HAS_64K_LIMIT || !PERL_CORE */
683# define BARK_64K_LIMIT(what,nbytes,size)
684#endif /* !HAS_64K_LIMIT || !PERL_CORE */
55497cff 685
e8bc2b5c 686#ifndef MIN_SBRK
687# define MIN_SBRK 2048
688#endif
689
690#ifndef FIRST_SBRK
d720c441 691# define FIRST_SBRK (48*1024)
e8bc2b5c 692#endif
693
694/* Minimal sbrk in percents of what is already alloced. */
695#ifndef MIN_SBRK_FRAC
696# define MIN_SBRK_FRAC 3
697#endif
698
699#ifndef SBRK_ALLOW_FAILURES
700# define SBRK_ALLOW_FAILURES 3
701#endif
55497cff 702
e8bc2b5c 703#ifndef SBRK_FAILURE_PRICE
704# define SBRK_FAILURE_PRICE 50
55497cff 705#endif
706
e8bc2b5c 707#if defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)
708
709# ifndef BIG_SIZE
710# define BIG_SIZE (1<<16) /* 64K */
711# endif
712
3541dd58 713#ifdef I_MACH_CTHREADS
772fe5b3 714# undef MUTEX_LOCK
715# define MUTEX_LOCK(m) STMT_START { if (*m) mutex_lock(*m); } STMT_END
716# undef MUTEX_UNLOCK
717# define MUTEX_UNLOCK(m) STMT_START { if (*m) mutex_unlock(*m); } STMT_END
3541dd58 718#endif
719
55497cff 720static char *emergency_buffer;
721static MEM_SIZE emergency_buffer_size;
722
cea2e8a9 723static int findbucket (union overhead *freep, int srchlen);
724static void morecore (register int bucket);
725# if defined(DEBUGGING)
726static void botch (char *diag, char *s);
727# endif
728static void add_to_chain (void *p, MEM_SIZE size, MEM_SIZE chip);
729static Malloc_t emergency_sbrk (MEM_SIZE size);
730static void* get_from_chain (MEM_SIZE size);
731static void* get_from_bigger_buckets(int bucket, MEM_SIZE size);
732static union overhead *getpages (int needed, int *nblksp, int bucket);
733static int getpages_adjacent(int require);
734
735static Malloc_t
736emergency_sbrk(MEM_SIZE size)
55497cff 737{
28ac10b1 738 MEM_SIZE rsize = (((size - 1)>>LOG_OF_MIN_ARENA) + 1)<<LOG_OF_MIN_ARENA;
739
55497cff 740 if (size >= BIG_SIZE) {
741 /* Give the possibility to recover: */
741df71a 742 MALLOC_UNLOCK;
1b979e0a 743 croak("Out of memory during \"large\" request for %i bytes", size);
55497cff 744 }
745
28ac10b1 746 if (emergency_buffer_size >= rsize) {
747 char *old = emergency_buffer;
748
749 emergency_buffer_size -= rsize;
750 emergency_buffer += rsize;
751 return old;
752 } else {
cea2e8a9 753 dTHX;
55497cff 754 /* First offense, give a possibility to recover by dying. */
755 /* No malloc involved here: */
4a33f861 756 GV **gvp = (GV**)hv_fetch(PL_defstash, "^M", 2, 0);
55497cff 757 SV *sv;
758 char *pv;
28ac10b1 759 int have = 0;
2d8e6c8d 760 STRLEN n_a;
55497cff 761
28ac10b1 762 if (emergency_buffer_size) {
763 add_to_chain(emergency_buffer, emergency_buffer_size, 0);
764 emergency_buffer_size = 0;
765 emergency_buffer = Nullch;
766 have = 1;
767 }
4a33f861 768 if (!gvp) gvp = (GV**)hv_fetch(PL_defstash, "\015", 1, 0);
55497cff 769 if (!gvp || !(sv = GvSV(*gvp)) || !SvPOK(sv)
28ac10b1 770 || (SvLEN(sv) < (1<<LOG_OF_MIN_ARENA) - M_OVERHEAD)) {
771 if (have)
772 goto do_croak;
55497cff 773 return (char *)-1; /* Now die die die... */
28ac10b1 774 }
55497cff 775 /* Got it, now detach SvPV: */
2d8e6c8d 776 pv = SvPV(sv, n_a);
55497cff 777 /* Check alignment: */
5bbd1ef5 778 if (((UV)(pv - sizeof(union overhead))) & (NEEDED_ALIGNMENT - 1)) {
55497cff 779 PerlIO_puts(PerlIO_stderr(),"Bad alignment of $^M!\n");
bbce6d69 780 return (char *)-1; /* die die die */
55497cff 781 }
782
28ac10b1 783 emergency_buffer = pv - sizeof(union overhead);
784 emergency_buffer_size = malloced_size(pv) + M_OVERHEAD;
55497cff 785 SvPOK_off(sv);
28ac10b1 786 SvPVX(sv) = Nullch;
787 SvCUR(sv) = SvLEN(sv) = 0;
55497cff 788 }
28ac10b1 789 do_croak:
741df71a 790 MALLOC_UNLOCK;
28ac10b1 791 croak("Out of memory during request for %i bytes", size);
55497cff 792}
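/*
 * Usage note (hedged, summarizing perlvar rather than code in this
 * file): in a perl built with this malloc and PERL_EMERGENCY_SBRK, the
 * Perl-level statement
 *
 *	$^M = 'a' x (1 << 16);
 *
 * pre-extends the scalar whose string buffer emergency_sbrk() above
 * will cannibalize once sbrk() itself has failed.
 */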
793
e8bc2b5c 794#else /* !(defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)) */
55497cff 795# define emergency_sbrk(size) -1
e8bc2b5c 796#endif /* !(defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)) */
55497cff 797
798/*
e8bc2b5c 799 * nextf[i] is the pointer to the next free block of size 2^i. The
8d063cd8 800 * smallest allocatable block is 8 bytes. The overhead information
801 * precedes the data area returned to the user.
802 */
e8bc2b5c 803#define NBUCKETS (32*BUCKETS_PER_POW2 + 1)
8d063cd8 804static union overhead *nextf[NBUCKETS];
cf5c4ad8 805
806#ifdef USE_PERL_SBRK
807#define sbrk(a) Perl_sbrk(a)
20ce7b12 808Malloc_t Perl_sbrk (int size);
8ac85365 809#else
810#ifdef DONT_DECLARE_STD
811#ifdef I_UNISTD
812#include <unistd.h>
813#endif
cf5c4ad8 814#else
52082926 815extern Malloc_t sbrk(int);
8ac85365 816#endif
cf5c4ad8 817#endif
8d063cd8 818
c07a80fd 819#ifdef DEBUGGING_MSTATS
8d063cd8 820/*
821 * nmalloc[i] is the difference between the number of mallocs and frees
822 * for a given block size.
823 */
824static u_int nmalloc[NBUCKETS];
5f05dabc 825static u_int sbrk_slack;
826static u_int start_slack;
8d063cd8 827#endif
828
e8bc2b5c 829static u_int goodsbrk;
830
760ac839 831#ifdef DEBUGGING
3541dd58 832#undef ASSERT
833#define ASSERT(p,diag) if (!(p)) botch(diag,STRINGIFY(p)); else
cea2e8a9 834static void
835botch(char *diag, char *s)
8d063cd8 836{
d720c441 837 PerlIO_printf(PerlIO_stderr(), "assertion botched (%s?): %s\n", diag, s);
3028581b 838 PerlProc_abort();
8d063cd8 839}
840#else
3541dd58 841#define ASSERT(p, diag)
8d063cd8 842#endif
843
2304df62 844Malloc_t
86058a2d 845Perl_malloc(register size_t nbytes)
8d063cd8 846{
847 register union overhead *p;
e8bc2b5c 848 register int bucket;
ee0007ab 849 register MEM_SIZE shiftr;
8d063cd8 850
c2a5c2d2 851#if defined(DEBUGGING) || defined(RCHECK)
ee0007ab 852 MEM_SIZE size = nbytes;
45d8adaa 853#endif
854
e8bc2b5c 855 BARK_64K_LIMIT("Allocation",nbytes,nbytes);
45d8adaa 856#ifdef DEBUGGING
857 if ((long)nbytes < 0)
cea2e8a9 858 croak("%s", "panic: malloc");
45d8adaa 859#endif
45d8adaa 860
741df71a 861 MALLOC_LOCK;
8d063cd8 862 /*
863 * Convert amount of memory requested into
864 * closest block size stored in hash buckets
865 * which satisfies request. Account for
866 * space used per block for accounting.
867 */
cf5c4ad8 868#ifdef PACK_MALLOC
e8bc2b5c 869# ifdef SMALL_BUCKET_VIA_TABLE
870 if (nbytes == 0)
871 bucket = MIN_BUCKET;
872 else if (nbytes <= SIZE_TABLE_MAX) {
873 bucket = bucket_of[(nbytes - 1) >> BUCKET_TABLE_SHIFT];
874 } else
875# else
043bf814 876 if (nbytes == 0)
877 nbytes = 1;
e8bc2b5c 878 if (nbytes <= MAX_POW2_ALGO) goto do_shifts;
879 else
880# endif
55497cff 881#endif
e8bc2b5c 882 {
883 POW2_OPTIMIZE_ADJUST(nbytes);
884 nbytes += M_OVERHEAD;
885 nbytes = (nbytes + 3) &~ 3;
886 do_shifts:
887 shiftr = (nbytes - 1) >> START_SHIFT;
888 bucket = START_SHIFTS_BUCKET;
889 /* apart from this loop, this is O(1) */
890 while (shiftr >>= 1)
891 bucket += BUCKETS_PER_POW2;
cf5c4ad8 892 }
8d063cd8 893 /*
894 * If nothing in hash bucket right now,
895 * request more memory from the system.
896 */
897 if (nextf[bucket] == NULL)
898 morecore(bucket);
e8bc2b5c 899 if ((p = nextf[bucket]) == NULL) {
741df71a 900 MALLOC_UNLOCK;
55497cff 901#ifdef PERL_CORE
4a33f861 902 if (!PL_nomemok) {
760ac839 903 PerlIO_puts(PerlIO_stderr(),"Out of memory!\n");
cea2e8a9 904 WITH_THX(my_exit(1));
ee0007ab 905 }
45d8adaa 906#else
8d063cd8 907 return (NULL);
45d8adaa 908#endif
909 }
910
e8bc2b5c 911 DEBUG_m(PerlIO_printf(Perl_debug_log,
912 "0x%lx: (%05lu) malloc %ld bytes\n",
4a33f861 913 (unsigned long)(p+1), (unsigned long)(PL_an++),
e8bc2b5c 914 (long)size));
45d8adaa 915
8d063cd8 916 /* remove from linked list */
802004fa 917#if defined(RCHECK)
918 if (((UV)p) & (MEM_ALIGNBYTES - 1))
760ac839 919 PerlIO_printf(PerlIO_stderr(), "Corrupt malloc ptr 0x%lx at 0x%lx\n",
a0d0e21e 920 (unsigned long)*((int*)p),(unsigned long)p);
bf38876a 921#endif
922 nextf[bucket] = p->ov_next;
e8bc2b5c 923#ifdef IGNORE_SMALL_BAD_FREE
924 if (bucket >= FIRST_BUCKET_WITH_CHECK)
925#endif
926 OV_MAGIC(p, bucket) = MAGIC;
cf5c4ad8 927#ifndef PACK_MALLOC
928 OV_INDEX(p) = bucket;
929#endif
8d063cd8 930#ifdef RCHECK
931 /*
932 * Record allocated size of block and
933 * bound space with magic numbers.
934 */
8d063cd8 935 p->ov_rmagic = RMAGIC;
e8bc2b5c 936 if (bucket <= MAX_SHORT_BUCKET) {
937 int i;
938
939 nbytes = size + M_OVERHEAD;
940 p->ov_size = nbytes - 1;
941 if ((i = nbytes & 3)) {
942 i = 4 - i;
943 while (i--)
944 *((char *)((caddr_t)p + nbytes - RSLOP + i)) = RMAGIC_C;
945 }
946 nbytes = (nbytes + 3) &~ 3;
947 *((u_int *)((caddr_t)p + nbytes - RSLOP)) = RMAGIC;
948 }
8d063cd8 949#endif
741df71a 950 MALLOC_UNLOCK;
cf5c4ad8 951 return ((Malloc_t)(p + CHUNK_SHIFT));
8d063cd8 952}
953
e8bc2b5c 954static char *last_sbrk_top;
955static char *last_op; /* This arena can be easily extended. */
956static int sbrked_remains;
957static int sbrk_good = SBRK_ALLOW_FAILURES * SBRK_FAILURE_PRICE;
958
959#ifdef DEBUGGING_MSTATS
960static int sbrks;
961#endif
962
963struct chunk_chain_s {
964 struct chunk_chain_s *next;
965 MEM_SIZE size;
966};
967static struct chunk_chain_s *chunk_chain;
968static int n_chunks;
969static char max_bucket;
970
 971/* Cut off a piece of one of the chunks in the chain. Prefer the smallest chunk that fits. */
cea2e8a9 972static void *
973get_from_chain(MEM_SIZE size)
e8bc2b5c 974{
975 struct chunk_chain_s *elt = chunk_chain, **oldp = &chunk_chain;
976 struct chunk_chain_s **oldgoodp = NULL;
977 long min_remain = LONG_MAX;
978
979 while (elt) {
980 if (elt->size >= size) {
981 long remains = elt->size - size;
982 if (remains >= 0 && remains < min_remain) {
983 oldgoodp = oldp;
984 min_remain = remains;
985 }
986 if (remains == 0) {
987 break;
988 }
989 }
990 oldp = &( elt->next );
991 elt = elt->next;
992 }
993 if (!oldgoodp) return NULL;
994 if (min_remain) {
995 void *ret = *oldgoodp;
996 struct chunk_chain_s *next = (*oldgoodp)->next;
997
998 *oldgoodp = (struct chunk_chain_s *)((char*)ret + size);
999 (*oldgoodp)->size = min_remain;
1000 (*oldgoodp)->next = next;
1001 return ret;
1002 } else {
1003 void *ret = *oldgoodp;
1004 *oldgoodp = (*oldgoodp)->next;
1005 n_chunks--;
1006 return ret;
1007 }
1008}
1009
cea2e8a9 1010static void
1011add_to_chain(void *p, MEM_SIZE size, MEM_SIZE chip)
e8bc2b5c 1012{
1013 struct chunk_chain_s *next = chunk_chain;
1014 char *cp = (char*)p;
1015
1016 cp += chip;
1017 chunk_chain = (struct chunk_chain_s *)cp;
1018 chunk_chain->size = size - chip;
1019 chunk_chain->next = next;
1020 n_chunks++;
1021}
1022
cea2e8a9 1023static void *
1024get_from_bigger_buckets(int bucket, MEM_SIZE size)
e8bc2b5c 1025{
1026 int price = 1;
1027 static int bucketprice[NBUCKETS];
1028 while (bucket <= max_bucket) {
1029 /* We postpone stealing from bigger buckets until we want it
1030 often enough. */
1031 if (nextf[bucket] && bucketprice[bucket]++ >= price) {
1032 /* Steal it! */
1033 void *ret = (void*)(nextf[bucket] - 1 + CHUNK_SHIFT);
1034 bucketprice[bucket] = 0;
1035 if (((char*)nextf[bucket]) - M_OVERHEAD == last_op) {
1036 last_op = NULL; /* Disable optimization */
1037 }
1038 nextf[bucket] = nextf[bucket]->ov_next;
1039#ifdef DEBUGGING_MSTATS
1040 nmalloc[bucket]--;
1041 start_slack -= M_OVERHEAD;
1042#endif
1043 add_to_chain(ret, (BUCKET_SIZE(bucket) +
1044 POW2_OPTIMIZE_SURPLUS(bucket)),
1045 size);
1046 return ret;
1047 }
1048 bucket++;
1049 }
1050 return NULL;
1051}
1052
cea2e8a9 1053static union overhead *
1054getpages(int needed, int *nblksp, int bucket)
fa423c5b 1055{
1056 /* Need to do (possibly expensive) system call. Try to
1057 optimize it for rare calling. */
1058 MEM_SIZE require = needed - sbrked_remains;
1059 char *cp;
1060 union overhead *ovp;
1061 int slack = 0;
1062
1063 if (sbrk_good > 0) {
1064 if (!last_sbrk_top && require < FIRST_SBRK)
1065 require = FIRST_SBRK;
1066 else if (require < MIN_SBRK) require = MIN_SBRK;
1067
1068 if (require < goodsbrk * MIN_SBRK_FRAC / 100)
1069 require = goodsbrk * MIN_SBRK_FRAC / 100;
1070 require = ((require - 1 + MIN_SBRK) / MIN_SBRK) * MIN_SBRK;
1071 } else {
1072 require = needed;
1073 last_sbrk_top = 0;
1074 sbrked_remains = 0;
1075 }
1076
1077 DEBUG_m(PerlIO_printf(Perl_debug_log,
1078 "sbrk(%ld) for %ld-byte-long arena\n",
1079 (long)require, (long) needed));
1080 cp = (char *)sbrk(require);
1081#ifdef DEBUGGING_MSTATS
1082 sbrks++;
1083#endif
1084 if (cp == last_sbrk_top) {
1085 /* Common case, anything is fine. */
1086 sbrk_good++;
1087 ovp = (union overhead *) (cp - sbrked_remains);
e9397286 1088 last_op = cp - sbrked_remains;
fa423c5b 1089 sbrked_remains = require - (needed - sbrked_remains);
1090 } else if (cp == (char *)-1) { /* no more room! */
1091 ovp = (union overhead *)emergency_sbrk(needed);
1092 if (ovp == (union overhead *)-1)
1093 return 0;
e9397286 1094 if (((char*)ovp) > last_op) { /* Cannot happen with current emergency_sbrk() */
1095 last_op = 0;
1096 }
fa423c5b 1097 return ovp;
1098 } else { /* Non-continuous or first sbrk(). */
1099 long add = sbrked_remains;
1100 char *newcp;
1101
1102 if (sbrked_remains) { /* Put rest into chain, we
1103 cannot use it right now. */
1104 add_to_chain((void*)(last_sbrk_top - sbrked_remains),
1105 sbrked_remains, 0);
1106 }
1107
1108 /* Second, check alignment. */
1109 slack = 0;
1110
61ae2fbf 1111#if !defined(atarist) && !defined(__MINT__) /* on the atari we dont have to worry about this */
fa423c5b 1112# ifndef I286 /* The sbrk(0) call on the I286 always returns the next segment */
5bbd1ef5 1113 /* WANTED_ALIGNMENT may be more than NEEDED_ALIGNMENT, but this may
1114 improve performance of memory access. */
1115 if ((UV)cp & (WANTED_ALIGNMENT - 1)) { /* Not aligned. */
1116 slack = WANTED_ALIGNMENT - ((UV)cp & (WANTED_ALIGNMENT - 1));
fa423c5b 1117 add += slack;
1118 }
1119# endif
61ae2fbf 1120#endif /* !atarist && !MINT */
fa423c5b 1121
1122 if (add) {
1123 DEBUG_m(PerlIO_printf(Perl_debug_log,
 1124 "sbrk(%ld) to fix non-continuous/off-page sbrk:\n\t%ld for alignment,\t%ld were assumed to come from the tail of the previous sbrk\n",
1125 (long)add, (long) slack,
1126 (long) sbrked_remains));
1127 newcp = (char *)sbrk(add);
1128#if defined(DEBUGGING_MSTATS)
1129 sbrks++;
1130 sbrk_slack += add;
1131#endif
1132 if (newcp != cp + require) {
1133 /* Too bad: even rounding sbrk() is not continuous.*/
1134 DEBUG_m(PerlIO_printf(Perl_debug_log,
1135 "failed to fix bad sbrk()\n"));
1136#ifdef PACK_MALLOC
1137 if (slack) {
741df71a 1138 MALLOC_UNLOCK;
5bbd1ef5 1139 fatalcroak("panic: Off-page sbrk\n");
fa423c5b 1140 }
1141#endif
1142 if (sbrked_remains) {
1143 /* Try again. */
1144#if defined(DEBUGGING_MSTATS)
1145 sbrk_slack += require;
1146#endif
1147 require = needed;
1148 DEBUG_m(PerlIO_printf(Perl_debug_log,
1149 "straight sbrk(%ld)\n",
1150 (long)require));
1151 cp = (char *)sbrk(require);
1152#ifdef DEBUGGING_MSTATS
1153 sbrks++;
1154#endif
1155 if (cp == (char *)-1)
1156 return 0;
1157 }
1158 sbrk_good = -1; /* Disable optimization!
1159 Continue with not-aligned... */
1160 } else {
1161 cp += slack;
1162 require += sbrked_remains;
1163 }
1164 }
1165
1166 if (last_sbrk_top) {
1167 sbrk_good -= SBRK_FAILURE_PRICE;
1168 }
1169
1170 ovp = (union overhead *) cp;
1171 /*
1172 * Round up to minimum allocation size boundary
1173 * and deduct from block count to reflect.
1174 */
1175
5bbd1ef5 1176# if NEEDED_ALIGNMENT > MEM_ALIGNBYTES
1177 if ((UV)ovp & (NEEDED_ALIGNMENT - 1))
1178 fatalcroak("Misalignment of sbrk()\n");
1179 else
1180# endif
fa423c5b 1181#ifndef I286 /* Again, this should always be ok on an 80286 */
5bbd1ef5 1182 if ((UV)ovp & (MEM_ALIGNBYTES - 1)) {
fa423c5b 1183 DEBUG_m(PerlIO_printf(Perl_debug_log,
 1184 "fixing sbrk(): %d bytes off machine alignment\n",
5bbd1ef5 1185 (int)((UV)ovp & (MEM_ALIGNBYTES - 1))));
1186 ovp = (union overhead *)(((UV)ovp + MEM_ALIGNBYTES) &
 1187 ~(MEM_ALIGNBYTES - 1)); /* invert the mask to round up to alignment */
fa423c5b 1188 (*nblksp)--;
1189# if defined(DEBUGGING_MSTATS)
1190 /* This is only approx. if TWO_POT_OPTIMIZE: */
5bbd1ef5 1191 sbrk_slack += (1 << (bucket >> BUCKET_POW2_SHIFT));
fa423c5b 1192# endif
1193 }
1194#endif
5bbd1ef5 1195 ; /* Finish `else' */
fa423c5b 1196 sbrked_remains = require - needed;
e9397286 1197 last_op = cp;
fa423c5b 1198 }
1199 last_sbrk_top = cp + require;
fa423c5b 1200#ifdef DEBUGGING_MSTATS
1201 goodsbrk += require;
1202#endif
1203 return ovp;
1204}
1205
cea2e8a9 1206static int
1207getpages_adjacent(int require)
fa423c5b 1208{
1209 if (require <= sbrked_remains) {
1210 sbrked_remains -= require;
1211 } else {
1212 char *cp;
1213
1214 require -= sbrked_remains;
 1215 /* We do not try to optimize sbrks here; we just want adjacent space. */
1216 cp = (char*) sbrk(require);
1217#ifdef DEBUGGING_MSTATS
1218 sbrks++;
1219 goodsbrk += require;
1220#endif
1221 if (cp == last_sbrk_top) {
1222 sbrked_remains = 0;
1223 last_sbrk_top = cp + require;
1224 } else {
28ac10b1 1225 if (cp == (char*)-1) { /* Out of memory */
1226#ifdef DEBUGGING_MSTATS
1227 goodsbrk -= require;
1228#endif
1229 return 0;
1230 }
fa423c5b 1231 /* Report the failure: */
1232 if (sbrked_remains)
1233 add_to_chain((void*)(last_sbrk_top - sbrked_remains),
1234 sbrked_remains, 0);
1235 add_to_chain((void*)cp, require, 0);
1236 sbrk_good -= SBRK_FAILURE_PRICE;
1237 sbrked_remains = 0;
1238 last_sbrk_top = 0;
1239 last_op = 0;
1240 return 0;
1241 }
1242 }
1243
1244 return 1;
1245}
1246
8d063cd8 1247/*
1248 * Allocate more memory to the indicated bucket.
1249 */
cea2e8a9 1250static void
1251morecore(register int bucket)
8d063cd8 1252{
72aaf631 1253 register union overhead *ovp;
8d063cd8 1254 register int rnu; /* 2^rnu bytes will be requested */
fa423c5b 1255 int nblks; /* become nblks blocks of the desired size */
bbce6d69 1256 register MEM_SIZE siz, needed;
8d063cd8 1257
1258 if (nextf[bucket])
1259 return;
e8bc2b5c 1260 if (bucket == sizeof(MEM_SIZE)*8*BUCKETS_PER_POW2) {
741df71a 1261 MALLOC_UNLOCK;
d720c441 1262 croak("%s", "Out of memory during ridiculously large request");
55497cff 1263 }
d720c441 1264 if (bucket > max_bucket)
e8bc2b5c 1265 max_bucket = bucket;
d720c441 1266
e8bc2b5c 1267 rnu = ( (bucket <= (LOG_OF_MIN_ARENA << BUCKET_POW2_SHIFT))
1268 ? LOG_OF_MIN_ARENA
1269 : (bucket >> BUCKET_POW2_SHIFT) );
1270 /* This may be overwritten later: */
1271 nblks = 1 << (rnu - (bucket >> BUCKET_POW2_SHIFT)); /* how many blocks to get */
1272 needed = ((MEM_SIZE)1 << rnu) + POW2_OPTIMIZE_SURPLUS(bucket);
1273 if (nextf[rnu << BUCKET_POW2_SHIFT]) { /* 2048b bucket. */
1274 ovp = nextf[rnu << BUCKET_POW2_SHIFT] - 1 + CHUNK_SHIFT;
1275 nextf[rnu << BUCKET_POW2_SHIFT]
1276 = nextf[rnu << BUCKET_POW2_SHIFT]->ov_next;
1277#ifdef DEBUGGING_MSTATS
1278 nmalloc[rnu << BUCKET_POW2_SHIFT]--;
1279 start_slack -= M_OVERHEAD;
1280#endif
1281 DEBUG_m(PerlIO_printf(Perl_debug_log,
1282 "stealing %ld bytes from %ld arena\n",
1283 (long) needed, (long) rnu << BUCKET_POW2_SHIFT));
1284 } else if (chunk_chain
1285 && (ovp = (union overhead*) get_from_chain(needed))) {
1286 DEBUG_m(PerlIO_printf(Perl_debug_log,
1287 "stealing %ld bytes from chain\n",
1288 (long) needed));
d720c441 1289 } else if ( (ovp = (union overhead*)
1290 get_from_bigger_buckets((rnu << BUCKET_POW2_SHIFT) + 1,
1291 needed)) ) {
e8bc2b5c 1292 DEBUG_m(PerlIO_printf(Perl_debug_log,
1293 "stealing %ld bytes from bigger buckets\n",
1294 (long) needed));
1295 } else if (needed <= sbrked_remains) {
1296 ovp = (union overhead *)(last_sbrk_top - sbrked_remains);
1297 sbrked_remains -= needed;
1298 last_op = (char*)ovp;
fa423c5b 1299 } else
1300 ovp = getpages(needed, &nblks, bucket);
e8bc2b5c 1301
fa423c5b 1302 if (!ovp)
1303 return;
e8bc2b5c 1304
8d063cd8 1305 /*
1306 * Add new memory allocated to that on
1307 * free list for this hash bucket.
1308 */
e8bc2b5c 1309 siz = BUCKET_SIZE(bucket);
cf5c4ad8 1310#ifdef PACK_MALLOC
72aaf631 1311 *(u_char*)ovp = bucket; /* Fill index. */
e8bc2b5c 1312 if (bucket <= MAX_PACKED) {
1313 ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket));
1314 nblks = N_BLKS(bucket);
cf5c4ad8 1315# ifdef DEBUGGING_MSTATS
e8bc2b5c 1316 start_slack += BLK_SHIFT(bucket);
cf5c4ad8 1317# endif
e8bc2b5c 1318 } else if (bucket < LOG_OF_MIN_ARENA * BUCKETS_PER_POW2) {
1319 ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket));
cf5c4ad8 1320 siz -= sizeof(union overhead);
72aaf631 1321 } else ovp++; /* One chunk per block. */
e8bc2b5c 1322#endif /* PACK_MALLOC */
72aaf631 1323 nextf[bucket] = ovp;
5f05dabc 1324#ifdef DEBUGGING_MSTATS
1325 nmalloc[bucket] += nblks;
e8bc2b5c 1326 if (bucket > MAX_PACKED) {
1327 start_slack += M_OVERHEAD * nblks;
1328 }
5f05dabc 1329#endif
8d063cd8 1330 while (--nblks > 0) {
72aaf631 1331 ovp->ov_next = (union overhead *)((caddr_t)ovp + siz);
1332 ovp = (union overhead *)((caddr_t)ovp + siz);
8d063cd8 1333 }
8595d6f1 1334 /* Not all sbrks return zeroed memory.*/
72aaf631 1335 ovp->ov_next = (union overhead *)NULL;
cf5c4ad8 1336#ifdef PACK_MALLOC
e8bc2b5c 1337 if (bucket == 7*BUCKETS_PER_POW2) { /* Special case, explanation is above. */
1338 union overhead *n_op = nextf[7*BUCKETS_PER_POW2]->ov_next;
1339 nextf[7*BUCKETS_PER_POW2] =
1340 (union overhead *)((caddr_t)nextf[7*BUCKETS_PER_POW2]
1341 - sizeof(union overhead));
1342 nextf[7*BUCKETS_PER_POW2]->ov_next = n_op;
cf5c4ad8 1343 }
1344#endif /* !PACK_MALLOC */
8d063cd8 1345}
1346
94b6baf5 1347Free_t
86058a2d 1348Perl_mfree(void *mp)
cea2e8a9 1349{
ee0007ab 1350 register MEM_SIZE size;
72aaf631 1351 register union overhead *ovp;
352d5a3a 1352 char *cp = (char*)mp;
cf5c4ad8 1353#ifdef PACK_MALLOC
1354 u_char bucket;
1355#endif
8d063cd8 1356
e8bc2b5c 1357 DEBUG_m(PerlIO_printf(Perl_debug_log,
1358 "0x%lx: (%05lu) free\n",
4a33f861 1359 (unsigned long)cp, (unsigned long)(PL_an++)));
45d8adaa 1360
cf5c4ad8 1361 if (cp == NULL)
1362 return;
72aaf631 1363 ovp = (union overhead *)((caddr_t)cp
e8bc2b5c 1364 - sizeof (union overhead) * CHUNK_SHIFT);
cf5c4ad8 1365#ifdef PACK_MALLOC
72aaf631 1366 bucket = OV_INDEX(ovp);
cf5c4ad8 1367#endif
e8bc2b5c 1368#ifdef IGNORE_SMALL_BAD_FREE
1369 if ((bucket >= FIRST_BUCKET_WITH_CHECK)
1370 && (OV_MAGIC(ovp, bucket) != MAGIC))
1371#else
1372 if (OV_MAGIC(ovp, bucket) != MAGIC)
1373#endif
1374 {
68dc0745 1375 static int bad_free_warn = -1;
cf5c4ad8 1376 if (bad_free_warn == -1) {
5fd9e9a4 1377 char *pbf = PerlEnv_getenv("PERL_BADFREE");
cf5c4ad8 1378 bad_free_warn = (pbf) ? atoi(pbf) : 1;
1379 }
1380 if (!bad_free_warn)
1381 return;
8990e307 1382#ifdef RCHECK
a687059c 1383 warn("%s free() ignored",
72aaf631 1384 ovp->ov_rmagic == RMAGIC - 1 ? "Duplicate" : "Bad");
8990e307 1385#else
d720c441 1386 warn("%s", "Bad free() ignored");
8990e307 1387#endif
8d063cd8 1388 return; /* sanity */
e8bc2b5c 1389 }
741df71a 1390 MALLOC_LOCK;
8d063cd8 1391#ifdef RCHECK
3541dd58 1392 ASSERT(ovp->ov_rmagic == RMAGIC, "chunk's head overwrite");
e8bc2b5c 1393 if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) {
1394 int i;
1395 MEM_SIZE nbytes = ovp->ov_size + 1;
1396
1397 if ((i = nbytes & 3)) {
1398 i = 4 - i;
1399 while (i--) {
3541dd58 1400 ASSERT(*((char *)((caddr_t)ovp + nbytes - RSLOP + i))
d720c441 1401 == RMAGIC_C, "chunk's tail overwrite");
e8bc2b5c 1402 }
1403 }
1404 nbytes = (nbytes + 3) &~ 3;
3541dd58 1405 ASSERT(*(u_int *)((caddr_t)ovp + nbytes - RSLOP) == RMAGIC, "chunk's tail overwrite");
e8bc2b5c 1406 }
72aaf631 1407 ovp->ov_rmagic = RMAGIC - 1;
8d063cd8 1408#endif
3541dd58 1409 ASSERT(OV_INDEX(ovp) < NBUCKETS, "chunk's head overwrite");
72aaf631 1410 size = OV_INDEX(ovp);
1411 ovp->ov_next = nextf[size];
1412 nextf[size] = ovp;
741df71a 1413 MALLOC_UNLOCK;
8d063cd8 1414}
1415
1416/*
1417 * When a program attempts "storage compaction" as mentioned in the
1418 * old malloc man page, it realloc's an already freed block. Usually
1419 * this is the last block it freed; occasionally it might be farther
1420 * back. We have to search all the free lists for the block in order
1421 * to determine its bucket: 1st we make one pass thru the lists
1422 * checking only the first block in each; if that fails we search
378cc40b 1423 * ``reall_srchlen'' blocks in each list for a match (the variable
8d063cd8 1424 * is extern so the caller can modify it). If that fails we just copy
1425 * however many bytes was given to realloc() and hope it's not huge.
1426 */
22c35a8c 1427#define reall_srchlen 4 /* 4 should be plenty, -1 =>'s whole list */
8d063cd8 1428
2304df62 1429Malloc_t
86058a2d 1430Perl_realloc(void *mp, size_t nbytes)
cea2e8a9 1431{
ee0007ab 1432 register MEM_SIZE onb;
72aaf631 1433 union overhead *ovp;
d720c441 1434 char *res;
1435 int prev_bucket;
e8bc2b5c 1436 register int bucket;
1437 int was_alloced = 0, incr;
352d5a3a 1438 char *cp = (char*)mp;
8d063cd8 1439
e8bc2b5c 1440#if defined(DEBUGGING) || !defined(PERL_CORE)
ee0007ab 1441 MEM_SIZE size = nbytes;
45d8adaa 1442
45d8adaa 1443 if ((long)nbytes < 0)
cea2e8a9 1444 croak("%s", "panic: realloc");
45d8adaa 1445#endif
e8bc2b5c 1446
1447 BARK_64K_LIMIT("Reallocation",nbytes,size);
1448 if (!cp)
86058a2d 1449 return Perl_malloc(nbytes);
45d8adaa 1450
741df71a 1451 MALLOC_LOCK;
72aaf631 1452 ovp = (union overhead *)((caddr_t)cp
e8bc2b5c 1453 - sizeof (union overhead) * CHUNK_SHIFT);
1454 bucket = OV_INDEX(ovp);
1455#ifdef IGNORE_SMALL_BAD_FREE
1456 if ((bucket < FIRST_BUCKET_WITH_CHECK)
1457 || (OV_MAGIC(ovp, bucket) == MAGIC))
1458#else
1459 if (OV_MAGIC(ovp, bucket) == MAGIC)
1460#endif
1461 {
55497cff 1462 was_alloced = 1;
8d063cd8 1463 } else {
1464 /*
1465 * Already free, doing "compaction".
1466 *
1467 * Search for the old block of memory on the
1468 * free list. First, check the most common
1469 * case (last element free'd), then (this failing)
378cc40b 1470 * the last ``reall_srchlen'' items free'd.
8d063cd8 1471 * If all lookups fail, then assume the size of
1472 * the memory block being realloc'd is the
1473 * smallest possible.
1474 */
e8bc2b5c 1475 if ((bucket = findbucket(ovp, 1)) < 0 &&
1476 (bucket = findbucket(ovp, reall_srchlen)) < 0)
1477 bucket = 0;
8d063cd8 1478 }
e8bc2b5c 1479 onb = BUCKET_SIZE_REAL(bucket);
55497cff 1480 /*
1481 * avoid the copy if same size block.
e8bc2b5c 1482 * We are not aggressive with boundary cases. Note that it might
 1483 * (for a small number of cases) give a false negative if
55497cff 1484 * both new size and old one are in the bucket for
e8bc2b5c 1485 * FIRST_BIG_POW2, but the new one is near the lower end.
1486 *
1487 * We do not try to go to 1.5 times smaller bucket so far.
55497cff 1488 */
e8bc2b5c 1489 if (nbytes > onb) incr = 1;
1490 else {
1491#ifdef DO_NOT_TRY_HARDER_WHEN_SHRINKING
1492 if ( /* This is a little bit pessimal if PACK_MALLOC: */
1493 nbytes > ( (onb >> 1) - M_OVERHEAD )
1494# ifdef TWO_POT_OPTIMIZE
1495 || (bucket == FIRST_BIG_POW2 && nbytes >= LAST_SMALL_BOUND )
1496# endif
1497 )
1498#else /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */
1499 prev_bucket = ( (bucket > MAX_PACKED + 1)
1500 ? bucket - BUCKETS_PER_POW2
1501 : bucket - 1);
1502 if (nbytes > BUCKET_SIZE_REAL(prev_bucket))
1503#endif /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */
1504 incr = 0;
1505 else incr = -1;
1506 }
1507 if (!was_alloced
2ce36478 1508#ifdef STRESS_REALLOC
e8bc2b5c 1509 || 1 /* always do it the hard way */
2ce36478 1510#endif
e8bc2b5c 1511 ) goto hard_way;
1512 else if (incr == 0) {
852c2e52 1513 inplace_label:
a687059c 1514#ifdef RCHECK
1515 /*
1516 * Record new allocated size of block and
1517 * bound space with magic numbers.
1518 */
72aaf631 1519 if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) {
e8bc2b5c 1520 int i, nb = ovp->ov_size + 1;
1521
1522 if ((i = nb & 3)) {
1523 i = 4 - i;
1524 while (i--) {
3541dd58 1525 ASSERT(*((char *)((caddr_t)ovp + nb - RSLOP + i)) == RMAGIC_C, "chunk's tail overwrite");
e8bc2b5c 1526 }
1527 }
1528 nb = (nb + 3) &~ 3;
3541dd58 1529 ASSERT(*(u_int *)((caddr_t)ovp + nb - RSLOP) == RMAGIC, "chunk's tail overwrite");
a687059c 1530 /*
1531 * Convert amount of memory requested into
1532 * closest block size stored in hash buckets
1533 * which satisfies request. Account for
1534 * space used per block for accounting.
1535 */
cf5c4ad8 1536 nbytes += M_OVERHEAD;
72aaf631 1537 ovp->ov_size = nbytes - 1;
e8bc2b5c 1538 if ((i = nbytes & 3)) {
1539 i = 4 - i;
1540 while (i--)
1541 *((char *)((caddr_t)ovp + nbytes - RSLOP + i))
1542 = RMAGIC_C;
1543 }
1544 nbytes = (nbytes + 3) &~ 3;
72aaf631 1545 *((u_int *)((caddr_t)ovp + nbytes - RSLOP)) = RMAGIC;
a687059c 1546 }
1547#endif
45d8adaa 1548 res = cp;
741df71a 1549 MALLOC_UNLOCK;
42ac124e 1550 DEBUG_m(PerlIO_printf(Perl_debug_log,
1551 "0x%lx: (%05lu) realloc %ld bytes inplace\n",
1552 (unsigned long)res,(unsigned long)(PL_an++),
1553 (long)size));
e8bc2b5c 1554 } else if (incr == 1 && (cp - M_OVERHEAD == last_op)
1555 && (onb > (1 << LOG_OF_MIN_ARENA))) {
1556 MEM_SIZE require, newarena = nbytes, pow;
1557 int shiftr;
1558
1559 POW2_OPTIMIZE_ADJUST(newarena);
1560 newarena = newarena + M_OVERHEAD;
1561 /* newarena = (newarena + 3) &~ 3; */
1562 shiftr = (newarena - 1) >> LOG_OF_MIN_ARENA;
1563 pow = LOG_OF_MIN_ARENA + 1;
1564 /* apart from this loop, this is O(1) */
1565 while (shiftr >>= 1)
1566 pow++;
1567 newarena = (1 << pow) + POW2_OPTIMIZE_SURPLUS(pow * BUCKETS_PER_POW2);
1568 require = newarena - onb - M_OVERHEAD;
1569
fa423c5b 1570 if (getpages_adjacent(require)) {
e8bc2b5c 1571#ifdef DEBUGGING_MSTATS
fa423c5b 1572 nmalloc[bucket]--;
1573 nmalloc[pow * BUCKETS_PER_POW2]++;
e8bc2b5c 1574#endif
fa423c5b 1575 *(cp - M_OVERHEAD) = pow * BUCKETS_PER_POW2; /* Fill index. */
1576 goto inplace_label;
1577 } else
1578 goto hard_way;
e8bc2b5c 1579 } else {
1580 hard_way:
741df71a 1581 MALLOC_UNLOCK;
42ac124e 1582 DEBUG_m(PerlIO_printf(Perl_debug_log,
1583 "0x%lx: (%05lu) realloc %ld bytes the hard way\n",
1584 (unsigned long)cp,(unsigned long)(PL_an++),
1585 (long)size));
86058a2d 1586 if ((res = (char*)Perl_malloc(nbytes)) == NULL)
e8bc2b5c 1587 return (NULL);
1588 if (cp != res) /* common optimization */
1589 Copy(cp, res, (MEM_SIZE)(nbytes<onb?nbytes:onb), char);
1590 if (was_alloced)
86058a2d 1591 Perl_mfree(cp);
45d8adaa 1592 }
2304df62 1593 return ((Malloc_t)res);
8d063cd8 1594}
1595
1596/*
1597 * Search ``srchlen'' elements of each free list for a block whose
1598 * header starts at ``freep''. If srchlen is -1 search the whole list.
1599 * Return bucket number, or -1 if not found.
1600 */
ee0007ab 1601static int
8ac85365 1602findbucket(union overhead *freep, int srchlen)
8d063cd8 1603{
1604 register union overhead *p;
1605 register int i, j;
1606
1607 for (i = 0; i < NBUCKETS; i++) {
1608 j = 0;
1609 for (p = nextf[i]; p && j != srchlen; p = p->ov_next) {
1610 if (p == freep)
1611 return (i);
1612 j++;
1613 }
1614 }
1615 return (-1);
1616}
1617
cf5c4ad8 1618Malloc_t
86058a2d 1619Perl_calloc(register size_t elements, register size_t size)
cf5c4ad8 1620{
1621 long sz = elements * size;
86058a2d 1622 Malloc_t p = Perl_malloc(sz);
cf5c4ad8 1623
1624 if (p) {
1625 memset((void*)p, 0, sz);
1626 }
1627 return p;
1628}
1629
e8bc2b5c 1630MEM_SIZE
cea2e8a9 1631Perl_malloced_size(void *p)
e8bc2b5c 1632{
8d6dde3e 1633 union overhead *ovp = (union overhead *)
1634 ((caddr_t)p - sizeof (union overhead) * CHUNK_SHIFT);
1635 int bucket = OV_INDEX(ovp);
1636#ifdef RCHECK
1637 /* The caller wants to have a complete control over the chunk,
1638 disable the memory checking inside the chunk. */
1639 if (bucket <= MAX_SHORT_BUCKET) {
1640 MEM_SIZE size = BUCKET_SIZE_REAL(bucket);
1641 ovp->ov_size = size + M_OVERHEAD - 1;
1642 *((u_int *)((caddr_t)ovp + size + M_OVERHEAD - RSLOP)) = RMAGIC;
1643 }
1644#endif
e8bc2b5c 1645 return BUCKET_SIZE_REAL(bucket);
1646}
1647
e8bc2b5c 1648# ifdef BUCKETS_ROOT2
1649# define MIN_EVEN_REPORT 6
1650# else
1651# define MIN_EVEN_REPORT MIN_BUCKET
1652# endif
8d063cd8 1653/*
1654 * mstats - print out statistics about malloc
1655 *
1656 * Prints two lines of numbers, one showing the length of the free list
1657 * for each size category, the second showing the number of mallocs -
1658 * frees for each size category.
1659 */
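/*
 * Shape of the report produced below (numbers invented, layout taken
 * from the PerlIO_printf calls in this function):
 *
 *	Memory allocation statistics <s> (buckets 4(8)..8192(8192)
 *	   nnnnn free:    17    15    14 ...
 *	   nnnnn used:     5     9    12 ...
 *	Total sbrk(): nnnnn/nn:nn. Odd ends: pad+heads+chain+tail: n+n+n+n.
 */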
ee0007ab 1660void
864dbfa3 1661Perl_dump_mstats(pTHX_ char *s)
8d063cd8 1662{
df31f264 1663#ifdef DEBUGGING_MSTATS
8d063cd8 1664 register int i, j;
1665 register union overhead *p;
e8bc2b5c 1666 int topbucket=0, topbucket_ev=0, topbucket_odd=0, totfree=0, total=0;
c07a80fd 1667 u_int nfree[NBUCKETS];
e8bc2b5c 1668 int total_chain = 0;
1669 struct chunk_chain_s* nextchain = chunk_chain;
8d063cd8 1670
e8bc2b5c 1671 for (i = MIN_BUCKET ; i < NBUCKETS; i++) {
8d063cd8 1672 for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
1673 ;
c07a80fd 1674 nfree[i] = j;
e8bc2b5c 1675 totfree += nfree[i] * BUCKET_SIZE_REAL(i);
1676 total += nmalloc[i] * BUCKET_SIZE_REAL(i);
1677 if (nmalloc[i]) {
1678 i % 2 ? (topbucket_odd = i) : (topbucket_ev = i);
1679 topbucket = i;
1680 }
c07a80fd 1681 }
1682 if (s)
e8bc2b5c 1683 PerlIO_printf(PerlIO_stderr(),
d720c441 1684 "Memory allocation statistics %s (buckets %ld(%ld)..%ld(%ld)\n",
e8bc2b5c 1685 s,
d720c441 1686 (long)BUCKET_SIZE_REAL(MIN_BUCKET),
1687 (long)BUCKET_SIZE(MIN_BUCKET),
1688 (long)BUCKET_SIZE_REAL(topbucket), (long)BUCKET_SIZE(topbucket));
5f05dabc 1689 PerlIO_printf(PerlIO_stderr(), "%8d free:", totfree);
e8bc2b5c 1690 for (i = MIN_EVEN_REPORT; i <= topbucket; i += BUCKETS_PER_POW2) {
1691 PerlIO_printf(PerlIO_stderr(),
1692 ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
1693 ? " %5d"
1694 : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
1695 nfree[i]);
1696 }
1697#ifdef BUCKETS_ROOT2
1698 PerlIO_printf(PerlIO_stderr(), "\n\t ");
1699 for (i = MIN_BUCKET + 1; i <= topbucket_odd; i += BUCKETS_PER_POW2) {
1700 PerlIO_printf(PerlIO_stderr(),
1701 ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
1702 ? " %5d"
1703 : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
1704 nfree[i]);
8d063cd8 1705 }
e8bc2b5c 1706#endif
5f05dabc 1707 PerlIO_printf(PerlIO_stderr(), "\n%8d used:", total - totfree);
e8bc2b5c 1708 for (i = MIN_EVEN_REPORT; i <= topbucket; i += BUCKETS_PER_POW2) {
1709 PerlIO_printf(PerlIO_stderr(),
1710 ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
1711 ? " %5d"
1712 : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
1713 nmalloc[i] - nfree[i]);
c07a80fd 1714 }
e8bc2b5c 1715#ifdef BUCKETS_ROOT2
1716 PerlIO_printf(PerlIO_stderr(), "\n\t ");
1717 for (i = MIN_BUCKET + 1; i <= topbucket_odd; i += BUCKETS_PER_POW2) {
1718 PerlIO_printf(PerlIO_stderr(),
1719 ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
1720 ? " %5d"
1721 : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
1722 nmalloc[i] - nfree[i]);
1723 }
1724#endif
1725 while (nextchain) {
1726 total_chain += nextchain->size;
1727 nextchain = nextchain->next;
1728 }
1729 PerlIO_printf(PerlIO_stderr(), "\nTotal sbrk(): %d/%d:%d. Odd ends: pad+heads+chain+tail: %d+%d+%d+%d.\n",
1730 goodsbrk + sbrk_slack, sbrks, sbrk_good, sbrk_slack,
1731 start_slack, total_chain, sbrked_remains);
df31f264 1732#endif /* DEBUGGING_MSTATS */
c07a80fd 1733}
a687059c 1734#endif /* lint */
cf5c4ad8 1735
cf5c4ad8 1736#ifdef USE_PERL_SBRK
1737
8f1f23e8 1738# if defined(__MACHTEN_PPC__) || defined(NeXT) || defined(__NeXT__)
38ac2dc8 1739# define PERL_SBRK_VIA_MALLOC
1740/*
1741 * MachTen's malloc() returns a buffer aligned on a two-byte boundary.
1742 * While this is adequate, it may slow down access to longer data
1743 * types by forcing multiple memory accesses. It also causes
1744 * complaints when RCHECK is in force. So we allocate six bytes
1745 * more than we need to, and return an address rounded up to an
1746 * eight-byte boundary.
1747 *
1748 * 980701 Dominic Dunlop <domo@computer.org>
1749 */
5bbd1ef5 1750# define SYSTEM_ALLOC_ALIGNMENT 2
38ac2dc8 1751# endif
1752
760ac839 1753# ifdef PERL_SBRK_VIA_MALLOC
cf5c4ad8 1754
 1755/* It may seem schizophrenic to use perl's malloc and let it call system */
 1756/* malloc; the only reason is that version 3.2 of the OS had */
 1757/* frequent core dumps within nxzonefreenolock. This sbrk routine put an */
 1758/* end to the cores. */
1759
38ac2dc8 1760# ifndef SYSTEM_ALLOC
1761# define SYSTEM_ALLOC(a) malloc(a)
1762# endif
5bbd1ef5 1763# ifndef SYSTEM_ALLOC_ALIGNMENT
1764# define SYSTEM_ALLOC_ALIGNMENT MEM_ALIGNBYTES
1765# endif
cf5c4ad8 1766
760ac839 1767# endif /* PERL_SBRK_VIA_MALLOC */
cf5c4ad8 1768
1769static IV Perl_sbrk_oldchunk;
1770static long Perl_sbrk_oldsize;
1771
760ac839 1772# define PERLSBRK_32_K (1<<15)
1773# define PERLSBRK_64_K (1<<16)
cf5c4ad8 1774
b63effbb 1775Malloc_t
df0003d4 1776Perl_sbrk(int size)
cf5c4ad8 1777{
1778 IV got;
1779 int small, reqsize;
1780
1781 if (!size) return 0;
55497cff 1782#ifdef PERL_CORE
cf5c4ad8 1783 reqsize = size; /* just for the DEBUG_m statement */
1784#endif
57569e04 1785#ifdef PACK_MALLOC
1786 size = (size + 0x7ff) & ~0x7ff;
1787#endif
cf5c4ad8 1788 if (size <= Perl_sbrk_oldsize) {
1789 got = Perl_sbrk_oldchunk;
1790 Perl_sbrk_oldchunk += size;
1791 Perl_sbrk_oldsize -= size;
1792 } else {
1793 if (size >= PERLSBRK_32_K) {
1794 small = 0;
1795 } else {
cf5c4ad8 1796 size = PERLSBRK_64_K;
1797 small = 1;
1798 }
5bbd1ef5 1799# if NEEDED_ALIGNMENT > SYSTEM_ALLOC_ALIGNMENT
1800 size += NEEDED_ALIGNMENT - SYSTEM_ALLOC_ALIGNMENT;
1801# endif
cf5c4ad8 1802 got = (IV)SYSTEM_ALLOC(size);
5bbd1ef5 1803# if NEEDED_ALIGNMENT > SYSTEM_ALLOC_ALIGNMENT
5a7d6335 1804 got = (got + NEEDED_ALIGNMENT - 1) & ~(NEEDED_ALIGNMENT - 1);
5bbd1ef5 1805# endif
cf5c4ad8 1806 if (small) {
1807 /* Chunk is small, register the rest for future allocs. */
1808 Perl_sbrk_oldchunk = got + reqsize;
1809 Perl_sbrk_oldsize = size - reqsize;
1810 }
1811 }
1812
fb73857a 1813 DEBUG_m(PerlIO_printf(Perl_debug_log, "sbrk malloc size %ld (reqsize %ld), left size %ld, give addr 0x%lx\n",
cf5c4ad8 1814 size, reqsize, Perl_sbrk_oldsize, got));
cf5c4ad8 1815
1816 return (void *)got;
1817}
1818
1819#endif /* ! defined USE_PERL_SBRK */