Commit | Line | Data |
a0d0e21e |
1 | /* malloc.c |
8d063cd8 |
2 | * |
8d063cd8 |
3 | */ |
4 | |
87c6202a |
5 | /* |
741df71a |
6 | Here are some notes on configuring Perl's malloc. (For non-perl |
7 | usage see below.) |
87c6202a |
8 | |
9 | There are two macros which serve as bulk disablers of advanced |
10 | features of this malloc: NO_FANCY_MALLOC, PLAIN_MALLOC (undef by |
11 | default). Look in the list of default values below to understand |
12 | their exact effect. Defining NO_FANCY_MALLOC returns malloc.c to the |
13 | state of the malloc in Perl 5.004. Additionally defining PLAIN_MALLOC |
14 | returns it to the state as of Perl 5.000. |
15 | |
16 | Note that some of the settings below may be ignored in the code based |
17 | on values of other macros. The PERL_CORE symbol is only defined when |
18 | perl itself is being compiled (so malloc can make some assumptions |
19 | about perl's facilities being available to it). |
20 | |
21 | Each config option has a short description, followed by its name, |
22 | default value, and a comment about the default (if applicable). Some |
23 | options take a precise value, while the others are just boolean. |
24 | The boolean ones are listed first. |
25 | |
26 | # Enable code for an emergency memory pool in $^M. See perlvar.pod |
27 | # for a description of $^M. |
28 | PERL_EMERGENCY_SBRK (!PLAIN_MALLOC && PERL_CORE) |
29 | |
30 | # Enable code for printing memory statistics. |
31 | DEBUGGING_MSTATS (!PLAIN_MALLOC && PERL_CORE) |
32 | |
33 | # Move allocation info for small buckets into separate areas. |
34 | # Memory optimization (especially for small allocations of |
35 | # less than 64 bytes). Since perl usually makes a large number |
36 | # of small allocations, this is usually a win. |
37 | PACK_MALLOC (!PLAIN_MALLOC && !RCHECK) |
38 | |
39 | # Add one page to big powers of two when calculating bucket size. |
40 | # This is targeted at big allocations, which are common in image |
41 | # processing. |
42 | TWO_POT_OPTIMIZE !PLAIN_MALLOC |
43 | |
44 | # Use intermediate bucket sizes between powers-of-two. This is |
45 | # generally a memory optimization, and a (small) speed pessimization. |
46 | BUCKETS_ROOT2 !NO_FANCY_MALLOC |
47 | |
48 | # Do not check small deallocations for bad free(). Memory |
49 | # and speed optimization, error reporting pessimization. |
50 | IGNORE_SMALL_BAD_FREE (!NO_FANCY_MALLOC && !RCHECK) |
51 | |
52 | # Use table lookup to decide in which bucket a given allocation will go. |
53 | SMALL_BUCKET_VIA_TABLE !NO_FANCY_MALLOC |
54 | |
38ac2dc8 |
55 | # Use a perl-defined sbrk() instead of the (presumably broken or |
56 | # missing) system-supplied sbrk(). |
57 | USE_PERL_SBRK undef |
58 | |
59 | # Use system malloc() (or calloc() etc.) to emulate sbrk(). Normally |
60 | # only used with broken sbrk()s. |
87c6202a |
61 | PERL_SBRK_VIA_MALLOC undef |
62 | |
38ac2dc8 |
63 | # Which allocator to use if PERL_SBRK_VIA_MALLOC |
64 | SYSTEM_ALLOC(a) malloc(a) |
65 | |
87c6202a |
66 | # Disable memory overwrite checking with DEBUGGING. Memory and speed |
67 | # optimization, error reporting pessimization. |
68 | NO_RCHECK undef |
69 | |
70 | # Enable memory overwrite checking with DEBUGGING. Memory and speed |
71 | # pessimization, error reporting optimization |
72 | RCHECK (DEBUGGING && !NO_RCHECK) |
73 | |
74 | # Failed allocations bigger than this size croak (if |
75 | # PERL_EMERGENCY_SBRK is enabled) without touching $^M. See |
76 | # perlvar.pod for a description of $^M. |
77 | BIG_SIZE (1<<16) # 64K |
78 | |
79 | # Starting from this power of two, add an extra page to the |
80 | # size of the bucket. This enables optimized allocations of sizes |
81 | # close to powers of 2. Note that the value is indexed at 0. |
82 | FIRST_BIG_POW2 15 # 32K, 16K is used too often |
83 | |
84 | # Estimate of minimal memory footprint. malloc uses this value to |
85 | # request reasonably large blocks of memory from the system. |
86 | FIRST_SBRK (48*1024) |
87 | |
88 | # Round up sbrk()s to multiples of this. |
89 | MIN_SBRK 2048 |
90 | |
91 | # Round up sbrk()s to multiples of this percent of footprint. |
92 | MIN_SBRK_FRAC 3 |
93 | |
94 | # Add this much memory to big powers of two to get the bucket size. |
95 | PERL_PAGESIZE 4096 |
96 | |
97 | # This many sbrk() discontinuities should be tolerated even |
98 | # from the start without deciding that sbrk() is usually |
99 | # discontinuous. |
100 | SBRK_ALLOW_FAILURES 3 |
101 | |
102 | # This many continuous sbrk()s compensate for one discontinuous one. |
103 | SBRK_FAILURE_PRICE 50 |
104 | |
28ac10b1 |
105 | # Some configurations may ask for 12-byte-or-so allocations which |
106 | # require 8-byte alignment (?!). In such a situation one needs to |
107 | # define this to disable the 12-byte bucket (this will increase the memory footprint). |
108 | STRICT_ALIGNMENT undef |
109 | |
87c6202a |
110 | This implementation assumes that calling PerlIO_printf() does not |
111 | result in any memory allocation calls (used during a panic). |
112 | |
113 | */ |
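/*
   An illustrative sketch (added, not part of the original notes) of how
   these options are typically chosen at build time.  The macro names are
   exactly the ones documented above; passing them as -D compiler flags is
   an assumption about the build, not a requirement of this file:

	-DNO_FANCY_MALLOC			~ Perl 5.004 malloc behaviour
	-DNO_FANCY_MALLOC -DPLAIN_MALLOC	~ Perl 5.000 malloc behaviour
	-DDEBUGGING -DNO_RCHECK			debugging without overwrite checks
 */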
114 | |
741df71a |
115 | /* |
116 | If used outside of Perl environment, it may be useful to redefine |
117 | the following macros (listed below with defaults): |
118 | |
119 | # Type of address returned by allocation functions |
120 | Malloc_t void * |
121 | |
122 | # Type of size argument for allocation functions |
123 | MEM_SIZE unsigned long |
124 | |
125 | # Maximal value in LONG |
126 | LONG_MAX 0x7FFFFFFF |
127 | |
128 | # Unsigned integer type big enough to keep a pointer |
129 | UV unsigned long |
130 | |
131 | # Type of pointer with 1-byte granularity |
132 | caddr_t char * |
133 | |
134 | # Type returned by free() |
135 | Free_t void |
136 | |
137 | # Fatal error reporting function |
138 | croak(format, arg) warn(idem) + exit(1) |
139 | |
140 | # Error reporting function |
141 | warn(format, arg) fprintf(stderr, idem) |
142 | |
143 | # Locking/unlocking for MT operation |
64f996d1 |
144 | MALLOC_LOCK MUTEX_LOCK(&PL_malloc_mutex) |
145 | MALLOC_UNLOCK MUTEX_UNLOCK(&PL_malloc_mutex) |
741df71a |
146 | |
147 | # Locking/unlocking mutex for MT operation |
148 | MUTEX_LOCK(l) void |
149 | MUTEX_UNLOCK(l) void |
150 | */ |
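/*
   A minimal sketch of such redefinitions for standalone (non-perl) use.
   It simply restates the defaults listed above; the stdio-based warn()
   and croak() bodies are the documented fallbacks, and single-threaded
   use is assumed (so the locking macros expand to nothing):

	#define Malloc_t		void *
	#define MEM_SIZE		unsigned long
	#define Free_t			void
	#define warn(fmt, arg)		fprintf(stderr, (fmt), (arg))
	#define croak(fmt, arg)		(warn((fmt), (arg)), exit(1))
	#define MUTEX_LOCK(l)
	#define MUTEX_UNLOCK(l)
 */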
151 | |
e8bc2b5c |
152 | #ifndef NO_FANCY_MALLOC |
153 | # ifndef SMALL_BUCKET_VIA_TABLE |
154 | # define SMALL_BUCKET_VIA_TABLE |
155 | # endif |
156 | # ifndef BUCKETS_ROOT2 |
157 | # define BUCKETS_ROOT2 |
158 | # endif |
159 | # ifndef IGNORE_SMALL_BAD_FREE |
160 | # define IGNORE_SMALL_BAD_FREE |
161 | # endif |
3562ef9b |
162 | #endif |
163 | |
e8bc2b5c |
164 | #ifndef PLAIN_MALLOC /* Bulk enable features */ |
165 | # ifndef PACK_MALLOC |
166 | # define PACK_MALLOC |
167 | # endif |
168 | # ifndef TWO_POT_OPTIMIZE |
169 | # define TWO_POT_OPTIMIZE |
170 | # endif |
d720c441 |
171 | # if defined(PERL_CORE) && !defined(PERL_EMERGENCY_SBRK) |
172 | # define PERL_EMERGENCY_SBRK |
e8bc2b5c |
173 | # endif |
174 | # if defined(PERL_CORE) && !defined(DEBUGGING_MSTATS) |
175 | # define DEBUGGING_MSTATS |
176 | # endif |
177 | #endif |
178 | |
179 | #define MIN_BUC_POW2 (sizeof(void*) > 4 ? 3 : 2) /* Allow for 4-byte arena. */ |
180 | #define MIN_BUCKET (MIN_BUC_POW2 * BUCKETS_PER_POW2) |
181 | |
182 | #if !(defined(I286) || defined(atarist)) |
183 | /* take 2k unless the block is bigger than that */ |
184 | # define LOG_OF_MIN_ARENA 11 |
185 | #else |
186 | /* take 16k unless the block is bigger than that |
187 | (80286s like large segments!), probably good on the atari too */ |
188 | # define LOG_OF_MIN_ARENA 14 |
189 | #endif |
190 | |
8d063cd8 |
191 | #ifndef lint |
1944739a |
192 | # if defined(DEBUGGING) && !defined(NO_RCHECK) |
193 | # define RCHECK |
194 | # endif |
e8bc2b5c |
195 | # if defined(RCHECK) && defined(IGNORE_SMALL_BAD_FREE) |
196 | # undef IGNORE_SMALL_BAD_FREE |
197 | # endif |
8d063cd8 |
198 | /* |
199 | * malloc.c (Caltech) 2/21/82 |
200 | * Chris Kingsley, kingsley@cit-20. |
201 | * |
202 | * This is a very fast storage allocator. It allocates blocks of a small |
203 | * number of different sizes, and keeps free lists of each size. Blocks that |
204 | * don't exactly fit are passed up to the next larger size. In this |
205 | * implementation, the available sizes are 2^n-4 (or 2^n-12) bytes long. |
cf5c4ad8 |
206 | * If PACK_MALLOC is defined, small blocks are 2^n bytes long. |
8d063cd8 |
207 | * This is designed for use in a program that uses vast quantities of memory, |
741df71a |
208 | * but bombs when it runs out. |
209 | * |
210 | * Modifications Copyright Ilya Zakharevich 1996-98. |
211 | * |
212 | * Still very quick, but much more thrifty. (Std config is 10% slower |
213 | * than it was, and takes 67% of old heap size for typical usage.) |
214 | * |
215 | * Allocations of small blocks are now table-driven to many different |
216 |  * buckets. Sizes of really big buckets are increased to accommodate |
217 | * common size=power-of-2 blocks. Running-out-of-memory is made into |
218 | * an exception. Deeply configurable and thread-safe. |
219 | * |
8d063cd8 |
220 | */ |
221 | |
d720c441 |
222 | #ifdef PERL_CORE |
223 | # include "EXTERN.h" |
224 | # include "perl.h" |
225 | #else |
226 | # ifdef PERL_FOR_X2P |
227 | # include "../EXTERN.h" |
228 | # include "../perl.h" |
229 | # else |
230 | # include <stdlib.h> |
231 | # include <stdio.h> |
232 | # include <memory.h> |
233 | # define _(arg) arg |
234 | # ifndef Malloc_t |
235 | # define Malloc_t void * |
236 | # endif |
237 | # ifndef MEM_SIZE |
238 | # define MEM_SIZE unsigned long |
239 | # endif |
240 | # ifndef LONG_MAX |
241 | # define LONG_MAX 0x7FFFFFFF |
242 | # endif |
243 | # ifndef UV |
244 | # define UV unsigned long |
245 | # endif |
246 | # ifndef caddr_t |
247 | # define caddr_t char * |
248 | # endif |
249 | # ifndef Free_t |
250 | # define Free_t void |
251 | # endif |
252 | # define Copy(s,d,n,t) (void)memcpy((char*)(d),(char*)(s), (n) * sizeof(t)) |
253 | # define PerlEnv_getenv getenv |
254 | # define PerlIO_printf fprintf |
255 | # define PerlIO_stderr() stderr |
256 | # endif |
e8bc2b5c |
257 | # ifndef croak /* make depend */ |
741df71a |
258 | # define croak(mess, arg) (warn((mess), (arg)), exit(1)) |
d720c441 |
259 | # endif |
260 | # ifndef warn |
741df71a |
261 | # define warn(mess, arg) fprintf(stderr, (mess), (arg)) |
e8bc2b5c |
262 | # endif |
263 | # ifdef DEBUG_m |
264 | # undef DEBUG_m |
265 | # endif |
266 | # define DEBUG_m(a) |
267 | # ifdef DEBUGGING |
268 | # undef DEBUGGING |
269 | # endif |
270 | #endif |
271 | |
272 | #ifndef MUTEX_LOCK |
273 | # define MUTEX_LOCK(l) |
274 | #endif |
275 | |
276 | #ifndef MUTEX_UNLOCK |
277 | # define MUTEX_UNLOCK(l) |
278 | #endif |
279 | |
741df71a |
280 | #ifndef MALLOC_LOCK |
64f996d1 |
281 | # define MALLOC_LOCK MUTEX_LOCK(&PL_malloc_mutex) |
741df71a |
282 | #endif |
283 | |
284 | #ifndef MALLOC_UNLOCK |
64f996d1 |
285 | # define MALLOC_UNLOCK MUTEX_UNLOCK(&PL_malloc_mutex) |
741df71a |
286 | #endif |
287 | |
760ac839 |
288 | #ifdef DEBUGGING |
e8bc2b5c |
289 | # undef DEBUG_m |
4a33f861 |
290 | # define DEBUG_m(a) if (PL_debug & 128) a |
760ac839 |
291 | #endif |
292 | |
e9397286 |
293 | /* |
294 | * Layout of memory: |
295 | * ~~~~~~~~~~~~~~~~ |
296 | * The memory is broken into "blocks" which occupy multiples of 2K (and |
297 | * generally speaking, have size "close" to a power of 2). The addresses |
298 | * of such *unused* blocks are kept in nextf[i] with big enough i. (nextf |
299 | * is an array of linked lists.) (Addresses of used blocks are not known.) |
300 | * |
301 |  * Moreover, since the algorithm may try to "bite" smaller blocks out |
302 | * of unused bigger ones, there are also regions of "irregular" size, |
303 | * managed separately, by a linked list chunk_chain. |
304 | * |
305 | * The third type of storage is the sbrk()ed-but-not-yet-used space, its |
306 | * end and size are kept in last_sbrk_top and sbrked_remains. |
307 | * |
308 | * Growing blocks "in place": |
309 | * ~~~~~~~~~~~~~~~~~~~~~~~~~ |
310 | * The address of the block with the greatest address is kept in last_op |
311 | * (if not known, last_op is 0). If it is known that the memory above |
312 | * last_op is not continuous, or contains a chunk from chunk_chain, |
313 | * last_op is set to 0. |
314 | * |
315 | * The chunk with address last_op may be grown by expanding into |
316 | * sbrk()ed-but-not-yet-used space, or trying to sbrk() more continuous |
317 | * memory. |
318 | * |
319 | * Management of last_op: |
320 | * ~~~~~~~~~~~~~~~~~~~~~ |
321 | * |
322 | * free() never changes the boundaries of blocks, so is not relevant. |
323 | * |
324 | * The only way realloc() may change the boundaries of blocks is if it |
325 | * grows a block "in place". However, in the case of success such a |
326 | * chunk is automatically last_op, and it remains last_op. In the case |
327 | * of failure getpages_adjacent() clears last_op. |
328 | * |
329 | * malloc() may change blocks by calling morecore() only. |
330 | * |
331 | * morecore() may create new blocks by: |
332 | * a) biting pieces from chunk_chain (cannot create one above last_op); |
333 | * b) biting a piece from an unused block (if block was last_op, this |
334 | * may create a chunk from chain above last_op, thus last_op is |
335 | * invalidated in such a case). |
336 | * c) biting of sbrk()ed-but-not-yet-used space. This creates |
337 | * a block which is last_op. |
338 | * d) Allocating new pages by calling getpages(); |
339 | * |
340 | * getpages() creates a new block. It marks last_op at the bottom of |
341 | * the chunk of memory it returns. |
342 | * |
343 | * Active pages footprint: |
344 | * ~~~~~~~~~~~~~~~~~~~~~~ |
345 | * Note that we do not need to traverse the lists in nextf[i], just take |
346 | * the first element of this list. However, we *need* to traverse the |
347 |  * list in chunk_chain, but most of the time it should be a very short one, |
348 | * so we do not step on a lot of pages we are not going to use. |
349 | * |
350 | * Flaws: |
351 | * ~~~~~ |
352 |  * get_from_bigger_buckets(): forgets to increment price => quite |
353 | * aggressive. |
354 | */ |
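/*
 * A rough picture of the three kinds of storage described above (added
 * for orientation only; the names are the real ones used below):
 *
 *   nextf[i]       -> free block -> free block -> ...  (unused blocks of "bucket" sizes)
 *   chunk_chain    -> odd piece  -> odd piece  -> ...  ("irregular" leftovers)
 *   [last_sbrk_top - sbrked_remains, last_sbrk_top)    (sbrk()ed, not yet used)
 *   last_op        -> block with the greatest address, or 0 if unknown/unsafe
 */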
355 | |
135863df |
356 | /* I don't much care whether these are defined in sys/types.h--LAW */ |
357 | |
358 | #define u_char unsigned char |
359 | #define u_int unsigned int |
e8bc2b5c |
360 | |
361 | #ifdef HAS_QUAD |
362 | # define u_bigint UV /* Needs to eat *void. */ |
363 | #else /* needed? */ |
364 | # define u_bigint unsigned long /* Needs to eat *void. */ |
365 | #endif |
366 | |
135863df |
367 | #define u_short unsigned short |
8d063cd8 |
368 | |
cf5c4ad8 |
369 | /* 286 and atarist like big chunks, which give too much overhead. */ |
370 | #if (defined(RCHECK) || defined(I286) || defined(atarist)) && defined(PACK_MALLOC) |
e8bc2b5c |
371 | # undef PACK_MALLOC |
cf5c4ad8 |
372 | #endif |
373 | |
8d063cd8 |
374 | /* |
cf5c4ad8 |
375 | * The description below is applicable if PACK_MALLOC is not defined. |
376 | * |
8d063cd8 |
377 | * The overhead on a block is at least 4 bytes. When free, this space |
378 | * contains a pointer to the next free block, and the bottom two bits must |
379 | * be zero. When in use, the first byte is set to MAGIC, and the second |
380 | * byte is the size index. The remaining bytes are for alignment. |
381 | * If range checking is enabled and the size of the block fits |
382 | * in two bytes, then the top two bytes hold the size of the requested block |
383 | * plus the range checking words, and the header word MINUS ONE. |
384 | */ |
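/*
 * A worked instance of the sizes quoted above (illustrative only): when
 * the overhead union packs into 4 bytes, a 512-byte block gives the
 * caller 2^9 - 4 = 508 usable bytes; with RCHECK the header grows to 8
 * bytes and a trailing 4-byte range-magic word is added, leaving
 * 2^9 - 12 = 500 bytes -- hence the 2^n-4 / 2^n-12 figures.
 */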
385 | union overhead { |
386 | union overhead *ov_next; /* when free */ |
85e6fe83 |
387 | #if MEM_ALIGNBYTES > 4 |
c623bd54 |
388 | double strut; /* alignment problems */ |
a687059c |
389 | #endif |
8d063cd8 |
390 | struct { |
391 | u_char ovu_magic; /* magic number */ |
392 | u_char ovu_index; /* bucket # */ |
393 | #ifdef RCHECK |
394 | u_short ovu_size; /* actual block size */ |
395 | u_int ovu_rmagic; /* range magic number */ |
396 | #endif |
397 | } ovu; |
398 | #define ov_magic ovu.ovu_magic |
399 | #define ov_index ovu.ovu_index |
400 | #define ov_size ovu.ovu_size |
401 | #define ov_rmagic ovu.ovu_rmagic |
402 | }; |
403 | |
760ac839 |
404 | #ifdef DEBUGGING |
d720c441 |
405 | static void botch _((char *diag, char *s)); |
a0d0e21e |
406 | #endif |
407 | static void morecore _((int bucket)); |
408 | static int findbucket _((union overhead *freep, int srchlen)); |
28ac10b1 |
409 | static void add_to_chain(void *p, MEM_SIZE size, MEM_SIZE chip); |
a0d0e21e |
410 | |
8d063cd8 |
411 | #define MAGIC 0xff /* magic # on accounting info */ |
412 | #define RMAGIC 0x55555555 /* magic # on range info */ |
e8bc2b5c |
413 | #define RMAGIC_C 0x55 /* magic # on range info */ |
414 | |
8d063cd8 |
415 | #ifdef RCHECK |
c2a5c2d2 |
416 | # define RSLOP sizeof (u_int) |
417 | # ifdef TWO_POT_OPTIMIZE |
e8bc2b5c |
418 | # define MAX_SHORT_BUCKET (12 * BUCKETS_PER_POW2) |
c2a5c2d2 |
419 | # else |
e8bc2b5c |
420 | # define MAX_SHORT_BUCKET (13 * BUCKETS_PER_POW2) |
c2a5c2d2 |
421 | # endif |
8d063cd8 |
422 | #else |
c2a5c2d2 |
423 | # define RSLOP 0 |
8d063cd8 |
424 | #endif |
425 | |
e8bc2b5c |
426 | #if !defined(PACK_MALLOC) && defined(BUCKETS_ROOT2) |
427 | # undef BUCKETS_ROOT2 |
428 | #endif |
429 | |
430 | #ifdef BUCKETS_ROOT2 |
431 | # define BUCKET_TABLE_SHIFT 2 |
432 | # define BUCKET_POW2_SHIFT 1 |
433 | # define BUCKETS_PER_POW2 2 |
434 | #else |
435 | # define BUCKET_TABLE_SHIFT MIN_BUC_POW2 |
436 | # define BUCKET_POW2_SHIFT 0 |
437 | # define BUCKETS_PER_POW2 1 |
438 | #endif |
439 | |
274c7500 |
440 | #if !defined(MEM_ALIGNBYTES) || ((MEM_ALIGNBYTES > 4) && !defined(STRICT_ALIGNMENT)) |
441 | /* Figure out the alignment of void*. */ |
442 | struct aligner { |
443 | char c; |
444 | void *p; |
445 | }; |
446 | # define ALIGN_SMALL ((int)((caddr_t)&(((struct aligner*)0)->p))) |
447 | #else |
448 | # define ALIGN_SMALL MEM_ALIGNBYTES |
449 | #endif |
450 | |
451 | #define IF_ALIGN_8(yes,no) ((ALIGN_SMALL>4) ? (yes) : (no)) |
452 | |
e8bc2b5c |
453 | #ifdef BUCKETS_ROOT2 |
454 | # define MAX_BUCKET_BY_TABLE 13 |
455 | static u_short buck_size[MAX_BUCKET_BY_TABLE + 1] = |
456 | { |
457 | 0, 0, 0, 0, 4, 4, 8, 12, 16, 24, 32, 48, 64, 80, |
458 | }; |
459 | # define BUCKET_SIZE(i) ((i) % 2 ? buck_size[i] : (1 << ((i) >> BUCKET_POW2_SHIFT))) |
460 | # define BUCKET_SIZE_REAL(i) ((i) <= MAX_BUCKET_BY_TABLE \ |
461 | ? buck_size[i] \ |
462 | : ((1 << ((i) >> BUCKET_POW2_SHIFT)) \ |
463 | - MEM_OVERHEAD(i) \ |
464 | + POW2_OPTIMIZE_SURPLUS(i))) |
465 | #else |
466 | # define BUCKET_SIZE(i) (1 << ((i) >> BUCKET_POW2_SHIFT)) |
467 | # define BUCKET_SIZE_REAL(i) (BUCKET_SIZE(i) - MEM_OVERHEAD(i) + POW2_OPTIMIZE_SURPLUS(i)) |
468 | #endif |
469 | |
470 | |
cf5c4ad8 |
471 | #ifdef PACK_MALLOC |
e8bc2b5c |
472 | /* In this case it is assumed that if we do sbrk() in 2K units, we |
473 | * will get 2K aligned arenas (at least after some initial |
474 |  * alignment). The bucket number of a given subblock is stored at the |
475 |  * start of the 2K arena which contains the subblock. Several following bytes |
476 | * contain the magic numbers for the subblocks in the block. |
cf5c4ad8 |
477 | * |
478 | * Sizes of chunks are powers of 2 for chunks in buckets <= |
479 | * MAX_PACKED, after this they are (2^n - sizeof(union overhead)) (to |
480 | * get alignment right). |
481 | * |
e8bc2b5c |
482 | * Consider an arena for 2^n with n>MAX_PACKED. We suppose that |
483 | * starts of all the chunks in a 2K arena are in different |
484 | * 2^n-byte-long chunks. If the top of the last chunk is aligned on a |
485 | * boundary of 2K block, this means that sizeof(union |
486 | * overhead)*"number of chunks" < 2^n, or sizeof(union overhead)*2K < |
487 | * 4^n, or n > 6 + log2(sizeof()/2)/2, since a chunk of size 2^n - |
488 | * overhead is used. Since this rules out n = 7 for 8 byte alignment, |
489 |  * we special-case allocation of the first of 16 128-byte-long chunks. |
cf5c4ad8 |
490 | * |
491 | * Note that with the above assumption we automatically have enough |
492 | * place for MAGIC at the start of 2K block. Note also that we |
e8bc2b5c |
493 | * overlay union overhead over the chunk, thus the start of small chunks |
494 | * is immediately overwritten after freeing. */ |
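/* A worked instance of the bound above (added for illustration): with
 * 8-byte alignment sizeof(union overhead) is 8, so the condition reads
 * n > 6 + log2(8/2)/2 = 7; n = 7 (the sixteen 128-byte chunks) is thus
 * exactly the case that fails the bound and needs the special treatment
 * mentioned above. */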
495 | # define MAX_PACKED_POW2 6 |
496 | # define MAX_PACKED (MAX_PACKED_POW2 * BUCKETS_PER_POW2 + BUCKET_POW2_SHIFT) |
497 | # define MAX_POW2_ALGO ((1<<(MAX_PACKED_POW2 + 1)) - M_OVERHEAD) |
498 | # define TWOK_MASK ((1<<LOG_OF_MIN_ARENA) - 1) |
499 | # define TWOK_MASKED(x) ((u_bigint)(x) & ~TWOK_MASK) |
500 | # define TWOK_SHIFT(x) ((u_bigint)(x) & TWOK_MASK) |
cf5c4ad8 |
501 | # define OV_INDEXp(block) ((u_char*)(TWOK_MASKED(block))) |
502 | # define OV_INDEX(block) (*OV_INDEXp(block)) |
503 | # define OV_MAGIC(block,bucket) (*(OV_INDEXp(block) + \ |
e8bc2b5c |
504 | (TWOK_SHIFT(block)>> \ |
505 | (bucket>>BUCKET_POW2_SHIFT)) + \ |
506 | (bucket >= MIN_NEEDS_SHIFT ? 1 : 0))) |
507 | /* A bucket can have a shift smaller than its size, so we need to |
508 |    shift its magic number so that it will not overwrite the index: */ |
509 | # ifdef BUCKETS_ROOT2 |
510 | # define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2 - 1) /* Shift 80 greater than chunk 64. */ |
511 | # else |
512 | # define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2) /* Shift 128 greater than chunk 32. */ |
513 | # endif |
cf5c4ad8 |
514 | # define CHUNK_SHIFT 0 |
515 | |
e8bc2b5c |
516 | /* Number of chunks in a 2K arena for a bucket of the given ordinal. */ |
517 | #ifdef IGNORE_SMALL_BAD_FREE |
518 | #define FIRST_BUCKET_WITH_CHECK (6 * BUCKETS_PER_POW2) /* 64 */ |
519 | # define N_BLKS(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK \ |
520 | ? ((1<<LOG_OF_MIN_ARENA) - 1)/BUCKET_SIZE(bucket) \ |
521 | : n_blks[bucket] ) |
522 | #else |
523 | # define N_BLKS(bucket) n_blks[bucket] |
524 | #endif |
525 | |
526 | static u_short n_blks[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] = |
527 | { |
528 | # if BUCKETS_PER_POW2==1 |
529 | 0, 0, |
530 | (MIN_BUC_POW2==2 ? 384 : 0), |
531 | 224, 120, 62, 31, 16, 8, 4, 2 |
532 | # else |
533 | 0, 0, 0, 0, |
534 | (MIN_BUC_POW2==2 ? 384 : 0), (MIN_BUC_POW2==2 ? 384 : 0), /* 4, 4 */ |
535 | 224, 149, 120, 80, 62, 41, 31, 25, 16, 16, 8, 8, 4, 4, 2, 2 |
536 | # endif |
537 | }; |
538 | |
539 | /* Shift of the first chunk of the given bucket inside its 2K arena. */ |
540 | #ifdef IGNORE_SMALL_BAD_FREE |
541 | # define BLK_SHIFT(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK \ |
542 | ? ((1<<LOG_OF_MIN_ARENA) \ |
543 | - BUCKET_SIZE(bucket) * N_BLKS(bucket)) \ |
544 | : blk_shift[bucket]) |
545 | #else |
546 | # define BLK_SHIFT(bucket) blk_shift[bucket] |
547 | #endif |
548 | |
549 | static u_short blk_shift[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] = |
550 | { |
551 | # if BUCKETS_PER_POW2==1 |
552 | 0, 0, |
553 | (MIN_BUC_POW2==2 ? 512 : 0), |
554 | 256, 128, 64, 64, /* 8 to 64 */ |
555 | 16*sizeof(union overhead), |
556 | 8*sizeof(union overhead), |
557 | 4*sizeof(union overhead), |
558 | 2*sizeof(union overhead), |
559 | # else |
560 | 0, 0, 0, 0, |
561 | (MIN_BUC_POW2==2 ? 512 : 0), (MIN_BUC_POW2==2 ? 512 : 0), |
562 | 256, 260, 128, 128, 64, 80, 64, 48, /* 8 to 96 */ |
563 | 16*sizeof(union overhead), 16*sizeof(union overhead), |
564 | 8*sizeof(union overhead), 8*sizeof(union overhead), |
565 | 4*sizeof(union overhead), 4*sizeof(union overhead), |
566 | 2*sizeof(union overhead), 2*sizeof(union overhead), |
567 | # endif |
568 | }; |
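/* An illustrative reading of the two tables above (BUCKETS_PER_POW2==1
 * case; a consistency check, not an extra constraint): for the 32-byte
 * bucket (entry 5) an arena holds N_BLKS == 62 chunks starting at
 * BLK_SHIFT == 64, and 64 + 62*32 == 2048 fills the 2K arena exactly. */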
cf5c4ad8 |
569 | |
cf5c4ad8 |
570 | #else /* !PACK_MALLOC */ |
571 | |
572 | # define OV_MAGIC(block,bucket) (block)->ov_magic |
573 | # define OV_INDEX(block) (block)->ov_index |
574 | # define CHUNK_SHIFT 1 |
e8bc2b5c |
575 | # define MAX_PACKED -1 |
cf5c4ad8 |
576 | #endif /* !PACK_MALLOC */ |
577 | |
e8bc2b5c |
578 | #define M_OVERHEAD (sizeof(union overhead) + RSLOP) |
579 | |
580 | #ifdef PACK_MALLOC |
581 | # define MEM_OVERHEAD(bucket) \ |
582 | (bucket <= MAX_PACKED ? 0 : M_OVERHEAD) |
583 | # ifdef SMALL_BUCKET_VIA_TABLE |
584 | # define START_SHIFTS_BUCKET ((MAX_PACKED_POW2 + 1) * BUCKETS_PER_POW2) |
585 | # define START_SHIFT MAX_PACKED_POW2 |
586 | # ifdef BUCKETS_ROOT2 /* Chunks of size 3*2^n. */ |
587 | # define SIZE_TABLE_MAX 80 |
588 | # else |
589 | # define SIZE_TABLE_MAX 64 |
590 | # endif |
591 | static char bucket_of[] = |
592 | { |
593 | # ifdef BUCKETS_ROOT2 /* Chunks of size 3*2^n. */ |
594 | /* 0 to 15 in 4-byte increments. */ |
595 | (sizeof(void*) > 4 ? 6 : 5), /* 4/8, 5-th bucket for better reports */ |
596 | 6, /* 8 */ |
274c7500 |
597 | IF_ALIGN_8(8,7), 8, /* 16/12, 16 */ |
e8bc2b5c |
598 | 9, 9, 10, 10, /* 24, 32 */ |
599 | 11, 11, 11, 11, /* 48 */ |
600 | 12, 12, 12, 12, /* 64 */ |
601 | 13, 13, 13, 13, /* 80 */ |
602 | 13, 13, 13, 13 /* 80 */ |
603 | # else /* !BUCKETS_ROOT2 */ |
604 | /* 0 to 15 in 4-byte increments. */ |
605 | (sizeof(void*) > 4 ? 3 : 2), |
606 | 3, |
607 | 4, 4, |
608 | 5, 5, 5, 5, |
609 | 6, 6, 6, 6, |
610 | 6, 6, 6, 6 |
611 | # endif /* !BUCKETS_ROOT2 */ |
612 | }; |
613 | # else /* !SMALL_BUCKET_VIA_TABLE */ |
614 | # define START_SHIFTS_BUCKET MIN_BUCKET |
615 | # define START_SHIFT (MIN_BUC_POW2 - 1) |
616 | # endif /* !SMALL_BUCKET_VIA_TABLE */ |
617 | #else /* !PACK_MALLOC */ |
618 | # define MEM_OVERHEAD(bucket) M_OVERHEAD |
619 | # ifdef SMALL_BUCKET_VIA_TABLE |
620 | # undef SMALL_BUCKET_VIA_TABLE |
621 | # endif |
622 | # define START_SHIFTS_BUCKET MIN_BUCKET |
623 | # define START_SHIFT (MIN_BUC_POW2 - 1) |
624 | #endif /* !PACK_MALLOC */ |
cf5c4ad8 |
625 | |
8d063cd8 |
626 | /* |
55497cff |
627 | * Big allocations are often of the size 2^n bytes. To make them a |
628 | * little bit better, make blocks of size 2^n+pagesize for big n. |
629 | */ |
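/* For example (with the default FIRST_BIG_POW2 == 15 and PERL_PAGESIZE ==
 * 4096, and ignoring the small per-block overhead): a 32K request is
 * served from a 32K+4K block instead of being rounded up to a 64K one. */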
630 | |
631 | #ifdef TWO_POT_OPTIMIZE |
632 | |
5f05dabc |
633 | # ifndef PERL_PAGESIZE |
634 | # define PERL_PAGESIZE 4096 |
635 | # endif |
e8bc2b5c |
636 | # ifndef FIRST_BIG_POW2 |
637 | # define FIRST_BIG_POW2 15 /* 32K, 16K is used too often. */ |
5f05dabc |
638 | # endif |
e8bc2b5c |
639 | # define FIRST_BIG_BLOCK (1<<FIRST_BIG_POW2) |
55497cff |
640 | /* If this value or more, check against bigger blocks. */ |
641 | # define FIRST_BIG_BOUND (FIRST_BIG_BLOCK - M_OVERHEAD) |
642 | /* If less than this value, goes into 2^n-overhead-block. */ |
643 | # define LAST_SMALL_BOUND ((FIRST_BIG_BLOCK>>1) - M_OVERHEAD) |
644 | |
e8bc2b5c |
645 | # define POW2_OPTIMIZE_ADJUST(nbytes) \ |
646 | ((nbytes >= FIRST_BIG_BOUND) ? nbytes -= PERL_PAGESIZE : 0) |
647 | # define POW2_OPTIMIZE_SURPLUS(bucket) \ |
648 | ((bucket >= FIRST_BIG_POW2 * BUCKETS_PER_POW2) ? PERL_PAGESIZE : 0) |
649 | |
650 | #else /* !TWO_POT_OPTIMIZE */ |
651 | # define POW2_OPTIMIZE_ADJUST(nbytes) |
652 | # define POW2_OPTIMIZE_SURPLUS(bucket) 0 |
653 | #endif /* !TWO_POT_OPTIMIZE */ |
654 | |
655 | #if defined(HAS_64K_LIMIT) && defined(PERL_CORE) |
656 | # define BARK_64K_LIMIT(what,nbytes,size) \ |
657 | if (nbytes > 0xffff) { \ |
658 | PerlIO_printf(PerlIO_stderr(), \ |
659 | "%s too large: %lx\n", what, size); \ |
660 | my_exit(1); \ |
661 | } |
662 | #else /* !HAS_64K_LIMIT || !PERL_CORE */ |
663 | # define BARK_64K_LIMIT(what,nbytes,size) |
664 | #endif /* !HAS_64K_LIMIT || !PERL_CORE */ |
55497cff |
665 | |
e8bc2b5c |
666 | #ifndef MIN_SBRK |
667 | # define MIN_SBRK 2048 |
668 | #endif |
669 | |
670 | #ifndef FIRST_SBRK |
d720c441 |
671 | # define FIRST_SBRK (48*1024) |
e8bc2b5c |
672 | #endif |
673 | |
674 | /* Minimal sbrk in percents of what is already alloced. */ |
675 | #ifndef MIN_SBRK_FRAC |
676 | # define MIN_SBRK_FRAC 3 |
677 | #endif |
678 | |
679 | #ifndef SBRK_ALLOW_FAILURES |
680 | # define SBRK_ALLOW_FAILURES 3 |
681 | #endif |
55497cff |
682 | |
e8bc2b5c |
683 | #ifndef SBRK_FAILURE_PRICE |
684 | # define SBRK_FAILURE_PRICE 50 |
55497cff |
685 | #endif |
686 | |
e8bc2b5c |
687 | #if defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE) |
688 | |
689 | # ifndef BIG_SIZE |
690 | # define BIG_SIZE (1<<16) /* 64K */ |
691 | # endif |
692 | |
3541dd58 |
693 | #ifdef I_MACH_CTHREADS |
772fe5b3 |
694 | # undef MUTEX_LOCK |
695 | # define MUTEX_LOCK(m) STMT_START { if (*m) mutex_lock(*m); } STMT_END |
696 | # undef MUTEX_UNLOCK |
697 | # define MUTEX_UNLOCK(m) STMT_START { if (*m) mutex_unlock(*m); } STMT_END |
3541dd58 |
698 | #endif |
699 | |
55497cff |
700 | static char *emergency_buffer; |
701 | static MEM_SIZE emergency_buffer_size; |
df0003d4 |
702 | static Malloc_t emergency_sbrk(MEM_SIZE size); |
55497cff |
703 | |
52082926 |
704 | static Malloc_t |
df0003d4 |
705 | emergency_sbrk(MEM_SIZE size) |
55497cff |
706 | { |
28ac10b1 |
707 | MEM_SIZE rsize = (((size - 1)>>LOG_OF_MIN_ARENA) + 1)<<LOG_OF_MIN_ARENA; |
708 | |
55497cff |
709 | if (size >= BIG_SIZE) { |
710 | /* Give the possibility to recover: */ |
741df71a |
711 | MALLOC_UNLOCK; |
1b979e0a |
712 | croak("Out of memory during \"large\" request for %i bytes", size); |
55497cff |
713 | } |
714 | |
28ac10b1 |
715 | if (emergency_buffer_size >= rsize) { |
716 | char *old = emergency_buffer; |
717 | |
718 | emergency_buffer_size -= rsize; |
719 | emergency_buffer += rsize; |
720 | return old; |
721 | } else { |
18f739ee |
722 | dTHR; |
55497cff |
723 | 	/* First offense, give a possibility to recover by dying. */ |
724 | /* No malloc involved here: */ |
4a33f861 |
725 | GV **gvp = (GV**)hv_fetch(PL_defstash, "^M", 2, 0); |
55497cff |
726 | SV *sv; |
727 | char *pv; |
28ac10b1 |
728 | int have = 0; |
2d8e6c8d |
729 | STRLEN n_a; |
55497cff |
730 | |
28ac10b1 |
731 | if (emergency_buffer_size) { |
732 | add_to_chain(emergency_buffer, emergency_buffer_size, 0); |
733 | emergency_buffer_size = 0; |
734 | emergency_buffer = Nullch; |
735 | have = 1; |
736 | } |
4a33f861 |
737 | if (!gvp) gvp = (GV**)hv_fetch(PL_defstash, "\015", 1, 0); |
55497cff |
738 | if (!gvp || !(sv = GvSV(*gvp)) || !SvPOK(sv) |
28ac10b1 |
739 | || (SvLEN(sv) < (1<<LOG_OF_MIN_ARENA) - M_OVERHEAD)) { |
740 | if (have) |
741 | goto do_croak; |
55497cff |
742 | return (char *)-1; /* Now die die die... */ |
28ac10b1 |
743 | } |
55497cff |
744 | /* Got it, now detach SvPV: */ |
2d8e6c8d |
745 | pv = SvPV(sv, n_a); |
55497cff |
746 | /* Check alignment: */ |
28ac10b1 |
747 | if (((UV)(pv - sizeof(union overhead))) & ((1<<LOG_OF_MIN_ARENA) - 1)) { |
55497cff |
748 | PerlIO_puts(PerlIO_stderr(),"Bad alignment of $^M!\n"); |
bbce6d69 |
749 | return (char *)-1; /* die die die */ |
55497cff |
750 | } |
751 | |
28ac10b1 |
752 | emergency_buffer = pv - sizeof(union overhead); |
753 | emergency_buffer_size = malloced_size(pv) + M_OVERHEAD; |
55497cff |
754 | SvPOK_off(sv); |
28ac10b1 |
755 | SvPVX(sv) = Nullch; |
756 | SvCUR(sv) = SvLEN(sv) = 0; |
55497cff |
757 | } |
28ac10b1 |
758 | do_croak: |
741df71a |
759 | MALLOC_UNLOCK; |
28ac10b1 |
760 | croak("Out of memory during request for %i bytes", size); |
55497cff |
761 | } |
762 | |
e8bc2b5c |
763 | #else /* !(defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)) */ |
55497cff |
764 | # define emergency_sbrk(size) -1 |
e8bc2b5c |
765 | #endif /* !(defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)) */ |
55497cff |
766 | |
767 | /* |
e8bc2b5c |
768 | * nextf[i] is the pointer to the next free block of size 2^i. The |
8d063cd8 |
769 | * smallest allocatable block is 8 bytes. The overhead information |
770 | * precedes the data area returned to the user. |
771 | */ |
e8bc2b5c |
772 | #define NBUCKETS (32*BUCKETS_PER_POW2 + 1) |
8d063cd8 |
773 | static union overhead *nextf[NBUCKETS]; |
cf5c4ad8 |
774 | |
775 | #ifdef USE_PERL_SBRK |
776 | #define sbrk(a) Perl_sbrk(a) |
52082926 |
777 | Malloc_t Perl_sbrk _((int size)); |
8ac85365 |
778 | #else |
779 | #ifdef DONT_DECLARE_STD |
780 | #ifdef I_UNISTD |
781 | #include <unistd.h> |
782 | #endif |
cf5c4ad8 |
783 | #else |
52082926 |
784 | extern Malloc_t sbrk(int); |
8ac85365 |
785 | #endif |
cf5c4ad8 |
786 | #endif |
8d063cd8 |
787 | |
c07a80fd |
788 | #ifdef DEBUGGING_MSTATS |
8d063cd8 |
789 | /* |
790 | * nmalloc[i] is the difference between the number of mallocs and frees |
791 | * for a given block size. |
792 | */ |
793 | static u_int nmalloc[NBUCKETS]; |
5f05dabc |
794 | static u_int sbrk_slack; |
795 | static u_int start_slack; |
8d063cd8 |
796 | #endif |
797 | |
e8bc2b5c |
798 | static u_int goodsbrk; |
799 | |
760ac839 |
800 | #ifdef DEBUGGING |
3541dd58 |
801 | #undef ASSERT |
802 | #define ASSERT(p,diag) if (!(p)) botch(diag,STRINGIFY(p)); else |
ee0007ab |
803 | static void |
d720c441 |
804 | botch(char *diag, char *s) |
8d063cd8 |
805 | { |
d720c441 |
806 | PerlIO_printf(PerlIO_stderr(), "assertion botched (%s?): %s\n", diag, s); |
3028581b |
807 | PerlProc_abort(); |
8d063cd8 |
808 | } |
809 | #else |
3541dd58 |
810 | #define ASSERT(p, diag) |
8d063cd8 |
811 | #endif |
812 | |
2304df62 |
813 | Malloc_t |
8ac85365 |
814 | malloc(register size_t nbytes) |
8d063cd8 |
815 | { |
816 | register union overhead *p; |
e8bc2b5c |
817 | register int bucket; |
ee0007ab |
818 | register MEM_SIZE shiftr; |
8d063cd8 |
819 | |
c2a5c2d2 |
820 | #if defined(DEBUGGING) || defined(RCHECK) |
ee0007ab |
821 | MEM_SIZE size = nbytes; |
45d8adaa |
822 | #endif |
823 | |
e8bc2b5c |
824 | BARK_64K_LIMIT("Allocation",nbytes,nbytes); |
45d8adaa |
825 | #ifdef DEBUGGING |
826 | if ((long)nbytes < 0) |
d720c441 |
827 | croak("%s", "panic: malloc"); |
45d8adaa |
828 | #endif |
45d8adaa |
829 | |
741df71a |
830 | MALLOC_LOCK; |
8d063cd8 |
831 | /* |
832 | * Convert amount of memory requested into |
833 | * closest block size stored in hash buckets |
834 | * which satisfies request. Account for |
835 | * space used per block for accounting. |
836 | */ |
cf5c4ad8 |
837 | #ifdef PACK_MALLOC |
e8bc2b5c |
838 | # ifdef SMALL_BUCKET_VIA_TABLE |
839 | if (nbytes == 0) |
840 | bucket = MIN_BUCKET; |
841 | else if (nbytes <= SIZE_TABLE_MAX) { |
842 | bucket = bucket_of[(nbytes - 1) >> BUCKET_TABLE_SHIFT]; |
843 | } else |
844 | # else |
043bf814 |
845 | if (nbytes == 0) |
846 | nbytes = 1; |
e8bc2b5c |
847 | if (nbytes <= MAX_POW2_ALGO) goto do_shifts; |
848 | else |
849 | # endif |
55497cff |
850 | #endif |
e8bc2b5c |
851 | { |
852 | POW2_OPTIMIZE_ADJUST(nbytes); |
853 | nbytes += M_OVERHEAD; |
854 | nbytes = (nbytes + 3) &~ 3; |
855 | do_shifts: |
856 | shiftr = (nbytes - 1) >> START_SHIFT; |
857 | bucket = START_SHIFTS_BUCKET; |
858 | /* apart from this loop, this is O(1) */ |
859 | while (shiftr >>= 1) |
860 | bucket += BUCKETS_PER_POW2; |
cf5c4ad8 |
861 | } |
8d063cd8 |
862 | /* |
863 | * If nothing in hash bucket right now, |
864 | * request more memory from the system. |
865 | */ |
866 | if (nextf[bucket] == NULL) |
867 | morecore(bucket); |
e8bc2b5c |
868 | if ((p = nextf[bucket]) == NULL) { |
741df71a |
869 | MALLOC_UNLOCK; |
55497cff |
870 | #ifdef PERL_CORE |
4a33f861 |
871 | if (!PL_nomemok) { |
760ac839 |
872 | PerlIO_puts(PerlIO_stderr(),"Out of memory!\n"); |
79072805 |
873 | my_exit(1); |
ee0007ab |
874 | } |
45d8adaa |
875 | #else |
8d063cd8 |
876 | return (NULL); |
45d8adaa |
877 | #endif |
878 | } |
879 | |
e8bc2b5c |
880 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
881 | "0x%lx: (%05lu) malloc %ld bytes\n", |
4a33f861 |
882 | (unsigned long)(p+1), (unsigned long)(PL_an++), |
e8bc2b5c |
883 | (long)size)); |
45d8adaa |
884 | |
8d063cd8 |
885 | /* remove from linked list */ |
802004fa |
886 | #if defined(RCHECK) |
887 | if (((UV)p) & (MEM_ALIGNBYTES - 1)) |
760ac839 |
888 | PerlIO_printf(PerlIO_stderr(), "Corrupt malloc ptr 0x%lx at 0x%lx\n", |
a0d0e21e |
889 | (unsigned long)*((int*)p),(unsigned long)p); |
bf38876a |
890 | #endif |
891 | nextf[bucket] = p->ov_next; |
e8bc2b5c |
892 | #ifdef IGNORE_SMALL_BAD_FREE |
893 | if (bucket >= FIRST_BUCKET_WITH_CHECK) |
894 | #endif |
895 | OV_MAGIC(p, bucket) = MAGIC; |
cf5c4ad8 |
896 | #ifndef PACK_MALLOC |
897 | OV_INDEX(p) = bucket; |
898 | #endif |
8d063cd8 |
899 | #ifdef RCHECK |
900 | /* |
901 | * Record allocated size of block and |
902 | * bound space with magic numbers. |
903 | */ |
8d063cd8 |
904 | p->ov_rmagic = RMAGIC; |
e8bc2b5c |
905 | if (bucket <= MAX_SHORT_BUCKET) { |
906 | int i; |
907 | |
908 | nbytes = size + M_OVERHEAD; |
909 | p->ov_size = nbytes - 1; |
910 | if ((i = nbytes & 3)) { |
911 | i = 4 - i; |
912 | while (i--) |
913 | *((char *)((caddr_t)p + nbytes - RSLOP + i)) = RMAGIC_C; |
914 | } |
915 | nbytes = (nbytes + 3) &~ 3; |
916 | *((u_int *)((caddr_t)p + nbytes - RSLOP)) = RMAGIC; |
917 | } |
8d063cd8 |
918 | #endif |
741df71a |
919 | MALLOC_UNLOCK; |
cf5c4ad8 |
920 | return ((Malloc_t)(p + CHUNK_SHIFT)); |
8d063cd8 |
921 | } |
922 | |
e8bc2b5c |
923 | static char *last_sbrk_top; |
924 | static char *last_op; /* This arena can be easily extended. */ |
925 | static int sbrked_remains; |
926 | static int sbrk_good = SBRK_ALLOW_FAILURES * SBRK_FAILURE_PRICE; |
927 | |
928 | #ifdef DEBUGGING_MSTATS |
929 | static int sbrks; |
930 | #endif |
931 | |
932 | struct chunk_chain_s { |
933 | struct chunk_chain_s *next; |
934 | MEM_SIZE size; |
935 | }; |
936 | static struct chunk_chain_s *chunk_chain; |
937 | static int n_chunks; |
938 | static char max_bucket; |
939 | |
940 | /* Cut off a piece of one of the chunks in the chain. Prefer the smallest sufficient chunk. */ |
941 | static void * |
942 | get_from_chain(MEM_SIZE size) |
943 | { |
944 | struct chunk_chain_s *elt = chunk_chain, **oldp = &chunk_chain; |
945 | struct chunk_chain_s **oldgoodp = NULL; |
946 | long min_remain = LONG_MAX; |
947 | |
948 | while (elt) { |
949 | if (elt->size >= size) { |
950 | long remains = elt->size - size; |
951 | if (remains >= 0 && remains < min_remain) { |
952 | oldgoodp = oldp; |
953 | min_remain = remains; |
954 | } |
955 | if (remains == 0) { |
956 | break; |
957 | } |
958 | } |
959 | oldp = &( elt->next ); |
960 | elt = elt->next; |
961 | } |
962 | if (!oldgoodp) return NULL; |
963 | if (min_remain) { |
964 | void *ret = *oldgoodp; |
965 | struct chunk_chain_s *next = (*oldgoodp)->next; |
966 | |
967 | *oldgoodp = (struct chunk_chain_s *)((char*)ret + size); |
968 | (*oldgoodp)->size = min_remain; |
969 | (*oldgoodp)->next = next; |
970 | return ret; |
971 | } else { |
972 | void *ret = *oldgoodp; |
973 | *oldgoodp = (*oldgoodp)->next; |
974 | n_chunks--; |
975 | return ret; |
976 | } |
977 | } |
978 | |
979 | static void |
980 | add_to_chain(void *p, MEM_SIZE size, MEM_SIZE chip) |
981 | { |
982 | struct chunk_chain_s *next = chunk_chain; |
983 | char *cp = (char*)p; |
984 | |
985 | cp += chip; |
986 | chunk_chain = (struct chunk_chain_s *)cp; |
987 | chunk_chain->size = size - chip; |
988 | chunk_chain->next = next; |
989 | n_chunks++; |
990 | } |
991 | |
992 | static void * |
993 | get_from_bigger_buckets(int bucket, MEM_SIZE size) |
994 | { |
995 | int price = 1; |
996 | static int bucketprice[NBUCKETS]; |
997 | while (bucket <= max_bucket) { |
998 | /* We postpone stealing from bigger buckets until we want it |
999 | often enough. */ |
1000 | if (nextf[bucket] && bucketprice[bucket]++ >= price) { |
1001 | /* Steal it! */ |
1002 | void *ret = (void*)(nextf[bucket] - 1 + CHUNK_SHIFT); |
1003 | bucketprice[bucket] = 0; |
1004 | if (((char*)nextf[bucket]) - M_OVERHEAD == last_op) { |
1005 | last_op = NULL; /* Disable optimization */ |
1006 | } |
1007 | nextf[bucket] = nextf[bucket]->ov_next; |
1008 | #ifdef DEBUGGING_MSTATS |
1009 | nmalloc[bucket]--; |
1010 | start_slack -= M_OVERHEAD; |
1011 | #endif |
1012 | add_to_chain(ret, (BUCKET_SIZE(bucket) + |
1013 | POW2_OPTIMIZE_SURPLUS(bucket)), |
1014 | size); |
1015 | return ret; |
1016 | } |
1017 | bucket++; |
1018 | } |
1019 | return NULL; |
1020 | } |
1021 | |
fa423c5b |
1022 | static union overhead * |
1023 | getpages(int needed, int *nblksp, int bucket) |
1024 | { |
1025 | /* Need to do (possibly expensive) system call. Try to |
1026 | optimize it for rare calling. */ |
1027 | MEM_SIZE require = needed - sbrked_remains; |
1028 | char *cp; |
1029 | union overhead *ovp; |
1030 | int slack = 0; |
1031 | |
1032 | if (sbrk_good > 0) { |
1033 | if (!last_sbrk_top && require < FIRST_SBRK) |
1034 | require = FIRST_SBRK; |
1035 | else if (require < MIN_SBRK) require = MIN_SBRK; |
1036 | |
1037 | if (require < goodsbrk * MIN_SBRK_FRAC / 100) |
1038 | require = goodsbrk * MIN_SBRK_FRAC / 100; |
1039 | require = ((require - 1 + MIN_SBRK) / MIN_SBRK) * MIN_SBRK; |
1040 | } else { |
1041 | require = needed; |
1042 | last_sbrk_top = 0; |
1043 | sbrked_remains = 0; |
1044 | } |
1045 | |
1046 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
1047 | "sbrk(%ld) for %ld-byte-long arena\n", |
1048 | (long)require, (long) needed)); |
1049 | cp = (char *)sbrk(require); |
1050 | #ifdef DEBUGGING_MSTATS |
1051 | sbrks++; |
1052 | #endif |
1053 | if (cp == last_sbrk_top) { |
1054 | /* Common case, anything is fine. */ |
1055 | sbrk_good++; |
1056 | ovp = (union overhead *) (cp - sbrked_remains); |
e9397286 |
1057 | last_op = cp - sbrked_remains; |
fa423c5b |
1058 | sbrked_remains = require - (needed - sbrked_remains); |
1059 | } else if (cp == (char *)-1) { /* no more room! */ |
1060 | ovp = (union overhead *)emergency_sbrk(needed); |
1061 | if (ovp == (union overhead *)-1) |
1062 | return 0; |
e9397286 |
1063 | if (((char*)ovp) > last_op) { /* Cannot happen with current emergency_sbrk() */ |
1064 | last_op = 0; |
1065 | } |
fa423c5b |
1066 | return ovp; |
1067 | } else { /* Non-continuous or first sbrk(). */ |
1068 | long add = sbrked_remains; |
1069 | char *newcp; |
1070 | |
1071 | if (sbrked_remains) { /* Put rest into chain, we |
1072 | cannot use it right now. */ |
1073 | add_to_chain((void*)(last_sbrk_top - sbrked_remains), |
1074 | sbrked_remains, 0); |
1075 | } |
1076 | |
1077 | /* Second, check alignment. */ |
1078 | slack = 0; |
1079 | |
1080 | #ifndef atarist /* on the atari we don't have to worry about this */ |
1081 | # ifndef I286 /* The sbrk(0) call on the I286 always returns the next segment */ |
1082 | |
1083 | /* CHUNK_SHIFT is 1 for PACK_MALLOC, 0 otherwise. */ |
1084 | if ((UV)cp & (0x7FF >> CHUNK_SHIFT)) { /* Not aligned. */ |
1085 | slack = (0x800 >> CHUNK_SHIFT) |
1086 | - ((UV)cp & (0x7FF >> CHUNK_SHIFT)); |
1087 | add += slack; |
1088 | } |
1089 | # endif |
1090 | #endif /* atarist */ |
1091 | |
1092 | if (add) { |
1093 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
1094 | 				  "sbrk(%ld) to fix non-continuous/off-page sbrk:\n\t%ld for alignment,\t%ld were assumed to come from the tail of the previous sbrk\n", |
1095 | (long)add, (long) slack, |
1096 | (long) sbrked_remains)); |
1097 | newcp = (char *)sbrk(add); |
1098 | #if defined(DEBUGGING_MSTATS) |
1099 | sbrks++; |
1100 | sbrk_slack += add; |
1101 | #endif |
1102 | if (newcp != cp + require) { |
1103 | /* Too bad: even rounding sbrk() is not continuous.*/ |
1104 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
1105 | "failed to fix bad sbrk()\n")); |
1106 | #ifdef PACK_MALLOC |
1107 | if (slack) { |
741df71a |
1108 | MALLOC_UNLOCK; |
fa423c5b |
1109 | croak("%s", "panic: Off-page sbrk"); |
1110 | } |
1111 | #endif |
1112 | if (sbrked_remains) { |
1113 | /* Try again. */ |
1114 | #if defined(DEBUGGING_MSTATS) |
1115 | sbrk_slack += require; |
1116 | #endif |
1117 | require = needed; |
1118 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
1119 | "straight sbrk(%ld)\n", |
1120 | (long)require)); |
1121 | cp = (char *)sbrk(require); |
1122 | #ifdef DEBUGGING_MSTATS |
1123 | sbrks++; |
1124 | #endif |
1125 | if (cp == (char *)-1) |
1126 | return 0; |
1127 | } |
1128 | sbrk_good = -1; /* Disable optimization! |
1129 | Continue with not-aligned... */ |
1130 | } else { |
1131 | cp += slack; |
1132 | require += sbrked_remains; |
1133 | } |
1134 | } |
1135 | |
1136 | if (last_sbrk_top) { |
1137 | sbrk_good -= SBRK_FAILURE_PRICE; |
1138 | } |
1139 | |
1140 | ovp = (union overhead *) cp; |
1141 | /* |
1142 | * Round up to minimum allocation size boundary |
1143 | * and deduct from block count to reflect. |
1144 | */ |
1145 | |
1146 | #ifndef I286 /* Again, this should always be ok on an 80286 */ |
1147 | if ((UV)ovp & 7) { |
1148 | ovp = (union overhead *)(((UV)ovp + 8) & ~7); |
1149 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
1150 | 			      "fixing sbrk(): %d bytes off machine alignment\n", |
1151 | (int)((UV)ovp & 7))); |
1152 | (*nblksp)--; |
1153 | # if defined(DEBUGGING_MSTATS) |
1154 | /* This is only approx. if TWO_POT_OPTIMIZE: */ |
1155 | sbrk_slack += (1 << bucket); |
1156 | # endif |
1157 | } |
1158 | #endif |
1159 | sbrked_remains = require - needed; |
e9397286 |
1160 | last_op = cp; |
fa423c5b |
1161 | } |
1162 | last_sbrk_top = cp + require; |
fa423c5b |
1163 | #ifdef DEBUGGING_MSTATS |
1164 | goodsbrk += require; |
1165 | #endif |
1166 | return ovp; |
1167 | } |
1168 | |
1169 | static int |
1170 | getpages_adjacent(int require) |
1171 | { |
1172 | if (require <= sbrked_remains) { |
1173 | sbrked_remains -= require; |
1174 | } else { |
1175 | char *cp; |
1176 | |
1177 | require -= sbrked_remains; |
1178 | 	/* We do not try to optimize sbrks here; we need memory at this specific place. */ |
1179 | cp = (char*) sbrk(require); |
1180 | #ifdef DEBUGGING_MSTATS |
1181 | sbrks++; |
1182 | goodsbrk += require; |
1183 | #endif |
1184 | if (cp == last_sbrk_top) { |
1185 | sbrked_remains = 0; |
1186 | last_sbrk_top = cp + require; |
1187 | } else { |
28ac10b1 |
1188 | if (cp == (char*)-1) { /* Out of memory */ |
1189 | #ifdef DEBUGGING_MSTATS |
1190 | goodsbrk -= require; |
1191 | #endif |
1192 | return 0; |
1193 | } |
fa423c5b |
1194 | /* Report the failure: */ |
1195 | if (sbrked_remains) |
1196 | add_to_chain((void*)(last_sbrk_top - sbrked_remains), |
1197 | sbrked_remains, 0); |
1198 | add_to_chain((void*)cp, require, 0); |
1199 | sbrk_good -= SBRK_FAILURE_PRICE; |
1200 | sbrked_remains = 0; |
1201 | last_sbrk_top = 0; |
1202 | last_op = 0; |
1203 | return 0; |
1204 | } |
1205 | } |
1206 | |
1207 | return 1; |
1208 | } |
1209 | |
8d063cd8 |
1210 | /* |
1211 | * Allocate more memory to the indicated bucket. |
1212 | */ |
a0d0e21e |
1213 | static void |
8ac85365 |
1214 | morecore(register int bucket) |
8d063cd8 |
1215 | { |
72aaf631 |
1216 | register union overhead *ovp; |
8d063cd8 |
1217 | register int rnu; /* 2^rnu bytes will be requested */ |
fa423c5b |
1218 | int nblks; /* become nblks blocks of the desired size */ |
bbce6d69 |
1219 | register MEM_SIZE siz, needed; |
8d063cd8 |
1220 | |
1221 | if (nextf[bucket]) |
1222 | return; |
e8bc2b5c |
1223 | if (bucket == sizeof(MEM_SIZE)*8*BUCKETS_PER_POW2) { |
741df71a |
1224 | MALLOC_UNLOCK; |
d720c441 |
1225 | croak("%s", "Out of memory during ridiculously large request"); |
55497cff |
1226 | } |
d720c441 |
1227 | if (bucket > max_bucket) |
e8bc2b5c |
1228 | max_bucket = bucket; |
d720c441 |
1229 | |
e8bc2b5c |
1230 | rnu = ( (bucket <= (LOG_OF_MIN_ARENA << BUCKET_POW2_SHIFT)) |
1231 | ? LOG_OF_MIN_ARENA |
1232 | : (bucket >> BUCKET_POW2_SHIFT) ); |
1233 | /* This may be overwritten later: */ |
1234 | nblks = 1 << (rnu - (bucket >> BUCKET_POW2_SHIFT)); /* how many blocks to get */ |
1235 | needed = ((MEM_SIZE)1 << rnu) + POW2_OPTIMIZE_SURPLUS(bucket); |
1236 | if (nextf[rnu << BUCKET_POW2_SHIFT]) { /* 2048b bucket. */ |
1237 | ovp = nextf[rnu << BUCKET_POW2_SHIFT] - 1 + CHUNK_SHIFT; |
1238 | nextf[rnu << BUCKET_POW2_SHIFT] |
1239 | = nextf[rnu << BUCKET_POW2_SHIFT]->ov_next; |
1240 | #ifdef DEBUGGING_MSTATS |
1241 | nmalloc[rnu << BUCKET_POW2_SHIFT]--; |
1242 | start_slack -= M_OVERHEAD; |
1243 | #endif |
1244 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
1245 | "stealing %ld bytes from %ld arena\n", |
1246 | (long) needed, (long) rnu << BUCKET_POW2_SHIFT)); |
1247 | } else if (chunk_chain |
1248 | && (ovp = (union overhead*) get_from_chain(needed))) { |
1249 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
1250 | "stealing %ld bytes from chain\n", |
1251 | (long) needed)); |
d720c441 |
1252 | } else if ( (ovp = (union overhead*) |
1253 | get_from_bigger_buckets((rnu << BUCKET_POW2_SHIFT) + 1, |
1254 | needed)) ) { |
e8bc2b5c |
1255 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
1256 | "stealing %ld bytes from bigger buckets\n", |
1257 | (long) needed)); |
1258 | } else if (needed <= sbrked_remains) { |
1259 | ovp = (union overhead *)(last_sbrk_top - sbrked_remains); |
1260 | sbrked_remains -= needed; |
1261 | last_op = (char*)ovp; |
fa423c5b |
1262 | } else |
1263 | ovp = getpages(needed, &nblks, bucket); |
e8bc2b5c |
1264 | |
fa423c5b |
1265 | if (!ovp) |
1266 | return; |
e8bc2b5c |
1267 | |
8d063cd8 |
1268 | /* |
1269 | * Add new memory allocated to that on |
1270 | * free list for this hash bucket. |
1271 | */ |
e8bc2b5c |
1272 | siz = BUCKET_SIZE(bucket); |
cf5c4ad8 |
1273 | #ifdef PACK_MALLOC |
72aaf631 |
1274 | *(u_char*)ovp = bucket; /* Fill index. */ |
e8bc2b5c |
1275 | if (bucket <= MAX_PACKED) { |
1276 | ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket)); |
1277 | nblks = N_BLKS(bucket); |
cf5c4ad8 |
1278 | # ifdef DEBUGGING_MSTATS |
e8bc2b5c |
1279 | start_slack += BLK_SHIFT(bucket); |
cf5c4ad8 |
1280 | # endif |
e8bc2b5c |
1281 | } else if (bucket < LOG_OF_MIN_ARENA * BUCKETS_PER_POW2) { |
1282 | ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket)); |
cf5c4ad8 |
1283 | siz -= sizeof(union overhead); |
72aaf631 |
1284 | } else ovp++; /* One chunk per block. */ |
e8bc2b5c |
1285 | #endif /* PACK_MALLOC */ |
72aaf631 |
1286 | nextf[bucket] = ovp; |
5f05dabc |
1287 | #ifdef DEBUGGING_MSTATS |
1288 | nmalloc[bucket] += nblks; |
e8bc2b5c |
1289 | if (bucket > MAX_PACKED) { |
1290 | start_slack += M_OVERHEAD * nblks; |
1291 | } |
5f05dabc |
1292 | #endif |
8d063cd8 |
1293 | while (--nblks > 0) { |
72aaf631 |
1294 | ovp->ov_next = (union overhead *)((caddr_t)ovp + siz); |
1295 | ovp = (union overhead *)((caddr_t)ovp + siz); |
8d063cd8 |
1296 | } |
8595d6f1 |
1297 | /* Not all sbrks return zeroed memory.*/ |
72aaf631 |
1298 | ovp->ov_next = (union overhead *)NULL; |
cf5c4ad8 |
1299 | #ifdef PACK_MALLOC |
e8bc2b5c |
1300 | if (bucket == 7*BUCKETS_PER_POW2) { /* Special case, explanation is above. */ |
1301 | union overhead *n_op = nextf[7*BUCKETS_PER_POW2]->ov_next; |
1302 | nextf[7*BUCKETS_PER_POW2] = |
1303 | (union overhead *)((caddr_t)nextf[7*BUCKETS_PER_POW2] |
1304 | - sizeof(union overhead)); |
1305 | nextf[7*BUCKETS_PER_POW2]->ov_next = n_op; |
cf5c4ad8 |
1306 | } |
1307 | #endif /* !PACK_MALLOC */ |
8d063cd8 |
1308 | } |
1309 | |
94b6baf5 |
1310 | Free_t |
8ac85365 |
1311 | free(void *mp) |
8d063cd8 |
1312 | { |
ee0007ab |
1313 | register MEM_SIZE size; |
72aaf631 |
1314 | register union overhead *ovp; |
352d5a3a |
1315 | char *cp = (char*)mp; |
cf5c4ad8 |
1316 | #ifdef PACK_MALLOC |
1317 | u_char bucket; |
1318 | #endif |
8d063cd8 |
1319 | |
e8bc2b5c |
1320 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
1321 | "0x%lx: (%05lu) free\n", |
4a33f861 |
1322 | (unsigned long)cp, (unsigned long)(PL_an++))); |
45d8adaa |
1323 | |
cf5c4ad8 |
1324 | if (cp == NULL) |
1325 | return; |
72aaf631 |
1326 | ovp = (union overhead *)((caddr_t)cp |
e8bc2b5c |
1327 | - sizeof (union overhead) * CHUNK_SHIFT); |
cf5c4ad8 |
1328 | #ifdef PACK_MALLOC |
72aaf631 |
1329 | bucket = OV_INDEX(ovp); |
cf5c4ad8 |
1330 | #endif |
e8bc2b5c |
1331 | #ifdef IGNORE_SMALL_BAD_FREE |
1332 | if ((bucket >= FIRST_BUCKET_WITH_CHECK) |
1333 | && (OV_MAGIC(ovp, bucket) != MAGIC)) |
1334 | #else |
1335 | if (OV_MAGIC(ovp, bucket) != MAGIC) |
1336 | #endif |
1337 | { |
68dc0745 |
1338 | static int bad_free_warn = -1; |
cf5c4ad8 |
1339 | if (bad_free_warn == -1) { |
5fd9e9a4 |
1340 | char *pbf = PerlEnv_getenv("PERL_BADFREE"); |
cf5c4ad8 |
1341 | bad_free_warn = (pbf) ? atoi(pbf) : 1; |
1342 | } |
1343 | if (!bad_free_warn) |
1344 | return; |
8990e307 |
1345 | #ifdef RCHECK |
a687059c |
1346 | warn("%s free() ignored", |
72aaf631 |
1347 | ovp->ov_rmagic == RMAGIC - 1 ? "Duplicate" : "Bad"); |
8990e307 |
1348 | #else |
d720c441 |
1349 | warn("%s", "Bad free() ignored"); |
8990e307 |
1350 | #endif |
8d063cd8 |
1351 | return; /* sanity */ |
e8bc2b5c |
1352 | } |
741df71a |
1353 | MALLOC_LOCK; |
8d063cd8 |
1354 | #ifdef RCHECK |
3541dd58 |
1355 | ASSERT(ovp->ov_rmagic == RMAGIC, "chunk's head overwrite"); |
e8bc2b5c |
1356 | if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) { |
1357 | int i; |
1358 | MEM_SIZE nbytes = ovp->ov_size + 1; |
1359 | |
1360 | if ((i = nbytes & 3)) { |
1361 | i = 4 - i; |
1362 | while (i--) { |
3541dd58 |
1363 | ASSERT(*((char *)((caddr_t)ovp + nbytes - RSLOP + i)) |
d720c441 |
1364 | == RMAGIC_C, "chunk's tail overwrite"); |
e8bc2b5c |
1365 | } |
1366 | } |
1367 | nbytes = (nbytes + 3) &~ 3; |
3541dd58 |
1368 | ASSERT(*(u_int *)((caddr_t)ovp + nbytes - RSLOP) == RMAGIC, "chunk's tail overwrite"); |
e8bc2b5c |
1369 | } |
72aaf631 |
1370 | ovp->ov_rmagic = RMAGIC - 1; |
8d063cd8 |
1371 | #endif |
3541dd58 |
1372 | ASSERT(OV_INDEX(ovp) < NBUCKETS, "chunk's head overwrite"); |
72aaf631 |
1373 | size = OV_INDEX(ovp); |
1374 | ovp->ov_next = nextf[size]; |
1375 | nextf[size] = ovp; |
741df71a |
1376 | MALLOC_UNLOCK; |
8d063cd8 |
1377 | } |
1378 | |
1379 | /* |
1380 | * When a program attempts "storage compaction" as mentioned in the |
1381 | * old malloc man page, it realloc's an already freed block. Usually |
1382 | * this is the last block it freed; occasionally it might be farther |
1383 | * back. We have to search all the free lists for the block in order |
1384 | * to determine its bucket: 1st we make one pass thru the lists |
1385 | * checking only the first block in each; if that fails we search |
378cc40b |
1386 |  * ``reall_srchlen'' blocks in each list for a match. If that |
8d063cd8 |
1387 |  * fails we just copy however many bytes were given to realloc() |
1388 |  * and hope it's not huge. |
1389 | */ |
22c35a8c |
1390 | #define reall_srchlen 4 /* 4 should be plenty, -1 =>'s whole list */ |
8d063cd8 |
1391 | |
2304df62 |
1392 | Malloc_t |
8ac85365 |
1393 | realloc(void *mp, size_t nbytes) |
8d063cd8 |
1394 | { |
ee0007ab |
1395 | register MEM_SIZE onb; |
72aaf631 |
1396 | union overhead *ovp; |
d720c441 |
1397 | char *res; |
1398 | int prev_bucket; |
e8bc2b5c |
1399 | register int bucket; |
1400 | int was_alloced = 0, incr; |
352d5a3a |
1401 | char *cp = (char*)mp; |
8d063cd8 |
1402 | |
e8bc2b5c |
1403 | #if defined(DEBUGGING) || !defined(PERL_CORE) |
ee0007ab |
1404 | MEM_SIZE size = nbytes; |
45d8adaa |
1405 | |
45d8adaa |
1406 | if ((long)nbytes < 0) |
d720c441 |
1407 | croak("%s", "panic: realloc"); |
45d8adaa |
1408 | #endif |
e8bc2b5c |
1409 | |
1410 | BARK_64K_LIMIT("Reallocation",nbytes,size); |
1411 | if (!cp) |
1412 | return malloc(nbytes); |
45d8adaa |
1413 | |
741df71a |
1414 | MALLOC_LOCK; |
72aaf631 |
1415 | ovp = (union overhead *)((caddr_t)cp |
e8bc2b5c |
1416 | - sizeof (union overhead) * CHUNK_SHIFT); |
1417 | bucket = OV_INDEX(ovp); |
1418 | #ifdef IGNORE_SMALL_BAD_FREE |
1419 | if ((bucket < FIRST_BUCKET_WITH_CHECK) |
1420 | || (OV_MAGIC(ovp, bucket) == MAGIC)) |
1421 | #else |
1422 | if (OV_MAGIC(ovp, bucket) == MAGIC) |
1423 | #endif |
1424 | { |
55497cff |
1425 | was_alloced = 1; |
8d063cd8 |
1426 | } else { |
1427 | /* |
1428 | * Already free, doing "compaction". |
1429 | * |
1430 | * Search for the old block of memory on the |
1431 | * free list. First, check the most common |
1432 | * case (last element free'd), then (this failing) |
378cc40b |
1433 | * the last ``reall_srchlen'' items free'd. |
8d063cd8 |
1434 | * If all lookups fail, then assume the size of |
1435 | * the memory block being realloc'd is the |
1436 | * smallest possible. |
1437 | */ |
e8bc2b5c |
1438 | if ((bucket = findbucket(ovp, 1)) < 0 && |
1439 | (bucket = findbucket(ovp, reall_srchlen)) < 0) |
1440 | bucket = 0; |
8d063cd8 |
1441 | } |
e8bc2b5c |
1442 | onb = BUCKET_SIZE_REAL(bucket); |
55497cff |
1443 | /* |
1444 | * avoid the copy if same size block. |
e8bc2b5c |
1445 |  * We are not aggressive with boundary cases. Note that it might |
1446 | * (for a small number of cases) give false negative if |
55497cff |
1447 | * both new size and old one are in the bucket for |
e8bc2b5c |
1448 | * FIRST_BIG_POW2, but the new one is near the lower end. |
1449 | * |
1450 | * We do not try to go to 1.5 times smaller bucket so far. |
55497cff |
1451 | */ |
e8bc2b5c |
1452 | if (nbytes > onb) incr = 1; |
1453 | else { |
1454 | #ifdef DO_NOT_TRY_HARDER_WHEN_SHRINKING |
1455 | if ( /* This is a little bit pessimal if PACK_MALLOC: */ |
1456 | nbytes > ( (onb >> 1) - M_OVERHEAD ) |
1457 | # ifdef TWO_POT_OPTIMIZE |
1458 | || (bucket == FIRST_BIG_POW2 && nbytes >= LAST_SMALL_BOUND ) |
1459 | # endif |
1460 | ) |
1461 | #else /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */ |
1462 | prev_bucket = ( (bucket > MAX_PACKED + 1) |
1463 | ? bucket - BUCKETS_PER_POW2 |
1464 | : bucket - 1); |
1465 | if (nbytes > BUCKET_SIZE_REAL(prev_bucket)) |
1466 | #endif /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */ |
1467 | incr = 0; |
1468 | else incr = -1; |
1469 | } |
1470 | if (!was_alloced |
2ce36478 |
1471 | #ifdef STRESS_REALLOC |
e8bc2b5c |
1472 | || 1 /* always do it the hard way */ |
2ce36478 |
1473 | #endif |
e8bc2b5c |
1474 | ) goto hard_way; |
1475 | else if (incr == 0) { |
852c2e52 |
1476 | inplace_label: |
a687059c |
1477 | #ifdef RCHECK |
1478 | /* |
1479 | * Record new allocated size of block and |
1480 | * bound space with magic numbers. |
1481 | */ |
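/*
 * (RCHECK layout, for orientation: ov_size records the new size, including
 * the bookkeeping overhead, minus one; the bytes padding the request up to
 * a word boundary are filled with RMAGIC_C; and an RMAGIC word is written
 * just past the word-rounded user area, so a later free() or realloc()
 * can detect writes past the requested length.)
 */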
72aaf631 |
1482 | if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) { |
e8bc2b5c |
1483 | int i, nb = ovp->ov_size + 1; |
1484 | |
1485 | if ((i = nb & 3)) { |
1486 | i = 4 - i; |
1487 | while (i--) { |
3541dd58 |
1488 | ASSERT(*((char *)((caddr_t)ovp + nb - RSLOP + i)) == RMAGIC_C, "chunk's tail overwrite"); |
e8bc2b5c |
1489 | } |
1490 | } |
1491 | nb = (nb + 3) &~ 3; |
3541dd58 |
1492 | ASSERT(*(u_int *)((caddr_t)ovp + nb - RSLOP) == RMAGIC, "chunk's tail overwrite"); |
a687059c |
1493 | /* |
1494 | * Convert amount of memory requested into |
1495 | * closest block size stored in hash buckets |
1496 | * which satisfies request. Account for |
1497 | * space used per block for bookkeeping. |
1498 | */ |
cf5c4ad8 |
1499 | nbytes += M_OVERHEAD; |
72aaf631 |
1500 | ovp->ov_size = nbytes - 1; |
e8bc2b5c |
1501 | if ((i = nbytes & 3)) { |
1502 | i = 4 - i; |
1503 | while (i--) |
1504 | *((char *)((caddr_t)ovp + nbytes - RSLOP + i)) |
1505 | = RMAGIC_C; |
1506 | } |
1507 | nbytes = (nbytes + 3) &~ 3; |
72aaf631 |
1508 | *((u_int *)((caddr_t)ovp + nbytes - RSLOP)) = RMAGIC; |
a687059c |
1509 | } |
1510 | #endif |
45d8adaa |
1511 | res = cp; |
741df71a |
1512 | MALLOC_UNLOCK; |
42ac124e |
1513 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
1514 | "0x%lx: (%05lu) realloc %ld bytes inplace\n", |
1515 | (unsigned long)res,(unsigned long)(PL_an++), |
1516 | (long)size)); |
e8bc2b5c |
1517 | } else if (incr == 1 && (cp - M_OVERHEAD == last_op) |
1518 | && (onb > (1 << LOG_OF_MIN_ARENA))) { |
1519 | MEM_SIZE require, newarena = nbytes, pow; |
1520 | int shiftr; |
1521 | |
1522 | POW2_OPTIMIZE_ADJUST(newarena); |
1523 | newarena = newarena + M_OVERHEAD; |
1524 | /* newarena = (newarena + 3) &~ 3; */ |
1525 | shiftr = (newarena - 1) >> LOG_OF_MIN_ARENA; |
1526 | pow = LOG_OF_MIN_ARENA + 1; |
1527 | /* apart from this loop, this is O(1) */ |
1528 | while (shiftr >>= 1) |
1529 | pow++; |
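/* pow is now the smallest exponent with (1 << pow) >= newarena, i.e.
   ceil(log2(newarena)), since newarena exceeds 2^LOG_OF_MIN_ARENA on
   this path. */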
1530 | newarena = (1 << pow) + POW2_OPTIMIZE_SURPLUS(pow * BUCKETS_PER_POW2); |
1531 | require = newarena - onb - M_OVERHEAD; |
1532 | |
fa423c5b |
1533 | if (getpages_adjacent(require)) { |
e8bc2b5c |
1534 | #ifdef DEBUGGING_MSTATS |
fa423c5b |
1535 | nmalloc[bucket]--; |
1536 | nmalloc[pow * BUCKETS_PER_POW2]++; |
e8bc2b5c |
1537 | #endif |
fa423c5b |
1538 | *(cp - M_OVERHEAD) = pow * BUCKETS_PER_POW2; /* Fill index. */ |
1539 | goto inplace_label; |
1540 | } else |
1541 | goto hard_way; |
e8bc2b5c |
1542 | } else { |
1543 | hard_way: |
741df71a |
1544 | MALLOC_UNLOCK; |
42ac124e |
1545 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
1546 | "0x%lx: (%05lu) realloc %ld bytes the hard way\n", |
1547 | (unsigned long)cp,(unsigned long)(PL_an++), |
1548 | (long)size)); |
e8bc2b5c |
1549 | if ((res = (char*)malloc(nbytes)) == NULL) |
1550 | return (NULL); |
1551 | if (cp != res) /* common optimization */ |
1552 | Copy(cp, res, (MEM_SIZE)(nbytes<onb?nbytes:onb), char); |
1553 | if (was_alloced) |
1554 | free(cp); |
45d8adaa |
1555 | } |
2304df62 |
1556 | return ((Malloc_t)res); |
8d063cd8 |
1557 | } |
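/*
 * A minimal usage sketch (never compiled, kept under #if 0): a growth loop
 * of this shape is the typical caller of realloc() above.  While the new
 * length still falls within the chunk's current bucket, the cheap
 * "inplace" branch applies; once it outgrows the bucket, the "hard way"
 * branch allocates, copies and frees.  The function name and parameters
 * below are invented purely for illustration.
 */
#if 0
static char *
grow_buffer(char *buf, size_t *cur, size_t want)
{
    if (want > *cur) {
	char *nbuf = (char*)realloc(buf, want);	/* may move the chunk */
	if (!nbuf)
	    return NULL;		/* the old buf is still valid */
	buf = nbuf;
	*cur = want;
    }
    return buf;
}
#endif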
1558 | |
1559 | /* |
1560 | * Search ``srchlen'' elements of each free list for a block whose |
1561 | * header starts at ``freep''. If srchlen is -1 search the whole list. |
1562 | * Return bucket number, or -1 if not found. |
1563 | */ |
ee0007ab |
1564 | static int |
8ac85365 |
1565 | findbucket(union overhead *freep, int srchlen) |
8d063cd8 |
1566 | { |
1567 | register union overhead *p; |
1568 | register int i, j; |
1569 | |
1570 | for (i = 0; i < NBUCKETS; i++) { |
1571 | j = 0; |
1572 | for (p = nextf[i]; p && j != srchlen; p = p->ov_next) { |
1573 | if (p == freep) |
1574 | return (i); |
1575 | j++; |
1576 | } |
1577 | } |
1578 | return (-1); |
1579 | } |
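/*
 * Note: with srchlen == -1 this walks every free list to its end, so the
 * cost is proportional to the total number of free chunks.  realloc()
 * only resorts to it for chunks that were already freed, and then with
 * small search lengths (1 and reall_srchlen).
 */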
1580 | |
cf5c4ad8 |
1581 | Malloc_t |
8ac85365 |
1582 | calloc(register size_t elements, register size_t size) |
cf5c4ad8 |
1583 | { |
1584 | long sz = elements * size; |
1585 | Malloc_t p = malloc(sz); |
1586 | |
1587 | if (p) { |
1588 | memset((void*)p, 0, sz); |
1589 | } |
1590 | return p; |
1591 | } |
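/*
 * Sketch only (never compiled): the multiplication above can overflow for
 * very large element counts.  An overflow-guarded variant, shown purely as
 * an illustration and not used anywhere in perl, could look like this.
 */
#if 0
static Malloc_t
calloc_guarded(size_t elements, size_t size)
{
    if (size && elements > (size_t)-1 / size)
	return NULL;		/* elements * size would overflow */
    return calloc(elements, size);
}
#endif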
1592 | |
e8bc2b5c |
1593 | MEM_SIZE |
1594 | malloced_size(void *p) |
1595 | { |
8d6dde3e |
1596 | union overhead *ovp = (union overhead *) |
1597 | ((caddr_t)p - sizeof (union overhead) * CHUNK_SHIFT); |
1598 | int bucket = OV_INDEX(ovp); |
1599 | #ifdef RCHECK |
1600 | /* The caller wants complete control over the chunk, so disable |
1601 | the memory checking inside the chunk. */ |
1602 | if (bucket <= MAX_SHORT_BUCKET) { |
1603 | MEM_SIZE size = BUCKET_SIZE_REAL(bucket); |
1604 | ovp->ov_size = size + M_OVERHEAD - 1; |
1605 | *((u_int *)((caddr_t)ovp + size + M_OVERHEAD - RSLOP)) = RMAGIC; |
1606 | } |
1607 | #endif |
e8bc2b5c |
1608 | return BUCKET_SIZE_REAL(bucket); |
1609 | } |
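/*
 * Usage note (illustrative): malloced_size(p) reports the usable size of
 * the chunk, which may be larger than what was originally requested; e.g.
 * after "char *p = (char*)malloc(10);" it returns the full bucket size,
 * at least 10.  Under RCHECK the call also moves the trailing magic to
 * the end of the bucket, so the caller may use the whole reported area.
 */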
1610 | |
c07a80fd |
1611 | #ifdef DEBUGGING_MSTATS |
e8bc2b5c |
1612 | |
1613 | # ifdef BUCKETS_ROOT2 |
1614 | # define MIN_EVEN_REPORT 6 |
1615 | # else |
1616 | # define MIN_EVEN_REPORT MIN_BUCKET |
1617 | # endif |
8d063cd8 |
1618 | /* |
1619 | * mstats - print out statistics about malloc |
1620 | * |
1621 | * Prints two lines of numbers, one showing the length of the free list |
1622 | * for each size category, the second showing the number of mallocs - |
1623 | * frees for each size category. |
1624 | */ |
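/*
 * (When perl itself is built with this malloc and DEBUGGING_MSTATS, these
 * statistics are typically requested by setting the PERL_DEBUG_MSTATS
 * environment variable or via Devel::Peek's mstat(); from C one can simply
 * call dump_mstats("some label").)
 */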
ee0007ab |
1625 | void |
8ac85365 |
1626 | dump_mstats(char *s) |
8d063cd8 |
1627 | { |
1628 | register int i, j; |
1629 | register union overhead *p; |
e8bc2b5c |
1630 | int topbucket=0, topbucket_ev=0, topbucket_odd=0, totfree=0, total=0; |
c07a80fd |
1631 | u_int nfree[NBUCKETS]; |
e8bc2b5c |
1632 | int total_chain = 0; |
1633 | struct chunk_chain_s* nextchain = chunk_chain; |
8d063cd8 |
1634 | |
e8bc2b5c |
1635 | for (i = MIN_BUCKET ; i < NBUCKETS; i++) { |
8d063cd8 |
1636 | for (j = 0, p = nextf[i]; p; p = p->ov_next, j++) |
1637 | ; |
c07a80fd |
1638 | nfree[i] = j; |
e8bc2b5c |
1639 | totfree += nfree[i] * BUCKET_SIZE_REAL(i); |
1640 | total += nmalloc[i] * BUCKET_SIZE_REAL(i); |
1641 | if (nmalloc[i]) { |
1642 | i % 2 ? (topbucket_odd = i) : (topbucket_ev = i); |
1643 | topbucket = i; |
1644 | } |
c07a80fd |
1645 | } |
1646 | if (s) |
e8bc2b5c |
1647 | PerlIO_printf(PerlIO_stderr(), |
d720c441 |
1648 | "Memory allocation statistics %s (buckets %ld(%ld)..%ld(%ld)\n", |
e8bc2b5c |
1649 | s, |
d720c441 |
1650 | (long)BUCKET_SIZE_REAL(MIN_BUCKET), |
1651 | (long)BUCKET_SIZE(MIN_BUCKET), |
1652 | (long)BUCKET_SIZE_REAL(topbucket), (long)BUCKET_SIZE(topbucket)); |
5f05dabc |
1653 | PerlIO_printf(PerlIO_stderr(), "%8d free:", totfree); |
e8bc2b5c |
1654 | for (i = MIN_EVEN_REPORT; i <= topbucket; i += BUCKETS_PER_POW2) { |
1655 | PerlIO_printf(PerlIO_stderr(), |
1656 | ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2) |
1657 | ? " %5d" |
1658 | : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")), |
1659 | nfree[i]); |
1660 | } |
1661 | #ifdef BUCKETS_ROOT2 |
1662 | PerlIO_printf(PerlIO_stderr(), "\n\t "); |
1663 | for (i = MIN_BUCKET + 1; i <= topbucket_odd; i += BUCKETS_PER_POW2) { |
1664 | PerlIO_printf(PerlIO_stderr(), |
1665 | ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2) |
1666 | ? " %5d" |
1667 | : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")), |
1668 | nfree[i]); |
8d063cd8 |
1669 | } |
e8bc2b5c |
1670 | #endif |
5f05dabc |
1671 | PerlIO_printf(PerlIO_stderr(), "\n%8d used:", total - totfree); |
e8bc2b5c |
1672 | for (i = MIN_EVEN_REPORT; i <= topbucket; i += BUCKETS_PER_POW2) { |
1673 | PerlIO_printf(PerlIO_stderr(), |
1674 | ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2) |
1675 | ? " %5d" |
1676 | : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")), |
1677 | nmalloc[i] - nfree[i]); |
c07a80fd |
1678 | } |
e8bc2b5c |
1679 | #ifdef BUCKETS_ROOT2 |
1680 | PerlIO_printf(PerlIO_stderr(), "\n\t "); |
1681 | for (i = MIN_BUCKET + 1; i <= topbucket_odd; i += BUCKETS_PER_POW2) { |
1682 | PerlIO_printf(PerlIO_stderr(), |
1683 | ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2) |
1684 | ? " %5d" |
1685 | : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")), |
1686 | nmalloc[i] - nfree[i]); |
1687 | } |
1688 | #endif |
1689 | while (nextchain) { |
1690 | total_chain += nextchain->size; |
1691 | nextchain = nextchain->next; |
1692 | } |
1693 | PerlIO_printf(PerlIO_stderr(), "\nTotal sbrk(): %d/%d:%d. Odd ends: pad+heads+chain+tail: %d+%d+%d+%d.\n", |
1694 | goodsbrk + sbrk_slack, sbrks, sbrk_good, sbrk_slack, |
1695 | start_slack, total_chain, sbrked_remains); |
c07a80fd |
1696 | } |
1697 | #else |
1698 | void |
8ac85365 |
1699 | dump_mstats(char *s) |
c07a80fd |
1700 | { |
8d063cd8 |
1701 | } |
1702 | #endif |
a687059c |
1703 | #endif /* lint */ |
cf5c4ad8 |
1704 | |
1705 | |
1706 | #ifdef USE_PERL_SBRK |
1707 | |
2c92fcc0 |
1708 | # if defined(__MACHTEN_PPC__) || defined(__NeXT__) |
38ac2dc8 |
1709 | # define PERL_SBRK_VIA_MALLOC |
1710 | /* |
1711 | * MachTen's malloc() returns a buffer aligned on a two-byte boundary. |
1712 | * While this is adequate, it may slow down access to longer data |
1713 | * types by forcing multiple memory accesses. It also causes |
1714 | * complaints when RCHECK is in force. So we allocate six bytes |
1715 | * more than we need to, and return an address rounded up to an |
1716 | * eight-byte boundary. |
1717 | * |
1718 | * 980701 Dominic Dunlop <domo@computer.org> |
1719 | */ |
1720 | # define SYSTEM_ALLOC(a) ((void *)(((unsigned)malloc((a)+6)+6)&~7)) |
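/*
 * For example, if malloc() returns the two-byte-aligned address 0x1002,
 * ((0x1002 + 6) & ~7) == 0x1008: an eight-byte-aligned pointer at most
 * six bytes into the over-allocated buffer.
 */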
1721 | # endif |
1722 | |
760ac839 |
1723 | # ifdef PERL_SBRK_VIA_MALLOC |
72e5b9db |
1724 | # if defined(HIDEMYMALLOC) || defined(EMBEDMYMALLOC) |
38ac2dc8 |
1725 | # undef malloc /* Expose names that */ |
1726 | # undef calloc /* HIDEMYMALLOC hides */ |
1727 | # undef realloc |
1728 | # undef free |
760ac839 |
1729 | # else |
72e5b9db |
1730 | # include "Error: -DPERL_SBRK_VIA_MALLOC needs -D(HIDE|EMBED)MYMALLOC" |
760ac839 |
1731 | # endif |
cf5c4ad8 |
1732 | |
1733 | /* It may seem schizophrenic to use perl's malloc and have it call the */ |
1734 | /* system malloc; the reason is that version 3.2 of the OS had frequent */ |
1735 | /* core dumps within nxzonefreenolock. This sbrk routine put an end to */ |
1736 | /* the cores. */ |
1737 | |
38ac2dc8 |
1738 | # ifndef SYSTEM_ALLOC |
1739 | # define SYSTEM_ALLOC(a) malloc(a) |
1740 | # endif |
cf5c4ad8 |
1741 | |
760ac839 |
1742 | # endif /* PERL_SBRK_VIA_MALLOC */ |
cf5c4ad8 |
1743 | |
1744 | static IV Perl_sbrk_oldchunk; |
1745 | static long Perl_sbrk_oldsize; |
1746 | |
760ac839 |
1747 | # define PERLSBRK_32_K (1<<15) |
1748 | # define PERLSBRK_64_K (1<<16) |
cf5c4ad8 |
1749 | |
b63effbb |
1750 | Malloc_t |
df0003d4 |
1751 | Perl_sbrk(int size) |
cf5c4ad8 |
1752 | { |
1753 | IV got; |
1754 | int small, reqsize; |
1755 | |
1756 | if (!size) return 0; |
55497cff |
1757 | /* reqsize is also used below when the remainder of a small chunk |
cf5c4ad8 |
1758 | is registered, so it must be set even without PERL_CORE. */ |
1759 | reqsize = size; |
57569e04 |
1760 | #ifdef PACK_MALLOC |
1761 | size = (size + 0x7ff) & ~0x7ff; |
1762 | #endif |
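/* i.e. round the request up to a multiple of 0x800 (2K): a 100-byte
   request becomes 2048, a 2049-byte one becomes 4096. */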
cf5c4ad8 |
1763 | if (size <= Perl_sbrk_oldsize) { |
1764 | got = Perl_sbrk_oldchunk; |
1765 | Perl_sbrk_oldchunk += size; |
1766 | Perl_sbrk_oldsize -= size; |
1767 | } else { |
1768 | if (size >= PERLSBRK_32_K) { |
1769 | small = 0; |
1770 | } else { |
cf5c4ad8 |
1771 | size = PERLSBRK_64_K; |
1772 | small = 1; |
1773 | } |
1774 | got = (IV)SYSTEM_ALLOC(size); |
57569e04 |
1775 | #ifdef PACK_MALLOC |
1776 | got = (got + 0x7ff) & ~0x7ff; |
1777 | #endif |
cf5c4ad8 |
1778 | if (small) { |
1779 | /* Chunk is small, register the rest for future allocs. */ |
1780 | Perl_sbrk_oldchunk = got + reqsize; |
1781 | Perl_sbrk_oldsize = size - reqsize; |
1782 | } |
1783 | } |
1784 | |
fb73857a |
1785 | DEBUG_m(PerlIO_printf(Perl_debug_log, "sbrk malloc size %ld (reqsize %ld), left size %ld, give addr 0x%lx\n", |
cf5c4ad8 |
1786 | size, reqsize, Perl_sbrk_oldsize, got)); |
cf5c4ad8 |
1787 | |
1788 | return (void *)got; |
1789 | } |
1790 | |
1791 | #endif /* defined USE_PERL_SBRK */ |