Commit | Line | Data |
a0d0e21e |
1 | /* malloc.c |
8d063cd8 |
2 | * |
8d063cd8 |
3 | */ |
4 | |
87c6202a |
5 | /* |
741df71a |
6 | Here are some notes on configuring Perl's malloc. (For non-perl |
7 | usage see below.) |
87c6202a |
8 | |
9 | There are two macros which serve as bulk disablers of advanced |
10 | features of this malloc: NO_FANCY_MALLOC, PLAIN_MALLOC (undef by |
11 | default). Look in the list of default values below to understand |
12 | their exact effect. Defining NO_FANCY_MALLOC returns malloc.c to the |
13 | state of the malloc in Perl 5.004. Additionally defining PLAIN_MALLOC |
14 | returns it to the state as of Perl 5.000. |
15 | |
16 | Note that some of the settings below may be ignored in the code based |
17 | on values of other macros. The PERL_CORE symbol is only defined when |
18 | perl itself is being compiled (so malloc can make some assumptions |
19 | about perl's facilities being available to it). |
20 | |
21 | Each config option has a short description, followed by its name, |
22 | default value, and a comment about the default (if applicable). Some |
23 | options take a precise value, while the others are just boolean. |
24 | The boolean ones are listed first. |
25 | |
26 | # Enable code for an emergency memory pool in $^M. See perlvar.pod |
27 | # for a description of $^M. |
28 | PERL_EMERGENCY_SBRK (!PLAIN_MALLOC && PERL_CORE) |
29 | |
30 | # Enable code for printing memory statistics. |
31 | DEBUGGING_MSTATS (!PLAIN_MALLOC && PERL_CORE) |
32 | |
33 | # Move allocation info for small buckets into separate areas. |
34 | # Memory optimization (especially for small allocations of |
35 | # less than 64 bytes). Since perl usually makes a large number |
36 | # of small allocations, this is usually a win. |
37 | PACK_MALLOC (!PLAIN_MALLOC && !RCHECK) |
38 | |
39 | # Add one page to big powers of two when calculating bucket size. |
40 | # This is targeted at big allocations, as are common in image |
41 | # processing. |
42 | TWO_POT_OPTIMIZE !PLAIN_MALLOC |
43 | |
44 | # Use intermediate bucket sizes between powers-of-two. This is |
45 | # generally a memory optimization, and a (small) speed pessimization. |
46 | BUCKETS_ROOT2 !NO_FANCY_MALLOC |
47 | |
48 | # Do not check small deallocations for bad free(). Memory |
49 | # and speed optimization, error reporting pessimization. |
50 | IGNORE_SMALL_BAD_FREE (!NO_FANCY_MALLOC && !RCHECK) |
51 | |
52 | # Use table lookup to decide in which bucket a given allocation will go. |
53 | SMALL_BUCKET_VIA_TABLE !NO_FANCY_MALLOC |
54 | |
38ac2dc8 |
55 | # Use a perl-defined sbrk() instead of the (presumably broken or |
56 | # missing) system-supplied sbrk(). |
57 | USE_PERL_SBRK undef |
58 | |
59 | # Use system malloc() (or calloc() etc.) to emulate sbrk(). Normally |
60 | # only used with broken sbrk()s. |
87c6202a |
61 | PERL_SBRK_VIA_MALLOC undef |
62 | |
38ac2dc8 |
63 | # Which allocator to use if PERL_SBRK_VIA_MALLOC |
64 | SYSTEM_ALLOC(a) malloc(a) |
65 | |
9ee81ef6 |
66 | # Minimal alignment (in bytes, should be a power of 2) of SYSTEM_ALLOC |
5bbd1ef5 |
67 | SYSTEM_ALLOC_ALIGNMENT MEM_ALIGNBYTES |
68 | |
87c6202a |
69 | # Disable memory overwrite checking with DEBUGGING. Memory and speed |
70 | # optimization, error reporting pessimization. |
71 | NO_RCHECK undef |
72 | |
73 | # Enable memory overwrite checking with DEBUGGING. Memory and speed |
74 | # pessimization, error reporting optimization |
75 | RCHECK (DEBUGGING && !NO_RCHECK) |
76 | |
77 | # Failed allocations bigger than this size croak (if |
78 | # PERL_EMERGENCY_SBRK is enabled) without touching $^M. See |
79 | # perlvar.pod for a description of $^M. |
80 | BIG_SIZE (1<<16) # 64K |
81 | |
82 | # Starting from this power of two, add an extra page to the |
83 | # size of the bucket. This enables optimized allocations of sizes |
84 | # close to powers of 2. Note that the value is indexed at 0. |
85 | FIRST_BIG_POW2 15 # 32K, 16K is used too often |
86 | |
87 | # Estimate of minimal memory footprint. malloc uses this value to |
88 | # size its initial request for memory from the system. |
89 | FIRST_SBRK (48*1024) |
90 | |
91 | # Round up sbrk()s to multiples of this. |
92 | MIN_SBRK 2048 |
93 | |
94 | # Round up sbrk()s to multiples of this percent of footprint. |
95 | MIN_SBRK_FRAC 3 |
96 | |
97 | # Add this much memory to big powers of two to get the bucket size. |
98 | PERL_PAGESIZE 4096 |
99 | |
100 | # This many sbrk() discontinuities should be tolerated even |
101 | # from the start without deciding that sbrk() is usually |
102 | # discontinuous. |
103 | SBRK_ALLOW_FAILURES 3 |
104 | |
105 | # This many continuous sbrk()s compensate for one discontinuous one. |
106 | SBRK_FAILURE_PRICE 50 |
107 | |
28ac10b1 |
108 | # Some configurations may ask for 12-byte-or-so allocations which |
109 | # require 8-byte alignment (?!). In such a situation one needs to |
110 | # define this to disable the 12-byte bucket (will increase memory footprint) |
111 | STRICT_ALIGNMENT undef |
112 | |
87c6202a |
113 | This implementation assumes that calling PerlIO_printf() does not |
114 | result in any memory allocation calls (used during a panic). |
115 | |
116 | */ |
117 | |
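/*
   Illustrative example (not part of the original source): a non-default
   configuration is selected simply by defining the relevant macros before
   the defaults below are set up, typically on the compiler command line.
   A hypothetical build that wants the Perl 5.004 behaviour together with
   a coarser sbrk() granularity might use something like

	cc -DNO_FANCY_MALLOC -DMIN_SBRK=8192 -c malloc.c

   or, equivalently, place the definitions here:

	#define NO_FANCY_MALLOC
	#define MIN_SBRK 8192

   The boolean options are only tested with #ifdef/#if defined(), so any
   value (or none) will do; the numeric ones are used as plain integers.
*/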
741df71a |
118 | /* |
119 | If used outside of Perl environment, it may be useful to redefine |
120 | the following macros (listed below with defaults): |
121 | |
122 | # Type of address returned by allocation functions |
123 | Malloc_t void * |
124 | |
125 | # Type of size argument for allocation functions |
126 | MEM_SIZE unsigned long |
127 | |
c7374474 |
128 | # size of void* |
129 | PTRSIZE 4 |
130 | |
741df71a |
131 | # Maximal value in LONG |
132 | LONG_MAX 0x7FFFFFFF |
133 | |
134 | # Unsigned integer type big enough to keep a pointer |
135 | UV unsigned long |
136 | |
137 | # Type of pointer with 1-byte granularity |
138 | caddr_t char * |
139 | |
140 | # Type returned by free() |
141 | Free_t void |
142 | |
5bbd1ef5 |
143 | # Very fatal condition reporting function (cannot call any other functions) |
144 | fatalcroak(arg) write(2,arg,strlen(arg)) + exit(2) |
145 | |
741df71a |
146 | # Fatal error reporting function |
147 | croak(format, arg) warn(idem) + exit(1) |
148 | |
149 | # Error reporting function |
150 | warn(format, arg) fprintf(stderr, idem) |
151 | |
152 | # Locking/unlocking for MT operation |
cea2e8a9 |
153 | MALLOC_LOCK MUTEX_LOCK_NOCONTEXT(&PL_malloc_mutex) |
154 | MALLOC_UNLOCK MUTEX_UNLOCK_NOCONTEXT(&PL_malloc_mutex) |
741df71a |
155 | |
156 | # Locking/unlocking mutex for MT operation |
157 | MUTEX_LOCK(l) void |
158 | MUTEX_UNLOCK(l) void |
159 | */ |
160 | |
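/*
   Illustrative sketch (not part of the original source), assuming a
   hypothetical single-threaded non-Perl host: when PERL_CORE is not
   defined, the #else branch below already supplies workable defaults for
   most of the macros listed above, so an embedder typically only has to
   override what differs on the target and provide the locking macros,
   for example

	#define MEM_SIZE	size_t
	#define MALLOC_LOCK
	#define MALLOC_UNLOCK
	#define croak(fmt, arg)	(fprintf(stderr, (fmt), (arg)), exit(1))

   before compiling this file.  The fallback definitions below then map
   Perl_malloc()/Perl_mfree()/Perl_realloc()/Perl_calloc() onto the plain
   malloc()/free()/realloc()/calloc() names.
*/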
e8bc2b5c |
161 | #ifndef NO_FANCY_MALLOC |
162 | # ifndef SMALL_BUCKET_VIA_TABLE |
163 | # define SMALL_BUCKET_VIA_TABLE |
164 | # endif |
165 | # ifndef BUCKETS_ROOT2 |
166 | # define BUCKETS_ROOT2 |
167 | # endif |
168 | # ifndef IGNORE_SMALL_BAD_FREE |
169 | # define IGNORE_SMALL_BAD_FREE |
170 | # endif |
3562ef9b |
171 | #endif |
172 | |
e8bc2b5c |
173 | #ifndef PLAIN_MALLOC /* Bulk enable features */ |
174 | # ifndef PACK_MALLOC |
175 | # define PACK_MALLOC |
176 | # endif |
177 | # ifndef TWO_POT_OPTIMIZE |
178 | # define TWO_POT_OPTIMIZE |
179 | # endif |
d720c441 |
180 | # if defined(PERL_CORE) && !defined(PERL_EMERGENCY_SBRK) |
181 | # define PERL_EMERGENCY_SBRK |
e8bc2b5c |
182 | # endif |
183 | # if defined(PERL_CORE) && !defined(DEBUGGING_MSTATS) |
184 | # define DEBUGGING_MSTATS |
185 | # endif |
186 | #endif |
187 | |
188 | #define MIN_BUC_POW2 (sizeof(void*) > 4 ? 3 : 2) /* Allow for 4-byte arena. */ |
189 | #define MIN_BUCKET (MIN_BUC_POW2 * BUCKETS_PER_POW2) |
190 | |
61ae2fbf |
191 | #if !(defined(I286) || defined(atarist) || defined(__MINT__)) |
e8bc2b5c |
192 | /* take 2k unless the block is bigger than that */ |
193 | # define LOG_OF_MIN_ARENA 11 |
194 | #else |
195 | /* take 16k unless the block is bigger than that |
196 | (80286s like large segments!), probably good on the atari too */ |
197 | # define LOG_OF_MIN_ARENA 14 |
198 | #endif |
199 | |
8d063cd8 |
200 | #ifndef lint |
1944739a |
201 | # if defined(DEBUGGING) && !defined(NO_RCHECK) |
202 | # define RCHECK |
203 | # endif |
e8bc2b5c |
204 | # if defined(RCHECK) && defined(IGNORE_SMALL_BAD_FREE) |
205 | # undef IGNORE_SMALL_BAD_FREE |
206 | # endif |
8d063cd8 |
207 | /* |
208 | * malloc.c (Caltech) 2/21/82 |
209 | * Chris Kingsley, kingsley@cit-20. |
210 | * |
211 | * This is a very fast storage allocator. It allocates blocks of a small |
212 | * number of different sizes, and keeps free lists of each size. Blocks that |
213 | * don't exactly fit are passed up to the next larger size. In this |
214 | * implementation, the available sizes are 2^n-4 (or 2^n-12) bytes long. |
cf5c4ad8 |
215 | * If PACK_MALLOC is defined, small blocks are 2^n bytes long. |
8d063cd8 |
216 | * This is designed for use in a program that uses vast quantities of memory, |
741df71a |
217 | * but bombs when it runs out. |
218 | * |
4eb8286e |
219 | * Modifications Copyright Ilya Zakharevich 1996-99. |
741df71a |
220 | * |
221 | * Still very quick, but much more thrifty. (Std config is 10% slower |
222 | * than it was, and takes 67% of old heap size for typical usage.) |
223 | * |
224 | * Allocations of small blocks are now table-driven to many different |
225 | * buckets. Sizes of really big buckets are increased to accommodate |
226 | * common power-of-two-sized blocks. Running out of memory is made into |
227 | * an exception. Deeply configurable and thread-safe. |
228 | * |
8d063cd8 |
229 | */ |
230 | |
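/*
 * Minimal sketch of the segregated-free-list idea described above (not
 * part of the original source; the real code below adds packing, range
 * checking and arena management on top of this).  Each bucket holds a
 * singly-linked list of free blocks of its size; allocation pops the
 * head, freeing pushes the block back:
 *
 *	void *pop(int bucket) {
 *	    union overhead *p;
 *	    if (nextf[bucket] == NULL)
 *		morecore(bucket);		-- refill the list
 *	    if ((p = nextf[bucket]) == NULL)
 *		return NULL;
 *	    nextf[bucket] = p->ov_next;
 *	    return (void*)(p + 1);		-- user data after header
 *	}
 *
 *	void push(int bucket, union overhead *p) {
 *	    p->ov_next = nextf[bucket];
 *	    nextf[bucket] = p;
 *	}
 */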
d720c441 |
231 | #ifdef PERL_CORE |
232 | # include "EXTERN.h" |
4ad56ec9 |
233 | # define PERL_IN_MALLOC_C |
d720c441 |
234 | # include "perl.h" |
cea2e8a9 |
235 | # if defined(PERL_IMPLICIT_CONTEXT) |
236 | # define croak Perl_croak_nocontext |
237 | # define warn Perl_warn_nocontext |
238 | # endif |
d720c441 |
239 | #else |
240 | # ifdef PERL_FOR_X2P |
241 | # include "../EXTERN.h" |
242 | # include "../perl.h" |
243 | # else |
244 | # include <stdlib.h> |
245 | # include <stdio.h> |
246 | # include <memory.h> |
247 | # define _(arg) arg |
248 | # ifndef Malloc_t |
249 | # define Malloc_t void * |
250 | # endif |
c7374474 |
251 | # ifndef PTRSIZE |
252 | # define PTRSIZE 4 |
253 | # endif |
d720c441 |
254 | # ifndef MEM_SIZE |
255 | # define MEM_SIZE unsigned long |
256 | # endif |
257 | # ifndef LONG_MAX |
258 | # define LONG_MAX 0x7FFFFFFF |
259 | # endif |
260 | # ifndef UV |
261 | # define UV unsigned long |
262 | # endif |
263 | # ifndef caddr_t |
264 | # define caddr_t char * |
265 | # endif |
266 | # ifndef Free_t |
267 | # define Free_t void |
268 | # endif |
269 | # define Copy(s,d,n,t) (void)memcpy((char*)(d),(char*)(s), (n) * sizeof(t)) |
270 | # define PerlEnv_getenv getenv |
271 | # define PerlIO_printf fprintf |
272 | # define PerlIO_stderr() stderr |
273 | # endif |
e8bc2b5c |
274 | # ifndef croak /* make depend */ |
741df71a |
275 | # define croak(mess, arg) (warn((mess), (arg)), exit(1)) |
d720c441 |
276 | # endif |
277 | # ifndef warn |
741df71a |
278 | # define warn(mess, arg) fprintf(stderr, (mess), (arg)) |
e8bc2b5c |
279 | # endif |
280 | # ifdef DEBUG_m |
281 | # undef DEBUG_m |
282 | # endif |
283 | # define DEBUG_m(a) |
284 | # ifdef DEBUGGING |
285 | # undef DEBUGGING |
286 | # endif |
cea2e8a9 |
287 | # ifndef pTHX |
288 | # define pTHX void |
289 | # define pTHX_ |
290 | # define dTHX extern int Perl___notused |
291 | # define WITH_THX(s) s |
292 | # endif |
c5be433b |
293 | # ifndef PERL_GET_INTERP |
294 | # define PERL_GET_INTERP PL_curinterp |
295 | # endif |
4ad56ec9 |
296 | # ifndef Perl_malloc |
297 | # define Perl_malloc malloc |
298 | # endif |
299 | # ifndef Perl_mfree |
300 | # define Perl_mfree free |
301 | # endif |
302 | # ifndef Perl_realloc |
303 | # define Perl_realloc realloc |
304 | # endif |
305 | # ifndef Perl_calloc |
306 | # define Perl_calloc calloc |
307 | # endif |
308 | # ifndef Perl_strdup |
309 | # define Perl_strdup strdup |
310 | # endif |
e8bc2b5c |
311 | #endif |
312 | |
313 | #ifndef MUTEX_LOCK |
314 | # define MUTEX_LOCK(l) |
315 | #endif |
316 | |
317 | #ifndef MUTEX_UNLOCK |
318 | # define MUTEX_UNLOCK(l) |
319 | #endif |
320 | |
741df71a |
321 | #ifndef MALLOC_LOCK |
cea2e8a9 |
322 | # define MALLOC_LOCK MUTEX_LOCK_NOCONTEXT(&PL_malloc_mutex) |
741df71a |
323 | #endif |
324 | |
325 | #ifndef MALLOC_UNLOCK |
cea2e8a9 |
326 | # define MALLOC_UNLOCK MUTEX_UNLOCK_NOCONTEXT(&PL_malloc_mutex) |
741df71a |
327 | #endif |
328 | |
5bbd1ef5 |
329 | # ifndef fatalcroak /* make depend */ |
330 | # define fatalcroak(mess) (write(2, (mess), strlen(mess)), exit(2)) |
331 | # endif |
332 | |
760ac839 |
333 | #ifdef DEBUGGING |
e8bc2b5c |
334 | # undef DEBUG_m |
0b250b9e |
335 | # define DEBUG_m(a) \ |
336 | STMT_START { \ |
337 | if (PERL_GET_INTERP) { dTHX; if (PL_debug & 128) { a; } } \ |
338 | } STMT_END |
760ac839 |
339 | #endif |
340 | |
e476b1b5 |
341 | #ifdef PERL_IMPLICIT_CONTEXT |
342 | # define PERL_IS_ALIVE aTHX |
343 | #else |
344 | # define PERL_IS_ALIVE TRUE |
345 | #endif |
346 | |
347 | |
e9397286 |
348 | /* |
349 | * Layout of memory: |
350 | * ~~~~~~~~~~~~~~~~ |
351 | * The memory is broken into "blocks" which occupy multiples of 2K (and |
352 | * generally speaking, have size "close" to a power of 2). The addresses |
353 | * of such *unused* blocks are kept in nextf[i] with big enough i. (nextf |
354 | * is an array of linked lists.) (Addresses of used blocks are not known.) |
355 | * |
4ad56ec9 |
356 | * Moreover, since the algorithm may try to "bite" smaller blocks out |
e9397286 |
357 | * of unused bigger ones, there are also regions of "irregular" size, |
358 | * managed separately, by a linked list chunk_chain. |
359 | * |
360 | * The third type of storage is the sbrk()ed-but-not-yet-used space, its |
361 | * end and size are kept in last_sbrk_top and sbrked_remains. |
362 | * |
363 | * Growing blocks "in place": |
364 | * ~~~~~~~~~~~~~~~~~~~~~~~~~ |
365 | * The address of the block with the greatest address is kept in last_op |
366 | * (if not known, last_op is 0). If it is known that the memory above |
367 | * last_op is not continuous, or contains a chunk from chunk_chain, |
368 | * last_op is set to 0. |
369 | * |
370 | * The chunk with address last_op may be grown by expanding into |
371 | * sbrk()ed-but-not-yet-used space, or trying to sbrk() more continuous |
372 | * memory. |
373 | * |
374 | * Management of last_op: |
375 | * ~~~~~~~~~~~~~~~~~~~~~ |
376 | * |
377 | * free() never changes the boundaries of blocks, so is not relevant. |
378 | * |
379 | * The only way realloc() may change the boundaries of blocks is if it |
380 | * grows a block "in place". However, in the case of success such a |
381 | * chunk is automatically last_op, and it remains last_op. In the case |
382 | * of failure getpages_adjacent() clears last_op. |
383 | * |
384 | * malloc() may change blocks by calling morecore() only. |
385 | * |
386 | * morecore() may create new blocks by: |
387 | * a) biting pieces from chunk_chain (cannot create one above last_op); |
388 | * b) biting a piece from an unused block (if block was last_op, this |
389 | * may create a chunk from chain above last_op, thus last_op is |
390 | * invalidated in such a case). |
391 | * c) biting a piece of sbrk()ed-but-not-yet-used space. This creates |
392 | * a block which is last_op. |
393 | * d) Allocating new pages by calling getpages(); |
394 | * |
395 | * getpages() creates a new block. It marks last_op at the bottom of |
396 | * the chunk of memory it returns. |
397 | * |
398 | * Active pages footprint: |
399 | * ~~~~~~~~~~~~~~~~~~~~~~ |
400 | * Note that we do not need to traverse the lists in nextf[i], just take |
401 | * the first element of this list. However, we *need* to traverse the |
402 | * list in chunk_chain, but most of the time it should be a very short one, |
403 | * so we do not step on a lot of pages we are not going to use. |
404 | * |
405 | * Flaws: |
406 | * ~~~~~ |
407 | * get_from_bigger_buckets(): forgets to increment price => Quite |
408 | * aggressive. |
409 | */ |
410 | |
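/*
 * Illustrative summary (not part of the original source) of the order in
 * which morecore() below actually tries the sources a)-d) above when a
 * bucket runs dry, details omitted:
 *
 *	if (the arena-sized bucket has a free block)	split that block;
 *	else if (get_from_chain(needed))		bite it off chunk_chain;
 *	else if (get_from_bigger_buckets(...))		steal from a bigger bucket;
 *	else if (needed <= sbrked_remains)		use the sbrk()ed tail;
 *	else						getpages() -> sbrk().
 */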
135863df |
411 | /* I don't much care whether these are defined in sys/types.h--LAW */ |
412 | |
413 | #define u_char unsigned char |
414 | #define u_int unsigned int |
56431972 |
415 | /* |
416 | * I removed the definition of u_bigint which appeared to be u_bigint = UV |
417 | * u_bigint was only used in TWOK_MASKED and TWOK_SHIFT |
418 | * where I have used PTR2UV. RMB |
419 | */ |
135863df |
420 | #define u_short unsigned short |
8d063cd8 |
421 | |
cf5c4ad8 |
422 | /* 286 and atarist like big chunks, which gives too much overhead. */ |
61ae2fbf |
423 | #if (defined(RCHECK) || defined(I286) || defined(atarist) || defined(__MINT__)) && defined(PACK_MALLOC) |
e8bc2b5c |
424 | # undef PACK_MALLOC |
cf5c4ad8 |
425 | #endif |
426 | |
8d063cd8 |
427 | /* |
cf5c4ad8 |
428 | * The description below is applicable if PACK_MALLOC is not defined. |
429 | * |
8d063cd8 |
430 | * The overhead on a block is at least 4 bytes. When free, this space |
431 | * contains a pointer to the next free block, and the bottom two bits must |
432 | * be zero. When in use, the first byte is set to MAGIC, and the second |
433 | * byte is the size index. The remaining bytes are for alignment. |
434 | * If range checking is enabled and the size of the block fits |
435 | * in two bytes, then the top two bytes hold the size of the requested block |
436 | * plus the range checking words, and the header word MINUS ONE. |
437 | */ |
438 | union overhead { |
439 | union overhead *ov_next; /* when free */ |
85e6fe83 |
440 | #if MEM_ALIGNBYTES > 4 |
c623bd54 |
441 | double strut; /* alignment problems */ |
a687059c |
442 | #endif |
8d063cd8 |
443 | struct { |
8d063cd8 |
444 | u_char ovu_index; /* bucket # */ |
61eff3bc |
445 | u_char ovu_magic; /* magic number */ |
8d063cd8 |
446 | #ifdef RCHECK |
447 | u_short ovu_size; /* actual block size */ |
448 | u_int ovu_rmagic; /* range magic number */ |
449 | #endif |
450 | } ovu; |
451 | #define ov_magic ovu.ovu_magic |
452 | #define ov_index ovu.ovu_index |
453 | #define ov_size ovu.ovu_size |
454 | #define ov_rmagic ovu.ovu_rmagic |
455 | }; |
456 | |
457 | #define MAGIC 0xff /* magic # on accounting info */ |
458 | #define RMAGIC 0x55555555 /* magic # on range info */ |
e8bc2b5c |
459 | #define RMAGIC_C 0x55 /* magic # on range info */ |
460 | |
8d063cd8 |
461 | #ifdef RCHECK |
c2a5c2d2 |
462 | # define RSLOP sizeof (u_int) |
463 | # ifdef TWO_POT_OPTIMIZE |
e8bc2b5c |
464 | # define MAX_SHORT_BUCKET (12 * BUCKETS_PER_POW2) |
c2a5c2d2 |
465 | # else |
e8bc2b5c |
466 | # define MAX_SHORT_BUCKET (13 * BUCKETS_PER_POW2) |
c2a5c2d2 |
467 | # endif |
8d063cd8 |
468 | #else |
c2a5c2d2 |
469 | # define RSLOP 0 |
8d063cd8 |
470 | #endif |
471 | |
e8bc2b5c |
472 | #if !defined(PACK_MALLOC) && defined(BUCKETS_ROOT2) |
473 | # undef BUCKETS_ROOT2 |
474 | #endif |
475 | |
476 | #ifdef BUCKETS_ROOT2 |
477 | # define BUCKET_TABLE_SHIFT 2 |
478 | # define BUCKET_POW2_SHIFT 1 |
479 | # define BUCKETS_PER_POW2 2 |
480 | #else |
481 | # define BUCKET_TABLE_SHIFT MIN_BUC_POW2 |
482 | # define BUCKET_POW2_SHIFT 0 |
483 | # define BUCKETS_PER_POW2 1 |
484 | #endif |
485 | |
274c7500 |
486 | #if !defined(MEM_ALIGNBYTES) || ((MEM_ALIGNBYTES > 4) && !defined(STRICT_ALIGNMENT)) |
487 | /* Figure out the alignment of void*. */ |
488 | struct aligner { |
489 | char c; |
490 | void *p; |
491 | }; |
492 | # define ALIGN_SMALL ((int)((caddr_t)&(((struct aligner*)0)->p))) |
493 | #else |
494 | # define ALIGN_SMALL MEM_ALIGNBYTES |
495 | #endif |
496 | |
497 | #define IF_ALIGN_8(yes,no) ((ALIGN_SMALL>4) ? (yes) : (no)) |
498 | |
e8bc2b5c |
499 | #ifdef BUCKETS_ROOT2 |
500 | # define MAX_BUCKET_BY_TABLE 13 |
501 | static u_short buck_size[MAX_BUCKET_BY_TABLE + 1] = |
502 | { |
503 | 0, 0, 0, 0, 4, 4, 8, 12, 16, 24, 32, 48, 64, 80, |
504 | }; |
505 | # define BUCKET_SIZE(i) ((i) % 2 ? buck_size[i] : (1 << ((i) >> BUCKET_POW2_SHIFT))) |
506 | # define BUCKET_SIZE_REAL(i) ((i) <= MAX_BUCKET_BY_TABLE \ |
507 | ? buck_size[i] \ |
508 | : ((1 << ((i) >> BUCKET_POW2_SHIFT)) \ |
509 | - MEM_OVERHEAD(i) \ |
510 | + POW2_OPTIMIZE_SURPLUS(i))) |
511 | #else |
512 | # define BUCKET_SIZE(i) (1 << ((i) >> BUCKET_POW2_SHIFT)) |
513 | # define BUCKET_SIZE_REAL(i) (BUCKET_SIZE(i) - MEM_OVERHEAD(i) + POW2_OPTIMIZE_SURPLUS(i)) |
514 | #endif |
515 | |
516 | |
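/*
 * Worked example (not part of the original source), assuming the
 * !BUCKETS_ROOT2 branch above so that BUCKET_POW2_SHIFT is 0: for the
 * bucket of ordinal i == 10, BUCKET_SIZE(10) is 1 << 10 == 1024, and
 * BUCKET_SIZE_REAL(10) is 1024 - MEM_OVERHEAD(10) (the
 * POW2_OPTIMIZE_SURPLUS page is only added for the really big buckets,
 * so it is 0 here), i.e. the largest request this bucket can still serve.
 */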
cf5c4ad8 |
517 | #ifdef PACK_MALLOC |
4ad56ec9 |
518 | /* In this case there are several possible layouts of arenas depending |
519 | * on the size. Arenas are of sizes that are multiples of 2K, 2K-aligned, and |
520 | * have a size close to a power of 2. |
521 | * |
522 | * Arenas of size >= 4K keep one chunk only. Arenas of size 2K |
523 | * may keep one chunk or multiple chunks. Here are the possible |
524 | * layouts of arenas: |
525 | * |
526 | * # One chunk only, chunksize 2^k + SOMETHING - ALIGN, k >= 11 |
527 | * |
528 | * INDEX MAGIC1 UNUSED CHUNK1 |
529 | * |
530 | * # Multichunk with sanity checking and chunksize 2^k-ALIGN, k>7 |
531 | * |
532 | * INDEX MAGIC1 MAGIC2 MAGIC3 UNUSED CHUNK1 CHUNK2 CHUNK3 ... |
533 | * |
534 | * # Multichunk with sanity checking and size 2^k-ALIGN, k=7 |
535 | * |
536 | * INDEX MAGIC1 MAGIC2 MAGIC3 UNUSED CHUNK1 UNUSED CHUNK2 CHUNK3 ... |
537 | * |
538 | * # Multichunk with sanity checking and size up to 80 |
539 | * |
540 | * INDEX UNUSED MAGIC1 UNUSED MAGIC2 UNUSED ... CHUNK1 CHUNK2 CHUNK3 ... |
541 | * |
542 | * # No sanity check (usually up to 48-byte-long buckets) |
543 | * INDEX UNUSED CHUNK1 CHUNK2 ... |
544 | * |
545 | * Above, INDEX and MAGIC are one byte long. Sizes of UNUSED are |
546 | * appropriate to keep algorithms simple and memory aligned. INDEX |
547 | * encodes the size of the chunk, while MAGICn encodes state (used, |
548 | * free or non-managed-by-us-so-it-indicates-a-bug) of CHUNKn. MAGIC |
549 | * is used for sanity checking purposes only. SOMETHING is 0 or 4K |
550 | * (to make size of big CHUNK accommodate allocations for powers of two |
551 | * better). |
552 | * |
553 | * [There is no need for alignment between chunks, since C rules ensure |
554 | * that structs which need 2^k alignment have a sizeof which is |
555 | * divisible by 2^k. Thus as long as the last chunk is aligned at the |
556 | * end of the arena, and 2K-alignment does not contradict things, |
557 | * everything is going to be OK for chunk sizes of 2^n and 2^n + |
558 | * 2^k. Say, 80-byte buckets will be 16-byte aligned, and as long as we |
559 | * put allocations for requests in the 65..80 range, all is fine. |
560 | * |
561 | * Note, however, that standard malloc() imposes stricter |
562 | * requirements than the above C rules. Moreover, our algorithms for |
563 | * realloc() may break this idyll, but we assume that realloc() |
564 | * need not change alignment.] |
565 | * |
566 | * It is very important to make the calculation of the offset of MAGICm |
567 | * as quick as possible, since it is done on each malloc()/free(). In |
568 | * fact it is so quick that it has very little effect on the speed of |
569 | * doing malloc()/free(). [By default] we forgo such calculations |
570 | * for small chunks, but only to save an extra 3% of memory, not because |
571 | * of speed considerations. |
572 | * |
573 | * Here is the algorithm [which is the same for all the allocations |
574 | * schemes above], see OV_MAGIC(block,bucket). Let OFFSETm be the |
575 | * offset of the CHUNKm from the start of ARENA. Then offset of |
576 | * MAGICm is (OFFSET1 >> SHIFT) + ADDOFFSET. Here SHIFT and ADDOFFSET |
577 | * are numbers which depend on the size of the chunks only. |
578 | * |
579 | * Let us check some sanity conditions. Numbers OFFSETm>>SHIFT are |
580 | * different for all the chunks in the arena if 2^SHIFT is not greater |
581 | * than size of the chunks in the arena. MAGIC1 will not overwrite |
582 | * INDEX provided ADDOFFSET is >0 if OFFSET1 < 2^SHIFT. MAGIClast |
583 | * will not overwrite CHUNK1 if OFFSET1 > (OFFSETlast >> SHIFT) + |
584 | * ADDOFFSET. |
585 | * |
586 | * Make SHIFT the maximal possible (there is no point in making it |
587 | * smaller). Since OFFSETlast is 2K - CHUNKSIZE, above restrictions |
588 | * give restrictions on OFFSET1 and on ADDOFFSET. |
589 | * |
590 | * In particular, for chunks of size 2^k with k>=6 we can put |
591 | * ADDOFFSET to be from 0 to 2^k - 2^(11-k), and have |
592 | * OFFSET1==chunksize. For chunks of size 80 OFFSET1 of 2K%80=48 is |
593 | * large enough to have ADDOFFSET between 1 and 16 (similarly for 96, |
594 | * when ADDOFFSET should be 1). In particular, keeping MAGICs for |
595 | * these sizes gives no additional size penalty. |
596 | * |
597 | * However, for chunks of size 2^k with k<=5 this gives OFFSET1 >= |
598 | * ADDOFFSET + 2^(11-k). Keeping ADDOFFSET 0 allows for 2^(11-k)-2^(11-2k) |
599 | * chunks per arena. This is smaller than 2^(11-k) - 1 which are |
600 | * needed if no MAGIC is kept. [In fact, having a negative ADDOFFSET |
601 | * would allow for slightly more buckets per arena for k=2,3.] |
602 | * |
603 | * Similarly, for chunks of size 3/2*2^k with k<=5 MAGICs would span |
604 | * the area up to 2^(11-k)+ADDOFFSET. For k=4 this gives optimal |
605 | * ADDOFFSET as -7..0. For k=3 ADDOFFSET can go up to 4 (with tiny |
606 | * savings for negative ADDOFFSET). For k=5 ADDOFFSET can go -1..16 |
607 | * (with no savings for negative values). |
cf5c4ad8 |
608 | * |
4ad56ec9 |
609 | * In particular, keeping ADDOFFSET 0 for sizes of chunks up to 2^6 |
610 | * leads to tiny pessimizations in case of sizes 4, 8, 12, 24, and |
611 | * leads to no contradictions except for size=80 (or 96.) |
cf5c4ad8 |
612 | * |
4ad56ec9 |
613 | * However, it also makes sense to keep no magic for sizes 48 or less. |
614 | * This is what we do. In this case one needs ADDOFFSET>=1 also for |
615 | * chunksizes 12, 24, and 48, unless one gets one less chunk per |
616 | * arena. |
617 | * |
618 | * The algo of OV_MAGIC(block,bucket) keeps ADDOFFSET 0 until |
619 | * chunksize of 64, then makes it 1. |
cf5c4ad8 |
620 | * |
4ad56ec9 |
621 | * This allows for an additional optimization: the above scheme leads |
622 | * to giant overheads for sizes 128 or more (one whole chunk needs to |
623 | * be sacrificed to keep INDEX). Instead we use chunks not of size |
624 | * 2^k, but of size 2^k-ALIGN. If we pack these chunks at the end of |
625 | * the arena, then the beginnings are still in different 2^k-long |
626 | * sections of the arena if k>=7 for ALIGN==4, and k>=8 if ALIGN=8. |
627 | * Thus for k>7 the above algo of calculating the offset of the magic |
628 | * will still give different answers for different chunks. And to |
629 | * avoid the overrun of MAGIC1 into INDEX, one needs ADDOFFSET of >=1. |
630 | * In the case k=7 we just move the first chunk an extra ALIGN |
631 | * backward inside the ARENA (this is done once per arena lifetime, |
632 | * thus is not a big overhead). */ |
e8bc2b5c |
633 | # define MAX_PACKED_POW2 6 |
634 | # define MAX_PACKED (MAX_PACKED_POW2 * BUCKETS_PER_POW2 + BUCKET_POW2_SHIFT) |
635 | # define MAX_POW2_ALGO ((1<<(MAX_PACKED_POW2 + 1)) - M_OVERHEAD) |
636 | # define TWOK_MASK ((1<<LOG_OF_MIN_ARENA) - 1) |
56431972 |
637 | # define TWOK_MASKED(x) (PTR2UV(x) & ~TWOK_MASK) |
638 | # define TWOK_SHIFT(x) (PTR2UV(x) & TWOK_MASK) |
639 | # define OV_INDEXp(block) (INT2PTR(u_char*,TWOK_MASKED(block))) |
cf5c4ad8 |
640 | # define OV_INDEX(block) (*OV_INDEXp(block)) |
641 | # define OV_MAGIC(block,bucket) (*(OV_INDEXp(block) + \ |
e8bc2b5c |
642 | (TWOK_SHIFT(block)>> \ |
643 | (bucket>>BUCKET_POW2_SHIFT)) + \ |
644 | (bucket >= MIN_NEEDS_SHIFT ? 1 : 0))) |
645 | /* A bucket can have a shift smaller than its size; we need to |
646 | shift its magic number so it will not overwrite the index: */ |
647 | # ifdef BUCKETS_ROOT2 |
648 | # define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2 - 1) /* Shift 80 greater than chunk 64. */ |
649 | # else |
650 | # define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2) /* Shift 128 greater than chunk 32. */ |
651 | # endif |
cf5c4ad8 |
652 | # define CHUNK_SHIFT 0 |
653 | |
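/*
 * Worked example (not part of the original source), assuming
 * LOG_OF_MIN_ARENA == 11 and a hypothetical chunk sitting at address
 * 0x12345678: TWOK_MASK is 0x7ff, so TWOK_MASKED() gives 0x12345000,
 * the 2K-aligned start of the arena (where the INDEX byte lives, see
 * OV_INDEXp()), and TWOK_SHIFT() gives 0x678, the chunk's offset within
 * the arena, from which OV_MAGIC() derives the position of its MAGIC
 * byte.
 */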
e8bc2b5c |
654 | /* Number of active buckets of given ordinal. */ |
655 | #ifdef IGNORE_SMALL_BAD_FREE |
656 | #define FIRST_BUCKET_WITH_CHECK (6 * BUCKETS_PER_POW2) /* 64 */ |
657 | # define N_BLKS(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK \ |
658 | ? ((1<<LOG_OF_MIN_ARENA) - 1)/BUCKET_SIZE(bucket) \ |
659 | : n_blks[bucket] ) |
660 | #else |
661 | # define N_BLKS(bucket) n_blks[bucket] |
662 | #endif |
663 | |
664 | static u_short n_blks[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] = |
665 | { |
666 | # if BUCKETS_PER_POW2==1 |
667 | 0, 0, |
668 | (MIN_BUC_POW2==2 ? 384 : 0), |
669 | 224, 120, 62, 31, 16, 8, 4, 2 |
670 | # else |
671 | 0, 0, 0, 0, |
672 | (MIN_BUC_POW2==2 ? 384 : 0), (MIN_BUC_POW2==2 ? 384 : 0), /* 4, 4 */ |
673 | 224, 149, 120, 80, 62, 41, 31, 25, 16, 16, 8, 8, 4, 4, 2, 2 |
674 | # endif |
675 | }; |
676 | |
677 | /* Shift of the first bucket with the given ordinal inside 2K chunk. */ |
678 | #ifdef IGNORE_SMALL_BAD_FREE |
679 | # define BLK_SHIFT(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK \ |
680 | ? ((1<<LOG_OF_MIN_ARENA) \ |
681 | - BUCKET_SIZE(bucket) * N_BLKS(bucket)) \ |
682 | : blk_shift[bucket]) |
683 | #else |
684 | # define BLK_SHIFT(bucket) blk_shift[bucket] |
685 | #endif |
686 | |
687 | static u_short blk_shift[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] = |
688 | { |
689 | # if BUCKETS_PER_POW2==1 |
690 | 0, 0, |
691 | (MIN_BUC_POW2==2 ? 512 : 0), |
692 | 256, 128, 64, 64, /* 8 to 64 */ |
693 | 16*sizeof(union overhead), |
694 | 8*sizeof(union overhead), |
695 | 4*sizeof(union overhead), |
696 | 2*sizeof(union overhead), |
697 | # else |
698 | 0, 0, 0, 0, |
699 | (MIN_BUC_POW2==2 ? 512 : 0), (MIN_BUC_POW2==2 ? 512 : 0), |
700 | 256, 260, 128, 128, 64, 80, 64, 48, /* 8 to 96 */ |
701 | 16*sizeof(union overhead), 16*sizeof(union overhead), |
702 | 8*sizeof(union overhead), 8*sizeof(union overhead), |
703 | 4*sizeof(union overhead), 4*sizeof(union overhead), |
704 | 2*sizeof(union overhead), 2*sizeof(union overhead), |
705 | # endif |
706 | }; |
cf5c4ad8 |
707 | |
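/*
 * Worked example (not part of the original source) of how the two tables
 * above describe one 2K arena: for the bucket of 64-byte chunks,
 * N_BLKS() is 31 and BLK_SHIFT() is 64, so the 31 chunks start 64 bytes
 * into the arena (31*64 + 64 == 2048) and the leading 64 bytes hold the
 * INDEX byte and the per-chunk MAGIC bytes.  Likewise the 80-byte bucket
 * (BUCKETS_ROOT2 only) packs 25 chunks after a 48-byte header area
 * (25*80 + 48 == 2048).
 */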
5bbd1ef5 |
708 | # define NEEDED_ALIGNMENT 0x800 /* 2k boundaries */ |
709 | # define WANTED_ALIGNMENT 0x800 /* 2k boundaries */ |
710 | |
cf5c4ad8 |
711 | #else /* !PACK_MALLOC */ |
712 | |
713 | # define OV_MAGIC(block,bucket) (block)->ov_magic |
714 | # define OV_INDEX(block) (block)->ov_index |
715 | # define CHUNK_SHIFT 1 |
e8bc2b5c |
716 | # define MAX_PACKED -1 |
5bbd1ef5 |
717 | # define NEEDED_ALIGNMENT MEM_ALIGNBYTES |
718 | # define WANTED_ALIGNMENT 0x400 /* 1k boundaries */ |
719 | |
cf5c4ad8 |
720 | #endif /* !PACK_MALLOC */ |
721 | |
e8bc2b5c |
722 | #define M_OVERHEAD (sizeof(union overhead) + RSLOP) |
723 | |
724 | #ifdef PACK_MALLOC |
725 | # define MEM_OVERHEAD(bucket) \ |
726 | (bucket <= MAX_PACKED ? 0 : M_OVERHEAD) |
727 | # ifdef SMALL_BUCKET_VIA_TABLE |
728 | # define START_SHIFTS_BUCKET ((MAX_PACKED_POW2 + 1) * BUCKETS_PER_POW2) |
729 | # define START_SHIFT MAX_PACKED_POW2 |
730 | # ifdef BUCKETS_ROOT2 /* Chunks of size 3*2^n. */ |
731 | # define SIZE_TABLE_MAX 80 |
732 | # else |
733 | # define SIZE_TABLE_MAX 64 |
734 | # endif |
735 | static char bucket_of[] = |
736 | { |
737 | # ifdef BUCKETS_ROOT2 /* Chunks of size 3*2^n. */ |
738 | /* 0 to 15 in 4-byte increments. */ |
739 | (sizeof(void*) > 4 ? 6 : 5), /* 4/8, 5-th bucket for better reports */ |
740 | 6, /* 8 */ |
274c7500 |
741 | IF_ALIGN_8(8,7), 8, /* 16/12, 16 */ |
e8bc2b5c |
742 | 9, 9, 10, 10, /* 24, 32 */ |
743 | 11, 11, 11, 11, /* 48 */ |
744 | 12, 12, 12, 12, /* 64 */ |
745 | 13, 13, 13, 13, /* 80 */ |
746 | 13, 13, 13, 13 /* 80 */ |
747 | # else /* !BUCKETS_ROOT2 */ |
748 | /* 0 to 15 in 4-byte increments. */ |
749 | (sizeof(void*) > 4 ? 3 : 2), |
750 | 3, |
751 | 4, 4, |
752 | 5, 5, 5, 5, |
753 | 6, 6, 6, 6, |
754 | 6, 6, 6, 6 |
755 | # endif /* !BUCKETS_ROOT2 */ |
756 | }; |
757 | # else /* !SMALL_BUCKET_VIA_TABLE */ |
758 | # define START_SHIFTS_BUCKET MIN_BUCKET |
759 | # define START_SHIFT (MIN_BUC_POW2 - 1) |
760 | # endif /* !SMALL_BUCKET_VIA_TABLE */ |
761 | #else /* !PACK_MALLOC */ |
762 | # define MEM_OVERHEAD(bucket) M_OVERHEAD |
763 | # ifdef SMALL_BUCKET_VIA_TABLE |
764 | # undef SMALL_BUCKET_VIA_TABLE |
765 | # endif |
766 | # define START_SHIFTS_BUCKET MIN_BUCKET |
767 | # define START_SHIFT (MIN_BUC_POW2 - 1) |
768 | #endif /* !PACK_MALLOC */ |
cf5c4ad8 |
769 | |
8d063cd8 |
770 | /* |
55497cff |
771 | * Big allocations are often of size 2^n bytes. To handle them a |
772 | * little better, make blocks of size 2^n+pagesize for big n. |
773 | */ |
774 | |
775 | #ifdef TWO_POT_OPTIMIZE |
776 | |
5f05dabc |
777 | # ifndef PERL_PAGESIZE |
778 | # define PERL_PAGESIZE 4096 |
779 | # endif |
e8bc2b5c |
780 | # ifndef FIRST_BIG_POW2 |
781 | # define FIRST_BIG_POW2 15 /* 32K, 16K is used too often. */ |
5f05dabc |
782 | # endif |
e8bc2b5c |
783 | # define FIRST_BIG_BLOCK (1<<FIRST_BIG_POW2) |
55497cff |
784 | /* If this value or more, check against bigger blocks. */ |
785 | # define FIRST_BIG_BOUND (FIRST_BIG_BLOCK - M_OVERHEAD) |
786 | /* If less than this value, goes into 2^n-overhead-block. */ |
787 | # define LAST_SMALL_BOUND ((FIRST_BIG_BLOCK>>1) - M_OVERHEAD) |
788 | |
e8bc2b5c |
789 | # define POW2_OPTIMIZE_ADJUST(nbytes) \ |
790 | ((nbytes >= FIRST_BIG_BOUND) ? nbytes -= PERL_PAGESIZE : 0) |
791 | # define POW2_OPTIMIZE_SURPLUS(bucket) \ |
792 | ((bucket >= FIRST_BIG_POW2 * BUCKETS_PER_POW2) ? PERL_PAGESIZE : 0) |
793 | |
794 | #else /* !TWO_POT_OPTIMIZE */ |
795 | # define POW2_OPTIMIZE_ADJUST(nbytes) |
796 | # define POW2_OPTIMIZE_SURPLUS(bucket) 0 |
797 | #endif /* !TWO_POT_OPTIMIZE */ |
798 | |
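/*
 * Worked example (not part of the original source), assuming the default
 * PERL_PAGESIZE of 4096 and an M_OVERHEAD of a few bytes: a request for
 * exactly 32768 bytes is >= FIRST_BIG_BOUND, so POW2_OPTIMIZE_ADJUST()
 * first shrinks it to 28672; after the overhead is added it still fits
 * the 32K bucket, whose arena is grown by POW2_OPTIMIZE_SURPLUS() ==
 * PERL_PAGESIZE, so the caller's 32768 bytes fit in a 32K+4K block
 * instead of forcing the 64K bucket and wasting nearly half of it.
 */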
799 | #if defined(HAS_64K_LIMIT) && defined(PERL_CORE) |
800 | # define BARK_64K_LIMIT(what,nbytes,size) \ |
801 | if (nbytes > 0xffff) { \ |
802 | PerlIO_printf(PerlIO_stderr(), \ |
803 | "%s too large: %lx\n", what, size); \ |
804 | my_exit(1); \ |
805 | } |
806 | #else /* !HAS_64K_LIMIT || !PERL_CORE */ |
807 | # define BARK_64K_LIMIT(what,nbytes,size) |
808 | #endif /* !HAS_64K_LIMIT || !PERL_CORE */ |
55497cff |
809 | |
e8bc2b5c |
810 | #ifndef MIN_SBRK |
811 | # define MIN_SBRK 2048 |
812 | #endif |
813 | |
814 | #ifndef FIRST_SBRK |
d720c441 |
815 | # define FIRST_SBRK (48*1024) |
e8bc2b5c |
816 | #endif |
817 | |
818 | /* Minimal sbrk in percents of what is already alloced. */ |
819 | #ifndef MIN_SBRK_FRAC |
820 | # define MIN_SBRK_FRAC 3 |
821 | #endif |
822 | |
823 | #ifndef SBRK_ALLOW_FAILURES |
824 | # define SBRK_ALLOW_FAILURES 3 |
825 | #endif |
55497cff |
826 | |
e8bc2b5c |
827 | #ifndef SBRK_FAILURE_PRICE |
828 | # define SBRK_FAILURE_PRICE 50 |
55497cff |
829 | #endif |
830 | |
24dd13bf |
831 | static void morecore (register int bucket); |
832 | # if defined(DEBUGGING) |
833 | static void botch (char *diag, char *s); |
834 | # endif |
835 | static void add_to_chain (void *p, MEM_SIZE size, MEM_SIZE chip); |
836 | static void* get_from_chain (MEM_SIZE size); |
837 | static void* get_from_bigger_buckets(int bucket, MEM_SIZE size); |
838 | static union overhead *getpages (MEM_SIZE needed, int *nblksp, int bucket); |
839 | static int getpages_adjacent(MEM_SIZE require); |
840 | |
e8bc2b5c |
841 | #if defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE) |
842 | |
843 | # ifndef BIG_SIZE |
844 | # define BIG_SIZE (1<<16) /* 64K */ |
845 | # endif |
846 | |
3541dd58 |
847 | #ifdef I_MACH_CTHREADS |
772fe5b3 |
848 | # undef MUTEX_LOCK |
849 | # define MUTEX_LOCK(m) STMT_START { if (*m) mutex_lock(*m); } STMT_END |
850 | # undef MUTEX_UNLOCK |
851 | # define MUTEX_UNLOCK(m) STMT_START { if (*m) mutex_unlock(*m); } STMT_END |
3541dd58 |
852 | #endif |
853 | |
55497cff |
854 | static char *emergency_buffer; |
855 | static MEM_SIZE emergency_buffer_size; |
856 | |
cea2e8a9 |
857 | static Malloc_t |
858 | emergency_sbrk(MEM_SIZE size) |
55497cff |
859 | { |
28ac10b1 |
860 | MEM_SIZE rsize = (((size - 1)>>LOG_OF_MIN_ARENA) + 1)<<LOG_OF_MIN_ARENA; |
861 | |
55497cff |
862 | if (size >= BIG_SIZE) { |
863 | /* Give the possibility to recover: */ |
741df71a |
864 | MALLOC_UNLOCK; |
1b979e0a |
865 | croak("Out of memory during \"large\" request for %i bytes", size); |
55497cff |
866 | } |
867 | |
28ac10b1 |
868 | if (emergency_buffer_size >= rsize) { |
869 | char *old = emergency_buffer; |
870 | |
871 | emergency_buffer_size -= rsize; |
872 | emergency_buffer += rsize; |
873 | return old; |
874 | } else { |
cea2e8a9 |
875 | dTHX; |
55497cff |
876 | /* First offense, give a possibility to recover by dying. */ |
877 | /* No malloc involved here: */ |
4a33f861 |
878 | GV **gvp = (GV**)hv_fetch(PL_defstash, "^M", 2, 0); |
55497cff |
879 | SV *sv; |
880 | char *pv; |
28ac10b1 |
881 | int have = 0; |
2d8e6c8d |
882 | STRLEN n_a; |
55497cff |
883 | |
28ac10b1 |
884 | if (emergency_buffer_size) { |
885 | add_to_chain(emergency_buffer, emergency_buffer_size, 0); |
886 | emergency_buffer_size = 0; |
887 | emergency_buffer = Nullch; |
888 | have = 1; |
889 | } |
4a33f861 |
890 | if (!gvp) gvp = (GV**)hv_fetch(PL_defstash, "\015", 1, 0); |
55497cff |
891 | if (!gvp || !(sv = GvSV(*gvp)) || !SvPOK(sv) |
28ac10b1 |
892 | || (SvLEN(sv) < (1<<LOG_OF_MIN_ARENA) - M_OVERHEAD)) { |
893 | if (have) |
894 | goto do_croak; |
55497cff |
895 | return (char *)-1; /* Now die die die... */ |
28ac10b1 |
896 | } |
55497cff |
897 | /* Got it, now detach SvPV: */ |
2d8e6c8d |
898 | pv = SvPV(sv, n_a); |
55497cff |
899 | /* Check alignment: */ |
56431972 |
900 | if ((PTR2UV(pv) - sizeof(union overhead)) & (NEEDED_ALIGNMENT - 1)) { |
55497cff |
901 | PerlIO_puts(PerlIO_stderr(),"Bad alignment of $^M!\n"); |
bbce6d69 |
902 | return (char *)-1; /* die die die */ |
55497cff |
903 | } |
904 | |
28ac10b1 |
905 | emergency_buffer = pv - sizeof(union overhead); |
906 | emergency_buffer_size = malloced_size(pv) + M_OVERHEAD; |
55497cff |
907 | SvPOK_off(sv); |
28ac10b1 |
908 | SvPVX(sv) = Nullch; |
909 | SvCUR(sv) = SvLEN(sv) = 0; |
55497cff |
910 | } |
28ac10b1 |
911 | do_croak: |
741df71a |
912 | MALLOC_UNLOCK; |
28ac10b1 |
913 | croak("Out of memory during request for %i bytes", size); |
ce70748c |
914 | /* NOTREACHED */ |
915 | return Nullch; |
55497cff |
916 | } |
917 | |
e8bc2b5c |
918 | #else /* !(defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)) */ |
55497cff |
919 | # define emergency_sbrk(size) -1 |
e8bc2b5c |
920 | #endif /* !(defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)) */ |
55497cff |
921 | |
c7374474 |
922 | #ifndef BITS_IN_PTR |
923 | # define BITS_IN_PTR (8*PTRSIZE) |
924 | #endif |
925 | |
55497cff |
926 | /* |
e8bc2b5c |
927 | * nextf[i] is the pointer to the next free block of size 2^i. The |
8d063cd8 |
928 | * smallest allocatable block is 8 bytes. The overhead information |
929 | * precedes the data area returned to the user. |
930 | */ |
c7374474 |
931 | #define NBUCKETS (BITS_IN_PTR*BUCKETS_PER_POW2 + 1) |
8d063cd8 |
932 | static union overhead *nextf[NBUCKETS]; |
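/* For example (not part of the original source): with PTRSIZE 8, hence
   BITS_IN_PTR 64, NBUCKETS is 65, or 129 when BUCKETS_ROOT2 doubles
   BUCKETS_PER_POW2. */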
cf5c4ad8 |
933 | |
e3663bad |
934 | #if defined(PURIFY) && !defined(USE_PERL_SBRK) |
935 | # define USE_PERL_SBRK |
936 | #endif |
937 | |
cf5c4ad8 |
938 | #ifdef USE_PERL_SBRK |
939 | #define sbrk(a) Perl_sbrk(a) |
20ce7b12 |
940 | Malloc_t Perl_sbrk (int size); |
8ac85365 |
941 | #else |
942 | #ifdef DONT_DECLARE_STD |
943 | #ifdef I_UNISTD |
944 | #include <unistd.h> |
945 | #endif |
cf5c4ad8 |
946 | #else |
52082926 |
947 | extern Malloc_t sbrk(int); |
8ac85365 |
948 | #endif |
cf5c4ad8 |
949 | #endif |
8d063cd8 |
950 | |
c07a80fd |
951 | #ifdef DEBUGGING_MSTATS |
8d063cd8 |
952 | /* |
953 | * nmalloc[i] is the difference between the number of mallocs and frees |
954 | * for a given block size. |
955 | */ |
956 | static u_int nmalloc[NBUCKETS]; |
5f05dabc |
957 | static u_int sbrk_slack; |
958 | static u_int start_slack; |
8d063cd8 |
959 | #endif |
960 | |
e8bc2b5c |
961 | static u_int goodsbrk; |
962 | |
760ac839 |
963 | #ifdef DEBUGGING |
3541dd58 |
964 | #undef ASSERT |
965 | #define ASSERT(p,diag) if (!(p)) botch(diag,STRINGIFY(p)); else |
cea2e8a9 |
966 | static void |
967 | botch(char *diag, char *s) |
8d063cd8 |
968 | { |
e8cd8248 |
969 | dTHX; |
d720c441 |
970 | PerlIO_printf(PerlIO_stderr(), "assertion botched (%s?): %s\n", diag, s); |
3028581b |
971 | PerlProc_abort(); |
8d063cd8 |
972 | } |
973 | #else |
3541dd58 |
974 | #define ASSERT(p, diag) |
8d063cd8 |
975 | #endif |
976 | |
2304df62 |
977 | Malloc_t |
86058a2d |
978 | Perl_malloc(register size_t nbytes) |
8d063cd8 |
979 | { |
980 | register union overhead *p; |
e8bc2b5c |
981 | register int bucket; |
ee0007ab |
982 | register MEM_SIZE shiftr; |
8d063cd8 |
983 | |
c2a5c2d2 |
984 | #if defined(DEBUGGING) || defined(RCHECK) |
ee0007ab |
985 | MEM_SIZE size = nbytes; |
45d8adaa |
986 | #endif |
987 | |
e8bc2b5c |
988 | BARK_64K_LIMIT("Allocation",nbytes,nbytes); |
45d8adaa |
989 | #ifdef DEBUGGING |
990 | if ((long)nbytes < 0) |
cea2e8a9 |
991 | croak("%s", "panic: malloc"); |
45d8adaa |
992 | #endif |
45d8adaa |
993 | |
8d063cd8 |
994 | /* |
995 | * Convert amount of memory requested into |
996 | * closest block size stored in hash buckets |
997 | * which satisfies request. Account for |
998 | * space used per block for accounting. |
999 | */ |
cf5c4ad8 |
1000 | #ifdef PACK_MALLOC |
e8bc2b5c |
1001 | # ifdef SMALL_BUCKET_VIA_TABLE |
1002 | if (nbytes == 0) |
1003 | bucket = MIN_BUCKET; |
1004 | else if (nbytes <= SIZE_TABLE_MAX) { |
1005 | bucket = bucket_of[(nbytes - 1) >> BUCKET_TABLE_SHIFT]; |
1006 | } else |
1007 | # else |
043bf814 |
1008 | if (nbytes == 0) |
1009 | nbytes = 1; |
e8bc2b5c |
1010 | if (nbytes <= MAX_POW2_ALGO) goto do_shifts; |
1011 | else |
1012 | # endif |
55497cff |
1013 | #endif |
e8bc2b5c |
1014 | { |
1015 | POW2_OPTIMIZE_ADJUST(nbytes); |
1016 | nbytes += M_OVERHEAD; |
1017 | nbytes = (nbytes + 3) &~ 3; |
1018 | do_shifts: |
1019 | shiftr = (nbytes - 1) >> START_SHIFT; |
1020 | bucket = START_SHIFTS_BUCKET; |
1021 | /* apart from this loop, this is O(1) */ |
1022 | while (shiftr >>= 1) |
1023 | bucket += BUCKETS_PER_POW2; |
cf5c4ad8 |
1024 | } |
4ad56ec9 |
1025 | MALLOC_LOCK; |
8d063cd8 |
1026 | /* |
1027 | * If nothing in hash bucket right now, |
1028 | * request more memory from the system. |
1029 | */ |
1030 | if (nextf[bucket] == NULL) |
1031 | morecore(bucket); |
e8bc2b5c |
1032 | if ((p = nextf[bucket]) == NULL) { |
741df71a |
1033 | MALLOC_UNLOCK; |
55497cff |
1034 | #ifdef PERL_CORE |
0b250b9e |
1035 | { |
1036 | dTHX; |
1037 | if (!PL_nomemok) { |
1038 | PerlIO_puts(PerlIO_stderr(),"Out of memory!\n"); |
1039 | my_exit(1); |
1040 | } |
ee0007ab |
1041 | } |
45d8adaa |
1042 | #endif |
4ad56ec9 |
1043 | return (NULL); |
45d8adaa |
1044 | } |
1045 | |
e8bc2b5c |
1046 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
b900a521 |
1047 | "0x%"UVxf": (%05lu) malloc %ld bytes\n", |
1048 | PTR2UV(p+1), (unsigned long)(PL_an++), |
e8bc2b5c |
1049 | (long)size)); |
45d8adaa |
1050 | |
8d063cd8 |
1051 | /* remove from linked list */ |
802004fa |
1052 | #if defined(RCHECK) |
32e30700 |
1053 | if ((PTR2UV(p)) & (MEM_ALIGNBYTES - 1)) { |
e8cd8248 |
1054 | dTHX; |
b900a521 |
1055 | PerlIO_printf(PerlIO_stderr(), |
7fa2f4f1 |
1056 | "Unaligned pointer in the free chain 0x%"UVxf"\n", |
1057 | PTR2UV(p)); |
1058 | } |
1059 | if ((PTR2UV(p->ov_next)) & (MEM_ALIGNBYTES - 1)) { |
e8cd8248 |
1060 | dTHX; |
7fa2f4f1 |
1061 | PerlIO_printf(PerlIO_stderr(), |
1062 | "Unaligned `next' pointer in the free " |
1063 | "chain 0x"UVxf" at 0x%"UVxf"\n", |
1064 | PTR2UV(p->ov_next), PTR2UV(p)); |
32e30700 |
1065 | } |
bf38876a |
1066 | #endif |
1067 | nextf[bucket] = p->ov_next; |
4ad56ec9 |
1068 | |
1069 | MALLOC_UNLOCK; |
1070 | |
e8bc2b5c |
1071 | #ifdef IGNORE_SMALL_BAD_FREE |
1072 | if (bucket >= FIRST_BUCKET_WITH_CHECK) |
1073 | #endif |
1074 | OV_MAGIC(p, bucket) = MAGIC; |
cf5c4ad8 |
1075 | #ifndef PACK_MALLOC |
1076 | OV_INDEX(p) = bucket; |
1077 | #endif |
8d063cd8 |
1078 | #ifdef RCHECK |
1079 | /* |
1080 | * Record allocated size of block and |
1081 | * bound space with magic numbers. |
1082 | */ |
8d063cd8 |
1083 | p->ov_rmagic = RMAGIC; |
e8bc2b5c |
1084 | if (bucket <= MAX_SHORT_BUCKET) { |
1085 | int i; |
1086 | |
1087 | nbytes = size + M_OVERHEAD; |
1088 | p->ov_size = nbytes - 1; |
1089 | if ((i = nbytes & 3)) { |
1090 | i = 4 - i; |
1091 | while (i--) |
1092 | *((char *)((caddr_t)p + nbytes - RSLOP + i)) = RMAGIC_C; |
1093 | } |
1094 | nbytes = (nbytes + 3) &~ 3; |
1095 | *((u_int *)((caddr_t)p + nbytes - RSLOP)) = RMAGIC; |
1096 | } |
8d063cd8 |
1097 | #endif |
cf5c4ad8 |
1098 | return ((Malloc_t)(p + CHUNK_SHIFT)); |
8d063cd8 |
1099 | } |
1100 | |
e8bc2b5c |
1101 | static char *last_sbrk_top; |
1102 | static char *last_op; /* This arena can be easily extended. */ |
1103 | static int sbrked_remains; |
1104 | static int sbrk_good = SBRK_ALLOW_FAILURES * SBRK_FAILURE_PRICE; |
1105 | |
1106 | #ifdef DEBUGGING_MSTATS |
1107 | static int sbrks; |
1108 | #endif |
1109 | |
1110 | struct chunk_chain_s { |
1111 | struct chunk_chain_s *next; |
1112 | MEM_SIZE size; |
1113 | }; |
1114 | static struct chunk_chain_s *chunk_chain; |
1115 | static int n_chunks; |
1116 | static char max_bucket; |
1117 | |
1118 | /* Cutoff a piece of one of the chunks in the chain. Prefer smaller chunk. */ |
cea2e8a9 |
1119 | static void * |
1120 | get_from_chain(MEM_SIZE size) |
e8bc2b5c |
1121 | { |
1122 | struct chunk_chain_s *elt = chunk_chain, **oldp = &chunk_chain; |
1123 | struct chunk_chain_s **oldgoodp = NULL; |
1124 | long min_remain = LONG_MAX; |
1125 | |
1126 | while (elt) { |
1127 | if (elt->size >= size) { |
1128 | long remains = elt->size - size; |
1129 | if (remains >= 0 && remains < min_remain) { |
1130 | oldgoodp = oldp; |
1131 | min_remain = remains; |
1132 | } |
1133 | if (remains == 0) { |
1134 | break; |
1135 | } |
1136 | } |
1137 | oldp = &( elt->next ); |
1138 | elt = elt->next; |
1139 | } |
1140 | if (!oldgoodp) return NULL; |
1141 | if (min_remain) { |
1142 | void *ret = *oldgoodp; |
1143 | struct chunk_chain_s *next = (*oldgoodp)->next; |
1144 | |
1145 | *oldgoodp = (struct chunk_chain_s *)((char*)ret + size); |
1146 | (*oldgoodp)->size = min_remain; |
1147 | (*oldgoodp)->next = next; |
1148 | return ret; |
1149 | } else { |
1150 | void *ret = *oldgoodp; |
1151 | *oldgoodp = (*oldgoodp)->next; |
1152 | n_chunks--; |
1153 | return ret; |
1154 | } |
1155 | } |
1156 | |
cea2e8a9 |
1157 | static void |
1158 | add_to_chain(void *p, MEM_SIZE size, MEM_SIZE chip) |
e8bc2b5c |
1159 | { |
1160 | struct chunk_chain_s *next = chunk_chain; |
1161 | char *cp = (char*)p; |
1162 | |
1163 | cp += chip; |
1164 | chunk_chain = (struct chunk_chain_s *)cp; |
1165 | chunk_chain->size = size - chip; |
1166 | chunk_chain->next = next; |
1167 | n_chunks++; |
1168 | } |
1169 | |
cea2e8a9 |
1170 | static void * |
1171 | get_from_bigger_buckets(int bucket, MEM_SIZE size) |
e8bc2b5c |
1172 | { |
1173 | int price = 1; |
1174 | static int bucketprice[NBUCKETS]; |
1175 | while (bucket <= max_bucket) { |
1176 | /* We postpone stealing from bigger buckets until we want it |
1177 | often enough. */ |
1178 | if (nextf[bucket] && bucketprice[bucket]++ >= price) { |
1179 | /* Steal it! */ |
1180 | void *ret = (void*)(nextf[bucket] - 1 + CHUNK_SHIFT); |
1181 | bucketprice[bucket] = 0; |
1182 | if (((char*)nextf[bucket]) - M_OVERHEAD == last_op) { |
1183 | last_op = NULL; /* Disable optimization */ |
1184 | } |
1185 | nextf[bucket] = nextf[bucket]->ov_next; |
1186 | #ifdef DEBUGGING_MSTATS |
1187 | nmalloc[bucket]--; |
1188 | start_slack -= M_OVERHEAD; |
1189 | #endif |
1190 | add_to_chain(ret, (BUCKET_SIZE(bucket) + |
1191 | POW2_OPTIMIZE_SURPLUS(bucket)), |
1192 | size); |
1193 | return ret; |
1194 | } |
1195 | bucket++; |
1196 | } |
1197 | return NULL; |
1198 | } |
1199 | |
cea2e8a9 |
1200 | static union overhead * |
c7374474 |
1201 | getpages(MEM_SIZE needed, int *nblksp, int bucket) |
fa423c5b |
1202 | { |
1203 | /* Need to do (possibly expensive) system call. Try to |
1204 | optimize it for rare calling. */ |
1205 | MEM_SIZE require = needed - sbrked_remains; |
1206 | char *cp; |
1207 | union overhead *ovp; |
c7374474 |
1208 | MEM_SIZE slack = 0; |
fa423c5b |
1209 | |
1210 | if (sbrk_good > 0) { |
1211 | if (!last_sbrk_top && require < FIRST_SBRK) |
1212 | require = FIRST_SBRK; |
1213 | else if (require < MIN_SBRK) require = MIN_SBRK; |
1214 | |
1215 | if (require < goodsbrk * MIN_SBRK_FRAC / 100) |
1216 | require = goodsbrk * MIN_SBRK_FRAC / 100; |
1217 | require = ((require - 1 + MIN_SBRK) / MIN_SBRK) * MIN_SBRK; |
1218 | } else { |
1219 | require = needed; |
1220 | last_sbrk_top = 0; |
1221 | sbrked_remains = 0; |
1222 | } |
1223 | |
1224 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
1225 | "sbrk(%ld) for %ld-byte-long arena\n", |
1226 | (long)require, (long) needed)); |
1227 | cp = (char *)sbrk(require); |
1228 | #ifdef DEBUGGING_MSTATS |
1229 | sbrks++; |
1230 | #endif |
1231 | if (cp == last_sbrk_top) { |
1232 | /* Common case, anything is fine. */ |
1233 | sbrk_good++; |
1234 | ovp = (union overhead *) (cp - sbrked_remains); |
e9397286 |
1235 | last_op = cp - sbrked_remains; |
fa423c5b |
1236 | sbrked_remains = require - (needed - sbrked_remains); |
1237 | } else if (cp == (char *)-1) { /* no more room! */ |
1238 | ovp = (union overhead *)emergency_sbrk(needed); |
1239 | if (ovp == (union overhead *)-1) |
1240 | return 0; |
e9397286 |
1241 | if (((char*)ovp) > last_op) { /* Cannot happen with current emergency_sbrk() */ |
1242 | last_op = 0; |
1243 | } |
fa423c5b |
1244 | return ovp; |
1245 | } else { /* Non-continuous or first sbrk(). */ |
1246 | long add = sbrked_remains; |
1247 | char *newcp; |
1248 | |
1249 | if (sbrked_remains) { /* Put rest into chain, we |
1250 | cannot use it right now. */ |
1251 | add_to_chain((void*)(last_sbrk_top - sbrked_remains), |
1252 | sbrked_remains, 0); |
1253 | } |
1254 | |
1255 | /* Second, check alignment. */ |
1256 | slack = 0; |
1257 | |
61ae2fbf |
1258 | #if !defined(atarist) && !defined(__MINT__) /* on the atari we don't have to worry about this */ |
fa423c5b |
1259 | # ifndef I286 /* The sbrk(0) call on the I286 always returns the next segment */ |
5bbd1ef5 |
1260 | /* WANTED_ALIGNMENT may be more than NEEDED_ALIGNMENT, but this may |
1261 | improve performance of memory access. */ |
56431972 |
1262 | if (PTR2UV(cp) & (WANTED_ALIGNMENT - 1)) { /* Not aligned. */ |
1263 | slack = WANTED_ALIGNMENT - (PTR2UV(cp) & (WANTED_ALIGNMENT - 1)); |
fa423c5b |
1264 | add += slack; |
1265 | } |
1266 | # endif |
61ae2fbf |
1267 | #endif /* !atarist && !MINT */ |
fa423c5b |
1268 | |
1269 | if (add) { |
1270 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
1271 | "sbrk(%ld) to fix non-continuous/off-page sbrk:\n\t%ld for alignement,\t%ld were assumed to come from the tail of the previous sbrk\n", |
1272 | (long)add, (long) slack, |
1273 | (long) sbrked_remains)); |
1274 | newcp = (char *)sbrk(add); |
1275 | #if defined(DEBUGGING_MSTATS) |
1276 | sbrks++; |
1277 | sbrk_slack += add; |
1278 | #endif |
1279 | if (newcp != cp + require) { |
1280 | /* Too bad: even rounding sbrk() is not continuous.*/ |
1281 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
1282 | "failed to fix bad sbrk()\n")); |
1283 | #ifdef PACK_MALLOC |
1284 | if (slack) { |
741df71a |
1285 | MALLOC_UNLOCK; |
5bbd1ef5 |
1286 | fatalcroak("panic: Off-page sbrk\n"); |
fa423c5b |
1287 | } |
1288 | #endif |
1289 | if (sbrked_remains) { |
1290 | /* Try again. */ |
1291 | #if defined(DEBUGGING_MSTATS) |
1292 | sbrk_slack += require; |
1293 | #endif |
1294 | require = needed; |
1295 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
1296 | "straight sbrk(%ld)\n", |
1297 | (long)require)); |
1298 | cp = (char *)sbrk(require); |
1299 | #ifdef DEBUGGING_MSTATS |
1300 | sbrks++; |
1301 | #endif |
1302 | if (cp == (char *)-1) |
1303 | return 0; |
1304 | } |
1305 | sbrk_good = -1; /* Disable optimization! |
1306 | Continue with not-aligned... */ |
1307 | } else { |
1308 | cp += slack; |
1309 | require += sbrked_remains; |
1310 | } |
1311 | } |
1312 | |
1313 | if (last_sbrk_top) { |
1314 | sbrk_good -= SBRK_FAILURE_PRICE; |
1315 | } |
1316 | |
1317 | ovp = (union overhead *) cp; |
1318 | /* |
1319 | * Round up to minimum allocation size boundary |
1320 | * and deduct from block count to reflect. |
1321 | */ |
1322 | |
5bbd1ef5 |
1323 | # if NEEDED_ALIGNMENT > MEM_ALIGNBYTES |
56431972 |
1324 | if (PTR2UV(ovp) & (NEEDED_ALIGNMENT - 1)) |
5bbd1ef5 |
1325 | fatalcroak("Misalignment of sbrk()\n"); |
1326 | else |
1327 | # endif |
fa423c5b |
1328 | #ifndef I286 /* Again, this should always be ok on an 80286 */ |
56431972 |
1329 | if (PTR2UV(ovp) & (MEM_ALIGNBYTES - 1)) { |
fa423c5b |
1330 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
1331 | "fixing sbrk(): %d bytes off machine alignement\n", |
56431972 |
1332 | (int)(PTR2UV(ovp) & (MEM_ALIGNBYTES - 1)))); |
1333 | ovp = INT2PTR(union overhead *,(PTR2UV(ovp) + MEM_ALIGNBYTES) & |
5bbd1ef5 |
1334 | ~(MEM_ALIGNBYTES - 1)); |
fa423c5b |
1335 | (*nblksp)--; |
1336 | # if defined(DEBUGGING_MSTATS) |
1337 | /* This is only approx. if TWO_POT_OPTIMIZE: */ |
5bbd1ef5 |
1338 | sbrk_slack += (1 << (bucket >> BUCKET_POW2_SHIFT)); |
fa423c5b |
1339 | # endif |
1340 | } |
1341 | #endif |
5bbd1ef5 |
1342 | ; /* Finish `else' */ |
fa423c5b |
1343 | sbrked_remains = require - needed; |
e9397286 |
1344 | last_op = cp; |
fa423c5b |
1345 | } |
1346 | last_sbrk_top = cp + require; |
fa423c5b |
1347 | #ifdef DEBUGGING_MSTATS |
1348 | goodsbrk += require; |
1349 | #endif |
1350 | return ovp; |
1351 | } |
1352 | |
cea2e8a9 |
1353 | static int |
c7374474 |
1354 | getpages_adjacent(MEM_SIZE require) |
fa423c5b |
1355 | { |
1356 | if (require <= sbrked_remains) { |
1357 | sbrked_remains -= require; |
1358 | } else { |
1359 | char *cp; |
1360 | |
1361 | require -= sbrked_remains; |
1362 | /* We do not try to optimize sbrks here, we go for place. */ |
1363 | cp = (char*) sbrk(require); |
1364 | #ifdef DEBUGGING_MSTATS |
1365 | sbrks++; |
1366 | goodsbrk += require; |
1367 | #endif |
1368 | if (cp == last_sbrk_top) { |
1369 | sbrked_remains = 0; |
1370 | last_sbrk_top = cp + require; |
1371 | } else { |
28ac10b1 |
1372 | if (cp == (char*)-1) { /* Out of memory */ |
1373 | #ifdef DEBUGGING_MSTATS |
1374 | goodsbrk -= require; |
1375 | #endif |
1376 | return 0; |
1377 | } |
fa423c5b |
1378 | /* Report the failure: */ |
1379 | if (sbrked_remains) |
1380 | add_to_chain((void*)(last_sbrk_top - sbrked_remains), |
1381 | sbrked_remains, 0); |
1382 | add_to_chain((void*)cp, require, 0); |
1383 | sbrk_good -= SBRK_FAILURE_PRICE; |
1384 | sbrked_remains = 0; |
1385 | last_sbrk_top = 0; |
1386 | last_op = 0; |
1387 | return 0; |
1388 | } |
1389 | } |
1390 | |
1391 | return 1; |
1392 | } |
1393 | |
8d063cd8 |
1394 | /* |
1395 | * Allocate more memory to the indicated bucket. |
1396 | */ |
cea2e8a9 |
1397 | static void |
1398 | morecore(register int bucket) |
8d063cd8 |
1399 | { |
72aaf631 |
1400 | register union overhead *ovp; |
8d063cd8 |
1401 | register int rnu; /* 2^rnu bytes will be requested */ |
fa423c5b |
1402 | int nblks; /* become nblks blocks of the desired size */ |
bbce6d69 |
1403 | register MEM_SIZE siz, needed; |
8d063cd8 |
1404 | |
1405 | if (nextf[bucket]) |
1406 | return; |
e8bc2b5c |
1407 | if (bucket == sizeof(MEM_SIZE)*8*BUCKETS_PER_POW2) { |
741df71a |
1408 | MALLOC_UNLOCK; |
d720c441 |
1409 | croak("%s", "Out of memory during ridiculously large request"); |
55497cff |
1410 | } |
d720c441 |
1411 | if (bucket > max_bucket) |
e8bc2b5c |
1412 | max_bucket = bucket; |
d720c441 |
1413 | |
e8bc2b5c |
1414 | rnu = ( (bucket <= (LOG_OF_MIN_ARENA << BUCKET_POW2_SHIFT)) |
1415 | ? LOG_OF_MIN_ARENA |
1416 | : (bucket >> BUCKET_POW2_SHIFT) ); |
1417 | /* This may be overwritten later: */ |
1418 | nblks = 1 << (rnu - (bucket >> BUCKET_POW2_SHIFT)); /* how many blocks to get */ |
1419 | needed = ((MEM_SIZE)1 << rnu) + POW2_OPTIMIZE_SURPLUS(bucket); |
1420 | if (nextf[rnu << BUCKET_POW2_SHIFT]) { /* 2048b bucket. */ |
1421 | ovp = nextf[rnu << BUCKET_POW2_SHIFT] - 1 + CHUNK_SHIFT; |
1422 | nextf[rnu << BUCKET_POW2_SHIFT] |
1423 | = nextf[rnu << BUCKET_POW2_SHIFT]->ov_next; |
1424 | #ifdef DEBUGGING_MSTATS |
1425 | nmalloc[rnu << BUCKET_POW2_SHIFT]--; |
1426 | start_slack -= M_OVERHEAD; |
1427 | #endif |
1428 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
1429 | "stealing %ld bytes from %ld arena\n", |
1430 | (long) needed, (long) rnu << BUCKET_POW2_SHIFT)); |
1431 | } else if (chunk_chain |
1432 | && (ovp = (union overhead*) get_from_chain(needed))) { |
1433 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
1434 | "stealing %ld bytes from chain\n", |
1435 | (long) needed)); |
d720c441 |
1436 | } else if ( (ovp = (union overhead*) |
1437 | get_from_bigger_buckets((rnu << BUCKET_POW2_SHIFT) + 1, |
1438 | needed)) ) { |
e8bc2b5c |
1439 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
1440 | "stealing %ld bytes from bigger buckets\n", |
1441 | (long) needed)); |
1442 | } else if (needed <= sbrked_remains) { |
1443 | ovp = (union overhead *)(last_sbrk_top - sbrked_remains); |
1444 | sbrked_remains -= needed; |
1445 | last_op = (char*)ovp; |
fa423c5b |
1446 | } else |
1447 | ovp = getpages(needed, &nblks, bucket); |
e8bc2b5c |
1448 | |
fa423c5b |
1449 | if (!ovp) |
1450 | return; |
e8bc2b5c |
1451 | |
8d063cd8 |
1452 | /* |
1453 | * Add new memory allocated to that on |
1454 | * free list for this hash bucket. |
1455 | */ |
e8bc2b5c |
1456 | siz = BUCKET_SIZE(bucket); |
cf5c4ad8 |
1457 | #ifdef PACK_MALLOC |
72aaf631 |
1458 | *(u_char*)ovp = bucket; /* Fill index. */ |
e8bc2b5c |
1459 | if (bucket <= MAX_PACKED) { |
1460 | ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket)); |
1461 | nblks = N_BLKS(bucket); |
cf5c4ad8 |
1462 | # ifdef DEBUGGING_MSTATS |
e8bc2b5c |
1463 | start_slack += BLK_SHIFT(bucket); |
cf5c4ad8 |
1464 | # endif |
e8bc2b5c |
1465 | } else if (bucket < LOG_OF_MIN_ARENA * BUCKETS_PER_POW2) { |
1466 | ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket)); |
cf5c4ad8 |
1467 | siz -= sizeof(union overhead); |
72aaf631 |
1468 | } else ovp++; /* One chunk per block. */ |
e8bc2b5c |
1469 | #endif /* PACK_MALLOC */ |
72aaf631 |
1470 | nextf[bucket] = ovp; |
5f05dabc |
1471 | #ifdef DEBUGGING_MSTATS |
1472 | nmalloc[bucket] += nblks; |
e8bc2b5c |
1473 | if (bucket > MAX_PACKED) { |
1474 | start_slack += M_OVERHEAD * nblks; |
1475 | } |
5f05dabc |
1476 | #endif |
8d063cd8 |
1477 | while (--nblks > 0) { |
72aaf631 |
1478 | ovp->ov_next = (union overhead *)((caddr_t)ovp + siz); |
1479 | ovp = (union overhead *)((caddr_t)ovp + siz); |
8d063cd8 |
1480 | } |
8595d6f1 |
1481 | /* Not all sbrks return zeroed memory. */ |
72aaf631 |
1482 | ovp->ov_next = (union overhead *)NULL; |
cf5c4ad8 |
1483 | #ifdef PACK_MALLOC |
e8bc2b5c |
1484 | if (bucket == 7*BUCKETS_PER_POW2) { /* Special case, explanation is above. */ |
1485 | union overhead *n_op = nextf[7*BUCKETS_PER_POW2]->ov_next; |
1486 | nextf[7*BUCKETS_PER_POW2] = |
1487 | (union overhead *)((caddr_t)nextf[7*BUCKETS_PER_POW2] |
1488 | - sizeof(union overhead)); |
1489 | nextf[7*BUCKETS_PER_POW2]->ov_next = n_op; |
cf5c4ad8 |
1490 | } |
1491 | #endif /* PACK_MALLOC */ |
8d063cd8 |
1492 | } |
1493 | |
94b6baf5 |
1494 | Free_t |
86058a2d |
1495 | Perl_mfree(void *mp) |
cea2e8a9 |
1496 | { |
ee0007ab |
1497 | register MEM_SIZE size; |
72aaf631 |
1498 | register union overhead *ovp; |
352d5a3a |
1499 | char *cp = (char*)mp; |
cf5c4ad8 |
1500 | #ifdef PACK_MALLOC |
1501 | u_char bucket; |
1502 | #endif |
8d063cd8 |
1503 | |
e8bc2b5c |
1504 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
b900a521 |
1505 | "0x%"UVxf": (%05lu) free\n", |
1506 | PTR2UV(cp), (unsigned long)(PL_an++))); |
45d8adaa |
1507 | |
cf5c4ad8 |
1508 | if (cp == NULL) |
1509 | return; |
72aaf631 |
1510 | ovp = (union overhead *)((caddr_t)cp |
e8bc2b5c |
1511 | - sizeof (union overhead) * CHUNK_SHIFT); |
cf5c4ad8 |
1512 | #ifdef PACK_MALLOC |
72aaf631 |
1513 | bucket = OV_INDEX(ovp); |
cf5c4ad8 |
1514 | #endif |
e8bc2b5c |
1515 | #ifdef IGNORE_SMALL_BAD_FREE |
1516 | if ((bucket >= FIRST_BUCKET_WITH_CHECK) |
1517 | && (OV_MAGIC(ovp, bucket) != MAGIC)) |
1518 | #else |
1519 | if (OV_MAGIC(ovp, bucket) != MAGIC) |
1520 | #endif |
1521 | { |
68dc0745 |
1522 | static int bad_free_warn = -1; |
cf5c4ad8 |
1523 | if (bad_free_warn == -1) { |
e8cd8248 |
1524 | dTHX; |
5fd9e9a4 |
1525 | char *pbf = PerlEnv_getenv("PERL_BADFREE"); |
cf5c4ad8 |
1526 | bad_free_warn = (pbf) ? atoi(pbf) : 1; |
1527 | } |
1528 | if (!bad_free_warn) |
1529 | return; |
8990e307 |
1530 | #ifdef RCHECK |
2ba999ec |
1531 | #ifdef PERL_CORE |
e8cd8248 |
1532 | { |
1533 | dTHX; |
1534 | if (!PERL_IS_ALIVE || !PL_curcop || ckWARN_d(WARN_MALLOC)) |
1d860e85 |
1535 | Perl_warner(aTHX_ WARN_MALLOC, "%s free() ignored", |
e8cd8248 |
1536 | ovp->ov_rmagic == RMAGIC - 1 ? |
1537 | "Duplicate" : "Bad"); |
2ba999ec |
1538 | } |
e476b1b5 |
1539 | #else |
2ba999ec |
1540 | warn("%s free() ignored", |
1541 | ovp->ov_rmagic == RMAGIC - 1 ? "Duplicate" : "Bad"); |
e476b1b5 |
1542 | #endif |
1543 | #else |
1544 | #ifdef PERL_CORE |
2ba999ec |
1545 | { |
1546 | dTHX; |
1d860e85 |
1547 | if (!PERL_IS_ALIVE || !PL_curcop || ckWARN_d(WARN_MALLOC)) |
1548 | Perl_warner(aTHX_ WARN_MALLOC, "%s", "Bad free() ignored"); |
2ba999ec |
1549 | } |
8990e307 |
1550 | #else |
2ba999ec |
1551 | warn("%s", "Bad free() ignored"); |
8990e307 |
1552 | #endif |
e476b1b5 |
1553 | #endif |
8d063cd8 |
1554 | return; /* sanity */ |
e8bc2b5c |
1555 | } |
8d063cd8 |
1556 | #ifdef RCHECK |
3541dd58 |
1557 | ASSERT(ovp->ov_rmagic == RMAGIC, "chunk's head overwrite"); |
e8bc2b5c |
1558 | if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) { |
1559 | int i; |
1560 | MEM_SIZE nbytes = ovp->ov_size + 1; |
1561 | |
1562 | if ((i = nbytes & 3)) { |
1563 | i = 4 - i; |
1564 | while (i--) { |
3541dd58 |
1565 | ASSERT(*((char *)((caddr_t)ovp + nbytes - RSLOP + i)) |
d720c441 |
1566 | == RMAGIC_C, "chunk's tail overwrite"); |
e8bc2b5c |
1567 | } |
1568 | } |
1569 | nbytes = (nbytes + 3) &~ 3; |
3541dd58 |
1570 | ASSERT(*(u_int *)((caddr_t)ovp + nbytes - RSLOP) == RMAGIC, "chunk's tail overwrite"); |
e8bc2b5c |
1571 | } |
72aaf631 |
1572 | ovp->ov_rmagic = RMAGIC - 1; |
8d063cd8 |
1573 | #endif |
3541dd58 |
1574 | ASSERT(OV_INDEX(ovp) < NBUCKETS, "chunk's head overwrite"); |
72aaf631 |
1575 | size = OV_INDEX(ovp); |
4ad56ec9 |
1576 | |
1577 | MALLOC_LOCK; |
72aaf631 |
1578 | ovp->ov_next = nextf[size]; |
1579 | nextf[size] = ovp; |
741df71a |
1580 | MALLOC_UNLOCK; |
8d063cd8 |
1581 | } |
1582 | |
4ad56ec9 |
1583 | /* There is no need to do any locking in realloc (with the exception of |
1584 | trying to grow in place if we are at the end of the chain). |
1585 | If somebody calls us from a different thread with the same address, |
1586 | we are out of luck anyway. */ |
8d063cd8 |
1587 | |
2304df62 |
1588 | Malloc_t |
86058a2d |
1589 | Perl_realloc(void *mp, size_t nbytes) |
cea2e8a9 |
1590 | { |
ee0007ab |
1591 | register MEM_SIZE onb; |
72aaf631 |
1592 | union overhead *ovp; |
d720c441 |
1593 | char *res; |
1594 | int prev_bucket; |
e8bc2b5c |
1595 | register int bucket; |
4ad56ec9 |
1596 | int incr; /* 1 if it does not fit, -1 if it "easily" fits in a |
1597 | smaller bucket, otherwise 0. */ |
352d5a3a |
1598 | char *cp = (char*)mp; |
8d063cd8 |
1599 | |
e8bc2b5c |
1600 | #if defined(DEBUGGING) || !defined(PERL_CORE) |
ee0007ab |
1601 | MEM_SIZE size = nbytes; |
45d8adaa |
1602 | |
45d8adaa |
1603 | if ((long)nbytes < 0) |
cea2e8a9 |
1604 | croak("%s", "panic: realloc"); |
45d8adaa |
1605 | #endif |
e8bc2b5c |
1606 | |
1607 | BARK_64K_LIMIT("Reallocation",nbytes,size); |
1608 | if (!cp) |
86058a2d |
1609 | return Perl_malloc(nbytes); |
45d8adaa |
1610 | |
72aaf631 |
1611 | ovp = (union overhead *)((caddr_t)cp |
e8bc2b5c |
1612 | - sizeof (union overhead) * CHUNK_SHIFT); |
1613 | bucket = OV_INDEX(ovp); |
4ad56ec9 |
1614 | |
e8bc2b5c |
1615 | #ifdef IGNORE_SMALL_BAD_FREE |
4ad56ec9 |
1616 | if ((bucket >= FIRST_BUCKET_WITH_CHECK) |
1617 | && (OV_MAGIC(ovp, bucket) != MAGIC)) |
e8bc2b5c |
1618 | #else |
4ad56ec9 |
1619 | if (OV_MAGIC(ovp, bucket) != MAGIC) |
e8bc2b5c |
1620 | #endif |
4ad56ec9 |
1621 | { |
1622 | static int bad_free_warn = -1; |
1623 | if (bad_free_warn == -1) { |
e8cd8248 |
1624 | dTHX; |
4ad56ec9 |
1625 | char *pbf = PerlEnv_getenv("PERL_BADFREE"); |
1626 | bad_free_warn = (pbf) ? atoi(pbf) : 1; |
1627 | } |
1628 | if (!bad_free_warn) |
ce70748c |
1629 | return Nullch; |
4ad56ec9 |
1630 | #ifdef RCHECK |
2ba999ec |
1631 | #ifdef PERL_CORE |
e8cd8248 |
1632 | { |
1633 | dTHX; |
1634 | if (!PERL_IS_ALIVE || !PL_curcop || ckWARN_d(WARN_MALLOC)) |
1d860e85 |
1635 | Perl_warner(aTHX_ WARN_MALLOC, "%srealloc() %signored", |
e8cd8248 |
1636 | (ovp->ov_rmagic == RMAGIC - 1 ? "" : "Bad "), |
1637 | ovp->ov_rmagic == RMAGIC - 1 |
1638 | ? "of freed memory " : ""); |
2ba999ec |
1639 | } |
e476b1b5 |
1640 | #else |
2ba999ec |
1641 | warn("%srealloc() %signored", |
1642 | (ovp->ov_rmagic == RMAGIC - 1 ? "" : "Bad "), |
1643 | ovp->ov_rmagic == RMAGIC - 1 ? "of freed memory " : ""); |
e476b1b5 |
1644 | #endif |
1645 | #else |
1646 | #ifdef PERL_CORE |
2ba999ec |
1647 | { |
1648 | dTHX; |
1d860e85 |
1649 | if (!PERL_IS_ALIVE || !PL_curcop || ckWARN_d(WARN_MALLOC)) |
1650 | Perl_warner(aTHX_ WARN_MALLOC, "%s", |
1651 | "Bad realloc() ignored"); |
2ba999ec |
1652 | } |
4ad56ec9 |
1653 | #else |
2ba999ec |
1654 | warn("%s", "Bad realloc() ignored"); |
4ad56ec9 |
1655 | #endif |
e476b1b5 |
1656 | #endif |
ce70748c |
1657 | return Nullch; /* sanity */ |
4ad56ec9 |
1658 | } |
1659 | |
e8bc2b5c |
1660 | onb = BUCKET_SIZE_REAL(bucket); |
55497cff |
1661 | /* |
1662 | * avoid the copy if same size block. |
e8bc2b5c |
1663 | * We are not aggressive with boundary cases. Note that it might |
1664 | * (for a small number of cases) give a false negative if |
55497cff |
1665 | * both new size and old one are in the bucket for |
e8bc2b5c |
1666 | * FIRST_BIG_POW2, but the new one is near the lower end. |
1667 | * |
1668 | * So far we do not try to move to a bucket 1.5 times smaller. |
55497cff |
1669 | */ |
e8bc2b5c |
1670 | if (nbytes > onb) incr = 1; |
1671 | else { |
1672 | #ifdef DO_NOT_TRY_HARDER_WHEN_SHRINKING |
1673 | if ( /* This is a little bit pessimal if PACK_MALLOC: */ |
1674 | nbytes > ( (onb >> 1) - M_OVERHEAD ) |
1675 | # ifdef TWO_POT_OPTIMIZE |
1676 | || (bucket == FIRST_BIG_POW2 && nbytes >= LAST_SMALL_BOUND ) |
1677 | # endif |
1678 | ) |
1679 | #else /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */ |
1680 | prev_bucket = ( (bucket > MAX_PACKED + 1) |
1681 | ? bucket - BUCKETS_PER_POW2 |
1682 | : bucket - 1); |
1683 | if (nbytes > BUCKET_SIZE_REAL(prev_bucket)) |
1684 | #endif /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */ |
1685 | incr = 0; |
1686 | else incr = -1; |
1687 | } |
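/* Added annotation -- an illustrative sketch of the decision above, not part
 * of the original source.  Assuming plain power-of-two buckets and ignoring
 * the per-chunk overhead, it behaves roughly like:
 *
 *     onb  = 2048;                   (usable size of the current bucket)
 *     prev = 1024;                   (usable size of the next smaller bucket)
 *     incr = (nbytes > onb)  ?  1    (does not fit: must grow)
 *          : (nbytes > prev) ?  0    (still the best-fitting bucket: keep in place)
 *          :                    -1;  (fits a smaller bucket: malloc+copy below)
 *
 * So shrinking a chunk in the 2048-byte bucket to 1500 bytes is done in
 * place, while shrinking it to 500 bytes goes through the "hard way". */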
2ce36478 |
1688 | #ifdef STRESS_REALLOC |
4ad56ec9 |
1689 | goto hard_way; |
2ce36478 |
1690 | #endif |
4ad56ec9 |
1691 | if (incr == 0) { |
852c2e52 |
1692 | inplace_label: |
a687059c |
1693 | #ifdef RCHECK |
1694 | /* |
1695 | * Record new allocated size of block and |
1696 | * bound space with magic numbers. |
1697 | */ |
72aaf631 |
1698 | if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) { |
e8bc2b5c |
1699 | int i, nb = ovp->ov_size + 1; |
1700 | |
1701 | if ((i = nb & 3)) { |
1702 | i = 4 - i; |
1703 | while (i--) { |
3541dd58 |
1704 | ASSERT(*((char *)((caddr_t)ovp + nb - RSLOP + i)) == RMAGIC_C, "chunk's tail overwrite"); |
e8bc2b5c |
1705 | } |
1706 | } |
1707 | nb = (nb + 3) &~ 3; |
3541dd58 |
1708 | ASSERT(*(u_int *)((caddr_t)ovp + nb - RSLOP) == RMAGIC, "chunk's tail overwrite"); |
a687059c |
1709 | /* |
1710 | * Convert amount of memory requested into |
1711 | * closest block size stored in hash buckets |
1712 | * which satisfies request. Account for |
1713 | * space used per block for accounting. |
1714 | */ |
cf5c4ad8 |
1715 | nbytes += M_OVERHEAD; |
72aaf631 |
1716 | ovp->ov_size = nbytes - 1; |
e8bc2b5c |
1717 | if ((i = nbytes & 3)) { |
1718 | i = 4 - i; |
1719 | while (i--) |
1720 | *((char *)((caddr_t)ovp + nbytes - RSLOP + i)) |
1721 | = RMAGIC_C; |
1722 | } |
1723 | nbytes = (nbytes + 3) &~ 3; |
72aaf631 |
1724 | *((u_int *)((caddr_t)ovp + nbytes - RSLOP)) = RMAGIC; |
a687059c |
1725 | } |
1726 | #endif |
45d8adaa |
1727 | res = cp; |
42ac124e |
1728 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
b900a521 |
1729 | "0x%"UVxf": (%05lu) realloc %ld bytes inplace\n", |
1730 | PTR2UV(res),(unsigned long)(PL_an++), |
42ac124e |
1731 | (long)size)); |
e8bc2b5c |
1732 | } else if (incr == 1 && (cp - M_OVERHEAD == last_op) |
1733 | && (onb > (1 << LOG_OF_MIN_ARENA))) { |
1734 | MEM_SIZE require, newarena = nbytes, pow; |
1735 | int shiftr; |
1736 | |
1737 | POW2_OPTIMIZE_ADJUST(newarena); |
1738 | newarena = newarena + M_OVERHEAD; |
1739 | /* newarena = (newarena + 3) &~ 3; */ |
1740 | shiftr = (newarena - 1) >> LOG_OF_MIN_ARENA; |
1741 | pow = LOG_OF_MIN_ARENA + 1; |
1742 | /* apart from this loop, this is O(1) */ |
1743 | while (shiftr >>= 1) |
1744 | pow++; |
1745 | newarena = (1 << pow) + POW2_OPTIMIZE_SURPLUS(pow * BUCKETS_PER_POW2); |
1746 | require = newarena - onb - M_OVERHEAD; |
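/* Added annotation -- worked example, assuming LOG_OF_MIN_ARENA == 11 and
 * ignoring POW2_OPTIMIZE_SURPLUS: for nbytes around 5000, newarena starts at
 * roughly 5000 + M_OVERHEAD, shiftr comes out as 2, and the loop raises pow
 * from 12 to 13, so newarena becomes 1 << 13 == 8192, the smallest arena
 * that can hold the request; `require' is then the number of additional
 * bytes that must be obtained immediately after the current chunk. */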
1747 | |
4ad56ec9 |
1748 | MALLOC_LOCK; |
1749 | if (cp - M_OVERHEAD == last_op /* We *still* are the last chunk */ |
1750 | && getpages_adjacent(require)) { |
e8bc2b5c |
1751 | #ifdef DEBUGGING_MSTATS |
fa423c5b |
1752 | nmalloc[bucket]--; |
1753 | nmalloc[pow * BUCKETS_PER_POW2]++; |
e8bc2b5c |
1754 | #endif |
4d6cd4d8 |
1755 | *(cp - M_OVERHEAD) = pow * BUCKETS_PER_POW2; /* Fill index. */ |
4ad56ec9 |
1756 | MALLOC_UNLOCK; |
fa423c5b |
1757 | goto inplace_label; |
4ad56ec9 |
1758 | } else { |
1759 | MALLOC_UNLOCK; |
fa423c5b |
1760 | goto hard_way; |
4ad56ec9 |
1761 | } |
e8bc2b5c |
1762 | } else { |
1763 | hard_way: |
42ac124e |
1764 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
b900a521 |
1765 | "0x%"UVxf": (%05lu) realloc %ld bytes the hard way\n", |
1766 | PTR2UV(cp),(unsigned long)(PL_an++), |
42ac124e |
1767 | (long)size)); |
86058a2d |
1768 | if ((res = (char*)Perl_malloc(nbytes)) == NULL) |
e8bc2b5c |
1769 | return (NULL); |
1770 | if (cp != res) /* common optimization */ |
1771 | Copy(cp, res, (MEM_SIZE)(nbytes<onb?nbytes:onb), char); |
4ad56ec9 |
1772 | Perl_mfree(cp); |
45d8adaa |
1773 | } |
2304df62 |
1774 | return ((Malloc_t)res); |
8d063cd8 |
1775 | } |
1776 | |
cf5c4ad8 |
1777 | Malloc_t |
86058a2d |
1778 | Perl_calloc(register size_t elements, register size_t size) |
cf5c4ad8 |
1779 | { |
1780 | long sz = elements * size; |
86058a2d |
1781 | Malloc_t p = Perl_malloc(sz); |
cf5c4ad8 |
1782 | |
1783 | if (p) { |
1784 | memset((void*)p, 0, sz); |
1785 | } |
1786 | return p; |
1787 | } |
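/* Added annotation: the multiplication above is not checked for overflow, so
 * a huge `elements' times `size' can wrap around and silently return a
 * buffer that is too small.  A minimal defensive sketch, not part of the
 * original source:
 *
 *     if (size && elements > ((MEM_SIZE)~0) / size)
 *         return NULL;
 *     sz = elements * size;
 */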
1788 | |
4ad56ec9 |
1789 | char * |
1790 | Perl_strdup(const char *s) |
1791 | { |
1792 | MEM_SIZE l = strlen(s); |
b48f1ba5 |
1793 | char *s1 = (char *)Perl_malloc(l+1); |
4ad56ec9 |
1794 | |
b48f1ba5 |
1795 | Copy(s, s1, (MEM_SIZE)(l+1), char); |
4ad56ec9 |
1796 | return s1; |
1797 | } |
1798 | |
1799 | #ifdef PERL_CORE |
1800 | int |
1801 | Perl_putenv(char *a) |
1802 | { |
1803 | /* Sometimes the system's putenv() conflicts with my_setenv() - the clash is |
1804 | the system's malloc() vs Perl's free(). */ |
1805 | dTHX; |
1806 | char *var; |
1807 | char *val = a; |
1808 | MEM_SIZE l; |
1809 | char buf[80]; |
1810 | |
1811 | while (*val && *val != '=') |
1812 | val++; |
1813 | if (!*val) |
1814 | return -1; |
1815 | l = val - a; |
1816 | if (l < sizeof(buf)) |
1817 | var = buf; |
1818 | else |
1819 | var = Perl_malloc(l + 1); |
1820 | Copy(a, var, l, char); |
b48f1ba5 |
1821 | var[l] = 0; |
1822 | my_setenv(var, val+1); |
4ad56ec9 |
1823 | if (var != buf) |
1824 | Perl_mfree(var); |
1825 | return 0; |
1826 | } |
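/* Added annotation -- usage sketch: Perl_putenv() splits its argument at the
 * first '=' and hands the halves to my_setenv(), so the environment string
 * stays under Perl's own allocator.  For example,
 *
 *     Perl_putenv("PERL_BADFREE=0");
 *
 * is equivalent to my_setenv("PERL_BADFREE", "0"); a string with no '=' is
 * rejected with a return value of -1. */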
1827 | # endif |
1828 | |
e8bc2b5c |
1829 | MEM_SIZE |
cea2e8a9 |
1830 | Perl_malloced_size(void *p) |
e8bc2b5c |
1831 | { |
8d6dde3e |
1832 | union overhead *ovp = (union overhead *) |
1833 | ((caddr_t)p - sizeof (union overhead) * CHUNK_SHIFT); |
1834 | int bucket = OV_INDEX(ovp); |
1835 | #ifdef RCHECK |
1836 | /* The caller wants complete control over the chunk, so |
1837 | disable the memory checking inside the chunk. */ |
1838 | if (bucket <= MAX_SHORT_BUCKET) { |
1839 | MEM_SIZE size = BUCKET_SIZE_REAL(bucket); |
1840 | ovp->ov_size = size + M_OVERHEAD - 1; |
1841 | *((u_int *)((caddr_t)ovp + size + M_OVERHEAD - RSLOP)) = RMAGIC; |
1842 | } |
1843 | #endif |
e8bc2b5c |
1844 | return BUCKET_SIZE_REAL(bucket); |
1845 | } |
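/* Added annotation -- usage sketch: Perl_malloced_size() reports the usable
 * size of the bucket the chunk actually lives in, which may exceed the size
 * originally requested.  For example,
 *
 *     char *p = (char*)Perl_malloc(100);
 *     MEM_SIZE usable = Perl_malloced_size(p);   (usable >= 100)
 *
 * Under RCHECK, for the buckets that carry the tail check, the trailing
 * magic is moved to the end of the bucket so the caller may use the whole
 * area without tripping the overwrite checks. */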
1846 | |
e8bc2b5c |
1847 | # ifdef BUCKETS_ROOT2 |
1848 | # define MIN_EVEN_REPORT 6 |
1849 | # else |
1850 | # define MIN_EVEN_REPORT MIN_BUCKET |
1851 | # endif |
827e134a |
1852 | |
1853 | int |
1854 | Perl_get_mstats(pTHX_ perl_mstats_t *buf, int buflen, int level) |
8d063cd8 |
1855 | { |
df31f264 |
1856 | #ifdef DEBUGGING_MSTATS |
8d063cd8 |
1857 | register int i, j; |
1858 | register union overhead *p; |
4ad56ec9 |
1859 | struct chunk_chain_s* nextchain; |
8d063cd8 |
1860 | |
827e134a |
1861 | buf->topbucket = buf->topbucket_ev = buf->topbucket_odd |
1862 | = buf->totfree = buf->total = buf->total_chain = 0; |
1863 | |
1864 | buf->minbucket = MIN_BUCKET; |
4ad56ec9 |
1865 | MALLOC_LOCK; |
e8bc2b5c |
1866 | for (i = MIN_BUCKET ; i < NBUCKETS; i++) { |
8d063cd8 |
1867 | for (j = 0, p = nextf[i]; p; p = p->ov_next, j++) |
1868 | ; |
827e134a |
1869 | if (i < buflen) { |
1870 | buf->nfree[i] = j; |
1871 | buf->ntotal[i] = nmalloc[i]; |
1872 | } |
1873 | buf->totfree += j * BUCKET_SIZE_REAL(i); |
1874 | buf->total += nmalloc[i] * BUCKET_SIZE_REAL(i); |
e8bc2b5c |
1875 | if (nmalloc[i]) { |
827e134a |
1876 | i % 2 ? (buf->topbucket_odd = i) : (buf->topbucket_ev = i); |
1877 | buf->topbucket = i; |
e8bc2b5c |
1878 | } |
c07a80fd |
1879 | } |
4ad56ec9 |
1880 | nextchain = chunk_chain; |
1881 | while (nextchain) { |
827e134a |
1882 | buf->total_chain += nextchain->size; |
4ad56ec9 |
1883 | nextchain = nextchain->next; |
1884 | } |
827e134a |
1885 | buf->total_sbrk = goodsbrk + sbrk_slack; |
1886 | buf->sbrks = sbrks; |
1887 | buf->sbrk_good = sbrk_good; |
1888 | buf->sbrk_slack = sbrk_slack; |
1889 | buf->start_slack = start_slack; |
1890 | buf->sbrked_remains = sbrked_remains; |
4ad56ec9 |
1891 | MALLOC_UNLOCK; |
827e134a |
1892 | if (level) { |
1893 | for (i = MIN_BUCKET ; i < NBUCKETS; i++) { |
1894 | if (i >= buflen) |
1895 | break; |
1896 | buf->bucket_mem_size[i] = BUCKET_SIZE(i); |
1897 | buf->bucket_available_size[i] = BUCKET_SIZE_REAL(i); |
1898 | } |
1899 | } |
1900 | #endif /* defined DEBUGGING_MSTATS */ |
fe52b3b7 |
1901 | return 0; /* XXX unused */ |
827e134a |
1902 | } |
1903 | /* |
1904 | * mstats - print out statistics about malloc |
1905 | * |
1906 | * Prints two lines of numbers, one showing the length of the free list |
1907 | * for each size category, the second showing the number of mallocs - |
1908 | * frees for each size category. |
1909 | */ |
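/* Added annotation -- usage sketch: with a thread context in scope this is
 * called as
 *
 *     Perl_dump_mstats(aTHX_ "after execution");
 *
 * and writes the report to Perl_error_log.  When perl is built with this
 * malloc and DEBUGGING_MSTATS, setting the PERL_DEBUG_MSTATS environment
 * variable is the usual way to request such dumps automatically. */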
1910 | void |
1911 | Perl_dump_mstats(pTHX_ char *s) |
1912 | { |
1913 | #ifdef DEBUGGING_MSTATS |
1914 | register int i, j; |
1915 | register union overhead *p; |
1916 | perl_mstats_t buffer; |
1917 | unsigned long nf[NBUCKETS]; |
1918 | unsigned long nt[NBUCKETS]; |
1919 | struct chunk_chain_s* nextchain; |
1920 | |
1921 | buffer.nfree = nf; |
1922 | buffer.ntotal = nt; |
1923 | get_mstats(&buffer, NBUCKETS, 0); |
1924 | |
c07a80fd |
1925 | if (s) |
bf49b057 |
1926 | PerlIO_printf(Perl_error_log, |
d720c441 |
1927 | "Memory allocation statistics %s (buckets %ld(%ld)..%ld(%ld)\n", |
e8bc2b5c |
1928 | s, |
d720c441 |
1929 | (long)BUCKET_SIZE_REAL(MIN_BUCKET), |
1930 | (long)BUCKET_SIZE(MIN_BUCKET), |
827e134a |
1931 | (long)BUCKET_SIZE_REAL(buffer.topbucket), |
1932 | (long)BUCKET_SIZE(buffer.topbucket)); |
76cfd9aa |
1933 | PerlIO_printf(Perl_error_log, "%8ld free:", buffer.totfree); |
827e134a |
1934 | for (i = MIN_EVEN_REPORT; i <= buffer.topbucket; i += BUCKETS_PER_POW2) { |
bf49b057 |
1935 | PerlIO_printf(Perl_error_log, |
e8bc2b5c |
1936 | ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2) |
1937 | ? " %5d" |
1938 | : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")), |
827e134a |
1939 | (int)buffer.nfree[i]); |
e8bc2b5c |
1940 | } |
1941 | #ifdef BUCKETS_ROOT2 |
bf49b057 |
1942 | PerlIO_printf(Perl_error_log, "\n\t "); |
827e134a |
1943 | for (i = MIN_BUCKET + 1; i <= buffer.topbucket_odd; i += BUCKETS_PER_POW2) { |
bf49b057 |
1944 | PerlIO_printf(Perl_error_log, |
e8bc2b5c |
1945 | ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2) |
1946 | ? " %5d" |
1947 | : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")), |
827e134a |
1948 | (int)buffer.nfree[i]); |
8d063cd8 |
1949 | } |
e8bc2b5c |
1950 | #endif |
76cfd9aa |
1951 | PerlIO_printf(Perl_error_log, "\n%8ld used:", buffer.total - buffer.totfree); |
827e134a |
1952 | for (i = MIN_EVEN_REPORT; i <= buffer.topbucket; i += BUCKETS_PER_POW2) { |
bf49b057 |
1953 | PerlIO_printf(Perl_error_log, |
e8bc2b5c |
1954 | ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2) |
1955 | ? " %5d" |
1956 | : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")), |
827e134a |
1957 | (int)(buffer.ntotal[i] - buffer.nfree[i])); |
c07a80fd |
1958 | } |
e8bc2b5c |
1959 | #ifdef BUCKETS_ROOT2 |
bf49b057 |
1960 | PerlIO_printf(Perl_error_log, "\n\t "); |
827e134a |
1961 | for (i = MIN_BUCKET + 1; i <= buffer.topbucket_odd; i += BUCKETS_PER_POW2) { |
bf49b057 |
1962 | PerlIO_printf(Perl_error_log, |
e8bc2b5c |
1963 | ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2) |
1964 | ? " %5d" |
1965 | : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")), |
827e134a |
1966 | (int)(buffer.ntotal[i] - buffer.nfree[i])); |
e8bc2b5c |
1967 | } |
1968 | #endif |
76cfd9aa |
1969 | PerlIO_printf(Perl_error_log, "\nTotal sbrk(): %ld/%ld:%ld. Odd ends: pad+heads+chain+tail: %ld+%ld+%ld+%ld.\n", |
827e134a |
1970 | buffer.total_sbrk, buffer.sbrks, buffer.sbrk_good, |
1971 | buffer.sbrk_slack, buffer.start_slack, |
1972 | buffer.total_chain, buffer.sbrked_remains); |
df31f264 |
1973 | #endif /* DEBUGGING_MSTATS */ |
c07a80fd |
1974 | } |
a687059c |
1975 | #endif /* lint */ |
cf5c4ad8 |
1976 | |
cf5c4ad8 |
1977 | #ifdef USE_PERL_SBRK |
1978 | |
e3663bad |
1979 | # if defined(__MACHTEN_PPC__) || defined(NeXT) || defined(__NeXT__) || defined(PURIFY) |
38ac2dc8 |
1980 | # define PERL_SBRK_VIA_MALLOC |
38ac2dc8 |
1981 | # endif |
1982 | |
760ac839 |
1983 | # ifdef PERL_SBRK_VIA_MALLOC |
cf5c4ad8 |
1984 | |
1985 | /* It may seem schizophrenic to use perl's malloc and let it call the system */ |
1986 | /* malloc. The only reason is that version 3.2 of the OS had frequent core */ |
1987 | /* dumps within nxzonefreenolock; this sbrk routine put an */ |
1988 | /* end to the cores. */ |
1989 | |
38ac2dc8 |
1990 | # ifndef SYSTEM_ALLOC |
1991 | # define SYSTEM_ALLOC(a) malloc(a) |
1992 | # endif |
5bbd1ef5 |
1993 | # ifndef SYSTEM_ALLOC_ALIGNMENT |
1994 | # define SYSTEM_ALLOC_ALIGNMENT MEM_ALIGNBYTES |
1995 | # endif |
cf5c4ad8 |
1996 | |
760ac839 |
1997 | # endif /* PERL_SBRK_VIA_MALLOC */ |
cf5c4ad8 |
1998 | |
1999 | static IV Perl_sbrk_oldchunk; |
2000 | static long Perl_sbrk_oldsize; |
2001 | |
760ac839 |
2002 | # define PERLSBRK_32_K (1<<15) |
2003 | # define PERLSBRK_64_K (1<<16) |
cf5c4ad8 |
2004 | |
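/* Added annotation: this sbrk() emulation asks SYSTEM_ALLOC for at least
 * PERLSBRK_64_K bytes whenever the request is smaller than PERLSBRK_32_K and
 * keeps the unused tail in Perl_sbrk_oldchunk/Perl_sbrk_oldsize, so a burst
 * of small "sbrk"s costs a single underlying allocation (under PACK_MALLOC
 * the request is first rounded up to a 2 KiB multiple).  For example, a
 * 4096-byte request followed by another 4096-byte request performs one
 * roughly 64 KiB SYSTEM_ALLOC and serves the second request from the
 * cached tail. */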
b63effbb |
2005 | Malloc_t |
df0003d4 |
2006 | Perl_sbrk(int size) |
cf5c4ad8 |
2007 | { |
2008 | IV got; |
2009 | int small, reqsize; |
2010 | |
2011 | if (!size) return 0; |
55497cff |
2012 | reqsize = size; /* used by the DEBUG_m report and by the small-chunk bookkeeping below */ |
57569e04 |
2015 | #ifdef PACK_MALLOC |
2016 | size = (size + 0x7ff) & ~0x7ff; |
2017 | #endif |
cf5c4ad8 |
2018 | if (size <= Perl_sbrk_oldsize) { |
2019 | got = Perl_sbrk_oldchunk; |
2020 | Perl_sbrk_oldchunk += size; |
2021 | Perl_sbrk_oldsize -= size; |
2022 | } else { |
2023 | if (size >= PERLSBRK_32_K) { |
2024 | small = 0; |
2025 | } else { |
cf5c4ad8 |
2026 | size = PERLSBRK_64_K; |
2027 | small = 1; |
2028 | } |
5bbd1ef5 |
2029 | # if NEEDED_ALIGNMENT > SYSTEM_ALLOC_ALIGNMENT |
2030 | size += NEEDED_ALIGNMENT - SYSTEM_ALLOC_ALIGNMENT; |
2031 | # endif |
cf5c4ad8 |
2032 | got = (IV)SYSTEM_ALLOC(size); |
5bbd1ef5 |
2033 | # if NEEDED_ALIGNMENT > SYSTEM_ALLOC_ALIGNMENT |
5a7d6335 |
2034 | got = (got + NEEDED_ALIGNMENT - 1) & ~(NEEDED_ALIGNMENT - 1); |
5bbd1ef5 |
2035 | # endif |
cf5c4ad8 |
2036 | if (small) { |
2037 | /* Chunk is small, register the rest for future allocs. */ |
2038 | Perl_sbrk_oldchunk = got + reqsize; |
2039 | Perl_sbrk_oldsize = size - reqsize; |
2040 | } |
2041 | } |
2042 | |
b900a521 |
2043 | DEBUG_m(PerlIO_printf(Perl_debug_log, "sbrk malloc size %ld (reqsize %ld), left size %ld, give addr 0x%"UVxf"\n", |
2044 | (long)size, (long)reqsize, Perl_sbrk_oldsize, PTR2UV(got))); |
cf5c4ad8 |
2045 | |
2046 | return (void *)got; |
2047 | } |
2048 | |
2049 | #endif /* ! defined USE_PERL_SBRK */ |