Commit | Line | Data |
a0d0e21e |
1 | /* malloc.c |
8d063cd8 |
2 | * |
8d063cd8 |
3 | */ |
4 | |
87c6202a |
5 | /* |
d31a8517 |
6 | * "'The Chamber of Records,' said Gimli. 'I guess that is where we now stand.'" |
7 | */ |
8 | |
9 | /* |
741df71a |
10 | Here are some notes on configuring Perl's malloc. (For non-perl |
11 | usage see below.) |
87c6202a |
12 | |
13 | There are two macros which serve as bulk disablers of advanced |
14 | features of this malloc: NO_FANCY_MALLOC, PLAIN_MALLOC (undef by |
15 | default). Look in the list of default values below to understand |
16 | their exact effect. Defining NO_FANCY_MALLOC returns malloc.c to the |
17 | state of the malloc in Perl 5.004. Additionally defining PLAIN_MALLOC |
18 | returns it to the state as of Perl 5.000. |
19 | |
20 | Note that some of the settings below may be ignored in the code based |
21 | on values of other macros. The PERL_CORE symbol is only defined when |
22 | perl itself is being compiled (so malloc can make some assumptions |
23 | about perl's facilities being available to it). |
24 | |
25 | Each config option has a short description, followed by its name, |
26 | default value, and a comment about the default (if applicable). Some |
27 | options take a precise value, while the others are just boolean. |
28 | The boolean ones are listed first. |
29 | |
30 | # Enable code for an emergency memory pool in $^M. See perlvar.pod |
31 | # for a description of $^M. |
32 | PERL_EMERGENCY_SBRK (!PLAIN_MALLOC && PERL_CORE) |
33 | |
34 | # Enable code for printing memory statistics. |
35 | DEBUGGING_MSTATS (!PLAIN_MALLOC && PERL_CORE) |
36 | |
37 | # Move allocation info for small buckets into separate areas. |
38 | # Memory optimization (especially for small allocations of less |
39 | # than 64 bytes). Since perl usually makes a large number |
40 | # of small allocations, this is usually a win. |
41 | PACK_MALLOC (!PLAIN_MALLOC && !RCHECK) |
42 | |
43 | # Add one page to big powers of two when calculating bucket size. |
44 | # This is targeted at big allocations, as are common in image |
45 | # processing. |
46 | TWO_POT_OPTIMIZE !PLAIN_MALLOC |
47 | |
48 | # Use intermediate bucket sizes between powers-of-two. This is |
49 | # generally a memory optimization, and a (small) speed pessimization. |
50 | BUCKETS_ROOT2 !NO_FANCY_MALLOC |
51 | |
52 | # Do not check small deallocations for bad free(). Memory |
53 | # and speed optimization, error reporting pessimization. |
54 | IGNORE_SMALL_BAD_FREE (!NO_FANCY_MALLOC && !RCHECK) |
55 | |
56 | # Use table lookup to decide in which bucket a given allocation will go. |
57 | SMALL_BUCKET_VIA_TABLE !NO_FANCY_MALLOC |
58 | |
38ac2dc8 |
59 | # Use a perl-defined sbrk() instead of the (presumably broken or |
60 | # missing) system-supplied sbrk(). |
61 | USE_PERL_SBRK undef |
62 | |
63 | # Use system malloc() (or calloc() etc.) to emulate sbrk(). Normally |
64 | # only used with broken sbrk()s. |
87c6202a |
65 | PERL_SBRK_VIA_MALLOC undef |
66 | |
38ac2dc8 |
67 | # Which allocator to use if PERL_SBRK_VIA_MALLOC |
68 | SYSTEM_ALLOC(a) malloc(a) |
69 | |
9ee81ef6 |
70 | # Minimal alignment (in bytes, should be a power of 2) of SYSTEM_ALLOC |
5bbd1ef5 |
71 | SYSTEM_ALLOC_ALIGNMENT MEM_ALIGNBYTES |
72 | |
87c6202a |
73 | # Disable memory overwrite checking with DEBUGGING. Memory and speed |
74 | # optimization, error reporting pessimization. |
75 | NO_RCHECK undef |
76 | |
77 | # Enable memory overwrite checking with DEBUGGING. Memory and speed |
78 | # pessimization, error reporting optimization |
79 | RCHECK (DEBUGGING && !NO_RCHECK) |
80 | |
81 | # Failed allocations bigger than this size croak (if |
82 | # PERL_EMERGENCY_SBRK is enabled) without touching $^M. See |
83 | # perlvar.pod for a description of $^M. |
84 | BIG_SIZE (1<<16) # 64K |
85 | |
86 | # Starting from this power of two, add an extra page to the |
87 | # size of the bucket. This enables optimized allocations of sizes |
88 | # close to powers of 2. Note that the value is indexed at 0. |
89 | FIRST_BIG_POW2 15 # 32K, 16K is used too often |
90 | |
91 | # Estimate of minimal memory footprint. malloc uses this value to |
92 | # make the first request for memory from the system at least this large. |
93 | FIRST_SBRK (48*1024) |
94 | |
95 | # Round up sbrk()s to multiples of this. |
96 | MIN_SBRK 2048 |
97 | |
98 | # Round up sbrk()s to multiples of this percent of footprint. |
99 | MIN_SBRK_FRAC 3 |
100 | |
101 | # Add this much memory to big powers of two to get the bucket size. |
102 | PERL_PAGESIZE 4096 |
103 | |
104 | # This many sbrk() discontinuities should be tolerated even |
105 | # from the start without deciding that sbrk() is usually |
106 | # discontinuous. |
107 | SBRK_ALLOW_FAILURES 3 |
108 | |
109 | # This many continuous sbrk()s compensate for one discontinuous one. |
110 | SBRK_FAILURE_PRICE 50 |
111 | |
28ac10b1 |
112 | # Some configurations may ask for 12-byte-or-so allocations which |
113 | # require 8-byte alignment (?!). In such a situation one needs to |
114 | # define this to disable the 12-byte bucket (this increases the memory footprint). |
115 | STRICT_ALIGNMENT undef |
116 | |
87c6202a |
117 | This implementation assumes that calling PerlIO_printf() does not |
118 | result in any memory allocation calls (used during a panic). |
119 | |
120 | */ |
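/*
 * Worked example of the defaults (a reading of the #ifdef blocks below,
 * not an exhaustive matrix): in a plain PERL_CORE build with neither
 * PLAIN_MALLOC nor NO_FANCY_MALLOC nor DEBUGGING defined, the bulk
 * switches end up enabling PACK_MALLOC, TWO_POT_OPTIMIZE,
 * PERL_EMERGENCY_SBRK, DEBUGGING_MSTATS, BUCKETS_ROOT2,
 * IGNORE_SMALL_BAD_FREE and SMALL_BUCKET_VIA_TABLE.  A DEBUGGING build
 * additionally turns on RCHECK, which in turn disables PACK_MALLOC
 * (and with it BUCKETS_ROOT2) and IGNORE_SMALL_BAD_FREE.
 */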
121 | |
741df71a |
122 | /* |
123 | If used outside of Perl environment, it may be useful to redefine |
124 | the following macros (listed below with defaults): |
125 | |
126 | # Type of address returned by allocation functions |
127 | Malloc_t void * |
128 | |
129 | # Type of size argument for allocation functions |
130 | MEM_SIZE unsigned long |
131 | |
c7374474 |
132 | # size of void* |
133 | PTRSIZE 4 |
134 | |
741df71a |
135 | # Maximal value in LONG |
136 | LONG_MAX 0x7FFFFFFF |
137 | |
138 | # Unsigned integer type big enough to keep a pointer |
139 | UV unsigned long |
140 | |
141 | # Type of pointer with 1-byte granularity |
142 | caddr_t char * |
143 | |
144 | # Type returned by free() |
145 | Free_t void |
146 | |
5bbd1ef5 |
147 | # Very fatal condition reporting function (cannot call any ) |
148 | fatalcroak(arg) write(2,arg,strlen(arg)) + exit(2) |
149 | |
741df71a |
150 | # Fatal error reporting function |
151 | croak(format, arg) warn(idem) + exit(1) |
152 | |
b022d2d2 |
153 | # Fatal error reporting function |
154 | croak2(format, arg1, arg2) warn2(idem) + exit(1) |
155 | |
741df71a |
156 | # Error reporting function |
157 | warn(format, arg) fprintf(stderr, idem) |
158 | |
b022d2d2 |
159 | # Error reporting function |
160 | warn2(format, arg1, arg2) fprintf(stderr, idem) |
161 | |
741df71a |
162 | # Locking/unlocking for MT operation |
efc57feb |
163 | MALLOC_LOCK MUTEX_LOCK(&PL_malloc_mutex) |
164 | MALLOC_UNLOCK MUTEX_UNLOCK(&PL_malloc_mutex) |
741df71a |
165 | |
166 | # Locking/unlocking mutex for MT operation |
167 | MUTEX_LOCK(l) void |
168 | MUTEX_UNLOCK(l) void |
169 | */ |
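/*
 * For illustration, a standalone (non-perl) compilation could predefine
 * a few of the above before compiling this file; the exact set depends
 * on the host environment and this is only a sketch, not a supported
 * configuration:
 *
 *   #define MEM_SIZE  size_t
 *   #define Malloc_t  void *
 *   #define warn(fmt, arg)   fprintf(stderr, (fmt), (arg))
 *   #define croak(fmt, arg)  (warn((fmt), (arg)), exit(1))
 *   #define MUTEX_LOCK(l)
 *   #define MUTEX_UNLOCK(l)
 */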
170 | |
e8bc2b5c |
171 | #ifndef NO_FANCY_MALLOC |
172 | # ifndef SMALL_BUCKET_VIA_TABLE |
173 | # define SMALL_BUCKET_VIA_TABLE |
174 | # endif |
175 | # ifndef BUCKETS_ROOT2 |
176 | # define BUCKETS_ROOT2 |
177 | # endif |
178 | # ifndef IGNORE_SMALL_BAD_FREE |
179 | # define IGNORE_SMALL_BAD_FREE |
180 | # endif |
3562ef9b |
181 | #endif |
182 | |
e8bc2b5c |
183 | #ifndef PLAIN_MALLOC /* Bulk enable features */ |
184 | # ifndef PACK_MALLOC |
185 | # define PACK_MALLOC |
186 | # endif |
187 | # ifndef TWO_POT_OPTIMIZE |
188 | # define TWO_POT_OPTIMIZE |
189 | # endif |
d720c441 |
190 | # if defined(PERL_CORE) && !defined(PERL_EMERGENCY_SBRK) |
191 | # define PERL_EMERGENCY_SBRK |
e8bc2b5c |
192 | # endif |
193 | # if defined(PERL_CORE) && !defined(DEBUGGING_MSTATS) |
194 | # define DEBUGGING_MSTATS |
195 | # endif |
196 | #endif |
197 | |
198 | #define MIN_BUC_POW2 (sizeof(void*) > 4 ? 3 : 2) /* Allow for 4-byte arena. */ |
199 | #define MIN_BUCKET (MIN_BUC_POW2 * BUCKETS_PER_POW2) |
200 | |
61ae2fbf |
201 | #if !(defined(I286) || defined(atarist) || defined(__MINT__)) |
e8bc2b5c |
202 | /* take 2k unless the block is bigger than that */ |
203 | # define LOG_OF_MIN_ARENA 11 |
204 | #else |
205 | /* take 16k unless the block is bigger than that |
206 | (80286s like large segments!), probably good on the atari too */ |
207 | # define LOG_OF_MIN_ARENA 14 |
208 | #endif |
209 | |
8d063cd8 |
210 | #ifndef lint |
1944739a |
211 | # if defined(DEBUGGING) && !defined(NO_RCHECK) |
212 | # define RCHECK |
213 | # endif |
e8bc2b5c |
214 | # if defined(RCHECK) && defined(IGNORE_SMALL_BAD_FREE) |
215 | # undef IGNORE_SMALL_BAD_FREE |
216 | # endif |
8d063cd8 |
217 | /* |
218 | * malloc.c (Caltech) 2/21/82 |
219 | * Chris Kingsley, kingsley@cit-20. |
220 | * |
221 | * This is a very fast storage allocator. It allocates blocks of a small |
222 | * number of different sizes, and keeps free lists of each size. Blocks that |
223 | * don't exactly fit are passed up to the next larger size. In this |
224 | * implementation, the available sizes are 2^n-4 (or 2^n-12) bytes long. |
cf5c4ad8 |
225 | * If PACK_MALLOC is defined, small blocks are 2^n bytes long. |
8d063cd8 |
226 | * This is designed for use in a program that uses vast quantities of memory, |
741df71a |
227 | * but bombs when it runs out. |
228 | * |
4eb8286e |
229 | * Modifications Copyright Ilya Zakharevich 1996-99. |
741df71a |
230 | * |
231 | * Still very quick, but much more thrifty. (Std config is 10% slower |
232 | * than it was, and takes 67% of old heap size for typical usage.) |
233 | * |
234 | * Allocations of small blocks are now table-driven to many different |
235 | * buckets. Sizes of really big buckets are increased to accommodate |
236 | * common power-of-two-sized blocks. Running-out-of-memory is made into |
237 | * an exception. Deeply configurable and thread-safe. |
238 | * |
8d063cd8 |
239 | */ |
240 | |
d720c441 |
241 | #ifdef PERL_CORE |
242 | # include "EXTERN.h" |
4ad56ec9 |
243 | # define PERL_IN_MALLOC_C |
d720c441 |
244 | # include "perl.h" |
cea2e8a9 |
245 | # if defined(PERL_IMPLICIT_CONTEXT) |
246 | # define croak Perl_croak_nocontext |
b022d2d2 |
247 | # define croak2 Perl_croak_nocontext |
cea2e8a9 |
248 | # define warn Perl_warn_nocontext |
b022d2d2 |
249 | # define warn2 Perl_warn_nocontext |
250 | # else |
251 | # define croak2 croak |
252 | # define warn2 warn |
cea2e8a9 |
253 | # endif |
d720c441 |
254 | #else |
255 | # ifdef PERL_FOR_X2P |
256 | # include "../EXTERN.h" |
257 | # include "../perl.h" |
258 | # else |
259 | # include <stdlib.h> |
260 | # include <stdio.h> |
261 | # include <memory.h> |
d720c441 |
262 | # ifndef Malloc_t |
263 | # define Malloc_t void * |
264 | # endif |
c7374474 |
265 | # ifndef PTRSIZE |
266 | # define PTRSIZE 4 |
267 | # endif |
d720c441 |
268 | # ifndef MEM_SIZE |
269 | # define MEM_SIZE unsigned long |
270 | # endif |
271 | # ifndef LONG_MAX |
272 | # define LONG_MAX 0x7FFFFFFF |
273 | # endif |
274 | # ifndef UV |
275 | # define UV unsigned long |
276 | # endif |
277 | # ifndef caddr_t |
278 | # define caddr_t char * |
279 | # endif |
280 | # ifndef Free_t |
281 | # define Free_t void |
282 | # endif |
283 | # define Copy(s,d,n,t) (void)memcpy((char*)(d),(char*)(s), (n) * sizeof(t)) |
284 | # define PerlEnv_getenv getenv |
285 | # define PerlIO_printf fprintf |
286 | # define PerlIO_stderr() stderr |
287 | # endif |
e8bc2b5c |
288 | # ifndef croak /* make depend */ |
741df71a |
289 | # define croak(mess, arg) (warn((mess), (arg)), exit(1)) |
d720c441 |
290 | # endif |
b022d2d2 |
291 | # ifndef croak2 /* make depend */ |
292 | # define croak2(mess, arg1, arg2) (warn2((mess), (arg1), (arg2)), exit(1)) |
293 | # endif |
d720c441 |
294 | # ifndef warn |
741df71a |
295 | # define warn(mess, arg) fprintf(stderr, (mess), (arg)) |
e8bc2b5c |
296 | # endif |
0b415716 |
297 | # ifndef warn2 |
b022d2d2 |
298 | # define warn2(mess, arg1, arg2) fprintf(stderr, (mess), (arg1), (arg2)) |
299 | # endif |
e8bc2b5c |
300 | # ifdef DEBUG_m |
301 | # undef DEBUG_m |
302 | # endif |
303 | # define DEBUG_m(a) |
304 | # ifdef DEBUGGING |
305 | # undef DEBUGGING |
306 | # endif |
cea2e8a9 |
307 | # ifndef pTHX |
308 | # define pTHX void |
309 | # define pTHX_ |
afb90382 |
310 | # ifdef HASATTRIBUTE |
44dbb695 |
311 | # define dTHX extern int Perl___notused PERL_UNUSED_DECL |
afb90382 |
312 | # else |
313 | # define dTHX extern int Perl___notused |
314 | # endif |
cea2e8a9 |
315 | # define WITH_THX(s) s |
316 | # endif |
c5be433b |
317 | # ifndef PERL_GET_INTERP |
318 | # define PERL_GET_INTERP PL_curinterp |
319 | # endif |
4ad56ec9 |
320 | # ifndef Perl_malloc |
321 | # define Perl_malloc malloc |
322 | # endif |
323 | # ifndef Perl_mfree |
324 | # define Perl_mfree free |
325 | # endif |
326 | # ifndef Perl_realloc |
327 | # define Perl_realloc realloc |
328 | # endif |
329 | # ifndef Perl_calloc |
330 | # define Perl_calloc calloc |
331 | # endif |
332 | # ifndef Perl_strdup |
333 | # define Perl_strdup strdup |
334 | # endif |
e8bc2b5c |
335 | #endif |
336 | |
337 | #ifndef MUTEX_LOCK |
338 | # define MUTEX_LOCK(l) |
339 | #endif |
340 | |
341 | #ifndef MUTEX_UNLOCK |
342 | # define MUTEX_UNLOCK(l) |
343 | #endif |
344 | |
741df71a |
345 | #ifndef MALLOC_LOCK |
efc57feb |
346 | # define MALLOC_LOCK MUTEX_LOCK(&PL_malloc_mutex) |
741df71a |
347 | #endif |
348 | |
349 | #ifndef MALLOC_UNLOCK |
efc57feb |
350 | # define MALLOC_UNLOCK MUTEX_UNLOCK(&PL_malloc_mutex) |
741df71a |
351 | #endif |
352 | |
5bbd1ef5 |
353 | # ifndef fatalcroak /* make depend */ |
354 | # define fatalcroak(mess) (write(2, (mess), strlen(mess)), exit(2)) |
355 | # endif |
356 | |
760ac839 |
357 | #ifdef DEBUGGING |
e8bc2b5c |
358 | # undef DEBUG_m |
51a5ed28 |
359 | # define DEBUG_m(a) \ |
0b250b9e |
360 | STMT_START { \ |
51a5ed28 |
361 | if (PERL_GET_INTERP) { \ |
362 | dTHX; \ |
363 | if (DEBUG_m_TEST) { \ |
364 | PL_debug &= ~DEBUG_m_FLAG; \ |
365 | a; \ |
366 | PL_debug |= DEBUG_m_FLAG; \ |
367 | } \ |
368 | } \ |
0b250b9e |
369 | } STMT_END |
760ac839 |
370 | #endif |
371 | |
e476b1b5 |
372 | #ifdef PERL_IMPLICIT_CONTEXT |
373 | # define PERL_IS_ALIVE aTHX |
374 | #else |
375 | # define PERL_IS_ALIVE TRUE |
376 | #endif |
377 | |
378 | |
e9397286 |
379 | /* |
380 | * Layout of memory: |
381 | * ~~~~~~~~~~~~~~~~ |
382 | * The memory is broken into "blocks" which occupy multiples of 2K (and |
383 | * generally speaking, have size "close" to a power of 2). The addresses |
384 | * of such *unused* blocks are kept in nextf[i] with big enough i. (nextf |
385 | * is an array of linked lists.) (Addresses of used blocks are not known.) |
386 | * |
4ad56ec9 |
387 | * Moreover, since the algorithm may try to "bite" smaller blocks out |
e9397286 |
388 | * of unused bigger ones, there are also regions of "irregular" size, |
389 | * managed separately, by a linked list chunk_chain. |
390 | * |
391 | * The third type of storage is the sbrk()ed-but-not-yet-used space, its |
392 | * end and size are kept in last_sbrk_top and sbrked_remains. |
393 | * |
394 | * Growing blocks "in place": |
395 | * ~~~~~~~~~~~~~~~~~~~~~~~~~ |
396 | * The address of the block with the greatest address is kept in last_op |
397 | * (if not known, last_op is 0). If it is known that the memory above |
398 | * last_op is not continuous, or contains a chunk from chunk_chain, |
399 | * last_op is set to 0. |
400 | * |
401 | * The chunk with address last_op may be grown by expanding into |
402 | * sbrk()ed-but-not-yet-used space, or trying to sbrk() more continuous |
403 | * memory. |
404 | * |
405 | * Management of last_op: |
406 | * ~~~~~~~~~~~~~~~~~~~~~ |
407 | * |
408 | * free() never changes the boundaries of blocks, so is not relevant. |
409 | * |
410 | * The only way realloc() may change the boundaries of blocks is if it |
411 | * grows a block "in place". However, in the case of success such a |
412 | * chunk is automatically last_op, and it remains last_op. In the case |
413 | * of failure getpages_adjacent() clears last_op. |
414 | * |
415 | * malloc() may change blocks by calling morecore() only. |
416 | * |
417 | * morecore() may create new blocks by: |
418 | * a) biting pieces from chunk_chain (cannot create one above last_op); |
419 | * b) biting a piece from an unused block (if block was last_op, this |
420 | * may create a chunk from chain above last_op, thus last_op is |
421 | * invalidated in such a case). |
422 | * c) biting of sbrk()ed-but-not-yet-used space. This creates |
423 | * a block which is last_op. |
424 | * d) Allocating new pages by calling getpages(); |
425 | * |
426 | * getpages() creates a new block. It marks last_op at the bottom of |
427 | * the chunk of memory it returns. |
428 | * |
429 | * Active pages footprint: |
430 | * ~~~~~~~~~~~~~~~~~~~~~~ |
431 | * Note that we do not need to traverse the lists in nextf[i], just take |
432 | * the first element of this list. However, we *need* to traverse the |
433 | * list in chunk_chain, but most of the time it should be a very short one, |
434 | * so we do not step on a lot of pages we are not going to use. |
435 | * |
436 | * Flaws: |
437 | * ~~~~~ |
438 | * get_from_bigger_buckets(): forgets to increment price => quite |
439 | * aggressive. |
440 | */ |
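/*
 * Illustrative trace (a summary of the code below, not additional
 * behaviour): a request that finds nextf[bucket] empty calls
 * morecore(), which tries, in order, to (a) steal a whole block from
 * the matching power-of-two 2K bucket, (b) cut a piece from
 * chunk_chain, (c) steal from a bigger bucket via
 * get_from_bigger_buckets(), (d) use the tail of the last sbrk()
 * (sbrked_remains), and finally (e) ask the system via getpages().
 */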
441 | |
135863df |
442 | /* I don't much care whether these are defined in sys/types.h--LAW */ |
443 | |
444 | #define u_char unsigned char |
445 | #define u_int unsigned int |
56431972 |
446 | /* |
447 | * I removed the definition of u_bigint, which appeared to be u_bigint = UV. |
448 | * u_bigint was only used in TWOK_MASKED and TWOK_SHIFT, |
449 | * where I have used PTR2UV. RMB |
450 | */ |
135863df |
451 | #define u_short unsigned short |
8d063cd8 |
452 | |
cf5c4ad8 |
453 | /* 286 and atarist like big chunks, which gives too much overhead. */ |
61ae2fbf |
454 | #if (defined(RCHECK) || defined(I286) || defined(atarist) || defined(__MINT__)) && defined(PACK_MALLOC) |
e8bc2b5c |
455 | # undef PACK_MALLOC |
cf5c4ad8 |
456 | #endif |
457 | |
8d063cd8 |
458 | /* |
cf5c4ad8 |
459 | * The description below is applicable if PACK_MALLOC is not defined. |
460 | * |
8d063cd8 |
461 | * The overhead on a block is at least 4 bytes. When free, this space |
462 | * contains a pointer to the next free block, and the bottom two bits must |
463 | * be zero. When in use, the first byte is set to MAGIC, and the second |
464 | * byte is the size index. The remaining bytes are for alignment. |
465 | * If range checking is enabled and the size of the block fits |
466 | * in two bytes, then the top two bytes hold the size of the requested block |
467 | * plus the range checking words, and the header word MINUS ONE. |
468 | */ |
469 | union overhead { |
470 | union overhead *ov_next; /* when free */ |
85e6fe83 |
471 | #if MEM_ALIGNBYTES > 4 |
c623bd54 |
472 | double strut; /* alignment problems */ |
a687059c |
473 | #endif |
8d063cd8 |
474 | struct { |
00ff3b56 |
475 | /* |
476 | * Keep the ovu_index and ovu_magic in this order, having a char |
477 | * field first gives alignment indigestion in some systems, such as |
478 | * MachTen. |
479 | */ |
8d063cd8 |
480 | u_char ovu_index; /* bucket # */ |
b72ff565 |
481 | u_char ovu_magic; /* magic number */ |
8d063cd8 |
482 | #ifdef RCHECK |
483 | u_short ovu_size; /* actual block size */ |
484 | u_int ovu_rmagic; /* range magic number */ |
485 | #endif |
486 | } ovu; |
487 | #define ov_magic ovu.ovu_magic |
488 | #define ov_index ovu.ovu_index |
489 | #define ov_size ovu.ovu_size |
490 | #define ov_rmagic ovu.ovu_rmagic |
491 | }; |
492 | |
493 | #define MAGIC 0xff /* magic # on accounting info */ |
494 | #define RMAGIC 0x55555555 /* magic # on range info */ |
e8bc2b5c |
495 | #define RMAGIC_C 0x55 /* magic # on range info */ |
496 | |
8d063cd8 |
497 | #ifdef RCHECK |
c2a5c2d2 |
498 | # define RSLOP sizeof (u_int) |
499 | # ifdef TWO_POT_OPTIMIZE |
e8bc2b5c |
500 | # define MAX_SHORT_BUCKET (12 * BUCKETS_PER_POW2) |
c2a5c2d2 |
501 | # else |
e8bc2b5c |
502 | # define MAX_SHORT_BUCKET (13 * BUCKETS_PER_POW2) |
c2a5c2d2 |
503 | # endif |
8d063cd8 |
504 | #else |
c2a5c2d2 |
505 | # define RSLOP 0 |
8d063cd8 |
506 | #endif |
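/*
 * Rough sizing example (assuming a 32-bit pointer and an 8-byte double,
 * so the alignment strut is present): sizeof(union overhead) is 8 both
 * with and without RCHECK, so the per-chunk M_OVERHEAD defined below is
 * 8 bytes normally and 12 bytes under RCHECK, where RSLOP adds a
 * trailing u_int of range-check magic at the end of the block.
 */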
507 | |
e8bc2b5c |
508 | #if !defined(PACK_MALLOC) && defined(BUCKETS_ROOT2) |
509 | # undef BUCKETS_ROOT2 |
510 | #endif |
511 | |
512 | #ifdef BUCKETS_ROOT2 |
513 | # define BUCKET_TABLE_SHIFT 2 |
514 | # define BUCKET_POW2_SHIFT 1 |
515 | # define BUCKETS_PER_POW2 2 |
516 | #else |
517 | # define BUCKET_TABLE_SHIFT MIN_BUC_POW2 |
518 | # define BUCKET_POW2_SHIFT 0 |
519 | # define BUCKETS_PER_POW2 1 |
520 | #endif |
521 | |
274c7500 |
522 | #if !defined(MEM_ALIGNBYTES) || ((MEM_ALIGNBYTES > 4) && !defined(STRICT_ALIGNMENT)) |
523 | /* Figure out the alignment of void*. */ |
524 | struct aligner { |
525 | char c; |
526 | void *p; |
527 | }; |
528 | # define ALIGN_SMALL ((int)((caddr_t)&(((struct aligner*)0)->p))) |
529 | #else |
530 | # define ALIGN_SMALL MEM_ALIGNBYTES |
531 | #endif |
532 | |
533 | #define IF_ALIGN_8(yes,no) ((ALIGN_SMALL>4) ? (yes) : (no)) |
534 | |
e8bc2b5c |
535 | #ifdef BUCKETS_ROOT2 |
536 | # define MAX_BUCKET_BY_TABLE 13 |
537 | static u_short buck_size[MAX_BUCKET_BY_TABLE + 1] = |
538 | { |
539 | 0, 0, 0, 0, 4, 4, 8, 12, 16, 24, 32, 48, 64, 80, |
540 | }; |
541 | # define BUCKET_SIZE(i) ((i) % 2 ? buck_size[i] : (1 << ((i) >> BUCKET_POW2_SHIFT))) |
542 | # define BUCKET_SIZE_REAL(i) ((i) <= MAX_BUCKET_BY_TABLE \ |
543 | ? buck_size[i] \ |
544 | : ((1 << ((i) >> BUCKET_POW2_SHIFT)) \ |
545 | - MEM_OVERHEAD(i) \ |
546 | + POW2_OPTIMIZE_SURPLUS(i))) |
547 | #else |
548 | # define BUCKET_SIZE(i) (1 << ((i) >> BUCKET_POW2_SHIFT)) |
549 | # define BUCKET_SIZE_REAL(i) (BUCKET_SIZE(i) - MEM_OVERHEAD(i) + POW2_OPTIMIZE_SURPLUS(i)) |
550 | #endif |
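/*
 * Worked example (BUCKETS_ROOT2 layout, BUCKETS_PER_POW2 == 2): even
 * ordinals are straight powers of two and odd ordinals take the
 * intermediate sizes from buck_size[], so BUCKET_SIZE(9) == 24,
 * BUCKET_SIZE(10) == 1<<5 == 32, BUCKET_SIZE(11) == 48 and
 * BUCKET_SIZE(12) == 64.  BUCKET_SIZE_REAL() gives the usable size:
 * the table value itself for table buckets, and the power of two minus
 * the per-chunk overhead (plus the TWO_POT_OPTIMIZE surplus) for the
 * bigger ones.
 */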
551 | |
552 | |
cf5c4ad8 |
553 | #ifdef PACK_MALLOC |
4ad56ec9 |
554 | /* In this case there are several possible layouts of arenas depending |
555 | * on the size. Arenas have sizes that are multiples of 2K, are 2K-aligned, |
556 | * and have sizes close to a power of 2. |
557 | * |
558 | * Arenas of size >= 4K keep one chunk only. Arenas of size 2K |
559 | * may keep one chunk or multiple chunks. Here are the possible |
560 | * layouts of arenas: |
561 | * |
562 | * # One chunk only, chunksize 2^k + SOMETHING - ALIGN, k >= 11 |
563 | * |
564 | * INDEX MAGIC1 UNUSED CHUNK1 |
565 | * |
566 | * # Multichunk with sanity checking and chunksize 2^k-ALIGN, k>7 |
567 | * |
568 | * INDEX MAGIC1 MAGIC2 MAGIC3 UNUSED CHUNK1 CHUNK2 CHUNK3 ... |
569 | * |
570 | * # Multichunk with sanity checking and size 2^k-ALIGN, k=7 |
571 | * |
572 | * INDEX MAGIC1 MAGIC2 MAGIC3 UNUSED CHUNK1 UNUSED CHUNK2 CHUNK3 ... |
573 | * |
574 | * # Multichunk with sanity checking and size up to 80 |
575 | * |
576 | * INDEX UNUSED MAGIC1 UNUSED MAGIC2 UNUSED ... CHUNK1 CHUNK2 CHUNK3 ... |
577 | * |
578 | * # No sanity check (usually up to 48-byte-long buckets) |
579 | * INDEX UNUSED CHUNK1 CHUNK2 ... |
580 | * |
581 | * Above INDEX and MAGIC are one-byte-long. Sizes of UNUSED are |
582 | * appropriate to keep algorithms simple and memory aligned. INDEX |
583 | * encodes the size of the chunk, while MAGICn encodes state (used, |
584 | * free or non-managed-by-us-so-it-indicates-a-bug) of CHUNKn. MAGIC |
585 | * is used for sanity checking purposes only. SOMETHING is 0 or 4K |
586 | * (to make the size of big CHUNK accommodate allocations for powers of two |
587 | * better). |
588 | * |
589 | * [There is no need for alignment between chunks, since C rules ensure |
590 | * that structs which need 2^k alignment have a sizeof which is |
591 | * divisible by 2^k. Thus as long as the last chunk is aligned at the |
592 | * end of the arena, and 2K-alignment does not contradict things, |
593 | * everything is going to be OK for chunk sizes of 2^n and 2^n + |
594 | * 2^k. Say, 80-byte buckets will be 16-byte aligned, and as long as we |
595 | * put allocations for requests in the 65..80 range there, all is fine. |
596 | * |
597 | * Note, however, that standard malloc() imposes stricter |
598 | * requirements than the above C rules. Moreover, our realloc() |
599 | * algorithms may break this idyll, but we assume that realloc() |
600 | * need not change alignment.] |
601 | * |
602 | * It is very important to make the calculation of the offset of MAGICm |
603 | * as quick as possible, since it is done on each malloc()/free(). In |
604 | * fact it is so quick that it has very little effect on the speed of |
605 | * doing malloc()/free(). [By default] we forgo such calculations |
606 | * for small chunks, but only to save an extra 3% of memory, not because |
607 | * of speed considerations. |
608 | * |
609 | * Here is the algorithm [which is the same for all the allocations |
610 | * schemes above], see OV_MAGIC(block,bucket). Let OFFSETm be the |
611 | * offset of the CHUNKm from the start of ARENA. Then offset of |
612 | * MAGICm is (OFFSET1 >> SHIFT) + ADDOFFSET. Here SHIFT and ADDOFFSET |
613 | * are numbers which depend on the size of the chunks only. |
614 | * |
615 | * Let us check some sanity conditions. The numbers OFFSETm>>SHIFT are |
616 | * different for all the chunks in the arena if 2^SHIFT is not greater |
617 | * than size of the chunks in the arena. MAGIC1 will not overwrite |
618 | * INDEX provided ADDOFFSET is >0 if OFFSET1 < 2^SHIFT. MAGIClast |
619 | * will not overwrite CHUNK1 if OFFSET1 > (OFFSETlast >> SHIFT) + |
620 | * ADDOFFSET. |
621 | * |
622 | * Make SHIFT the maximal possible (there is no point in making it |
623 | * smaller). Since OFFSETlast is 2K - CHUNKSIZE, the above restrictions |
624 | * constrain OFFSET1 and ADDOFFSET. |
625 | * |
626 | * In particular, for chunks of size 2^k with k>=6 we can put |
627 | * ADDOFFSET to be from 0 to 2^k - 2^(11-k), and have |
628 | * OFFSET1==chunksize. For chunks of size 80 OFFSET1 of 2K%80=48 is |
629 | * large enough to have ADDOFFSET between 1 and 16 (similarly for 96, |
630 | * when ADDOFFSET should be 1). In particular, keeping MAGICs for |
631 | * these sizes gives no additional size penalty. |
632 | * |
633 | * However, for chunks of size 2^k with k<=5 this gives OFFSET1 >= |
634 | * ADDOFFSET + 2^(11-k). Keeping ADDOFFSET 0 allows for 2^(11-k)-2^(11-2k) |
635 | * chunks per arena. This is smaller than the 2^(11-k) - 1 which would be |
636 | * needed if no MAGIC is kept. [In fact, having a negative ADDOFFSET |
637 | * would allow for slightly more buckets per arena for k=2,3.] |
638 | * |
639 | * Similarly, for chunks of size 3/2*2^k with k<=5 MAGICs would span |
640 | * the area up to 2^(11-k)+ADDOFFSET. For k=4 this gives optimal |
641 | * ADDOFFSET as -7..0. For k=3 ADDOFFSET can go up to 4 (with tiny |
642 | * savings for negative ADDOFFSET). For k=5 ADDOFFSET can go -1..16 |
643 | * (with no savings for negative values). |
cf5c4ad8 |
644 | * |
4ad56ec9 |
645 | * In particular, keeping ADDOFFSET 0 for sizes of chunks up to 2^6 |
646 | * leads to tiny pessimizations in case of sizes 4, 8, 12, 24, and |
647 | * leads to no contradictions except for size=80 (or 96.) |
cf5c4ad8 |
648 | * |
4ad56ec9 |
649 | * However, it also makes sense to keep no magic for sizes 48 or less. |
650 | * This is what we do. In this case one needs ADDOFFSET>=1 also for |
651 | * chunksizes 12, 24, and 48, unless one gets one less chunk per |
652 | * arena. |
653 | * |
654 | * The algo of OV_MAGIC(block,bucket) keeps ADDOFFSET 0 until |
655 | * chunksize of 64, then makes it 1. |
cf5c4ad8 |
656 | * |
4ad56ec9 |
657 | * This allows for an additional optimization: the above scheme leads |
658 | * to giant overheads for sizes 128 or more (one whole chunk needs to |
659 | * be sacrificed to keep INDEX). Instead we use chunks not of size |
660 | * 2^k, but of size 2^k-ALIGN. If we pack these chunks at the end of |
661 | * the arena, then the beginnings are still in different 2^k-long |
662 | * sections of the arena if k>=7 for ALIGN==4, and k>=8 if ALIGN=8. |
663 | * Thus for k>7 the above algo of calculating the offset of the magic |
664 | * will still give different answers for different chunks. And to |
665 | * avoid the overrun of MAGIC1 into INDEX, one needs ADDOFFSET of >=1. |
666 | * In the case k=7 we just move the first chunk an extra ALIGN |
667 | * backward inside the ARENA (this is done once per arena lifetime, |
668 | * thus is not a big overhead). */ |
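/*
 * Worked example of OV_MAGIC() (non-BUCKETS_ROOT2, BUCKETS_PER_POW2 == 1,
 * so SHIFT == bucket): for 64-byte chunks (bucket 6) a 2K arena holds
 * n_blks[6] == 31 chunks starting at offset blk_shift[6] == 64, and the
 * magic byte of the chunk at offset OFF is at arena byte OFF>>6, i.e.
 * bytes 1..31 -- safely past INDEX at byte 0 and before the first chunk
 * at byte 64.  ADDOFFSET is 0 here because bucket 6 < MIN_NEEDS_SHIFT.
 */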
e8bc2b5c |
669 | # define MAX_PACKED_POW2 6 |
670 | # define MAX_PACKED (MAX_PACKED_POW2 * BUCKETS_PER_POW2 + BUCKET_POW2_SHIFT) |
671 | # define MAX_POW2_ALGO ((1<<(MAX_PACKED_POW2 + 1)) - M_OVERHEAD) |
672 | # define TWOK_MASK ((1<<LOG_OF_MIN_ARENA) - 1) |
56431972 |
673 | # define TWOK_MASKED(x) (PTR2UV(x) & ~TWOK_MASK) |
674 | # define TWOK_SHIFT(x) (PTR2UV(x) & TWOK_MASK) |
675 | # define OV_INDEXp(block) (INT2PTR(u_char*,TWOK_MASKED(block))) |
cf5c4ad8 |
676 | # define OV_INDEX(block) (*OV_INDEXp(block)) |
677 | # define OV_MAGIC(block,bucket) (*(OV_INDEXp(block) + \ |
e8bc2b5c |
678 | (TWOK_SHIFT(block)>> \ |
679 | (bucket>>BUCKET_POW2_SHIFT)) + \ |
680 | (bucket >= MIN_NEEDS_SHIFT ? 1 : 0))) |
681 | /* A bucket can have a shift smaller than its size; we need to |
682 | shift its magic number so it will not overwrite the index: */ |
683 | # ifdef BUCKETS_ROOT2 |
684 | # define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2 - 1) /* Shift 80 greater than chunk 64. */ |
685 | # else |
686 | # define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2) /* Shift 128 greater than chunk 32. */ |
687 | # endif |
cf5c4ad8 |
688 | # define CHUNK_SHIFT 0 |
689 | |
e8bc2b5c |
690 | /* Number of blocks in a 2K arena for buckets of the given ordinal. */ |
691 | #ifdef IGNORE_SMALL_BAD_FREE |
692 | #define FIRST_BUCKET_WITH_CHECK (6 * BUCKETS_PER_POW2) /* 64 */ |
693 | # define N_BLKS(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK \ |
694 | ? ((1<<LOG_OF_MIN_ARENA) - 1)/BUCKET_SIZE(bucket) \ |
695 | : n_blks[bucket] ) |
696 | #else |
697 | # define N_BLKS(bucket) n_blks[bucket] |
698 | #endif |
699 | |
700 | static u_short n_blks[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] = |
701 | { |
702 | # if BUCKETS_PER_POW2==1 |
703 | 0, 0, |
704 | (MIN_BUC_POW2==2 ? 384 : 0), |
705 | 224, 120, 62, 31, 16, 8, 4, 2 |
706 | # else |
707 | 0, 0, 0, 0, |
708 | (MIN_BUC_POW2==2 ? 384 : 0), (MIN_BUC_POW2==2 ? 384 : 0), /* 4, 4 */ |
709 | 224, 149, 120, 80, 62, 41, 31, 25, 16, 16, 8, 8, 4, 4, 2, 2 |
710 | # endif |
711 | }; |
712 | |
713 | /* Offset of the first block for buckets of the given ordinal inside a 2K arena. */ |
714 | #ifdef IGNORE_SMALL_BAD_FREE |
715 | # define BLK_SHIFT(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK \ |
716 | ? ((1<<LOG_OF_MIN_ARENA) \ |
717 | - BUCKET_SIZE(bucket) * N_BLKS(bucket)) \ |
718 | : blk_shift[bucket]) |
719 | #else |
720 | # define BLK_SHIFT(bucket) blk_shift[bucket] |
721 | #endif |
722 | |
723 | static u_short blk_shift[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] = |
724 | { |
725 | # if BUCKETS_PER_POW2==1 |
726 | 0, 0, |
727 | (MIN_BUC_POW2==2 ? 512 : 0), |
728 | 256, 128, 64, 64, /* 8 to 64 */ |
729 | 16*sizeof(union overhead), |
730 | 8*sizeof(union overhead), |
731 | 4*sizeof(union overhead), |
732 | 2*sizeof(union overhead), |
733 | # else |
734 | 0, 0, 0, 0, |
735 | (MIN_BUC_POW2==2 ? 512 : 0), (MIN_BUC_POW2==2 ? 512 : 0), |
736 | 256, 260, 128, 128, 64, 80, 64, 48, /* 8 to 96 */ |
737 | 16*sizeof(union overhead), 16*sizeof(union overhead), |
738 | 8*sizeof(union overhead), 8*sizeof(union overhead), |
739 | 4*sizeof(union overhead), 4*sizeof(union overhead), |
740 | 2*sizeof(union overhead), 2*sizeof(union overhead), |
741 | # endif |
742 | }; |
cf5c4ad8 |
743 | |
5bbd1ef5 |
744 | # define NEEDED_ALIGNMENT 0x800 /* 2k boundaries */ |
745 | # define WANTED_ALIGNMENT 0x800 /* 2k boundaries */ |
746 | |
cf5c4ad8 |
747 | #else /* !PACK_MALLOC */ |
748 | |
749 | # define OV_MAGIC(block,bucket) (block)->ov_magic |
750 | # define OV_INDEX(block) (block)->ov_index |
751 | # define CHUNK_SHIFT 1 |
e8bc2b5c |
752 | # define MAX_PACKED -1 |
5bbd1ef5 |
753 | # define NEEDED_ALIGNMENT MEM_ALIGNBYTES |
754 | # define WANTED_ALIGNMENT 0x400 /* 1k boundaries */ |
755 | |
cf5c4ad8 |
756 | #endif /* !PACK_MALLOC */ |
757 | |
e8bc2b5c |
758 | #define M_OVERHEAD (sizeof(union overhead) + RSLOP) |
759 | |
760 | #ifdef PACK_MALLOC |
761 | # define MEM_OVERHEAD(bucket) \ |
762 | (bucket <= MAX_PACKED ? 0 : M_OVERHEAD) |
763 | # ifdef SMALL_BUCKET_VIA_TABLE |
764 | # define START_SHIFTS_BUCKET ((MAX_PACKED_POW2 + 1) * BUCKETS_PER_POW2) |
765 | # define START_SHIFT MAX_PACKED_POW2 |
766 | # ifdef BUCKETS_ROOT2 /* Chunks of size 3*2^n. */ |
767 | # define SIZE_TABLE_MAX 80 |
768 | # else |
769 | # define SIZE_TABLE_MAX 64 |
770 | # endif |
771 | static char bucket_of[] = |
772 | { |
773 | # ifdef BUCKETS_ROOT2 /* Chunks of size 3*2^n. */ |
774 | /* 0 to 15 in 4-byte increments. */ |
775 | (sizeof(void*) > 4 ? 6 : 5), /* 4/8, 5-th bucket for better reports */ |
776 | 6, /* 8 */ |
274c7500 |
777 | IF_ALIGN_8(8,7), 8, /* 16/12, 16 */ |
e8bc2b5c |
778 | 9, 9, 10, 10, /* 24, 32 */ |
779 | 11, 11, 11, 11, /* 48 */ |
780 | 12, 12, 12, 12, /* 64 */ |
781 | 13, 13, 13, 13, /* 80 */ |
782 | 13, 13, 13, 13 /* 80 */ |
783 | # else /* !BUCKETS_ROOT2 */ |
784 | /* 0 to 15 in 4-byte increments. */ |
785 | (sizeof(void*) > 4 ? 3 : 2), |
786 | 3, |
787 | 4, 4, |
788 | 5, 5, 5, 5, |
789 | 6, 6, 6, 6, |
790 | 6, 6, 6, 6 |
791 | # endif /* !BUCKETS_ROOT2 */ |
792 | }; |
793 | # else /* !SMALL_BUCKET_VIA_TABLE */ |
794 | # define START_SHIFTS_BUCKET MIN_BUCKET |
795 | # define START_SHIFT (MIN_BUC_POW2 - 1) |
796 | # endif /* !SMALL_BUCKET_VIA_TABLE */ |
797 | #else /* !PACK_MALLOC */ |
798 | # define MEM_OVERHEAD(bucket) M_OVERHEAD |
799 | # ifdef SMALL_BUCKET_VIA_TABLE |
800 | # undef SMALL_BUCKET_VIA_TABLE |
801 | # endif |
802 | # define START_SHIFTS_BUCKET MIN_BUCKET |
803 | # define START_SHIFT (MIN_BUC_POW2 - 1) |
804 | #endif /* !PACK_MALLOC */ |
cf5c4ad8 |
805 | |
8d063cd8 |
806 | /* |
55497cff |
807 | * Big allocations are often of the size 2^n bytes. To make them a |
808 | * little bit better, make blocks of size 2^n+pagesize for big n. |
809 | */ |
810 | |
811 | #ifdef TWO_POT_OPTIMIZE |
812 | |
5f05dabc |
813 | # ifndef PERL_PAGESIZE |
814 | # define PERL_PAGESIZE 4096 |
815 | # endif |
e8bc2b5c |
816 | # ifndef FIRST_BIG_POW2 |
817 | # define FIRST_BIG_POW2 15 /* 32K, 16K is used too often. */ |
5f05dabc |
818 | # endif |
e8bc2b5c |
819 | # define FIRST_BIG_BLOCK (1<<FIRST_BIG_POW2) |
55497cff |
820 | /* If this value or more, check against bigger blocks. */ |
821 | # define FIRST_BIG_BOUND (FIRST_BIG_BLOCK - M_OVERHEAD) |
822 | /* If less than this value, goes into 2^n-overhead-block. */ |
823 | # define LAST_SMALL_BOUND ((FIRST_BIG_BLOCK>>1) - M_OVERHEAD) |
824 | |
e8bc2b5c |
825 | # define POW2_OPTIMIZE_ADJUST(nbytes) \ |
826 | ((nbytes >= FIRST_BIG_BOUND) ? nbytes -= PERL_PAGESIZE : 0) |
827 | # define POW2_OPTIMIZE_SURPLUS(bucket) \ |
828 | ((bucket >= FIRST_BIG_POW2 * BUCKETS_PER_POW2) ? PERL_PAGESIZE : 0) |
829 | |
830 | #else /* !TWO_POT_OPTIMIZE */ |
831 | # define POW2_OPTIMIZE_ADJUST(nbytes) |
832 | # define POW2_OPTIMIZE_SURPLUS(bucket) 0 |
833 | #endif /* !TWO_POT_OPTIMIZE */ |
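/*
 * Worked example: with the defaults (FIRST_BIG_POW2 == 15,
 * PERL_PAGESIZE == 4096) a request for 65536 bytes is first reduced by
 * one page by POW2_OPTIMIZE_ADJUST(), so after adding the overhead it
 * still lands in the 2^16 bucket; POW2_OPTIMIZE_SURPLUS() then grows
 * that bucket's arena to 2^16 + 4096 bytes, so the caller gets a full
 * 64K instead of spilling into a 128K block.
 */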
834 | |
835 | #if defined(HAS_64K_LIMIT) && defined(PERL_CORE) |
836 | # define BARK_64K_LIMIT(what,nbytes,size) \ |
837 | if (nbytes > 0xffff) { \ |
838 | PerlIO_printf(PerlIO_stderr(), \ |
839 | "%s too large: %lx\n", what, size); \ |
840 | my_exit(1); \ |
841 | } |
842 | #else /* !HAS_64K_LIMIT || !PERL_CORE */ |
843 | # define BARK_64K_LIMIT(what,nbytes,size) |
844 | #endif /* !HAS_64K_LIMIT || !PERL_CORE */ |
55497cff |
845 | |
e8bc2b5c |
846 | #ifndef MIN_SBRK |
847 | # define MIN_SBRK 2048 |
848 | #endif |
849 | |
850 | #ifndef FIRST_SBRK |
d720c441 |
851 | # define FIRST_SBRK (48*1024) |
e8bc2b5c |
852 | #endif |
853 | |
854 | /* Minimal sbrk in percents of what is already alloced. */ |
855 | #ifndef MIN_SBRK_FRAC |
856 | # define MIN_SBRK_FRAC 3 |
857 | #endif |
858 | |
859 | #ifndef SBRK_ALLOW_FAILURES |
860 | # define SBRK_ALLOW_FAILURES 3 |
861 | #endif |
55497cff |
862 | |
e8bc2b5c |
863 | #ifndef SBRK_FAILURE_PRICE |
864 | # define SBRK_FAILURE_PRICE 50 |
55497cff |
865 | #endif |
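/*
 * Reading of the two knobs above: sbrk_good starts at
 * SBRK_ALLOW_FAILURES * SBRK_FAILURE_PRICE (3 * 50 = 150); getpages()
 * adds 1 for every continuous sbrk() and subtracts SBRK_FAILURE_PRICE
 * for every discontinuous one, and stops rounding requests up once the
 * score is no longer positive.
 */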
866 | |
24dd13bf |
867 | static void morecore (register int bucket); |
868 | # if defined(DEBUGGING) |
869 | static void botch (char *diag, char *s); |
870 | # endif |
871 | static void add_to_chain (void *p, MEM_SIZE size, MEM_SIZE chip); |
872 | static void* get_from_chain (MEM_SIZE size); |
873 | static void* get_from_bigger_buckets(int bucket, MEM_SIZE size); |
874 | static union overhead *getpages (MEM_SIZE needed, int *nblksp, int bucket); |
875 | static int getpages_adjacent(MEM_SIZE require); |
876 | |
febabd2a |
877 | #ifdef PERL_CORE |
e8bc2b5c |
878 | |
3541dd58 |
879 | #ifdef I_MACH_CTHREADS |
772fe5b3 |
880 | # undef MUTEX_LOCK |
881 | # define MUTEX_LOCK(m) STMT_START { if (*m) mutex_lock(*m); } STMT_END |
882 | # undef MUTEX_UNLOCK |
883 | # define MUTEX_UNLOCK(m) STMT_START { if (*m) mutex_unlock(*m); } STMT_END |
3541dd58 |
884 | #endif |
885 | |
b022d2d2 |
886 | #ifndef BITS_IN_PTR |
887 | # define BITS_IN_PTR (8*PTRSIZE) |
888 | #endif |
889 | |
890 | /* |
891 | * nextf[i] is the pointer to the next free block of size 2^i. The |
892 | * smallest allocatable block is 8 bytes. The overhead information |
893 | * precedes the data area returned to the user. |
894 | */ |
895 | #define NBUCKETS (BITS_IN_PTR*BUCKETS_PER_POW2 + 1) |
896 | static union overhead *nextf[NBUCKETS]; |
897 | |
898 | #if defined(PURIFY) && !defined(USE_PERL_SBRK) |
899 | # define USE_PERL_SBRK |
900 | #endif |
901 | |
902 | #ifdef USE_PERL_SBRK |
3f270f98 |
903 | # define sbrk(a) Perl_sbrk(a) |
b022d2d2 |
904 | Malloc_t Perl_sbrk (int size); |
b022d2d2 |
905 | #else |
3f270f98 |
906 | # ifndef HAS_SBRK_PROTO /* <unistd.h> usually takes care of this */ |
b022d2d2 |
907 | extern Malloc_t sbrk(int); |
3f270f98 |
908 | # endif |
ef9f17be |
909 | #endif |
b022d2d2 |
910 | |
911 | #ifdef DEBUGGING_MSTATS |
912 | /* |
913 | * nmalloc[i] is the difference between the number of mallocs and frees |
914 | * for a given block size. |
915 | */ |
916 | static u_int nmalloc[NBUCKETS]; |
917 | static u_int sbrk_slack; |
918 | static u_int start_slack; |
919 | #else /* !( defined DEBUGGING_MSTATS ) */ |
920 | # define sbrk_slack 0 |
921 | #endif |
922 | |
923 | static u_int goodsbrk; |
924 | |
febabd2a |
925 | # ifdef PERL_EMERGENCY_SBRK |
926 | |
927 | # ifndef BIG_SIZE |
928 | # define BIG_SIZE (1<<16) /* 64K */ |
929 | # endif |
930 | |
55497cff |
931 | static char *emergency_buffer; |
932 | static MEM_SIZE emergency_buffer_size; |
6e21dc91 |
933 | static MEM_SIZE no_mem; /* 0 if the last request for more memory succeeded. |
b022d2d2 |
934 | Otherwise the size of the failing request. */ |
55497cff |
935 | |
cea2e8a9 |
936 | static Malloc_t |
937 | emergency_sbrk(MEM_SIZE size) |
55497cff |
938 | { |
28ac10b1 |
939 | MEM_SIZE rsize = (((size - 1)>>LOG_OF_MIN_ARENA) + 1)<<LOG_OF_MIN_ARENA; |
940 | |
b022d2d2 |
941 | if (size >= BIG_SIZE && (!no_mem || (size < no_mem))) { |
942 | /* Give the possibility to recover, but avoid an infinite cycle. */ |
741df71a |
943 | MALLOC_UNLOCK; |
b022d2d2 |
944 | no_mem = size; |
945 | croak2("Out of memory during \"large\" request for %"UVuf" bytes, total sbrk() is %"UVuf" bytes", (UV)size, (UV)(goodsbrk + sbrk_slack)); |
55497cff |
946 | } |
947 | |
28ac10b1 |
948 | if (emergency_buffer_size >= rsize) { |
949 | char *old = emergency_buffer; |
950 | |
951 | emergency_buffer_size -= rsize; |
952 | emergency_buffer += rsize; |
953 | return old; |
954 | } else { |
cea2e8a9 |
955 | dTHX; |
55497cff |
956 | /* First offense, give a possibility to recover by dying. */ |
957 | /* No malloc involved here: */ |
4a33f861 |
958 | GV **gvp = (GV**)hv_fetch(PL_defstash, "^M", 2, 0); |
55497cff |
959 | SV *sv; |
960 | char *pv; |
28ac10b1 |
961 | int have = 0; |
2d8e6c8d |
962 | STRLEN n_a; |
55497cff |
963 | |
28ac10b1 |
964 | if (emergency_buffer_size) { |
965 | add_to_chain(emergency_buffer, emergency_buffer_size, 0); |
966 | emergency_buffer_size = 0; |
967 | emergency_buffer = Nullch; |
968 | have = 1; |
969 | } |
4a33f861 |
970 | if (!gvp) gvp = (GV**)hv_fetch(PL_defstash, "\015", 1, 0); |
55497cff |
971 | if (!gvp || !(sv = GvSV(*gvp)) || !SvPOK(sv) |
28ac10b1 |
972 | || (SvLEN(sv) < (1<<LOG_OF_MIN_ARENA) - M_OVERHEAD)) { |
973 | if (have) |
974 | goto do_croak; |
55497cff |
975 | return (char *)-1; /* Now die die die... */ |
28ac10b1 |
976 | } |
55497cff |
977 | /* Got it, now detach SvPV: */ |
2d8e6c8d |
978 | pv = SvPV(sv, n_a); |
55497cff |
979 | /* Check alignment: */ |
56431972 |
980 | if ((PTR2UV(pv) - sizeof(union overhead)) & (NEEDED_ALIGNMENT - 1)) { |
55497cff |
981 | PerlIO_puts(PerlIO_stderr(),"Bad alignment of $^M!\n"); |
bbce6d69 |
982 | return (char *)-1; /* die die die */ |
55497cff |
983 | } |
984 | |
28ac10b1 |
985 | emergency_buffer = pv - sizeof(union overhead); |
986 | emergency_buffer_size = malloced_size(pv) + M_OVERHEAD; |
55497cff |
987 | SvPOK_off(sv); |
28ac10b1 |
988 | SvPVX(sv) = Nullch; |
989 | SvCUR(sv) = SvLEN(sv) = 0; |
55497cff |
990 | } |
28ac10b1 |
991 | do_croak: |
741df71a |
992 | MALLOC_UNLOCK; |
b022d2d2 |
993 | croak("Out of memory during request for %"UVuf" bytes, total sbrk() is %"UVuf" bytes", (UV)size, (UV)(goodsbrk + sbrk_slack)); |
ce70748c |
994 | /* NOTREACHED */ |
995 | return Nullch; |
55497cff |
996 | } |
997 | |
febabd2a |
998 | # else /* !defined(PERL_EMERGENCY_SBRK) */ |
55497cff |
999 | # define emergency_sbrk(size) -1 |
febabd2a |
1000 | # endif |
1001 | #endif /* ifdef PERL_CORE */ |
55497cff |
1002 | |
760ac839 |
1003 | #ifdef DEBUGGING |
3541dd58 |
1004 | #undef ASSERT |
1005 | #define ASSERT(p,diag) if (!(p)) botch(diag,STRINGIFY(p)); else |
cea2e8a9 |
1006 | static void |
1007 | botch(char *diag, char *s) |
8d063cd8 |
1008 | { |
e8cd8248 |
1009 | dTHX; |
d720c441 |
1010 | PerlIO_printf(PerlIO_stderr(), "assertion botched (%s?): %s\n", diag, s); |
3028581b |
1011 | PerlProc_abort(); |
8d063cd8 |
1012 | } |
1013 | #else |
3541dd58 |
1014 | #define ASSERT(p, diag) |
8d063cd8 |
1015 | #endif |
1016 | |
2304df62 |
1017 | Malloc_t |
86058a2d |
1018 | Perl_malloc(register size_t nbytes) |
8d063cd8 |
1019 | { |
1020 | register union overhead *p; |
e8bc2b5c |
1021 | register int bucket; |
ee0007ab |
1022 | register MEM_SIZE shiftr; |
8d063cd8 |
1023 | |
c2a5c2d2 |
1024 | #if defined(DEBUGGING) || defined(RCHECK) |
ee0007ab |
1025 | MEM_SIZE size = nbytes; |
45d8adaa |
1026 | #endif |
1027 | |
e8bc2b5c |
1028 | BARK_64K_LIMIT("Allocation",nbytes,nbytes); |
45d8adaa |
1029 | #ifdef DEBUGGING |
1030 | if ((long)nbytes < 0) |
cea2e8a9 |
1031 | croak("%s", "panic: malloc"); |
45d8adaa |
1032 | #endif |
45d8adaa |
1033 | |
8d063cd8 |
1034 | /* |
1035 | * Convert amount of memory requested into |
1036 | * closest block size stored in hash buckets |
1037 | * which satisfies request. Account for |
1038 | * space used per block for accounting. |
1039 | */ |
cf5c4ad8 |
1040 | #ifdef PACK_MALLOC |
e8bc2b5c |
1041 | # ifdef SMALL_BUCKET_VIA_TABLE |
1042 | if (nbytes == 0) |
1043 | bucket = MIN_BUCKET; |
1044 | else if (nbytes <= SIZE_TABLE_MAX) { |
1045 | bucket = bucket_of[(nbytes - 1) >> BUCKET_TABLE_SHIFT]; |
1046 | } else |
1047 | # else |
043bf814 |
1048 | if (nbytes == 0) |
1049 | nbytes = 1; |
e8bc2b5c |
1050 | if (nbytes <= MAX_POW2_ALGO) goto do_shifts; |
1051 | else |
1052 | # endif |
55497cff |
1053 | #endif |
e8bc2b5c |
1054 | { |
1055 | POW2_OPTIMIZE_ADJUST(nbytes); |
1056 | nbytes += M_OVERHEAD; |
1057 | nbytes = (nbytes + 3) &~ 3; |
516a5887 |
1058 | #if defined(PACK_MALLOC) && !defined(SMALL_BUCKET_VIA_TABLE) |
e8bc2b5c |
1059 | do_shifts: |
516a5887 |
1060 | #endif |
e8bc2b5c |
1061 | shiftr = (nbytes - 1) >> START_SHIFT; |
1062 | bucket = START_SHIFTS_BUCKET; |
1063 | /* apart from this loop, this is O(1) */ |
1064 | while (shiftr >>= 1) |
1065 | bucket += BUCKETS_PER_POW2; |
cf5c4ad8 |
1066 | } |
4ad56ec9 |
1067 | MALLOC_LOCK; |
8d063cd8 |
1068 | /* |
1069 | * If nothing in hash bucket right now, |
1070 | * request more memory from the system. |
1071 | */ |
1072 | if (nextf[bucket] == NULL) |
1073 | morecore(bucket); |
e8bc2b5c |
1074 | if ((p = nextf[bucket]) == NULL) { |
741df71a |
1075 | MALLOC_UNLOCK; |
55497cff |
1076 | #ifdef PERL_CORE |
0b250b9e |
1077 | { |
1078 | dTHX; |
1079 | if (!PL_nomemok) { |
febabd2a |
1080 | #if defined(PLAIN_MALLOC) && defined(NO_FANCY_MALLOC) |
1081 | PerlIO_puts(PerlIO_stderr(),"Out of memory!\n"); |
1082 | #else |
b022d2d2 |
1083 | char buff[80]; |
1084 | char *eb = buff + sizeof(buff) - 1; |
1085 | char *s = eb; |
1086 | size_t n = nbytes; |
1087 | |
1088 | PerlIO_puts(PerlIO_stderr(),"Out of memory during request for "); |
1089 | #if defined(DEBUGGING) || defined(RCHECK) |
1090 | n = size; |
1091 | #endif |
1092 | *s = 0; |
1093 | do { |
1094 | *--s = '0' + (n % 10); |
1095 | } while (n /= 10); |
1096 | PerlIO_puts(PerlIO_stderr(),s); |
1097 | PerlIO_puts(PerlIO_stderr()," bytes, total sbrk() is "); |
1098 | s = eb; |
1099 | n = goodsbrk + sbrk_slack; |
1100 | do { |
1101 | *--s = '0' + (n % 10); |
1102 | } while (n /= 10); |
1103 | PerlIO_puts(PerlIO_stderr(),s); |
1104 | PerlIO_puts(PerlIO_stderr()," bytes!\n"); |
febabd2a |
1105 | #endif /* defined(PLAIN_MALLOC) && defined(NO_FANCY_MALLOC) */ |
0b250b9e |
1106 | my_exit(1); |
1107 | } |
ee0007ab |
1108 | } |
45d8adaa |
1109 | #endif |
4ad56ec9 |
1110 | return (NULL); |
45d8adaa |
1111 | } |
1112 | |
8d063cd8 |
1113 | /* remove from linked list */ |
802004fa |
1114 | #if defined(RCHECK) |
32e30700 |
1115 | if ((PTR2UV(p)) & (MEM_ALIGNBYTES - 1)) { |
e8cd8248 |
1116 | dTHX; |
b900a521 |
1117 | PerlIO_printf(PerlIO_stderr(), |
7fa2f4f1 |
1118 | "Unaligned pointer in the free chain 0x%"UVxf"\n", |
1119 | PTR2UV(p)); |
1120 | } |
1121 | if ((PTR2UV(p->ov_next)) & (MEM_ALIGNBYTES - 1)) { |
e8cd8248 |
1122 | dTHX; |
7fa2f4f1 |
1123 | PerlIO_printf(PerlIO_stderr(), |
1124 | "Unaligned `next' pointer in the free " |
d2560b70 |
1125 | "chain 0x%"UVxf" at 0x%"UVxf"\n", |
7fa2f4f1 |
1126 | PTR2UV(p->ov_next), PTR2UV(p)); |
32e30700 |
1127 | } |
bf38876a |
1128 | #endif |
1129 | nextf[bucket] = p->ov_next; |
4ad56ec9 |
1130 | |
1131 | MALLOC_UNLOCK; |
1132 | |
51a5ed28 |
1133 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
1134 | "0x%"UVxf": (%05lu) malloc %ld bytes\n", |
1135 | PTR2UV(p), (unsigned long)(PL_an++), |
1136 | (long)size)); |
1137 | |
e8bc2b5c |
1138 | #ifdef IGNORE_SMALL_BAD_FREE |
1139 | if (bucket >= FIRST_BUCKET_WITH_CHECK) |
1140 | #endif |
1141 | OV_MAGIC(p, bucket) = MAGIC; |
cf5c4ad8 |
1142 | #ifndef PACK_MALLOC |
1143 | OV_INDEX(p) = bucket; |
1144 | #endif |
8d063cd8 |
1145 | #ifdef RCHECK |
1146 | /* |
1147 | * Record allocated size of block and |
1148 | * bound space with magic numbers. |
1149 | */ |
8d063cd8 |
1150 | p->ov_rmagic = RMAGIC; |
e8bc2b5c |
1151 | if (bucket <= MAX_SHORT_BUCKET) { |
1152 | int i; |
1153 | |
1154 | nbytes = size + M_OVERHEAD; |
1155 | p->ov_size = nbytes - 1; |
1156 | if ((i = nbytes & 3)) { |
1157 | i = 4 - i; |
1158 | while (i--) |
1159 | *((char *)((caddr_t)p + nbytes - RSLOP + i)) = RMAGIC_C; |
1160 | } |
1161 | nbytes = (nbytes + 3) &~ 3; |
1162 | *((u_int *)((caddr_t)p + nbytes - RSLOP)) = RMAGIC; |
1163 | } |
8d063cd8 |
1164 | #endif |
cf5c4ad8 |
1165 | return ((Malloc_t)(p + CHUNK_SHIFT)); |
8d063cd8 |
1166 | } |
1167 | |
e8bc2b5c |
1168 | static char *last_sbrk_top; |
1169 | static char *last_op; /* This arena can be easily extended. */ |
6e21dc91 |
1170 | static MEM_SIZE sbrked_remains; |
e8bc2b5c |
1171 | static int sbrk_good = SBRK_ALLOW_FAILURES * SBRK_FAILURE_PRICE; |
1172 | |
1173 | #ifdef DEBUGGING_MSTATS |
1174 | static int sbrks; |
1175 | #endif |
1176 | |
1177 | struct chunk_chain_s { |
1178 | struct chunk_chain_s *next; |
1179 | MEM_SIZE size; |
1180 | }; |
1181 | static struct chunk_chain_s *chunk_chain; |
1182 | static int n_chunks; |
1183 | static char max_bucket; |
1184 | |
1185 | /* Cutoff a piece of one of the chunks in the chain. Prefer smaller chunk. */ |
cea2e8a9 |
1186 | static void * |
1187 | get_from_chain(MEM_SIZE size) |
e8bc2b5c |
1188 | { |
1189 | struct chunk_chain_s *elt = chunk_chain, **oldp = &chunk_chain; |
1190 | struct chunk_chain_s **oldgoodp = NULL; |
1191 | long min_remain = LONG_MAX; |
1192 | |
1193 | while (elt) { |
1194 | if (elt->size >= size) { |
1195 | long remains = elt->size - size; |
1196 | if (remains >= 0 && remains < min_remain) { |
1197 | oldgoodp = oldp; |
1198 | min_remain = remains; |
1199 | } |
1200 | if (remains == 0) { |
1201 | break; |
1202 | } |
1203 | } |
1204 | oldp = &( elt->next ); |
1205 | elt = elt->next; |
1206 | } |
1207 | if (!oldgoodp) return NULL; |
1208 | if (min_remain) { |
1209 | void *ret = *oldgoodp; |
1210 | struct chunk_chain_s *next = (*oldgoodp)->next; |
1211 | |
1212 | *oldgoodp = (struct chunk_chain_s *)((char*)ret + size); |
1213 | (*oldgoodp)->size = min_remain; |
1214 | (*oldgoodp)->next = next; |
1215 | return ret; |
1216 | } else { |
1217 | void *ret = *oldgoodp; |
1218 | *oldgoodp = (*oldgoodp)->next; |
1219 | n_chunks--; |
1220 | return ret; |
1221 | } |
1222 | } |
1223 | |
cea2e8a9 |
1224 | static void |
1225 | add_to_chain(void *p, MEM_SIZE size, MEM_SIZE chip) |
e8bc2b5c |
1226 | { |
1227 | struct chunk_chain_s *next = chunk_chain; |
1228 | char *cp = (char*)p; |
1229 | |
1230 | cp += chip; |
1231 | chunk_chain = (struct chunk_chain_s *)cp; |
1232 | chunk_chain->size = size - chip; |
1233 | chunk_chain->next = next; |
1234 | n_chunks++; |
1235 | } |
1236 | |
cea2e8a9 |
1237 | static void * |
1238 | get_from_bigger_buckets(int bucket, MEM_SIZE size) |
e8bc2b5c |
1239 | { |
1240 | int price = 1; |
1241 | static int bucketprice[NBUCKETS]; |
1242 | while (bucket <= max_bucket) { |
1243 | /* We postpone stealing from bigger buckets until we want it |
1244 | often enough. */ |
1245 | if (nextf[bucket] && bucketprice[bucket]++ >= price) { |
1246 | /* Steal it! */ |
1247 | void *ret = (void*)(nextf[bucket] - 1 + CHUNK_SHIFT); |
1248 | bucketprice[bucket] = 0; |
1249 | if (((char*)nextf[bucket]) - M_OVERHEAD == last_op) { |
1250 | last_op = NULL; /* Disable optimization */ |
1251 | } |
1252 | nextf[bucket] = nextf[bucket]->ov_next; |
1253 | #ifdef DEBUGGING_MSTATS |
1254 | nmalloc[bucket]--; |
1255 | start_slack -= M_OVERHEAD; |
1256 | #endif |
1257 | add_to_chain(ret, (BUCKET_SIZE(bucket) + |
1258 | POW2_OPTIMIZE_SURPLUS(bucket)), |
1259 | size); |
1260 | return ret; |
1261 | } |
1262 | bucket++; |
1263 | } |
1264 | return NULL; |
1265 | } |
1266 | |
cea2e8a9 |
1267 | static union overhead * |
c7374474 |
1268 | getpages(MEM_SIZE needed, int *nblksp, int bucket) |
fa423c5b |
1269 | { |
1270 | /* Need to do a (possibly expensive) system call. Try to |
1271 | optimize it for rare calling. */ |
1272 | MEM_SIZE require = needed - sbrked_remains; |
1273 | char *cp; |
1274 | union overhead *ovp; |
c7374474 |
1275 | MEM_SIZE slack = 0; |
fa423c5b |
1276 | |
1277 | if (sbrk_good > 0) { |
1278 | if (!last_sbrk_top && require < FIRST_SBRK) |
1279 | require = FIRST_SBRK; |
1280 | else if (require < MIN_SBRK) require = MIN_SBRK; |
1281 | |
1282 | if (require < goodsbrk * MIN_SBRK_FRAC / 100) |
1283 | require = goodsbrk * MIN_SBRK_FRAC / 100; |
1284 | require = ((require - 1 + MIN_SBRK) / MIN_SBRK) * MIN_SBRK; |
1285 | } else { |
1286 | require = needed; |
1287 | last_sbrk_top = 0; |
1288 | sbrked_remains = 0; |
1289 | } |
1290 | |
1291 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
1292 | "sbrk(%ld) for %ld-byte-long arena\n", |
1293 | (long)require, (long) needed)); |
1294 | cp = (char *)sbrk(require); |
1295 | #ifdef DEBUGGING_MSTATS |
1296 | sbrks++; |
1297 | #endif |
1298 | if (cp == last_sbrk_top) { |
1299 | /* Common case, anything is fine. */ |
1300 | sbrk_good++; |
1301 | ovp = (union overhead *) (cp - sbrked_remains); |
e9397286 |
1302 | last_op = cp - sbrked_remains; |
fa423c5b |
1303 | sbrked_remains = require - (needed - sbrked_remains); |
1304 | } else if (cp == (char *)-1) { /* no more room! */ |
1305 | ovp = (union overhead *)emergency_sbrk(needed); |
1306 | if (ovp == (union overhead *)-1) |
1307 | return 0; |
e9397286 |
1308 | if (((char*)ovp) > last_op) { /* Cannot happen with current emergency_sbrk() */ |
1309 | last_op = 0; |
1310 | } |
fa423c5b |
1311 | return ovp; |
1312 | } else { /* Non-continuous or first sbrk(). */ |
1313 | long add = sbrked_remains; |
1314 | char *newcp; |
1315 | |
1316 | if (sbrked_remains) { /* Put rest into chain, we |
1317 | cannot use it right now. */ |
1318 | add_to_chain((void*)(last_sbrk_top - sbrked_remains), |
1319 | sbrked_remains, 0); |
1320 | } |
1321 | |
1322 | /* Second, check alignment. */ |
1323 | slack = 0; |
1324 | |
61ae2fbf |
1325 | #if !defined(atarist) && !defined(__MINT__) /* on the atari we don't have to worry about this */ |
fa423c5b |
1326 | # ifndef I286 /* The sbrk(0) call on the I286 always returns the next segment */ |
5bbd1ef5 |
1327 | /* WANTED_ALIGNMENT may be more than NEEDED_ALIGNMENT, but this may |
1328 | improve performance of memory access. */ |
56431972 |
1329 | if (PTR2UV(cp) & (WANTED_ALIGNMENT - 1)) { /* Not aligned. */ |
1330 | slack = WANTED_ALIGNMENT - (PTR2UV(cp) & (WANTED_ALIGNMENT - 1)); |
fa423c5b |
1331 | add += slack; |
1332 | } |
1333 | # endif |
61ae2fbf |
1334 | #endif /* !atarist && !MINT */ |
fa423c5b |
1335 | |
1336 | if (add) { |
1337 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
1338 | "sbrk(%ld) to fix non-continuous/off-page sbrk:\n\t%ld for alignement,\t%ld were assumed to come from the tail of the previous sbrk\n", |
1339 | (long)add, (long) slack, |
1340 | (long) sbrked_remains)); |
1341 | newcp = (char *)sbrk(add); |
1342 | #if defined(DEBUGGING_MSTATS) |
1343 | sbrks++; |
1344 | sbrk_slack += add; |
1345 | #endif |
1346 | if (newcp != cp + require) { |
1347 | /* Too bad: even rounding sbrk() is not continuous.*/ |
1348 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
1349 | "failed to fix bad sbrk()\n")); |
1350 | #ifdef PACK_MALLOC |
1351 | if (slack) { |
741df71a |
1352 | MALLOC_UNLOCK; |
5bbd1ef5 |
1353 | fatalcroak("panic: Off-page sbrk\n"); |
fa423c5b |
1354 | } |
1355 | #endif |
1356 | if (sbrked_remains) { |
1357 | /* Try again. */ |
1358 | #if defined(DEBUGGING_MSTATS) |
1359 | sbrk_slack += require; |
1360 | #endif |
1361 | require = needed; |
1362 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
1363 | "straight sbrk(%ld)\n", |
1364 | (long)require)); |
1365 | cp = (char *)sbrk(require); |
1366 | #ifdef DEBUGGING_MSTATS |
1367 | sbrks++; |
1368 | #endif |
1369 | if (cp == (char *)-1) |
1370 | return 0; |
1371 | } |
1372 | sbrk_good = -1; /* Disable optimization! |
1373 | Continue with not-aligned... */ |
1374 | } else { |
1375 | cp += slack; |
1376 | require += sbrked_remains; |
1377 | } |
1378 | } |
1379 | |
1380 | if (last_sbrk_top) { |
1381 | sbrk_good -= SBRK_FAILURE_PRICE; |
1382 | } |
1383 | |
1384 | ovp = (union overhead *) cp; |
1385 | /* |
1386 | * Round up to minimum allocation size boundary |
1387 | * and deduct from block count to reflect. |
1388 | */ |
1389 | |
5bbd1ef5 |
1390 | # if NEEDED_ALIGNMENT > MEM_ALIGNBYTES |
56431972 |
1391 | if (PTR2UV(ovp) & (NEEDED_ALIGNMENT - 1)) |
5bbd1ef5 |
1392 | fatalcroak("Misalignment of sbrk()\n"); |
1393 | else |
1394 | # endif |
fa423c5b |
1395 | #ifndef I286 /* Again, this should always be ok on an 80286 */ |
56431972 |
1396 | if (PTR2UV(ovp) & (MEM_ALIGNBYTES - 1)) { |
fa423c5b |
1397 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
1398 | "fixing sbrk(): %d bytes off machine alignement\n", |
56431972 |
1399 | (int)(PTR2UV(ovp) & (MEM_ALIGNBYTES - 1)))); |
1400 | ovp = INT2PTR(union overhead *,(PTR2UV(ovp) + MEM_ALIGNBYTES) & |
5bbd1ef5 |
1401 | ~(MEM_ALIGNBYTES - 1)); |
fa423c5b |
1402 | (*nblksp)--; |
1403 | # if defined(DEBUGGING_MSTATS) |
1404 | /* This is only approx. if TWO_POT_OPTIMIZE: */ |
5bbd1ef5 |
1405 | sbrk_slack += (1 << (bucket >> BUCKET_POW2_SHIFT)); |
fa423c5b |
1406 | # endif |
1407 | } |
1408 | #endif |
5bbd1ef5 |
1409 | ; /* Finish `else' */ |
fa423c5b |
1410 | sbrked_remains = require - needed; |
e9397286 |
1411 | last_op = cp; |
fa423c5b |
1412 | } |
febabd2a |
1413 | #if !defined(PLAIN_MALLOC) && !defined(NO_FANCY_MALLOC) |
b022d2d2 |
1414 | no_mem = 0; |
febabd2a |
1415 | #endif |
fa423c5b |
1416 | last_sbrk_top = cp + require; |
fa423c5b |
1417 | #ifdef DEBUGGING_MSTATS |
1418 | goodsbrk += require; |
1419 | #endif |
1420 | return ovp; |
1421 | } |
1422 | |
cea2e8a9 |
1423 | static int |
c7374474 |
1424 | getpages_adjacent(MEM_SIZE require) |
fa423c5b |
1425 | { |
1426 | if (require <= sbrked_remains) { |
1427 | sbrked_remains -= require; |
1428 | } else { |
1429 | char *cp; |
1430 | |
1431 | require -= sbrked_remains; |
1432 | /* We do not try to optimize sbrks here, we just go for the space. */ |
1433 | cp = (char*) sbrk(require); |
1434 | #ifdef DEBUGGING_MSTATS |
1435 | sbrks++; |
1436 | goodsbrk += require; |
1437 | #endif |
1438 | if (cp == last_sbrk_top) { |
1439 | sbrked_remains = 0; |
1440 | last_sbrk_top = cp + require; |
1441 | } else { |
28ac10b1 |
1442 | if (cp == (char*)-1) { /* Out of memory */ |
1443 | #ifdef DEBUGGING_MSTATS |
1444 | goodsbrk -= require; |
1445 | #endif |
1446 | return 0; |
1447 | } |
fa423c5b |
1448 | /* Report the failure: */ |
1449 | if (sbrked_remains) |
1450 | add_to_chain((void*)(last_sbrk_top - sbrked_remains), |
1451 | sbrked_remains, 0); |
1452 | add_to_chain((void*)cp, require, 0); |
1453 | sbrk_good -= SBRK_FAILURE_PRICE; |
1454 | sbrked_remains = 0; |
1455 | last_sbrk_top = 0; |
1456 | last_op = 0; |
1457 | return 0; |
1458 | } |
1459 | } |
1460 | |
1461 | return 1; |
1462 | } |
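| /*
|  * Editorial note: getpages_adjacent() succeeds only when the extra space can
|  * be taken from sbrked_remains or when sbrk() happens to return memory that
|  * starts exactly at last_sbrk_top; otherwise the stray chunk is parked on
|  * chunk_chain and 0 is returned.  Perl_realloc() below calls it, under
|  * MALLOC_LOCK, to grow the most recently allocated chunk in place.
|  */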
1463 | |
8d063cd8 |
1464 | /* |
1465 | * Allocate more memory to the indicated bucket. |
1466 | */ |
cea2e8a9 |
1467 | static void |
1468 | morecore(register int bucket) |
8d063cd8 |
1469 | { |
72aaf631 |
1470 | register union overhead *ovp; |
8d063cd8 |
1471 | register int rnu; /* 2^rnu bytes will be requested */ |
fa423c5b |
1472 | int nblks; /* become nblks blocks of the desired size */ |
bbce6d69 |
1473 | register MEM_SIZE siz, needed; |
8d063cd8 |
1474 | |
1475 | if (nextf[bucket]) |
1476 | return; |
e8bc2b5c |
1477 | if (bucket == sizeof(MEM_SIZE)*8*BUCKETS_PER_POW2) { |
741df71a |
1478 | MALLOC_UNLOCK; |
d720c441 |
1479 | croak("%s", "Out of memory during ridiculously large request"); |
55497cff |
1480 | } |
d720c441 |
1481 | if (bucket > max_bucket) |
e8bc2b5c |
1482 | max_bucket = bucket; |
d720c441 |
1483 | |
e8bc2b5c |
1484 | rnu = ( (bucket <= (LOG_OF_MIN_ARENA << BUCKET_POW2_SHIFT)) |
1485 | ? LOG_OF_MIN_ARENA |
1486 | : (bucket >> BUCKET_POW2_SHIFT) ); |
1487 | /* This may be overwritten later: */ |
1488 | nblks = 1 << (rnu - (bucket >> BUCKET_POW2_SHIFT)); /* how many blocks to get */ |
1489 | needed = ((MEM_SIZE)1 << rnu) + POW2_OPTIMIZE_SURPLUS(bucket); |
1490 | if (nextf[rnu << BUCKET_POW2_SHIFT]) { /* 2048b bucket. */ |
1491 | ovp = nextf[rnu << BUCKET_POW2_SHIFT] - 1 + CHUNK_SHIFT; |
1492 | nextf[rnu << BUCKET_POW2_SHIFT] |
1493 | = nextf[rnu << BUCKET_POW2_SHIFT]->ov_next; |
1494 | #ifdef DEBUGGING_MSTATS |
1495 | nmalloc[rnu << BUCKET_POW2_SHIFT]--; |
1496 | start_slack -= M_OVERHEAD; |
1497 | #endif |
1498 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
1499 | "stealing %ld bytes from %ld arena\n", |
1500 | (long) needed, (long) rnu << BUCKET_POW2_SHIFT)); |
1501 | } else if (chunk_chain |
1502 | && (ovp = (union overhead*) get_from_chain(needed))) { |
1503 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
1504 | "stealing %ld bytes from chain\n", |
1505 | (long) needed)); |
d720c441 |
1506 | } else if ( (ovp = (union overhead*) |
1507 | get_from_bigger_buckets((rnu << BUCKET_POW2_SHIFT) + 1, |
1508 | needed)) ) { |
e8bc2b5c |
1509 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
1510 | "stealing %ld bytes from bigger buckets\n", |
1511 | (long) needed)); |
1512 | } else if (needed <= sbrked_remains) { |
1513 | ovp = (union overhead *)(last_sbrk_top - sbrked_remains); |
1514 | sbrked_remains -= needed; |
1515 | last_op = (char*)ovp; |
fa423c5b |
1516 | } else |
1517 | ovp = getpages(needed, &nblks, bucket); |
e8bc2b5c |
1518 | |
fa423c5b |
1519 | if (!ovp) |
1520 | return; |
e8bc2b5c |
1521 | |
8d063cd8 |
1522 | /* |
1523 | * Add new memory allocated to that on |
1524 | * free list for this hash bucket. |
1525 | */ |
e8bc2b5c |
1526 | siz = BUCKET_SIZE(bucket); |
cf5c4ad8 |
1527 | #ifdef PACK_MALLOC |
72aaf631 |
1528 | *(u_char*)ovp = bucket; /* Fill index. */ |
e8bc2b5c |
1529 | if (bucket <= MAX_PACKED) { |
1530 | ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket)); |
1531 | nblks = N_BLKS(bucket); |
cf5c4ad8 |
1532 | # ifdef DEBUGGING_MSTATS |
e8bc2b5c |
1533 | start_slack += BLK_SHIFT(bucket); |
cf5c4ad8 |
1534 | # endif |
e8bc2b5c |
1535 | } else if (bucket < LOG_OF_MIN_ARENA * BUCKETS_PER_POW2) { |
1536 | ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket)); |
cf5c4ad8 |
1537 | siz -= sizeof(union overhead); |
72aaf631 |
1538 | } else ovp++; /* One chunk per block. */ |
e8bc2b5c |
1539 | #endif /* PACK_MALLOC */ |
72aaf631 |
1540 | nextf[bucket] = ovp; |
5f05dabc |
1541 | #ifdef DEBUGGING_MSTATS |
1542 | nmalloc[bucket] += nblks; |
e8bc2b5c |
1543 | if (bucket > MAX_PACKED) { |
1544 | start_slack += M_OVERHEAD * nblks; |
1545 | } |
5f05dabc |
1546 | #endif |
8d063cd8 |
1547 | while (--nblks > 0) { |
72aaf631 |
1548 | ovp->ov_next = (union overhead *)((caddr_t)ovp + siz); |
1549 | ovp = (union overhead *)((caddr_t)ovp + siz); |
8d063cd8 |
1550 | } |
8595d6f1 |
1551 | /* Not all sbrks return zeroed memory.*/ |
72aaf631 |
1552 | ovp->ov_next = (union overhead *)NULL; |
cf5c4ad8 |
1553 | #ifdef PACK_MALLOC |
e8bc2b5c |
1554 | if (bucket == 7*BUCKETS_PER_POW2) { /* Special case, explanation is above. */ |
1555 | union overhead *n_op = nextf[7*BUCKETS_PER_POW2]->ov_next; |
1556 | nextf[7*BUCKETS_PER_POW2] = |
1557 | (union overhead *)((caddr_t)nextf[7*BUCKETS_PER_POW2] |
1558 | - sizeof(union overhead)); |
1559 | nextf[7*BUCKETS_PER_POW2]->ov_next = n_op; |
cf5c4ad8 |
1560 | } |
1561 | #endif /* PACK_MALLOC */
8d063cd8 |
1562 | } |
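| /*
|  * Editorial worked example (assuming BUCKETS_PER_POW2 == 1 and
|  * LOG_OF_MIN_ARENA == 11; both are configuration-dependent): a first request
|  * that lands in the 256-byte bucket makes morecore() obtain one minimal
|  * arena of 2^11 = 2048 bytes and carve it into 1 << (11 - 8) = 8 blocks,
|  * all threaded onto nextf[8]; Perl_malloc() then pops them off one at a
|  * time, so the next seven requests of that size are served from the free
|  * list without another sbrk().
|  */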
1563 | |
94b6baf5 |
1564 | Free_t |
86058a2d |
1565 | Perl_mfree(void *mp) |
cea2e8a9 |
1566 | { |
ee0007ab |
1567 | register MEM_SIZE size; |
72aaf631 |
1568 | register union overhead *ovp; |
352d5a3a |
1569 | char *cp = (char*)mp; |
cf5c4ad8 |
1570 | #ifdef PACK_MALLOC |
1571 | u_char bucket; |
1572 | #endif |
8d063cd8 |
1573 | |
e8bc2b5c |
1574 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
b900a521 |
1575 | "0x%"UVxf": (%05lu) free\n", |
1576 | PTR2UV(cp), (unsigned long)(PL_an++))); |
45d8adaa |
1577 | |
cf5c4ad8 |
1578 | if (cp == NULL) |
1579 | return; |
72aaf631 |
1580 | ovp = (union overhead *)((caddr_t)cp |
e8bc2b5c |
1581 | - sizeof (union overhead) * CHUNK_SHIFT); |
cf5c4ad8 |
1582 | #ifdef PACK_MALLOC |
72aaf631 |
1583 | bucket = OV_INDEX(ovp); |
cf5c4ad8 |
1584 | #endif |
e8bc2b5c |
1585 | #ifdef IGNORE_SMALL_BAD_FREE |
1586 | if ((bucket >= FIRST_BUCKET_WITH_CHECK) |
1587 | && (OV_MAGIC(ovp, bucket) != MAGIC)) |
1588 | #else |
1589 | if (OV_MAGIC(ovp, bucket) != MAGIC) |
1590 | #endif |
1591 | { |
68dc0745 |
1592 | static int bad_free_warn = -1; |
cf5c4ad8 |
1593 | if (bad_free_warn == -1) { |
e8cd8248 |
1594 | dTHX; |
5fd9e9a4 |
1595 | char *pbf = PerlEnv_getenv("PERL_BADFREE"); |
cf5c4ad8 |
1596 | bad_free_warn = (pbf) ? atoi(pbf) : 1; |
1597 | } |
1598 | if (!bad_free_warn) |
1599 | return; |
8990e307 |
1600 | #ifdef RCHECK |
2ba999ec |
1601 | #ifdef PERL_CORE |
e8cd8248 |
1602 | { |
1603 | dTHX; |
1604 | if (!PERL_IS_ALIVE || !PL_curcop || ckWARN_d(WARN_MALLOC)) |
9014280d |
1605 | Perl_warner(aTHX_ packWARN(WARN_MALLOC), "%s free() ignored (RMAGIC, PERL_CORE)", |
e8cd8248 |
1606 | ovp->ov_rmagic == RMAGIC - 1 ? |
1607 | "Duplicate" : "Bad"); |
2ba999ec |
1608 | } |
e476b1b5 |
1609 | #else |
52c6645e |
1610 | warn("%s free() ignored (RMAGIC)", |
2ba999ec |
1611 | ovp->ov_rmagic == RMAGIC - 1 ? "Duplicate" : "Bad"); |
e476b1b5 |
1612 | #endif |
1613 | #else |
1614 | #ifdef PERL_CORE |
2ba999ec |
1615 | { |
1616 | dTHX; |
1d860e85 |
1617 | if (!PERL_IS_ALIVE || !PL_curcop || ckWARN_d(WARN_MALLOC)) |
9014280d |
1618 | Perl_warner(aTHX_ packWARN(WARN_MALLOC), "%s", "Bad free() ignored (PERL_CORE)"); |
2ba999ec |
1619 | } |
8990e307 |
1620 | #else |
2ba999ec |
1621 | warn("%s", "Bad free() ignored"); |
8990e307 |
1622 | #endif |
e476b1b5 |
1623 | #endif |
8d063cd8 |
1624 | return; /* sanity */ |
e8bc2b5c |
1625 | } |
8d063cd8 |
1626 | #ifdef RCHECK |
3541dd58 |
1627 | ASSERT(ovp->ov_rmagic == RMAGIC, "chunk's head overwrite"); |
e8bc2b5c |
1628 | if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) { |
1629 | int i; |
1630 | MEM_SIZE nbytes = ovp->ov_size + 1; |
1631 | |
1632 | if ((i = nbytes & 3)) { |
1633 | i = 4 - i; |
1634 | while (i--) { |
3541dd58 |
1635 | ASSERT(*((char *)((caddr_t)ovp + nbytes - RSLOP + i)) |
d720c441 |
1636 | == RMAGIC_C, "chunk's tail overwrite"); |
e8bc2b5c |
1637 | } |
1638 | } |
1639 | nbytes = (nbytes + 3) &~ 3; |
3541dd58 |
1640 | ASSERT(*(u_int *)((caddr_t)ovp + nbytes - RSLOP) == RMAGIC, "chunk's tail overwrite"); |
e8bc2b5c |
1641 | } |
72aaf631 |
1642 | ovp->ov_rmagic = RMAGIC - 1; |
8d063cd8 |
1643 | #endif |
3541dd58 |
1644 | ASSERT(OV_INDEX(ovp) < NBUCKETS, "chunk's head overwrite"); |
72aaf631 |
1645 | size = OV_INDEX(ovp); |
4ad56ec9 |
1646 | |
1647 | MALLOC_LOCK; |
72aaf631 |
1648 | ovp->ov_next = nextf[size]; |
1649 | nextf[size] = ovp; |
741df71a |
1650 | MALLOC_UNLOCK; |
8d063cd8 |
1651 | } |
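| /*
|  * Usage note (editorial): the "Bad free"/"Duplicate free" warning above can
|  * be controlled from the environment, since bad_free_warn is seeded from
|  * PERL_BADFREE via atoi().  For example, in the shell:
|  *
|  *     PERL_BADFREE=0 ./perl -e '...'     # silence the warning
|  *
|  * An unset variable (or any non-zero value) leaves the warning enabled.
|  */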
1652 | |
4ad56ec9 |
1653 | /* There is no need to do any locking in realloc (with an exception of |
1654 | trying to grow in place if we are at the end of the chain). |
1655 | If somebody calls us from a different thread with the same address, |
1656 |    that is a caller bug: each live chunk has a single legitimate owner. */
8d063cd8 |
1657 | |
2304df62 |
1658 | Malloc_t |
86058a2d |
1659 | Perl_realloc(void *mp, size_t nbytes) |
cea2e8a9 |
1660 | { |
ee0007ab |
1661 | register MEM_SIZE onb; |
72aaf631 |
1662 | union overhead *ovp; |
d720c441 |
1663 | char *res; |
1664 | int prev_bucket; |
e8bc2b5c |
1665 | register int bucket; |
4ad56ec9 |
1666 | int incr; /* 1 if does not fit, -1 if "easily" fits in a |
1667 | smaller bucket, otherwise 0. */ |
352d5a3a |
1668 | char *cp = (char*)mp; |
8d063cd8 |
1669 | |
e8bc2b5c |
1670 | #if defined(DEBUGGING) || !defined(PERL_CORE) |
ee0007ab |
1671 | MEM_SIZE size = nbytes; |
45d8adaa |
1672 | |
45d8adaa |
1673 | if ((long)nbytes < 0) |
cea2e8a9 |
1674 | croak("%s", "panic: realloc"); |
45d8adaa |
1675 | #endif |
e8bc2b5c |
1676 | |
1677 | BARK_64K_LIMIT("Reallocation",nbytes,size); |
1678 | if (!cp) |
86058a2d |
1679 | return Perl_malloc(nbytes); |
45d8adaa |
1680 | |
72aaf631 |
1681 | ovp = (union overhead *)((caddr_t)cp |
e8bc2b5c |
1682 | - sizeof (union overhead) * CHUNK_SHIFT); |
1683 | bucket = OV_INDEX(ovp); |
4ad56ec9 |
1684 | |
e8bc2b5c |
1685 | #ifdef IGNORE_SMALL_BAD_FREE |
4ad56ec9 |
1686 | if ((bucket >= FIRST_BUCKET_WITH_CHECK) |
1687 | && (OV_MAGIC(ovp, bucket) != MAGIC)) |
e8bc2b5c |
1688 | #else |
4ad56ec9 |
1689 | if (OV_MAGIC(ovp, bucket) != MAGIC) |
e8bc2b5c |
1690 | #endif |
4ad56ec9 |
1691 | { |
1692 | static int bad_free_warn = -1; |
1693 | if (bad_free_warn == -1) { |
e8cd8248 |
1694 | dTHX; |
4ad56ec9 |
1695 | char *pbf = PerlEnv_getenv("PERL_BADFREE"); |
1696 | bad_free_warn = (pbf) ? atoi(pbf) : 1; |
1697 | } |
1698 | if (!bad_free_warn) |
ce70748c |
1699 | return Nullch; |
4ad56ec9 |
1700 | #ifdef RCHECK |
2ba999ec |
1701 | #ifdef PERL_CORE |
e8cd8248 |
1702 | { |
1703 | dTHX; |
1704 | if (!PERL_IS_ALIVE || !PL_curcop || ckWARN_d(WARN_MALLOC)) |
9014280d |
1705 | Perl_warner(aTHX_ packWARN(WARN_MALLOC), "%srealloc() %signored", |
e8cd8248 |
1706 | (ovp->ov_rmagic == RMAGIC - 1 ? "" : "Bad "), |
1707 | ovp->ov_rmagic == RMAGIC - 1 |
1708 | ? "of freed memory " : ""); |
2ba999ec |
1709 | } |
e476b1b5 |
1710 | #else |
2ba999ec |
1711 | warn("%srealloc() %signored", |
1712 | (ovp->ov_rmagic == RMAGIC - 1 ? "" : "Bad "), |
1713 | ovp->ov_rmagic == RMAGIC - 1 ? "of freed memory " : ""); |
e476b1b5 |
1714 | #endif |
1715 | #else |
1716 | #ifdef PERL_CORE |
2ba999ec |
1717 | { |
1718 | dTHX; |
1d860e85 |
1719 | if (!PERL_IS_ALIVE || !PL_curcop || ckWARN_d(WARN_MALLOC)) |
9014280d |
1720 | Perl_warner(aTHX_ packWARN(WARN_MALLOC), "%s", |
1d860e85 |
1721 | "Bad realloc() ignored"); |
2ba999ec |
1722 | } |
4ad56ec9 |
1723 | #else |
2ba999ec |
1724 | warn("%s", "Bad realloc() ignored"); |
4ad56ec9 |
1725 | #endif |
e476b1b5 |
1726 | #endif |
ce70748c |
1727 | return Nullch; /* sanity */ |
4ad56ec9 |
1728 | } |
1729 | |
e8bc2b5c |
1730 | onb = BUCKET_SIZE_REAL(bucket); |
55497cff |
1731 | /* |
1732 | * avoid the copy if same size block. |
e8bc2b5c |
1733 | 	 * We are not aggressive with boundary cases. Note that it might
1734 | * (for a small number of cases) give false negative if |
55497cff |
1735 | * both new size and old one are in the bucket for |
e8bc2b5c |
1736 | * FIRST_BIG_POW2, but the new one is near the lower end. |
1737 | * |
1738 | 	 * So far we do not try to move to a bucket 1.5 times smaller.
55497cff |
1739 | */ |
e8bc2b5c |
1740 | if (nbytes > onb) incr = 1; |
1741 | else { |
1742 | #ifdef DO_NOT_TRY_HARDER_WHEN_SHRINKING |
1743 | if ( /* This is a little bit pessimal if PACK_MALLOC: */ |
1744 | nbytes > ( (onb >> 1) - M_OVERHEAD ) |
1745 | # ifdef TWO_POT_OPTIMIZE |
1746 | || (bucket == FIRST_BIG_POW2 && nbytes >= LAST_SMALL_BOUND ) |
1747 | # endif |
1748 | ) |
1749 | #else /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */ |
1750 | prev_bucket = ( (bucket > MAX_PACKED + 1) |
1751 | ? bucket - BUCKETS_PER_POW2 |
1752 | : bucket - 1); |
1753 | if (nbytes > BUCKET_SIZE_REAL(prev_bucket)) |
1754 | #endif /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */ |
1755 | incr = 0; |
1756 | else incr = -1; |
1757 | } |
2ce36478 |
1758 | #ifdef STRESS_REALLOC |
4ad56ec9 |
1759 | goto hard_way; |
2ce36478 |
1760 | #endif |
4ad56ec9 |
1761 | if (incr == 0) { |
852c2e52 |
1762 | inplace_label: |
a687059c |
1763 | #ifdef RCHECK |
1764 | /* |
1765 | * Record new allocated size of block and |
1766 | * bound space with magic numbers. |
1767 | */ |
72aaf631 |
1768 | if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) { |
e8bc2b5c |
1769 | int i, nb = ovp->ov_size + 1; |
1770 | |
1771 | if ((i = nb & 3)) { |
1772 | i = 4 - i; |
1773 | while (i--) { |
3541dd58 |
1774 | ASSERT(*((char *)((caddr_t)ovp + nb - RSLOP + i)) == RMAGIC_C, "chunk's tail overwrite"); |
e8bc2b5c |
1775 | } |
1776 | } |
1777 | nb = (nb + 3) &~ 3; |
3541dd58 |
1778 | ASSERT(*(u_int *)((caddr_t)ovp + nb - RSLOP) == RMAGIC, "chunk's tail overwrite"); |
a687059c |
1779 | /* |
1780 | * Convert amount of memory requested into |
1781 | * closest block size stored in hash buckets |
1782 | * which satisfies request. Account for |
1783 | * space used per block for accounting. |
1784 | */ |
cf5c4ad8 |
1785 | nbytes += M_OVERHEAD; |
72aaf631 |
1786 | ovp->ov_size = nbytes - 1; |
e8bc2b5c |
1787 | if ((i = nbytes & 3)) { |
1788 | i = 4 - i; |
1789 | while (i--) |
1790 | *((char *)((caddr_t)ovp + nbytes - RSLOP + i)) |
1791 | = RMAGIC_C; |
1792 | } |
1793 | nbytes = (nbytes + 3) &~ 3; |
72aaf631 |
1794 | *((u_int *)((caddr_t)ovp + nbytes - RSLOP)) = RMAGIC; |
a687059c |
1795 | } |
1796 | #endif |
45d8adaa |
1797 | res = cp; |
42ac124e |
1798 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
b900a521 |
1799 | "0x%"UVxf": (%05lu) realloc %ld bytes inplace\n", |
1800 | PTR2UV(res),(unsigned long)(PL_an++), |
42ac124e |
1801 | (long)size)); |
e8bc2b5c |
1802 | } else if (incr == 1 && (cp - M_OVERHEAD == last_op) |
1803 | && (onb > (1 << LOG_OF_MIN_ARENA))) { |
1804 | MEM_SIZE require, newarena = nbytes, pow; |
1805 | int shiftr; |
1806 | |
1807 | POW2_OPTIMIZE_ADJUST(newarena); |
1808 | newarena = newarena + M_OVERHEAD; |
1809 | /* newarena = (newarena + 3) &~ 3; */ |
1810 | shiftr = (newarena - 1) >> LOG_OF_MIN_ARENA; |
1811 | pow = LOG_OF_MIN_ARENA + 1; |
1812 | /* apart from this loop, this is O(1) */ |
1813 | while (shiftr >>= 1) |
1814 | pow++; |
1815 | newarena = (1 << pow) + POW2_OPTIMIZE_SURPLUS(pow * BUCKETS_PER_POW2); |
1816 | require = newarena - onb - M_OVERHEAD; |
1817 | |
4ad56ec9 |
1818 | MALLOC_LOCK; |
1819 | if (cp - M_OVERHEAD == last_op /* We *still* are the last chunk */ |
1820 | && getpages_adjacent(require)) { |
e8bc2b5c |
1821 | #ifdef DEBUGGING_MSTATS |
fa423c5b |
1822 | nmalloc[bucket]--; |
1823 | nmalloc[pow * BUCKETS_PER_POW2]++; |
e8bc2b5c |
1824 | #endif |
4d6cd4d8 |
1825 | *(cp - M_OVERHEAD) = pow * BUCKETS_PER_POW2; /* Fill index. */ |
4ad56ec9 |
1826 | MALLOC_UNLOCK; |
fa423c5b |
1827 | goto inplace_label; |
4ad56ec9 |
1828 | } else { |
1829 | MALLOC_UNLOCK; |
fa423c5b |
1830 | goto hard_way; |
4ad56ec9 |
1831 | } |
e8bc2b5c |
1832 | } else { |
1833 | hard_way: |
42ac124e |
1834 | DEBUG_m(PerlIO_printf(Perl_debug_log, |
b900a521 |
1835 | "0x%"UVxf": (%05lu) realloc %ld bytes the hard way\n", |
1836 | PTR2UV(cp),(unsigned long)(PL_an++), |
42ac124e |
1837 | (long)size)); |
86058a2d |
1838 | if ((res = (char*)Perl_malloc(nbytes)) == NULL) |
e8bc2b5c |
1839 | return (NULL); |
1840 | if (cp != res) /* common optimization */ |
1841 | Copy(cp, res, (MEM_SIZE)(nbytes<onb?nbytes:onb), char); |
4ad56ec9 |
1842 | Perl_mfree(cp); |
45d8adaa |
1843 | } |
2304df62 |
1844 | return ((Malloc_t)res); |
8d063cd8 |
1845 | } |
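| /*
|  * Editorial summary of the three paths above: incr == 0 keeps the chunk and
|  * merely refreshes the RCHECK bookkeeping in place; incr == 1 for the most
|  * recently allocated chunk (cp - M_OVERHEAD == last_op, and only when the
|  * chunk is larger than a minimal arena) tries to extend it via
|  * getpages_adjacent() while holding MALLOC_LOCK; every other case takes the
|  * "hard way": Perl_malloc() a new chunk, Copy() the smaller of the old and
|  * new sizes, then Perl_mfree() the original.
|  */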
1846 | |
cf5c4ad8 |
1847 | Malloc_t |
86058a2d |
1848 | Perl_calloc(register size_t elements, register size_t size) |
cf5c4ad8 |
1849 | { |
1850 | long sz = elements * size; |
86058a2d |
1851 | Malloc_t p = Perl_malloc(sz); |
cf5c4ad8 |
1852 | |
1853 | if (p) { |
1854 | memset((void*)p, 0, sz); |
1855 | } |
1856 | return p; |
1857 | } |
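| /*
|  * Editorial note: the multiplication above is not checked for overflow, so
|  * pathological 'elements'/'size' pairs can wrap 'sz'.  A minimal sketch of an
|  * overflow-checked wrapper (a hypothetical helper, not part of this
|  * allocator) that refuses requests whose product would overflow a size_t:
|  *
|  *     Malloc_t
|  *     safe_calloc(size_t elements, size_t size)
|  *     {
|  *         if (size && elements > (size_t)-1 / size)
|  *             return NULL;
|  *         return Perl_calloc(elements, size);
|  *     }
|  */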
1858 | |
4ad56ec9 |
1859 | char * |
1860 | Perl_strdup(const char *s) |
1861 | { |
1862 | MEM_SIZE l = strlen(s); |
b48f1ba5 |
1863 | char *s1 = (char *)Perl_malloc(l+1); |
4ad56ec9 |
1864 | |
b48f1ba5 |
1865 | Copy(s, s1, (MEM_SIZE)(l+1), char); |
4ad56ec9 |
1866 | return s1; |
1867 | } |
1868 | |
1869 | #ifdef PERL_CORE |
1870 | int |
1871 | Perl_putenv(char *a) |
1872 | { |
1873 |     /* The system putenv() can conflict with my_setenv(): the string may be
1874 |        allocated by the system malloc() but later freed by Perl's free(). */
1875 | dTHX; |
1876 | char *var; |
1877 | char *val = a; |
1878 | MEM_SIZE l; |
1879 | char buf[80]; |
1880 | |
1881 | while (*val && *val != '=') |
1882 | val++; |
1883 | if (!*val) |
1884 | return -1; |
1885 | l = val - a; |
1886 | if (l < sizeof(buf)) |
1887 | var = buf; |
1888 | else |
1889 | var = Perl_malloc(l + 1); |
1890 | Copy(a, var, l, char); |
b48f1ba5 |
1891 |     var[l] = 0;
1892 | my_setenv(var, val+1); |
4ad56ec9 |
1893 | if (var != buf) |
1894 | Perl_mfree(var); |
1895 | return 0; |
1896 | } |
1897 | # endif |
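| /*
|  * Usage note (editorial): Perl_putenv() expects the usual "NAME=value" form;
|  * it copies the name into a local buffer (heap-allocated when the name is 80
|  * characters or longer) and hands both halves to my_setenv(), so the caller's
|  * string is not retained.  It returns -1 when no '=' is present, 0 otherwise.
|  * For example:
|  *
|  *     Perl_putenv("PERL_BADFREE=0");
|  */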
1898 | |
e8bc2b5c |
1899 | MEM_SIZE |
cea2e8a9 |
1900 | Perl_malloced_size(void *p) |
e8bc2b5c |
1901 | { |
8d6dde3e |
1902 | union overhead *ovp = (union overhead *) |
1903 | ((caddr_t)p - sizeof (union overhead) * CHUNK_SHIFT); |
1904 | int bucket = OV_INDEX(ovp); |
1905 | #ifdef RCHECK |
1906 | /* The caller wants to have a complete control over the chunk, |
1907 | disable the memory checking inside the chunk. */ |
1908 | if (bucket <= MAX_SHORT_BUCKET) { |
1909 | MEM_SIZE size = BUCKET_SIZE_REAL(bucket); |
1910 | ovp->ov_size = size + M_OVERHEAD - 1; |
1911 | *((u_int *)((caddr_t)ovp + size + M_OVERHEAD - RSLOP)) = RMAGIC; |
1912 | } |
1913 | #endif |
e8bc2b5c |
1914 | return BUCKET_SIZE_REAL(bucket); |
1915 | } |
1916 | |
e8bc2b5c |
1917 | # ifdef BUCKETS_ROOT2 |
1918 | # define MIN_EVEN_REPORT 6 |
1919 | # else |
1920 | # define MIN_EVEN_REPORT MIN_BUCKET |
1921 | # endif |
827e134a |
1922 | |
1923 | int |
1924 | Perl_get_mstats(pTHX_ perl_mstats_t *buf, int buflen, int level) |
8d063cd8 |
1925 | { |
df31f264 |
1926 | #ifdef DEBUGGING_MSTATS |
8d063cd8 |
1927 | register int i, j; |
1928 | register union overhead *p; |
4ad56ec9 |
1929 | struct chunk_chain_s* nextchain; |
8d063cd8 |
1930 | |
827e134a |
1931 | buf->topbucket = buf->topbucket_ev = buf->topbucket_odd |
1932 | = buf->totfree = buf->total = buf->total_chain = 0; |
1933 | |
1934 | buf->minbucket = MIN_BUCKET; |
4ad56ec9 |
1935 | MALLOC_LOCK; |
e8bc2b5c |
1936 | for (i = MIN_BUCKET ; i < NBUCKETS; i++) { |
8d063cd8 |
1937 | for (j = 0, p = nextf[i]; p; p = p->ov_next, j++) |
1938 | ; |
827e134a |
1939 | if (i < buflen) { |
1940 | buf->nfree[i] = j; |
1941 | buf->ntotal[i] = nmalloc[i]; |
1942 | } |
1943 | buf->totfree += j * BUCKET_SIZE_REAL(i); |
1944 | buf->total += nmalloc[i] * BUCKET_SIZE_REAL(i); |
e8bc2b5c |
1945 | if (nmalloc[i]) { |
827e134a |
1946 | i % 2 ? (buf->topbucket_odd = i) : (buf->topbucket_ev = i); |
1947 | buf->topbucket = i; |
e8bc2b5c |
1948 | } |
c07a80fd |
1949 | } |
4ad56ec9 |
1950 | nextchain = chunk_chain; |
1951 | while (nextchain) { |
827e134a |
1952 | buf->total_chain += nextchain->size; |
4ad56ec9 |
1953 | nextchain = nextchain->next; |
1954 | } |
827e134a |
1955 | buf->total_sbrk = goodsbrk + sbrk_slack; |
1956 | buf->sbrks = sbrks; |
1957 | buf->sbrk_good = sbrk_good; |
1958 | buf->sbrk_slack = sbrk_slack; |
1959 | buf->start_slack = start_slack; |
1960 | buf->sbrked_remains = sbrked_remains; |
4ad56ec9 |
1961 | MALLOC_UNLOCK; |
880b20b6 |
1962 | buf->nbuckets = NBUCKETS; |
827e134a |
1963 | if (level) { |
1964 | for (i = MIN_BUCKET ; i < NBUCKETS; i++) { |
1965 | if (i >= buflen) |
1966 | break; |
1967 | buf->bucket_mem_size[i] = BUCKET_SIZE(i); |
1968 | buf->bucket_available_size[i] = BUCKET_SIZE_REAL(i); |
1969 | } |
1970 | } |
1971 | #endif /* defined DEBUGGING_MSTATS */ |
fe52b3b7 |
1972 | return 0; /* XXX unused */ |
827e134a |
1973 | } |
1974 | /* |
1975 | * mstats - print out statistics about malloc |
1976 | * |
1977 | * Prints two lines of numbers, one showing the length of the free list |
1978 | * for each size category, the second showing the number of mallocs - |
1979 | * frees for each size category. |
1980 | */ |
1981 | void |
1982 | Perl_dump_mstats(pTHX_ char *s) |
1983 | { |
1984 | #ifdef DEBUGGING_MSTATS |
880b20b6 |
1985 | register int i; |
827e134a |
1986 | perl_mstats_t buffer; |
880b20b6 |
1987 | UV nf[NBUCKETS]; |
1988 | UV nt[NBUCKETS]; |
827e134a |
1989 | |
1990 | buffer.nfree = nf; |
1991 | buffer.ntotal = nt; |
1992 | get_mstats(&buffer, NBUCKETS, 0); |
1993 | |
c07a80fd |
1994 | if (s) |
bf49b057 |
1995 | PerlIO_printf(Perl_error_log, |
880b20b6 |
1996 | "Memory allocation statistics %s (buckets %"IVdf"(%"IVdf")..%"IVdf"(%"IVdf")\n", |
e8bc2b5c |
1997 | s, |
880b20b6 |
1998 | (IV)BUCKET_SIZE_REAL(MIN_BUCKET), |
1999 | (IV)BUCKET_SIZE(MIN_BUCKET), |
2000 | (IV)BUCKET_SIZE_REAL(buffer.topbucket), |
2001 | (IV)BUCKET_SIZE(buffer.topbucket)); |
2002 | PerlIO_printf(Perl_error_log, "%8"IVdf" free:", buffer.totfree); |
827e134a |
2003 | for (i = MIN_EVEN_REPORT; i <= buffer.topbucket; i += BUCKETS_PER_POW2) { |
bf49b057 |
2004 | PerlIO_printf(Perl_error_log, |
e8bc2b5c |
2005 | ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2) |
880b20b6 |
2006 | ? " %5"UVuf |
2007 | : ((i < 12*BUCKETS_PER_POW2) ? " %3"UVuf : " %"UVuf)), |
827e134a |
2008 | buffer.nfree[i]); |
e8bc2b5c |
2009 | } |
2010 | #ifdef BUCKETS_ROOT2 |
bf49b057 |
2011 | PerlIO_printf(Perl_error_log, "\n\t "); |
827e134a |
2012 | for (i = MIN_BUCKET + 1; i <= buffer.topbucket_odd; i += BUCKETS_PER_POW2) { |
bf49b057 |
2013 | PerlIO_printf(Perl_error_log, |
e8bc2b5c |
2014 | ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2) |
880b20b6 |
2015 | ? " %5"UVuf |
2016 | : ((i < 12*BUCKETS_PER_POW2) ? " %3"UVuf : " %"UVuf)), |
827e134a |
2017 | buffer.nfree[i]); |
8d063cd8 |
2018 | } |
e8bc2b5c |
2019 | #endif |
880b20b6 |
2020 | PerlIO_printf(Perl_error_log, "\n%8"IVdf" used:", buffer.total - buffer.totfree); |
827e134a |
2021 | for (i = MIN_EVEN_REPORT; i <= buffer.topbucket; i += BUCKETS_PER_POW2) { |
bf49b057 |
2022 | PerlIO_printf(Perl_error_log, |
e8bc2b5c |
2023 | ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2) |
880b20b6 |
2024 | ? " %5"IVdf |
2025 | : ((i < 12*BUCKETS_PER_POW2) ? " %3"IVdf : " %"IVdf)), |
827e134a |
2026 | buffer.ntotal[i] - buffer.nfree[i]); |
c07a80fd |
2027 | } |
e8bc2b5c |
2028 | #ifdef BUCKETS_ROOT2 |
bf49b057 |
2029 | PerlIO_printf(Perl_error_log, "\n\t "); |
827e134a |
2030 | for (i = MIN_BUCKET + 1; i <= buffer.topbucket_odd; i += BUCKETS_PER_POW2) { |
bf49b057 |
2031 | PerlIO_printf(Perl_error_log, |
e8bc2b5c |
2032 | ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2) |
880b20b6 |
2033 | ? " %5"IVdf |
2034 | : ((i < 12*BUCKETS_PER_POW2) ? " %3"IVdf : " %"IVdf)), |
827e134a |
2035 | buffer.ntotal[i] - buffer.nfree[i]); |
e8bc2b5c |
2036 | } |
2037 | #endif |
880b20b6 |
2038 | PerlIO_printf(Perl_error_log, "\nTotal sbrk(): %"IVdf"/%"IVdf":%"IVdf". Odd ends: pad+heads+chain+tail: %"IVdf"+%"IVdf"+%"IVdf"+%"IVdf".\n", |
827e134a |
2039 | buffer.total_sbrk, buffer.sbrks, buffer.sbrk_good, |
2040 | buffer.sbrk_slack, buffer.start_slack, |
2041 | buffer.total_chain, buffer.sbrked_remains); |
df31f264 |
2042 | #endif /* DEBUGGING_MSTATS */ |
c07a80fd |
2043 | } |
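| /*
|  * Editorial sketch: code that wants the raw numbers rather than the printed
|  * report can fill a perl_mstats_t directly, just as Perl_dump_mstats() does
|  * above; the nfree/ntotal arrays must be supplied by the caller:
|  *
|  *     perl_mstats_t st;
|  *     UV nf[NBUCKETS], nt[NBUCKETS];
|  *     st.nfree  = nf;
|  *     st.ntotal = nt;
|  *     get_mstats(&st, NBUCKETS, 0);
|  *     PerlIO_printf(Perl_error_log, "%"IVdf" bytes free\n", st.totfree);
|  */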
a687059c |
2044 | #endif /* lint */ |
cf5c4ad8 |
2045 | |
cf5c4ad8 |
2046 | #ifdef USE_PERL_SBRK |
2047 | |
e3663bad |
2048 | # if defined(__MACHTEN_PPC__) || defined(NeXT) || defined(__NeXT__) || defined(PURIFY) |
38ac2dc8 |
2049 | # define PERL_SBRK_VIA_MALLOC |
38ac2dc8 |
2050 | # endif |
2051 | |
760ac839 |
2052 | # ifdef PERL_SBRK_VIA_MALLOC |
cf5c4ad8 |
2053 | |
2054 | /* It may seem schizophrenic to use perl's malloc and let it call the system */
2055 | /* malloc; the reason is that version 3.2 of the OS had frequent core dumps  */
2056 | /* inside nxzonefreenolock, and routing sbrk() through malloc() put an end   */
2057 | /* to the cores.                                                              */
2058 | |
38ac2dc8 |
2059 | # ifndef SYSTEM_ALLOC |
2060 | # define SYSTEM_ALLOC(a) malloc(a) |
2061 | # endif |
5bbd1ef5 |
2062 | # ifndef SYSTEM_ALLOC_ALIGNMENT |
2063 | # define SYSTEM_ALLOC_ALIGNMENT MEM_ALIGNBYTES |
2064 | # endif |
cf5c4ad8 |
2065 | |
760ac839 |
2066 | # endif /* PERL_SBRK_VIA_MALLOC */ |
cf5c4ad8 |
2067 | |
2068 | static IV Perl_sbrk_oldchunk; |
2069 | static long Perl_sbrk_oldsize; |
2070 | |
760ac839 |
2071 | # define PERLSBRK_32_K (1<<15) |
2072 | # define PERLSBRK_64_K (1<<16) |
cf5c4ad8 |
2073 | |
b63effbb |
2074 | Malloc_t |
df0003d4 |
2075 | Perl_sbrk(int size) |
cf5c4ad8 |
2076 | { |
2077 | IV got; |
2078 | int small, reqsize; |
2079 | |
2080 | if (!size) return 0; |
55497cff |
2081 |     /* Remember the original request; it is used both for the DEBUG_m
2082 |        statement and for the small-chunk bookkeeping below. */
2083 |     reqsize = size;
57569e04 |
2084 | #ifdef PACK_MALLOC |
2085 | size = (size + 0x7ff) & ~0x7ff; |
2086 | #endif |
cf5c4ad8 |
2087 | if (size <= Perl_sbrk_oldsize) { |
2088 | got = Perl_sbrk_oldchunk; |
2089 | Perl_sbrk_oldchunk += size; |
2090 | Perl_sbrk_oldsize -= size; |
2091 | } else { |
2092 | if (size >= PERLSBRK_32_K) { |
2093 | small = 0; |
2094 | } else { |
cf5c4ad8 |
2095 | size = PERLSBRK_64_K; |
2096 | small = 1; |
2097 | } |
5bbd1ef5 |
2098 | # if NEEDED_ALIGNMENT > SYSTEM_ALLOC_ALIGNMENT |
2099 | size += NEEDED_ALIGNMENT - SYSTEM_ALLOC_ALIGNMENT; |
2100 | # endif |
cf5c4ad8 |
2101 | got = (IV)SYSTEM_ALLOC(size); |
5bbd1ef5 |
2102 | # if NEEDED_ALIGNMENT > SYSTEM_ALLOC_ALIGNMENT |
5a7d6335 |
2103 | got = (got + NEEDED_ALIGNMENT - 1) & ~(NEEDED_ALIGNMENT - 1); |
5bbd1ef5 |
2104 | # endif |
cf5c4ad8 |
2105 | if (small) { |
2106 | /* Chunk is small, register the rest for future allocs. */ |
2107 | Perl_sbrk_oldchunk = got + reqsize; |
2108 | Perl_sbrk_oldsize = size - reqsize; |
2109 | } |
2110 | } |
2111 | |
b900a521 |
2112 | DEBUG_m(PerlIO_printf(Perl_debug_log, "sbrk malloc size %ld (reqsize %ld), left size %ld, give addr 0x%"UVxf"\n", |
2113 | size, reqsize, Perl_sbrk_oldsize, PTR2UV(got))); |
cf5c4ad8 |
2114 | |
2115 | return (void *)got; |
2116 | } |
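| /*
|  * Editorial worked example for the rounding above (PACK_MALLOC case): a
|  * request of 5000 bytes becomes (5000 + 0x7ff) & ~0x7ff = 6144, the next
|  * multiple of 2048.  Since 6144 < PERLSBRK_32_K, the chunk is further
|  * inflated to a 64K SYSTEM_ALLOC(), and the unused tail is remembered in
|  * Perl_sbrk_oldchunk/Perl_sbrk_oldsize so that later calls can be served
|  * without going back to the system allocator.
|  */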
2117 | |
2118 | #endif /* USE_PERL_SBRK */