Commit | Line | Data |
a0d0e21e |
1 | /* malloc.c |
8d063cd8 |
2 | * |
8d063cd8 |
3 | */ |
4 | |
e8bc2b5c |
5 | #ifndef NO_FANCY_MALLOC |
6 | # ifndef SMALL_BUCKET_VIA_TABLE |
7 | # define SMALL_BUCKET_VIA_TABLE |
8 | # endif |
9 | # ifndef BUCKETS_ROOT2 |
10 | # define BUCKETS_ROOT2 |
11 | # endif |
12 | # ifndef IGNORE_SMALL_BAD_FREE |
13 | # define IGNORE_SMALL_BAD_FREE |
14 | # endif |
3562ef9b |
15 | #endif |
16 | |
e8bc2b5c |
17 | #ifndef PLAIN_MALLOC /* Bulk enable features */ |
18 | # ifndef PACK_MALLOC |
19 | # define PACK_MALLOC |
20 | # endif |
21 | # ifndef TWO_POT_OPTIMIZE |
22 | # define TWO_POT_OPTIMIZE |
23 | # endif |
d720c441 |
24 | # if defined(PERL_CORE) && !defined(PERL_EMERGENCY_SBRK) |
25 | # define PERL_EMERGENCY_SBRK |
e8bc2b5c |
26 | # endif |
27 | # if defined(PERL_CORE) && !defined(DEBUGGING_MSTATS) |
28 | # define DEBUGGING_MSTATS |
29 | # endif |
30 | #endif |
31 | |
32 | #define MIN_BUC_POW2 (sizeof(void*) > 4 ? 3 : 2) /* Allow for 4-byte arena. */ |
33 | #define MIN_BUCKET (MIN_BUC_POW2 * BUCKETS_PER_POW2) |
34 | |
35 | #if !(defined(I286) || defined(atarist)) |
36 | /* take 2k unless the block is bigger than that */ |
37 | # define LOG_OF_MIN_ARENA 11 |
38 | #else |
39 | /* take 16k unless the block is bigger than that |
40 | (80286s like large segments!), probably good on the atari too */ |
41 | # define LOG_OF_MIN_ARENA 14 |
42 | #endif |
43 | |
8d063cd8 |
44 | #ifndef lint |
1944739a |
45 | # if defined(DEBUGGING) && !defined(NO_RCHECK) |
46 | # define RCHECK |
47 | # endif |
e8bc2b5c |
48 | # if defined(RCHECK) && defined(IGNORE_SMALL_BAD_FREE) |
49 | # undef IGNORE_SMALL_BAD_FREE |
50 | # endif |
8d063cd8 |
51 | /* |
52 | * malloc.c (Caltech) 2/21/82 |
53 | * Chris Kingsley, kingsley@cit-20. |
54 | * |
55 | * This is a very fast storage allocator. It allocates blocks of a small |
56 | * number of different sizes, and keeps free lists of each size. Blocks that |
57 | * don't exactly fit are passed up to the next larger size. In this |
58 | * implementation, the available sizes are 2^n-4 (or 2^n-12) bytes long. |
cf5c4ad8 |
59 | * If PACK_MALLOC is defined, small blocks are 2^n bytes long. |
8d063cd8 |
60 | * This is designed for use in a program that uses vast quantities of memory, |
61 | * but bombs when it runs out. |
62 | */ |
63 | |
d720c441 |
64 | #ifdef PERL_CORE |
65 | # include "EXTERN.h" |
66 | # include "perl.h" |
67 | #else |
68 | # ifdef PERL_FOR_X2P |
69 | # include "../EXTERN.h" |
70 | # include "../perl.h" |
71 | # else |
72 | # include <stdlib.h> |
73 | # include <stdio.h> |
74 | # include <memory.h> |
75 | # define _(arg) arg |
76 | # ifndef Malloc_t |
77 | # define Malloc_t void * |
78 | # endif |
79 | # ifndef MEM_SIZE |
80 | # define MEM_SIZE unsigned long |
81 | # endif |
82 | # ifndef LONG_MAX |
83 | # define LONG_MAX 0x7FFFFFFF |
84 | # endif |
85 | # ifndef UV |
86 | # define UV unsigned long |
87 | # endif |
88 | # ifndef caddr_t |
89 | # define caddr_t char * |
90 | # endif |
91 | # ifndef Free_t |
92 | # define Free_t void |
93 | # endif |
94 | # define Copy(s,d,n,t) (void)memcpy((char*)(d),(char*)(s), (n) * sizeof(t)) |
95 | # define PerlEnv_getenv getenv |
96 | # define PerlIO_printf fprintf |
97 | # define PerlIO_stderr() stderr |
98 | # endif |
e8bc2b5c |
99 | # ifndef croak /* make depend */ |
d720c441 |
100 | # define croak(mess, arg) warn((mess), (arg)); exit(1); |
101 | # endif |
102 | # ifndef warn |
103 | # define warn(mess, arg) fprintf(stderr, (mess), (arg)); |
e8bc2b5c |
104 | # endif |
105 | # ifdef DEBUG_m |
106 | # undef DEBUG_m |
107 | # endif |
108 | # define DEBUG_m(a) |
109 | # ifdef DEBUGGING |
110 | # undef DEBUGGING |
111 | # endif |
112 | #endif |
113 | |
114 | #ifndef MUTEX_LOCK |
115 | # define MUTEX_LOCK(l) |
116 | #endif |
117 | |
118 | #ifndef MUTEX_UNLOCK |
119 | # define MUTEX_UNLOCK(l) |
120 | #endif |
121 | |
760ac839 |
122 | #ifdef DEBUGGING |
e8bc2b5c |
123 | # undef DEBUG_m |
124 | # define DEBUG_m(a) if (debug & 128) a |
760ac839 |
125 | #endif |
126 | |
135863df |
127 | /* I don't much care whether these are defined in sys/types.h--LAW */ |
128 | |
129 | #define u_char unsigned char |
130 | #define u_int unsigned int |
e8bc2b5c |
131 | |
132 | #ifdef HAS_QUAD |
133 | # define u_bigint UV /* Needs to eat *void. */ |
134 | #else /* needed? */ |
135 | # define u_bigint unsigned long /* Needs to eat *void. */ |
136 | #endif |
137 | |
135863df |
138 | #define u_short unsigned short |
8d063cd8 |
139 | |
cf5c4ad8 |
140 | /* 286 and atarist like big chunks, which gives too much overhead. */ |
141 | #if (defined(RCHECK) || defined(I286) || defined(atarist)) && defined(PACK_MALLOC) |
e8bc2b5c |
142 | # undef PACK_MALLOC |
cf5c4ad8 |
143 | #endif |
144 | |
8d063cd8 |
145 | /* |
cf5c4ad8 |
146 | * The description below is applicable if PACK_MALLOC is not defined. |
147 | * |
8d063cd8 |
148 | * The overhead on a block is at least 4 bytes. When free, this space |
149 | * contains a pointer to the next free block, and the bottom two bits must |
150 | * be zero. When in use, the first byte is set to MAGIC, and the second |
151 | * byte is the size index. The remaining bytes are for alignment. |
152 | * If range checking is enabled and the size of the block fits |
153 | * in two bytes, then the top two bytes hold the size of the requested block |
154 | * plus the range checking words, and the header word MINUS ONE. |
155 | */ |
/*
 * Per-block header (see the narrative comment above).  When the block
 * is free, only ov_next is live; once allocated, the ovu struct
 * overlays the same bytes.  The "strut" member exists purely to force
 * 8-byte alignment of the union on platforms whose required malloc
 * alignment exceeds 4 bytes.
 */
union	overhead {
	union	overhead *ov_next;	/* when free */
#if MEM_ALIGNBYTES > 4
	double	strut;			/* alignment problems */
#endif
	struct {
		u_char	ovu_magic;	/* magic number */
		u_char	ovu_index;	/* bucket # */
#ifdef RCHECK
		u_short	ovu_size;	/* actual block size */
		u_int	ovu_rmagic;	/* range magic number */
#endif
	} ovu;
/* Shorthand accessors for the allocated-state fields: */
#define	ov_magic	ovu.ovu_magic
#define	ov_index	ovu.ovu_index
#define	ov_size		ovu.ovu_size
#define	ov_rmagic	ovu.ovu_rmagic
};
174 | |
760ac839 |
175 | #ifdef DEBUGGING |
d720c441 |
176 | static void botch _((char *diag, char *s)); |
a0d0e21e |
177 | #endif |
178 | static void morecore _((int bucket)); |
179 | static int findbucket _((union overhead *freep, int srchlen)); |
180 | |
8d063cd8 |
181 | #define MAGIC 0xff /* magic # on accounting info */ |
182 | #define RMAGIC 0x55555555 /* magic # on range info */ |
e8bc2b5c |
183 | #define RMAGIC_C 0x55 /* magic # on range info */ |
184 | |
8d063cd8 |
185 | #ifdef RCHECK |
c2a5c2d2 |
186 | # define RSLOP sizeof (u_int) |
187 | # ifdef TWO_POT_OPTIMIZE |
e8bc2b5c |
188 | # define MAX_SHORT_BUCKET (12 * BUCKETS_PER_POW2) |
c2a5c2d2 |
189 | # else |
e8bc2b5c |
190 | # define MAX_SHORT_BUCKET (13 * BUCKETS_PER_POW2) |
c2a5c2d2 |
191 | # endif |
8d063cd8 |
192 | #else |
c2a5c2d2 |
193 | # define RSLOP 0 |
8d063cd8 |
194 | #endif |
195 | |
e8bc2b5c |
196 | #if !defined(PACK_MALLOC) && defined(BUCKETS_ROOT2) |
197 | # undef BUCKETS_ROOT2 |
198 | #endif |
199 | |
200 | #ifdef BUCKETS_ROOT2 |
201 | # define BUCKET_TABLE_SHIFT 2 |
202 | # define BUCKET_POW2_SHIFT 1 |
203 | # define BUCKETS_PER_POW2 2 |
204 | #else |
205 | # define BUCKET_TABLE_SHIFT MIN_BUC_POW2 |
206 | # define BUCKET_POW2_SHIFT 0 |
207 | # define BUCKETS_PER_POW2 1 |
208 | #endif |
209 | |
210 | #ifdef BUCKETS_ROOT2 |
211 | # define MAX_BUCKET_BY_TABLE 13 |
212 | static u_short buck_size[MAX_BUCKET_BY_TABLE + 1] = |
213 | { |
214 | 0, 0, 0, 0, 4, 4, 8, 12, 16, 24, 32, 48, 64, 80, |
215 | }; |
216 | # define BUCKET_SIZE(i) ((i) % 2 ? buck_size[i] : (1 << ((i) >> BUCKET_POW2_SHIFT))) |
217 | # define BUCKET_SIZE_REAL(i) ((i) <= MAX_BUCKET_BY_TABLE \ |
218 | ? buck_size[i] \ |
219 | : ((1 << ((i) >> BUCKET_POW2_SHIFT)) \ |
220 | - MEM_OVERHEAD(i) \ |
221 | + POW2_OPTIMIZE_SURPLUS(i))) |
222 | #else |
223 | # define BUCKET_SIZE(i) (1 << ((i) >> BUCKET_POW2_SHIFT)) |
224 | # define BUCKET_SIZE_REAL(i) (BUCKET_SIZE(i) - MEM_OVERHEAD(i) + POW2_OPTIMIZE_SURPLUS(i)) |
225 | #endif |
226 | |
227 | |
cf5c4ad8 |
228 | #ifdef PACK_MALLOC |
e8bc2b5c |
229 | /* In this case it is assumed that if we do sbrk() in 2K units, we |
230 | * will get 2K aligned arenas (at least after some initial |
231 | * alignment). The bucket number of the given subblock is on the start |
232 | * of 2K arena which contains the subblock. Several following bytes |
233 | * contain the magic numbers for the subblocks in the block. |
cf5c4ad8 |
234 | * |
235 | * Sizes of chunks are powers of 2 for chunks in buckets <= |
236 | * MAX_PACKED, after this they are (2^n - sizeof(union overhead)) (to |
237 | * get alignment right). |
238 | * |
e8bc2b5c |
239 | * Consider an arena for 2^n with n>MAX_PACKED. We suppose that |
240 | * starts of all the chunks in a 2K arena are in different |
241 | * 2^n-byte-long chunks. If the top of the last chunk is aligned on a |
242 | * boundary of 2K block, this means that sizeof(union |
243 | * overhead)*"number of chunks" < 2^n, or sizeof(union overhead)*2K < |
244 | * 4^n, or n > 6 + log2(sizeof()/2)/2, since a chunk of size 2^n - |
245 | * overhead is used. Since this rules out n = 7 for 8 byte alignment, |
246 | * we specialcase allocation of the first of 16 128-byte-long chunks. |
cf5c4ad8 |
247 | * |
248 | * Note that with the above assumption we automatically have enough |
249 | * place for MAGIC at the start of 2K block. Note also that we |
e8bc2b5c |
250 | * overlay union overhead over the chunk, thus the start of small chunks |
251 | * is immediately overwritten after freeing. */ |
252 | # define MAX_PACKED_POW2 6 |
253 | # define MAX_PACKED (MAX_PACKED_POW2 * BUCKETS_PER_POW2 + BUCKET_POW2_SHIFT) |
254 | # define MAX_POW2_ALGO ((1<<(MAX_PACKED_POW2 + 1)) - M_OVERHEAD) |
255 | # define TWOK_MASK ((1<<LOG_OF_MIN_ARENA) - 1) |
256 | # define TWOK_MASKED(x) ((u_bigint)(x) & ~TWOK_MASK) |
257 | # define TWOK_SHIFT(x) ((u_bigint)(x) & TWOK_MASK) |
cf5c4ad8 |
258 | # define OV_INDEXp(block) ((u_char*)(TWOK_MASKED(block))) |
259 | # define OV_INDEX(block) (*OV_INDEXp(block)) |
260 | # define OV_MAGIC(block,bucket) (*(OV_INDEXp(block) + \ |
e8bc2b5c |
261 | (TWOK_SHIFT(block)>> \ |
262 | (bucket>>BUCKET_POW2_SHIFT)) + \ |
263 | (bucket >= MIN_NEEDS_SHIFT ? 1 : 0))) |
264 | /* A bucket can have a shift smaller than it size, we need to |
265 | shift its magic number so it will not overwrite index: */ |
266 | # ifdef BUCKETS_ROOT2 |
267 | # define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2 - 1) /* Shift 80 greater than chunk 64. */ |
268 | # else |
269 | # define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2) /* Shift 128 greater than chunk 32. */ |
270 | # endif |
cf5c4ad8 |
271 | # define CHUNK_SHIFT 0 |
272 | |
e8bc2b5c |
273 | /* Number of active buckets of given ordinal. */ |
274 | #ifdef IGNORE_SMALL_BAD_FREE |
275 | #define FIRST_BUCKET_WITH_CHECK (6 * BUCKETS_PER_POW2) /* 64 */ |
276 | # define N_BLKS(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK \ |
277 | ? ((1<<LOG_OF_MIN_ARENA) - 1)/BUCKET_SIZE(bucket) \ |
278 | : n_blks[bucket] ) |
279 | #else |
280 | # define N_BLKS(bucket) n_blks[bucket] |
281 | #endif |
282 | |
283 | static u_short n_blks[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] = |
284 | { |
285 | # if BUCKETS_PER_POW2==1 |
286 | 0, 0, |
287 | (MIN_BUC_POW2==2 ? 384 : 0), |
288 | 224, 120, 62, 31, 16, 8, 4, 2 |
289 | # else |
290 | 0, 0, 0, 0, |
291 | (MIN_BUC_POW2==2 ? 384 : 0), (MIN_BUC_POW2==2 ? 384 : 0), /* 4, 4 */ |
292 | 224, 149, 120, 80, 62, 41, 31, 25, 16, 16, 8, 8, 4, 4, 2, 2 |
293 | # endif |
294 | }; |
295 | |
296 | /* Shift of the first bucket with the given ordinal inside 2K chunk. */ |
297 | #ifdef IGNORE_SMALL_BAD_FREE |
298 | # define BLK_SHIFT(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK \ |
299 | ? ((1<<LOG_OF_MIN_ARENA) \ |
300 | - BUCKET_SIZE(bucket) * N_BLKS(bucket)) \ |
301 | : blk_shift[bucket]) |
302 | #else |
303 | # define BLK_SHIFT(bucket) blk_shift[bucket] |
304 | #endif |
305 | |
306 | static u_short blk_shift[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] = |
307 | { |
308 | # if BUCKETS_PER_POW2==1 |
309 | 0, 0, |
310 | (MIN_BUC_POW2==2 ? 512 : 0), |
311 | 256, 128, 64, 64, /* 8 to 64 */ |
312 | 16*sizeof(union overhead), |
313 | 8*sizeof(union overhead), |
314 | 4*sizeof(union overhead), |
315 | 2*sizeof(union overhead), |
316 | # else |
317 | 0, 0, 0, 0, |
318 | (MIN_BUC_POW2==2 ? 512 : 0), (MIN_BUC_POW2==2 ? 512 : 0), |
319 | 256, 260, 128, 128, 64, 80, 64, 48, /* 8 to 96 */ |
320 | 16*sizeof(union overhead), 16*sizeof(union overhead), |
321 | 8*sizeof(union overhead), 8*sizeof(union overhead), |
322 | 4*sizeof(union overhead), 4*sizeof(union overhead), |
323 | 2*sizeof(union overhead), 2*sizeof(union overhead), |
324 | # endif |
325 | }; |
cf5c4ad8 |
326 | |
cf5c4ad8 |
327 | #else /* !PACK_MALLOC */ |
328 | |
329 | # define OV_MAGIC(block,bucket) (block)->ov_magic |
330 | # define OV_INDEX(block) (block)->ov_index |
331 | # define CHUNK_SHIFT 1 |
e8bc2b5c |
332 | # define MAX_PACKED -1 |
cf5c4ad8 |
333 | #endif /* !PACK_MALLOC */ |
334 | |
e8bc2b5c |
335 | #define M_OVERHEAD (sizeof(union overhead) + RSLOP) |
336 | |
337 | #ifdef PACK_MALLOC |
338 | # define MEM_OVERHEAD(bucket) \ |
339 | (bucket <= MAX_PACKED ? 0 : M_OVERHEAD) |
340 | # ifdef SMALL_BUCKET_VIA_TABLE |
341 | # define START_SHIFTS_BUCKET ((MAX_PACKED_POW2 + 1) * BUCKETS_PER_POW2) |
342 | # define START_SHIFT MAX_PACKED_POW2 |
343 | # ifdef BUCKETS_ROOT2 /* Chunks of size 3*2^n. */ |
344 | # define SIZE_TABLE_MAX 80 |
345 | # else |
346 | # define SIZE_TABLE_MAX 64 |
347 | # endif |
348 | static char bucket_of[] = |
349 | { |
350 | # ifdef BUCKETS_ROOT2 /* Chunks of size 3*2^n. */ |
351 | /* 0 to 15 in 4-byte increments. */ |
352 | (sizeof(void*) > 4 ? 6 : 5), /* 4/8, 5-th bucket for better reports */ |
353 | 6, /* 8 */ |
354 | 7, 8, /* 12, 16 */ |
355 | 9, 9, 10, 10, /* 24, 32 */ |
356 | 11, 11, 11, 11, /* 48 */ |
357 | 12, 12, 12, 12, /* 64 */ |
358 | 13, 13, 13, 13, /* 80 */ |
359 | 13, 13, 13, 13 /* 80 */ |
360 | # else /* !BUCKETS_ROOT2 */ |
361 | /* 0 to 15 in 4-byte increments. */ |
362 | (sizeof(void*) > 4 ? 3 : 2), |
363 | 3, |
364 | 4, 4, |
365 | 5, 5, 5, 5, |
366 | 6, 6, 6, 6, |
367 | 6, 6, 6, 6 |
368 | # endif /* !BUCKETS_ROOT2 */ |
369 | }; |
370 | # else /* !SMALL_BUCKET_VIA_TABLE */ |
371 | # define START_SHIFTS_BUCKET MIN_BUCKET |
372 | # define START_SHIFT (MIN_BUC_POW2 - 1) |
373 | # endif /* !SMALL_BUCKET_VIA_TABLE */ |
374 | #else /* !PACK_MALLOC */ |
375 | # define MEM_OVERHEAD(bucket) M_OVERHEAD |
376 | # ifdef SMALL_BUCKET_VIA_TABLE |
377 | # undef SMALL_BUCKET_VIA_TABLE |
378 | # endif |
379 | # define START_SHIFTS_BUCKET MIN_BUCKET |
380 | # define START_SHIFT (MIN_BUC_POW2 - 1) |
381 | #endif /* !PACK_MALLOC */ |
cf5c4ad8 |
382 | |
8d063cd8 |
383 | /* |
55497cff |
384 | * Big allocations are often of the size 2^n bytes. To make them a |
385 | * little bit better, make blocks of size 2^n+pagesize for big n. |
386 | */ |
387 | |
388 | #ifdef TWO_POT_OPTIMIZE |
389 | |
5f05dabc |
390 | # ifndef PERL_PAGESIZE |
391 | # define PERL_PAGESIZE 4096 |
392 | # endif |
e8bc2b5c |
393 | # ifndef FIRST_BIG_POW2 |
394 | # define FIRST_BIG_POW2 15 /* 32K, 16K is used too often. */ |
5f05dabc |
395 | # endif |
e8bc2b5c |
396 | # define FIRST_BIG_BLOCK (1<<FIRST_BIG_POW2) |
55497cff |
397 | /* If this value or more, check against bigger blocks. */ |
398 | # define FIRST_BIG_BOUND (FIRST_BIG_BLOCK - M_OVERHEAD) |
399 | /* If less than this value, goes into 2^n-overhead-block. */ |
400 | # define LAST_SMALL_BOUND ((FIRST_BIG_BLOCK>>1) - M_OVERHEAD) |
401 | |
e8bc2b5c |
402 | # define POW2_OPTIMIZE_ADJUST(nbytes) \ |
403 | ((nbytes >= FIRST_BIG_BOUND) ? nbytes -= PERL_PAGESIZE : 0) |
404 | # define POW2_OPTIMIZE_SURPLUS(bucket) \ |
405 | ((bucket >= FIRST_BIG_POW2 * BUCKETS_PER_POW2) ? PERL_PAGESIZE : 0) |
406 | |
407 | #else /* !TWO_POT_OPTIMIZE */ |
408 | # define POW2_OPTIMIZE_ADJUST(nbytes) |
409 | # define POW2_OPTIMIZE_SURPLUS(bucket) 0 |
410 | #endif /* !TWO_POT_OPTIMIZE */ |
411 | |
412 | #if defined(HAS_64K_LIMIT) && defined(PERL_CORE) |
413 | # define BARK_64K_LIMIT(what,nbytes,size) \ |
414 | if (nbytes > 0xffff) { \ |
415 | PerlIO_printf(PerlIO_stderr(), \ |
416 | "%s too large: %lx\n", what, size); \ |
417 | my_exit(1); \ |
418 | } |
419 | #else /* !HAS_64K_LIMIT || !PERL_CORE */ |
420 | # define BARK_64K_LIMIT(what,nbytes,size) |
421 | #endif /* !HAS_64K_LIMIT || !PERL_CORE */ |
55497cff |
422 | |
e8bc2b5c |
423 | #ifndef MIN_SBRK |
424 | # define MIN_SBRK 2048 |
425 | #endif |
426 | |
427 | #ifndef FIRST_SBRK |
d720c441 |
428 | # define FIRST_SBRK (48*1024) |
e8bc2b5c |
429 | #endif |
430 | |
431 | /* Minimal sbrk in percents of what is already alloced. */ |
432 | #ifndef MIN_SBRK_FRAC |
433 | # define MIN_SBRK_FRAC 3 |
434 | #endif |
435 | |
436 | #ifndef SBRK_ALLOW_FAILURES |
437 | # define SBRK_ALLOW_FAILURES 3 |
438 | #endif |
55497cff |
439 | |
e8bc2b5c |
440 | #ifndef SBRK_FAILURE_PRICE |
441 | # define SBRK_FAILURE_PRICE 50 |
55497cff |
442 | #endif |
443 | |
e8bc2b5c |
444 | #if defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE) |
445 | |
446 | # ifndef BIG_SIZE |
447 | # define BIG_SIZE (1<<16) /* 64K */ |
448 | # endif |
449 | |
55497cff |
450 | static char *emergency_buffer; |
451 | static MEM_SIZE emergency_buffer_size; |
452 | |
52082926 |
453 | static Malloc_t |
55497cff |
454 | emergency_sbrk(size) |
455 | MEM_SIZE size; |
456 | { |
457 | if (size >= BIG_SIZE) { |
458 | /* Give the possibility to recover: */ |
d720c441 |
459 | MUTEX_UNLOCK(&malloc_mutex); |
460 | croak("Out of memory during request for %i bytes", size); |
55497cff |
461 | } |
462 | |
463 | if (!emergency_buffer) { |
18f739ee |
464 | dTHR; |
55497cff |
465 | /* First offense, give a possibility to recover by dieing. */ |
466 | /* No malloc involved here: */ |
467 | GV **gvp = (GV**)hv_fetch(defstash, "^M", 2, 0); |
468 | SV *sv; |
469 | char *pv; |
470 | |
471 | if (!gvp) gvp = (GV**)hv_fetch(defstash, "\015", 1, 0); |
472 | if (!gvp || !(sv = GvSV(*gvp)) || !SvPOK(sv) |
e8bc2b5c |
473 | || (SvLEN(sv) < (1<<LOG_OF_MIN_ARENA) - M_OVERHEAD)) |
55497cff |
474 | return (char *)-1; /* Now die die die... */ |
475 | |
476 | /* Got it, now detach SvPV: */ |
bbce6d69 |
477 | pv = SvPV(sv, na); |
55497cff |
478 | /* Check alignment: */ |
e8bc2b5c |
479 | if (((u_bigint)(pv - M_OVERHEAD)) & ((1<<LOG_OF_MIN_ARENA) - 1)) { |
55497cff |
480 | PerlIO_puts(PerlIO_stderr(),"Bad alignment of $^M!\n"); |
bbce6d69 |
481 | return (char *)-1; /* die die die */ |
55497cff |
482 | } |
483 | |
484 | emergency_buffer = pv - M_OVERHEAD; |
485 | emergency_buffer_size = SvLEN(sv) + M_OVERHEAD; |
486 | SvPOK_off(sv); |
487 | SvREADONLY_on(sv); |
d720c441 |
488 | MUTEX_UNLOCK(&malloc_mutex); |
489 | croak("Out of memory during request for %i bytes", size); |
ff68c719 |
490 | } |
491 | else if (emergency_buffer_size >= size) { |
55497cff |
492 | emergency_buffer_size -= size; |
493 | return emergency_buffer + emergency_buffer_size; |
494 | } |
495 | |
496 | return (char *)-1; /* poor guy... */ |
497 | } |
498 | |
e8bc2b5c |
499 | #else /* !(defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)) */ |
55497cff |
500 | # define emergency_sbrk(size) -1 |
e8bc2b5c |
501 | #endif /* !(defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)) */ |
55497cff |
502 | |
503 | /* |
e8bc2b5c |
504 | * nextf[i] is the pointer to the next free block of size 2^i. The |
8d063cd8 |
505 | * smallest allocatable block is 8 bytes. The overhead information |
506 | * precedes the data area returned to the user. |
507 | */ |
e8bc2b5c |
508 | #define NBUCKETS (32*BUCKETS_PER_POW2 + 1) |
8d063cd8 |
509 | static union overhead *nextf[NBUCKETS]; |
cf5c4ad8 |
510 | |
511 | #ifdef USE_PERL_SBRK |
512 | #define sbrk(a) Perl_sbrk(a) |
52082926 |
513 | Malloc_t Perl_sbrk _((int size)); |
8ac85365 |
514 | #else |
515 | #ifdef DONT_DECLARE_STD |
516 | #ifdef I_UNISTD |
517 | #include <unistd.h> |
518 | #endif |
cf5c4ad8 |
519 | #else |
52082926 |
520 | extern Malloc_t sbrk(int); |
8ac85365 |
521 | #endif |
cf5c4ad8 |
522 | #endif |
8d063cd8 |
523 | |
c07a80fd |
524 | #ifdef DEBUGGING_MSTATS |
8d063cd8 |
525 | /* |
526 | * nmalloc[i] is the difference between the number of mallocs and frees |
527 | * for a given block size. |
528 | */ |
529 | static u_int nmalloc[NBUCKETS]; |
5f05dabc |
530 | static u_int sbrk_slack; |
531 | static u_int start_slack; |
8d063cd8 |
532 | #endif |
533 | |
e8bc2b5c |
534 | static u_int goodsbrk; |
535 | |
760ac839 |
#ifdef DEBUGGING
/* ASSERT(p, diag): if predicate p is false, report the stringified
 * predicate plus the diagnostic tag and abort.  The trailing "else"
 * lets the macro be followed by a semicolon inside if/else chains
 * without swallowing the wrong branch. */
#define	ASSERT(p,diag)   if (!(p)) botch(diag,STRINGIFY(p));  else
/* Report a failed internal consistency check on stderr and abort. */
static void
botch(char *diag, char *s)
{
	PerlIO_printf(PerlIO_stderr(), "assertion botched (%s?): %s\n", diag, s);
	PerlProc_abort();
}
#else
#define	ASSERT(p, diag)
#endif
547 | |
2304df62 |
/*
 * malloc() -- allocate nbytes and return a pointer to the usable area.
 *
 * Maps the request to a power-of-two (or root-2-spaced) bucket, pulls
 * the head block off that bucket's free list (calling morecore() to
 * refill an empty list), stamps the accounting header, and returns the
 * area just past the header.  Runs under malloc_mutex throughout.
 * Under RCHECK the block is additionally fenced with magic bytes so
 * free()/realloc() can detect overruns.
 */
Malloc_t
malloc(register size_t nbytes)
{
  	register union overhead *p;
  	register int bucket;
  	register MEM_SIZE shiftr;

#if defined(DEBUGGING) || defined(RCHECK)
	MEM_SIZE size = nbytes;		/* original request, for diagnostics */
#endif

	BARK_64K_LIMIT("Allocation",nbytes,nbytes);
#ifdef DEBUGGING
	/* A "negative" size means the caller overflowed somewhere. */
	if ((long)nbytes < 0)
	    croak("%s", "panic: malloc");
#endif

	MUTEX_LOCK(&malloc_mutex);
	/*
	 * Convert amount of memory requested into
	 * closest block size stored in hash buckets
	 * which satisfies request.  Account for
	 * space used per block for accounting.
	 */
#ifdef PACK_MALLOC
#  ifdef SMALL_BUCKET_VIA_TABLE
	/* Small sizes: direct table lookup, no header overhead added. */
	if (nbytes == 0)
	    bucket = MIN_BUCKET;
	else if (nbytes <= SIZE_TABLE_MAX) {
	    bucket = bucket_of[(nbytes - 1) >> BUCKET_TABLE_SHIFT];
	} else
#  else
	if (nbytes == 0)
	    nbytes = 1;
	/* Packed small chunks skip the overhead adjustment below. */
	if (nbytes <= MAX_POW2_ALGO) goto do_shifts;
	else
#  endif
#endif
	{
	    POW2_OPTIMIZE_ADJUST(nbytes);
	    nbytes += M_OVERHEAD;	/* make room for the header */
	    nbytes = (nbytes + 3) &~ 3;	/* round up to 4-byte multiple */
	  do_shifts:
	    shiftr = (nbytes - 1) >> START_SHIFT;
	    bucket = START_SHIFTS_BUCKET;
	    /* apart from this loop, this is O(1) */
	    while (shiftr >>= 1)
  		bucket += BUCKETS_PER_POW2;
	}
	/*
	 * If nothing in hash bucket right now,
	 * request more memory from the system.
	 */
  	if (nextf[bucket] == NULL)
  		morecore(bucket);
  	if ((p = nextf[bucket]) == NULL) {
		/* morecore() failed too: truly out of memory. */
		MUTEX_UNLOCK(&malloc_mutex);
#ifdef PERL_CORE
		if (!nomemok) {
		    PerlIO_puts(PerlIO_stderr(),"Out of memory!\n");
		    my_exit(1);
		}
#else
  		return (NULL);
#endif
	}

	/* NOTE(review): "an" looks like a global debug sequence counter
	 * declared elsewhere in perl -- not visible here; confirm. */
	DEBUG_m(PerlIO_printf(Perl_debug_log,
			      "0x%lx: (%05lu) malloc %ld bytes\n",
			      (unsigned long)(p+1), (unsigned long)(an++),
			      (long)size));

	/* remove from linked list */
#ifdef RCHECK
	/* A free-list pointer must be header-aligned; anything else
	 * means the list has been stomped on. */
	if (*((int*)p) & (sizeof(union overhead) - 1))
	    PerlIO_printf(PerlIO_stderr(), "Corrupt malloc ptr 0x%lx at 0x%lx\n",
		(unsigned long)*((int*)p),(unsigned long)p);
#endif
  	nextf[bucket] = p->ov_next;
#ifdef IGNORE_SMALL_BAD_FREE
	/* Tiny packed buckets carry no magic; only stamp bigger ones. */
	if (bucket >= FIRST_BUCKET_WITH_CHECK)
#endif
	    OV_MAGIC(p, bucket) = MAGIC;
#ifndef PACK_MALLOC
	OV_INDEX(p) = bucket;
#endif
#ifdef RCHECK
	/*
	 * Record allocated size of block and
	 * bound space with magic numbers.
	 */
	p->ov_rmagic = RMAGIC;
	if (bucket <= MAX_SHORT_BUCKET) {
	    int i;

	    nbytes = size + M_OVERHEAD;
	    p->ov_size = nbytes - 1;	/* ov_size is u_short: size-1 fits */
	    if ((i = nbytes & 3)) {
		/* Pad the unaligned tail bytes with RMAGIC_C so free()
		 * can verify them individually. */
		i = 4 - i;
		while (i--)
		    *((char *)((caddr_t)p + nbytes - RSLOP + i)) = RMAGIC_C;
	    }
	    nbytes = (nbytes + 3) &~ 3;
	    /* Full-word trailer magic just past the user area. */
	    *((u_int *)((caddr_t)p + nbytes - RSLOP)) = RMAGIC;
	}
#endif
	MUTEX_UNLOCK(&malloc_mutex);
	/* CHUNK_SHIFT is 0 under PACK_MALLOC (header lives in the arena
	 * preamble), 1 otherwise (header precedes the user area). */
  	return ((Malloc_t)(p + CHUNK_SHIFT));
}
657 | |
e8bc2b5c |
658 | static char *last_sbrk_top; |
659 | static char *last_op; /* This arena can be easily extended. */ |
660 | static int sbrked_remains; |
661 | static int sbrk_good = SBRK_ALLOW_FAILURES * SBRK_FAILURE_PRICE; |
662 | |
663 | #ifdef DEBUGGING_MSTATS |
664 | static int sbrks; |
665 | #endif |
666 | |
667 | struct chunk_chain_s { |
668 | struct chunk_chain_s *next; |
669 | MEM_SIZE size; |
670 | }; |
671 | static struct chunk_chain_s *chunk_chain; |
672 | static int n_chunks; |
673 | static char max_bucket; |
674 | |
675 | /* Cutoff a piece of one of the chunks in the chain. Prefer smaller chunk. */ |
676 | static void * |
677 | get_from_chain(MEM_SIZE size) |
678 | { |
679 | struct chunk_chain_s *elt = chunk_chain, **oldp = &chunk_chain; |
680 | struct chunk_chain_s **oldgoodp = NULL; |
681 | long min_remain = LONG_MAX; |
682 | |
683 | while (elt) { |
684 | if (elt->size >= size) { |
685 | long remains = elt->size - size; |
686 | if (remains >= 0 && remains < min_remain) { |
687 | oldgoodp = oldp; |
688 | min_remain = remains; |
689 | } |
690 | if (remains == 0) { |
691 | break; |
692 | } |
693 | } |
694 | oldp = &( elt->next ); |
695 | elt = elt->next; |
696 | } |
697 | if (!oldgoodp) return NULL; |
698 | if (min_remain) { |
699 | void *ret = *oldgoodp; |
700 | struct chunk_chain_s *next = (*oldgoodp)->next; |
701 | |
702 | *oldgoodp = (struct chunk_chain_s *)((char*)ret + size); |
703 | (*oldgoodp)->size = min_remain; |
704 | (*oldgoodp)->next = next; |
705 | return ret; |
706 | } else { |
707 | void *ret = *oldgoodp; |
708 | *oldgoodp = (*oldgoodp)->next; |
709 | n_chunks--; |
710 | return ret; |
711 | } |
712 | } |
713 | |
714 | static void |
715 | add_to_chain(void *p, MEM_SIZE size, MEM_SIZE chip) |
716 | { |
717 | struct chunk_chain_s *next = chunk_chain; |
718 | char *cp = (char*)p; |
719 | |
720 | cp += chip; |
721 | chunk_chain = (struct chunk_chain_s *)cp; |
722 | chunk_chain->size = size - chip; |
723 | chunk_chain->next = next; |
724 | n_chunks++; |
725 | } |
726 | |
/*
 * Steal a free block from a bucket of at least "bucket" to satisfy a
 * request of "size" bytes, returning the raw block (leftover beyond
 * "size" is pushed onto chunk_chain).  Per-bucket "prices" make the
 * theft lazy: a bucket is only raided after it has been considered
 * often enough, so rarely-needed big blocks aren't shredded eagerly.
 * Returns NULL if no eligible bucket is populated.
 */
static void *
get_from_bigger_buckets(int bucket, MEM_SIZE size)
{
	int price = 1;
	static int bucketprice[NBUCKETS];	/* per-bucket raid counters */
	while (bucket <= max_bucket) {
	    /* We postpone stealing from bigger buckets until we want it
	       often enough. */
	    if (nextf[bucket] && bucketprice[bucket]++ >= price) {
		/* Steal it! */
		/* Back up over the header so ret is the raw block start. */
		void *ret = (void*)(nextf[bucket] - 1 + CHUNK_SHIFT);
		bucketprice[bucket] = 0;
		if (((char*)nextf[bucket]) - M_OVERHEAD == last_op) {
		    last_op = NULL; /* Disable optimization */
		}
		nextf[bucket] = nextf[bucket]->ov_next;
#ifdef DEBUGGING_MSTATS
		nmalloc[bucket]--;
		start_slack -= M_OVERHEAD;
#endif
		/* Chain whatever the caller won't use beyond "size". */
		add_to_chain(ret, (BUCKET_SIZE(bucket) +
				   POW2_OPTIMIZE_SURPLUS(bucket)),
			     size);
		return ret;
	    }
	    bucket++;
	}
	return NULL;
}
756 | |
fa423c5b |
/*
 * getpages() -- obtain at least "needed" bytes from the system via
 * sbrk() for a new arena of bucket "bucket" holding *nblksp blocks.
 *
 * Strategy: over-ask (FIRST_SBRK / MIN_SBRK / a percentage of all good
 * sbrks so far) so future arenas can be carved from the tail
 * (sbrked_remains) without another syscall.  Handles three outcomes:
 * the new break continues the previous one (fast path), sbrk failure
 * (fall back to emergency_sbrk), or a discontinuous break (re-align,
 * possibly retry, and chain unusable slop).  Returns the arena start,
 * or 0 on failure.  May decrement *nblksp if alignment padding eats a
 * block.  Caller holds malloc_mutex.
 */
static union overhead *
getpages(int needed, int *nblksp, int bucket)
{
    /* Need to do (possibly expensive) system call. Try to
       optimize it for rare calling. */
    MEM_SIZE require = needed - sbrked_remains;
    char *cp;
    union overhead *ovp;
    int slack = 0;

    if (sbrk_good > 0) {
	/* Over-allocate: first call gets FIRST_SBRK, later calls at
	 * least MIN_SBRK, scaled up to MIN_SBRK_FRAC% of total good
	 * sbrks, then rounded up to a MIN_SBRK multiple. */
	if (!last_sbrk_top && require < FIRST_SBRK)
	    require = FIRST_SBRK;
	else if (require < MIN_SBRK) require = MIN_SBRK;

	/* NOTE(review): goodsbrk is only incremented under
	 * DEBUGGING_MSTATS (see end of function), so this scaling term
	 * appears inert in non-stats builds -- confirm intent. */
	if (require < goodsbrk * MIN_SBRK_FRAC / 100)
	    require = goodsbrk * MIN_SBRK_FRAC / 100;
	require = ((require - 1 + MIN_SBRK) / MIN_SBRK) * MIN_SBRK;
    } else {
	/* sbrk has been misbehaving: ask for exactly what we need. */
	require = needed;
	last_sbrk_top = 0;
	sbrked_remains = 0;
    }

    DEBUG_m(PerlIO_printf(Perl_debug_log,
			  "sbrk(%ld) for %ld-byte-long arena\n",
			  (long)require, (long) needed));
    cp = (char *)sbrk(require);
#ifdef DEBUGGING_MSTATS
    sbrks++;
#endif
    if (cp == last_sbrk_top) {
	/* Common case, anything is fine. */
	sbrk_good++;
	/* New arena starts inside the tail left by the previous sbrk. */
	ovp = (union overhead *) (cp - sbrked_remains);
	sbrked_remains = require - (needed - sbrked_remains);
    } else if (cp == (char *)-1) { /* no more room! */
	ovp = (union overhead *)emergency_sbrk(needed);
	if (ovp == (union overhead *)-1)
	    return 0;
	return ovp;
    } else {			/* Non-continuous or first sbrk(). */
	long add = sbrked_remains;
	char *newcp;

	if (sbrked_remains) {	/* Put rest into chain, we
				   cannot use it right now. */
	    add_to_chain((void*)(last_sbrk_top - sbrked_remains),
			 sbrked_remains, 0);
	}

	/* Second, check alignment. */
	slack = 0;

#ifndef atarist /* on the atari we dont have to worry about this */
#  ifndef I286 	/* The sbrk(0) call on the I286 always returns the next segment */

	/* CHUNK_SHIFT is 1 for PACK_MALLOC, 0 otherwise. */
	if ((UV)cp & (0x7FF >> CHUNK_SHIFT)) { /* Not aligned. */
	    slack = (0x800 >> CHUNK_SHIFT)
		- ((UV)cp & (0x7FF >> CHUNK_SHIFT));
	    add += slack;
	}
#  endif
#endif /* atarist */

	if (add) {
	    /* Ask for the extra bytes; if the result is adjacent, the
	     * hole (old tail + slack) was merely skipped and we can use
	     * cp + slack as an aligned arena. */
	    DEBUG_m(PerlIO_printf(Perl_debug_log,
				  "sbrk(%ld) to fix non-continuous/off-page sbrk:\n\t%ld for alignement,\t%ld were assumed to come from the tail of the previous sbrk\n",
				  (long)add, (long) slack,
				  (long) sbrked_remains));
	    newcp = (char *)sbrk(add);
#if defined(DEBUGGING_MSTATS)
	    sbrks++;
	    sbrk_slack += add;
#endif
	    if (newcp != cp + require) {
		/* Too bad: even rounding sbrk() is not continuous.*/
		DEBUG_m(PerlIO_printf(Perl_debug_log,
				      "failed to fix bad sbrk()\n"));
#ifdef PACK_MALLOC
		/* PACK_MALLOC relies on 2K-aligned arenas; an unfixable
		 * off-page break is fatal. */
		if (slack) {
		    MUTEX_UNLOCK(&malloc_mutex);
		    croak("%s", "panic: Off-page sbrk");
		}
#endif
		if (sbrked_remains) {
		    /* Try again. */
#if defined(DEBUGGING_MSTATS)
		    sbrk_slack += require;
#endif
		    require = needed;
		    DEBUG_m(PerlIO_printf(Perl_debug_log,
					  "straight sbrk(%ld)\n",
					  (long)require));
		    cp = (char *)sbrk(require);
#ifdef DEBUGGING_MSTATS
		    sbrks++;
#endif
		    if (cp == (char *)-1)
			return 0;
		}
		sbrk_good = -1;	/* Disable optimization!
				   Continue with not-aligned... */
	    } else {
		cp += slack;
		require += sbrked_remains;
	    }
	}

	if (last_sbrk_top) {
	    /* Any discontinuity after the first sbrk counts against
	     * future over-allocation. */
	    sbrk_good -= SBRK_FAILURE_PRICE;
	}

	ovp = (union overhead *) cp;
	/*
	 * Round up to minimum allocation size boundary
	 * and deduct from block count to reflect.
	 */

#ifndef I286	/* Again, this should always be ok on an 80286 */
	if ((UV)ovp & 7) {
	    ovp = (union overhead *)(((UV)ovp + 8) & ~7);
	    /* NOTE(review): ovp was just rounded, so this debug value
	     * is always 0 -- the offset should be captured before the
	     * fix-up.  Diagnostic-only; left as is. */
	    DEBUG_m(PerlIO_printf(Perl_debug_log,
				  "fixing sbrk(): %d bytes off machine alignement\n",
				  (int)((UV)ovp & 7)));
	    (*nblksp)--;
# if defined(DEBUGGING_MSTATS)
	    /* This is only approx. if TWO_POT_OPTIMIZE: */
	    sbrk_slack += (1 << bucket);
# endif
	}
#endif
	sbrked_remains = require - needed;
    }
    last_sbrk_top = cp + require;
    last_op = (char*) cp;
#ifdef DEBUGGING_MSTATS
    goodsbrk += require;
#endif
    return ovp;
}
899 | |
900 | static int |
901 | getpages_adjacent(int require) |
902 | { |
903 | if (require <= sbrked_remains) { |
904 | sbrked_remains -= require; |
905 | } else { |
906 | char *cp; |
907 | |
908 | require -= sbrked_remains; |
909 | /* We do not try to optimize sbrks here, we go for place. */ |
910 | cp = (char*) sbrk(require); |
911 | #ifdef DEBUGGING_MSTATS |
912 | sbrks++; |
913 | goodsbrk += require; |
914 | #endif |
915 | if (cp == last_sbrk_top) { |
916 | sbrked_remains = 0; |
917 | last_sbrk_top = cp + require; |
918 | } else { |
919 | /* Report the failure: */ |
920 | if (sbrked_remains) |
921 | add_to_chain((void*)(last_sbrk_top - sbrked_remains), |
922 | sbrked_remains, 0); |
923 | add_to_chain((void*)cp, require, 0); |
924 | sbrk_good -= SBRK_FAILURE_PRICE; |
925 | sbrked_remains = 0; |
926 | last_sbrk_top = 0; |
927 | last_op = 0; |
928 | return 0; |
929 | } |
930 | } |
931 | |
932 | return 1; |
933 | } |
934 | |
8d063cd8 |
/*
 * Allocate more memory to the indicated bucket.
 *
 * Called with malloc_mutex held.  Refills nextf[bucket] with a chain
 * of fresh free blocks, obtained (in order of preference) from the
 * arena-sized power-of-2 bucket, the chunk chain, a bigger bucket,
 * the unused tail of the last sbrk(), or a new sbrk() via getpages().
 */
static void
morecore(register int bucket)
{
  	register union overhead *ovp;
  	register int rnu;       /* 2^rnu bytes will be requested */
  	int nblks;              /* become nblks blocks of the desired size */
	register MEM_SIZE siz, needed;

  	if (nextf[bucket])
  		return;
	/* A bucket this big could not be addressed by MEM_SIZE at all. */
	if (bucket == sizeof(MEM_SIZE)*8*BUCKETS_PER_POW2) {
	    MUTEX_UNLOCK(&malloc_mutex);
	    croak("%s", "Out of memory during ridiculously large request");
	}
	if (bucket > max_bucket)
	    max_bucket = bucket;

	/* Small buckets are carved out of a minimum-size arena. */
  	rnu = ( (bucket <= (LOG_OF_MIN_ARENA << BUCKET_POW2_SHIFT))
		? LOG_OF_MIN_ARENA
		: (bucket >> BUCKET_POW2_SHIFT) );
	/* This may be overwritten later: */
	nblks = 1 << (rnu - (bucket >> BUCKET_POW2_SHIFT)); /* how many blocks to get */
	needed = ((MEM_SIZE)1 << rnu) + POW2_OPTIMIZE_SURPLUS(bucket);
	if (nextf[rnu << BUCKET_POW2_SHIFT]) { /* 2048b bucket. */
	    /* Steal one arena-sized block from the matching pow-2 bucket. */
	    ovp = nextf[rnu << BUCKET_POW2_SHIFT] - 1 + CHUNK_SHIFT;
	    nextf[rnu << BUCKET_POW2_SHIFT]
		= nextf[rnu << BUCKET_POW2_SHIFT]->ov_next;
#ifdef DEBUGGING_MSTATS
	    nmalloc[rnu << BUCKET_POW2_SHIFT]--;
	    start_slack -= M_OVERHEAD;
#endif
	    DEBUG_m(PerlIO_printf(Perl_debug_log,
				  "stealing %ld bytes from %ld arena\n",
				  (long) needed, (long) rnu << BUCKET_POW2_SHIFT));
	} else if (chunk_chain
		   && (ovp = (union overhead*) get_from_chain(needed))) {
	    DEBUG_m(PerlIO_printf(Perl_debug_log,
				  "stealing %ld bytes from chain\n",
				  (long) needed));
	} else if ( (ovp = (union overhead*)
		     get_from_bigger_buckets((rnu << BUCKET_POW2_SHIFT) + 1,
					     needed)) ) {
	    DEBUG_m(PerlIO_printf(Perl_debug_log,
				  "stealing %ld bytes from bigger buckets\n",
				  (long) needed));
	} else if (needed <= sbrked_remains) {
	    /* The tail of the previous sbrk() is big enough. */
	    ovp = (union overhead *)(last_sbrk_top - sbrked_remains);
	    sbrked_remains -= needed;
	    last_op = (char*)ovp;
	} else
	    ovp = getpages(needed, &nblks, bucket);

	if (!ovp)
	    return;

	/*
	 * Add new memory allocated to that on
	 * free list for this hash bucket.
	 */
  	siz = BUCKET_SIZE(bucket);
#ifdef PACK_MALLOC
	*(u_char*)ovp = bucket;	/* Fill index. */
	if (bucket <= MAX_PACKED) {
	    /* Packed arena: blocks carry no per-block overhead. */
	    ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket));
	    nblks = N_BLKS(bucket);
#  ifdef DEBUGGING_MSTATS
	    start_slack += BLK_SHIFT(bucket);
#  endif
	} else if (bucket < LOG_OF_MIN_ARENA * BUCKETS_PER_POW2) {
	    ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket));
	    siz -= sizeof(union overhead);
	} else ovp++;		/* One chunk per block. */
#endif /* PACK_MALLOC */
  	nextf[bucket] = ovp;
#ifdef DEBUGGING_MSTATS
	nmalloc[bucket] += nblks;
	if (bucket > MAX_PACKED) {
	    start_slack += M_OVERHEAD * nblks;
	}
#endif
	/* Thread the nblks fresh blocks together into a free list. */
  	while (--nblks > 0) {
		ovp->ov_next = (union overhead *)((caddr_t)ovp + siz);
		ovp = (union overhead *)((caddr_t)ovp + siz);
  	}
	/* Not all sbrks return zeroed memory.*/
	ovp->ov_next = (union overhead *)NULL;
#ifdef PACK_MALLOC
	if (bucket == 7*BUCKETS_PER_POW2) { /* Special case, explanation is above. */
	    union overhead *n_op = nextf[7*BUCKETS_PER_POW2]->ov_next;
	    nextf[7*BUCKETS_PER_POW2] =
		(union overhead *)((caddr_t)nextf[7*BUCKETS_PER_POW2]
				   - sizeof(union overhead));
	    nextf[7*BUCKETS_PER_POW2]->ov_next = n_op;
	}
#endif /* !PACK_MALLOC */
}
1034 | |
94b6baf5 |
/*
 * Return a block obtained from malloc() to the free list of its size
 * bucket.  A block whose header fails the magic-number check is left
 * alone with a warning (suppressible via the PERL_BADFREE environment
 * variable).  free(NULL) is a no-op.
 */
Free_t
free(void *mp)
{
  	register MEM_SIZE size;
	register union overhead *ovp;
	char *cp = (char*)mp;
#ifdef PACK_MALLOC
	u_char bucket;
#endif

	DEBUG_m(PerlIO_printf(Perl_debug_log,
			      "0x%lx: (%05lu) free\n",
			      (unsigned long)cp, (unsigned long)(an++)));

	if (cp == NULL)
		return;
	/* Step back over the bookkeeping header to the overhead union. */
	ovp = (union overhead *)((caddr_t)cp
				- sizeof (union overhead) * CHUNK_SHIFT);
#ifdef PACK_MALLOC
	bucket = OV_INDEX(ovp);
#endif
#ifdef IGNORE_SMALL_BAD_FREE
	/* Small packed buckets carry no magic; only check bigger ones. */
	if ((bucket >= FIRST_BUCKET_WITH_CHECK)
	    && (OV_MAGIC(ovp, bucket) != MAGIC))
#else
	if (OV_MAGIC(ovp, bucket) != MAGIC)
#endif
	    {
		static int bad_free_warn = -1;
		if (bad_free_warn == -1) {
		    char *pbf = PerlEnv_getenv("PERL_BADFREE");
		    bad_free_warn = (pbf) ? atoi(pbf) : 1;
		}
		if (!bad_free_warn)
		    return;
#ifdef RCHECK
		warn("%s free() ignored",
		    ovp->ov_rmagic == RMAGIC - 1 ? "Duplicate" : "Bad");
#else
		warn("%s", "Bad free() ignored");
#endif
		return;				/* sanity */
	    }
	MUTEX_LOCK(&malloc_mutex);
#ifdef RCHECK
  	ASSERT(ovp->ov_rmagic == RMAGIC, "chunk's head overwrite");
	if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) {
	    int i;
	    MEM_SIZE nbytes = ovp->ov_size + 1;

	    /* Check the RMAGIC_C pad bytes up to the word boundary... */
	    if ((i = nbytes & 3)) {
		i = 4 - i;
		while (i--) {
		    ASSERT(*((char *)((caddr_t)ovp + nbytes - RSLOP + i))
			   == RMAGIC_C, "chunk's tail overwrite");
		}
	    }
	    /* ...then the whole RMAGIC word at the rounded-up end. */
	    nbytes = (nbytes + 3) &~ 3;
	    ASSERT(*(u_int *)((caddr_t)ovp + nbytes - RSLOP) == RMAGIC, "chunk's tail overwrite");
	}
	/* Mark as freed so a second free() is reported as "Duplicate". */
	ovp->ov_rmagic = RMAGIC - 1;
#endif
  	ASSERT(OV_INDEX(ovp) < NBUCKETS, "chunk's head overwrite");
  	size = OV_INDEX(ovp);
	/* Push the block onto the head of its bucket's free list. */
	ovp->ov_next = nextf[size];
  	nextf[size] = ovp;
	MUTEX_UNLOCK(&malloc_mutex);
}
1103 | |
1104 | /* |
1105 | * When a program attempts "storage compaction" as mentioned in the |
1106 | * old malloc man page, it realloc's an already freed block. Usually |
1107 | * this is the last block it freed; occasionally it might be farther |
1108 | * back. We have to search all the free lists for the block in order |
1109 | * to determine its bucket: 1st we make one pass thru the lists |
1110 | * checking only the first block in each; if that fails we search |
378cc40b |
1111 | * ``reall_srchlen'' blocks in each list for a match (the variable |
8d063cd8 |
1112 | * is extern so the caller can modify it). If that fails we just copy |
1113 | * however many bytes were given to realloc() and hope it's not huge. |
1114 | */ |
378cc40b |
1115 | int reall_srchlen = 4; /* 4 should be plenty, -1 =>'s whole list */ |
8d063cd8 |
1116 | |
2304df62 |
/*
 * Resize a malloc()ed block to nbytes.  Handles three cases in place
 * (same bucket, grow-in-place of the last sbrk()ed block, shrink that
 * still fits the bucket) and otherwise falls back to malloc+copy+free.
 * realloc(NULL, n) behaves as malloc(n).  Also tolerates blocks that
 * were already freed ("storage compaction" -- see the comment above).
 */
Malloc_t
realloc(void *mp, size_t nbytes)
{
  	register MEM_SIZE onb;
	union overhead *ovp;
  	char *res;
	int prev_bucket;
	register int bucket;
	int was_alloced = 0, incr;
	char *cp = (char*)mp;

#if defined(DEBUGGING) || !defined(PERL_CORE)
	MEM_SIZE size = nbytes;	/* kept only for DEBUG_m output below */

	/* A "negative" request means a wrapped-around size_t. */
	if ((long)nbytes < 0)
	    croak("%s", "panic: realloc");
#endif

	BARK_64K_LIMIT("Reallocation",nbytes,size);
	if (!cp)
		return malloc(nbytes);

	MUTEX_LOCK(&malloc_mutex);
	/* Step back over the bookkeeping header to the overhead union. */
	ovp = (union overhead *)((caddr_t)cp
				- sizeof (union overhead) * CHUNK_SHIFT);
	bucket = OV_INDEX(ovp);
#ifdef IGNORE_SMALL_BAD_FREE
	if ((bucket < FIRST_BUCKET_WITH_CHECK)
	    || (OV_MAGIC(ovp, bucket) == MAGIC))
#else
	if (OV_MAGIC(ovp, bucket) == MAGIC)
#endif
	{
		was_alloced = 1;
	} else {
		/*
		 * Already free, doing "compaction".
		 *
		 * Search for the old block of memory on the
		 * free list.  First, check the most common
		 * case (last element free'd), then (this failing)
		 * the last ``reall_srchlen'' items free'd.
		 * If all lookups fail, then assume the size of
		 * the memory block being realloc'd is the
		 * smallest possible.
		 */
		if ((bucket = findbucket(ovp, 1)) < 0 &&
		    (bucket = findbucket(ovp, reall_srchlen)) < 0)
			bucket = 0;
	}
	onb = BUCKET_SIZE_REAL(bucket);
	/*
	 *  avoid the copy if same size block.
	 *  We are not aggressive with boundary cases. Note that it might
	 *  (for a small number of cases) give false negative if
	 *  both new size and old one are in the bucket for
	 *  FIRST_BIG_POW2, but the new one is near the lower end.
	 *
	 *  We do not try to go to 1.5 times smaller bucket so far.
	 */
	/* incr: 1 = must grow, 0 = fits current bucket, -1 = shrink. */
	if (nbytes > onb) incr = 1;
	else {
#ifdef DO_NOT_TRY_HARDER_WHEN_SHRINKING
	    if ( /* This is a little bit pessimal if PACK_MALLOC: */
		nbytes > ( (onb >> 1) - M_OVERHEAD )
#  ifdef TWO_POT_OPTIMIZE
		|| (bucket == FIRST_BIG_POW2 && nbytes >= LAST_SMALL_BOUND )
#  endif
		)
#else  /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */
		prev_bucket = ( (bucket > MAX_PACKED + 1)
				? bucket - BUCKETS_PER_POW2
				: bucket - 1);
	     if (nbytes > BUCKET_SIZE_REAL(prev_bucket))
#endif /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */
		 incr = 0;
	     else incr = -1;
	}
	if (!was_alloced
#ifdef STRESS_REALLOC
	    || 1 /* always do it the hard way */
#endif
	    ) goto hard_way;
	else if (incr == 0) {
	  inplace_label:
#ifdef RCHECK
		/*
		 * Record new allocated size of block and
		 * bound space with magic numbers.
		 */
		if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) {
		       int i, nb = ovp->ov_size + 1;

			/* Verify the old tail guards before moving them. */
			if ((i = nb & 3)) {
			    i = 4 - i;
			    while (i--) {
				ASSERT(*((char *)((caddr_t)ovp + nb - RSLOP + i)) == RMAGIC_C, "chunk's tail overwrite");
			    }
			}
			nb = (nb + 3) &~ 3;
			ASSERT(*(u_int *)((caddr_t)ovp + nb - RSLOP) == RMAGIC, "chunk's tail overwrite");
			/*
			 * Convert amount of memory requested into
			 * closest block size stored in hash buckets
			 * which satisfies request.  Account for
			 * space used per block for accounting.
			 */
			nbytes += M_OVERHEAD;
			ovp->ov_size = nbytes - 1;
			/* Lay down fresh guards at the new end. */
			if ((i = nbytes & 3)) {
			    i = 4 - i;
			    while (i--)
				*((char *)((caddr_t)ovp + nbytes - RSLOP + i))
				    = RMAGIC_C;
			}
			nbytes = (nbytes + 3) &~ 3;
			*((u_int *)((caddr_t)ovp + nbytes - RSLOP)) = RMAGIC;
		}
#endif
		res = cp;
		MUTEX_UNLOCK(&malloc_mutex);
	} else if (incr == 1 && (cp - M_OVERHEAD == last_op)
		   && (onb > (1 << LOG_OF_MIN_ARENA))) {
	    /* Grow-in-place: this is the most recent big block, so try
	       to extend it with an adjacent sbrk() instead of copying. */
	    MEM_SIZE require, newarena = nbytes, pow;
	    int shiftr;

	    POW2_OPTIMIZE_ADJUST(newarena);
	    newarena = newarena + M_OVERHEAD;
	    /* newarena = (newarena + 3) &~ 3; */
	    shiftr = (newarena - 1) >> LOG_OF_MIN_ARENA;
	    pow = LOG_OF_MIN_ARENA + 1;
	    /* apart from this loop, this is O(1) */
	    while (shiftr >>= 1)
  		pow++;
	    newarena = (1 << pow) + POW2_OPTIMIZE_SURPLUS(pow * BUCKETS_PER_POW2);
	    require = newarena - onb - M_OVERHEAD;

	    if (getpages_adjacent(require)) {
#ifdef DEBUGGING_MSTATS
		nmalloc[bucket]--;
		nmalloc[pow * BUCKETS_PER_POW2]++;
#endif
		*(cp - M_OVERHEAD) = pow * BUCKETS_PER_POW2; /* Fill index. */
		goto inplace_label;
	    } else
		goto hard_way;
	} else {
	  hard_way:
	    MUTEX_UNLOCK(&malloc_mutex);
	    if ((res = (char*)malloc(nbytes)) == NULL)
		return (NULL);
	    if (cp != res)			/* common optimization */
		Copy(cp, res, (MEM_SIZE)(nbytes<onb?nbytes:onb), char);
	    if (was_alloced)
		free(cp);
	}

  	DEBUG_m(PerlIO_printf(Perl_debug_log, "0x%lu: (%05lu) rfree\n",
			      (unsigned long)res,(unsigned long)(an++)));
	DEBUG_m(PerlIO_printf(Perl_debug_log,
			      "0x%lx: (%05lu) realloc %ld bytes\n",
			      (unsigned long)res,(unsigned long)(an++),
			      (long)size));
	return ((Malloc_t)res);
}
1282 | |
1283 | /* |
1284 | * Search ``srchlen'' elements of each free list for a block whose |
1285 | * header starts at ``freep''. If srchlen is -1 search the whole list. |
1286 | * Return bucket number, or -1 if not found. |
1287 | */ |
ee0007ab |
1288 | static int |
8ac85365 |
1289 | findbucket(union overhead *freep, int srchlen) |
8d063cd8 |
1290 | { |
1291 | register union overhead *p; |
1292 | register int i, j; |
1293 | |
1294 | for (i = 0; i < NBUCKETS; i++) { |
1295 | j = 0; |
1296 | for (p = nextf[i]; p && j != srchlen; p = p->ov_next) { |
1297 | if (p == freep) |
1298 | return (i); |
1299 | j++; |
1300 | } |
1301 | } |
1302 | return (-1); |
1303 | } |
1304 | |
cf5c4ad8 |
1305 | Malloc_t |
8ac85365 |
1306 | calloc(register size_t elements, register size_t size) |
cf5c4ad8 |
1307 | { |
1308 | long sz = elements * size; |
1309 | Malloc_t p = malloc(sz); |
1310 | |
1311 | if (p) { |
1312 | memset((void*)p, 0, sz); |
1313 | } |
1314 | return p; |
1315 | } |
1316 | |
e8bc2b5c |
1317 | MEM_SIZE |
1318 | malloced_size(void *p) |
1319 | { |
1320 | int bucket = OV_INDEX((union overhead *)p); |
1321 | |
1322 | return BUCKET_SIZE_REAL(bucket); |
1323 | } |
1324 | |
c07a80fd |
1325 | #ifdef DEBUGGING_MSTATS |
e8bc2b5c |
1326 | |
1327 | # ifdef BUCKETS_ROOT2 |
1328 | # define MIN_EVEN_REPORT 6 |
1329 | # else |
1330 | # define MIN_EVEN_REPORT MIN_BUCKET |
1331 | # endif |
8d063cd8 |
1332 | /* |
1333 | * mstats - print out statistics about malloc |
1334 | * |
1335 | * Prints two lines of numbers, one showing the length of the free list |
1336 | * for each size category, the second showing the number of mallocs - |
1337 | * frees for each size category. |
1338 | */ |
ee0007ab |
/*
 * Print malloc statistics to stderr: per-bucket free and used counts,
 * then a summary of total sbrk() traffic and overhead.  The `s' label,
 * when non-NULL, prefixes the report.  With BUCKETS_ROOT2, even and
 * odd (root-2 intermediate) buckets are printed on separate rows.
 */
void
dump_mstats(char *s)
{
  	register int i, j;
  	register union overhead *p;
	int topbucket=0, topbucket_ev=0, topbucket_odd=0, totfree=0, total=0;
	u_int nfree[NBUCKETS];
	int total_chain = 0;
	struct chunk_chain_s* nextchain = chunk_chain;

	/* Walk every free list to count free blocks per bucket and find
	   the highest bucket that was ever used. */
  	for (i = MIN_BUCKET ; i < NBUCKETS; i++) {
  		for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
  			;
  		nfree[i] = j;
		totfree += nfree[i] * BUCKET_SIZE_REAL(i);
		total += nmalloc[i] * BUCKET_SIZE_REAL(i);
		if (nmalloc[i]) {
		    i % 2 ? (topbucket_odd = i) : (topbucket_ev = i);
		    topbucket = i;
		}
  	}
  	if (s)
	    PerlIO_printf(PerlIO_stderr(),
			  "Memory allocation statistics %s (buckets %ld(%ld)..%ld(%ld)\n",
			  s,
			  (long)BUCKET_SIZE_REAL(MIN_BUCKET),
			  (long)BUCKET_SIZE(MIN_BUCKET),
			  (long)BUCKET_SIZE_REAL(topbucket), (long)BUCKET_SIZE(topbucket));
  	PerlIO_printf(PerlIO_stderr(), "%8d free:", totfree);
  	for (i = MIN_EVEN_REPORT; i <= topbucket; i += BUCKETS_PER_POW2) {
  		PerlIO_printf(PerlIO_stderr(),
			      ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
			       ? " %5d"
			       : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
			      nfree[i]);
  	}
#ifdef BUCKETS_ROOT2
	/* Second row: the intermediate (odd, root-2) buckets. */
	PerlIO_printf(PerlIO_stderr(), "\n\t ");
  	for (i = MIN_BUCKET + 1; i <= topbucket_odd; i += BUCKETS_PER_POW2) {
  		PerlIO_printf(PerlIO_stderr(),
			      ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
			       ? " %5d"
			       : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
			      nfree[i]);
	}
#endif
  	PerlIO_printf(PerlIO_stderr(), "\n%8d used:", total - totfree);
  	for (i = MIN_EVEN_REPORT; i <= topbucket; i += BUCKETS_PER_POW2) {
  		PerlIO_printf(PerlIO_stderr(),
			      ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
			       ? " %5d"
			       : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
			      nmalloc[i] - nfree[i]);
  	}
#ifdef BUCKETS_ROOT2
	PerlIO_printf(PerlIO_stderr(), "\n\t ");
  	for (i = MIN_BUCKET + 1; i <= topbucket_odd; i += BUCKETS_PER_POW2) {
  		PerlIO_printf(PerlIO_stderr(),
			      ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
			       ? " %5d"
			       : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
			      nmalloc[i] - nfree[i]);
  	}
#endif
	/* Total the chunks parked on the chunk chain. */
	while (nextchain) {
	    total_chain += nextchain->size;
	    nextchain = nextchain->next;
	}
	PerlIO_printf(PerlIO_stderr(), "\nTotal sbrk(): %d/%d:%d. Odd ends: pad+heads+chain+tail: %d+%d+%d+%d.\n",
		      goodsbrk + sbrk_slack, sbrks, sbrk_good, sbrk_slack,
		      start_slack, total_chain, sbrked_remains);
}
1411 | #else |
/* No-op stub: statistics are collected only under DEBUGGING_MSTATS. */
void
dump_mstats(char *s)
{
}
1416 | #endif |
a687059c |
1417 | #endif /* lint */ |
cf5c4ad8 |
1418 | |
1419 | |
1420 | #ifdef USE_PERL_SBRK |
1421 | |
760ac839 |
1422 | # ifdef NeXT |
1423 | # define PERL_SBRK_VIA_MALLOC |
1424 | # endif |
1425 | |
1426 | # ifdef PERL_SBRK_VIA_MALLOC |
72e5b9db |
1427 | # if defined(HIDEMYMALLOC) || defined(EMBEDMYMALLOC) |
760ac839 |
1428 | # undef malloc |
1429 | # else |
72e5b9db |
1430 | # include "Error: -DPERL_SBRK_VIA_MALLOC needs -D(HIDE|EMBED)MYMALLOC" |
760ac839 |
1431 | # endif |
cf5c4ad8 |
1432 | |
1433 | /* it may seem schizophrenic to use perl's malloc and let it call system */ |
1434 | /* malloc, the reason for that is only the 3.2 version of the OS that had */ |
1435 | /* frequent core dumps within nxzonefreenolock. This sbrk routine put an */ |
1436 | /* end to the cores */ |
1437 | |
760ac839 |
1438 | # define SYSTEM_ALLOC(a) malloc(a) |
cf5c4ad8 |
1439 | |
760ac839 |
1440 | # endif /* PERL_SBRK_VIA_MALLOC */ |
cf5c4ad8 |
1441 | |
1442 | static IV Perl_sbrk_oldchunk; |
1443 | static long Perl_sbrk_oldsize; |
1444 | |
760ac839 |
1445 | # define PERLSBRK_32_K (1<<15) |
1446 | # define PERLSBRK_64_K (1<<16) |
cf5c4ad8 |
1447 | |
b63effbb |
1448 | Malloc_t |
cf5c4ad8 |
1449 | Perl_sbrk(size) |
1450 | int size; |
1451 | { |
1452 | IV got; |
1453 | int small, reqsize; |
1454 | |
1455 | if (!size) return 0; |
55497cff |
1456 | #ifdef PERL_CORE |
cf5c4ad8 |
1457 | reqsize = size; /* just for the DEBUG_m statement */ |
1458 | #endif |
57569e04 |
1459 | #ifdef PACK_MALLOC |
1460 | size = (size + 0x7ff) & ~0x7ff; |
1461 | #endif |
cf5c4ad8 |
1462 | if (size <= Perl_sbrk_oldsize) { |
1463 | got = Perl_sbrk_oldchunk; |
1464 | Perl_sbrk_oldchunk += size; |
1465 | Perl_sbrk_oldsize -= size; |
1466 | } else { |
1467 | if (size >= PERLSBRK_32_K) { |
1468 | small = 0; |
1469 | } else { |
cf5c4ad8 |
1470 | size = PERLSBRK_64_K; |
1471 | small = 1; |
1472 | } |
1473 | got = (IV)SYSTEM_ALLOC(size); |
57569e04 |
1474 | #ifdef PACK_MALLOC |
1475 | got = (got + 0x7ff) & ~0x7ff; |
1476 | #endif |
cf5c4ad8 |
1477 | if (small) { |
1478 | /* Chunk is small, register the rest for future allocs. */ |
1479 | Perl_sbrk_oldchunk = got + reqsize; |
1480 | Perl_sbrk_oldsize = size - reqsize; |
1481 | } |
1482 | } |
1483 | |
fb73857a |
1484 | DEBUG_m(PerlIO_printf(Perl_debug_log, "sbrk malloc size %ld (reqsize %ld), left size %ld, give addr 0x%lx\n", |
cf5c4ad8 |
1485 | size, reqsize, Perl_sbrk_oldsize, got)); |
cf5c4ad8 |
1486 | |
1487 | return (void *)got; |
1488 | } |
1489 | |
1490 | #endif /* ! defined USE_PERL_SBRK */ |