php-internal-docs 8.4.8
Unofficial docs for php/php-src
zend_alloc.c
1/*
2 +----------------------------------------------------------------------+
3 | Zend Engine |
4 +----------------------------------------------------------------------+
5 | Copyright (c) Zend Technologies Ltd. (http://www.zend.com) |
6 +----------------------------------------------------------------------+
7 | This source file is subject to version 2.00 of the Zend license, |
8 | that is bundled with this package in the file LICENSE, and is |
9 | available through the world-wide-web at the following url: |
10 | http://www.zend.com/license/2_00.txt. |
11 | If you did not receive a copy of the Zend license and are unable to |
12 | obtain it through the world-wide-web, please send a note to |
13 | license@zend.com so we can mail you a copy immediately. |
14 +----------------------------------------------------------------------+
15 | Authors: Andi Gutmans <andi@php.net> |
16 | Zeev Suraski <zeev@php.net> |
17 | Dmitry Stogov <dmitry@php.net> |
18 +----------------------------------------------------------------------+
19*/
20
21/*
22 * zend_alloc is designed to be a modern CPU cache friendly memory manager
23 * for PHP. Most ideas are taken from jemalloc and tcmalloc implementations.
24 *
25 * All allocations are split into 3 categories:
26 *
27 * Huge - the size is greater than CHUNK size (~2M by default), allocation is
28 * performed using mmap(). The result is aligned on a 2M boundary.
29 *
30 * Large - a number of 4096-byte pages inside a CHUNK. Large blocks
31 * are always aligned on a page boundary.
32 *
33 * Small - less than 3/4 of page size. Small sizes are rounded up to the nearest
34 * greater predefined small size (there are 30 predefined sizes:
35 * 8, 16, 24, 32, ... 3072). Small blocks are allocated from
36 * RUNs. Each RUN is allocated as a single page or a few consecutive pages.
37 * Allocation inside RUNs is implemented using a linked list of free
38 * elements. The result is aligned to 8 bytes.
39 *
40 * zend_alloc allocates memory from the OS in CHUNKs; these CHUNKs and huge memory
41 * blocks are always aligned to a CHUNK boundary, so it's very easy to determine
42 * the CHUNK owning a certain pointer. Regular CHUNKs reserve a single
43 * page at the start for special purposes. It contains a bitset of free pages,
44 * a few bitsets for available runs of predefined small sizes, a map of pages that
45 * keeps information about the usage of each page in this CHUNK, etc.
46 *
47 * zend_alloc provides familiar emalloc/efree/erealloc API, but in addition it
48 * provides specialized and optimized routines to allocate blocks of predefined
49 * sizes (e.g. emalloc_2(), emalloc_4(), ..., emalloc_large(), etc.).
50 * The library uses C preprocessor tricks that substitute calls to emalloc()
51 * with more specialized routines when the requested size is known.
52 */
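/*
 * [Editor's illustration - not part of the original source] A minimal sketch of
 * the pointer-to-chunk lookup made possible by the 2M alignment guarantee
 * described above. It reuses macros and types defined in this file and its
 * headers; the function name is hypothetical.
 */
#if 0
static zend_mm_chunk *example_owning_chunk(void *ptr, int *page_num)
{
	/* masking off the low 21 bits of any pointer inside a chunk yields the
	 * chunk base, because chunks are always mapped on a 2M boundary */
	zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
	/* the remaining offset, divided by the 4K page size, is the page index
	 * used to look up chunk->map[] and chunk->free_map */
	*page_num = (int)(ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE) / ZEND_MM_PAGE_SIZE);
	return chunk;
}
#endif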
53
54#include "zend.h"
55#include "zend_alloc.h"
56#include "zend_globals.h"
57#include "zend_hrtime.h"
58#include "zend_operators.h"
59#include "zend_multiply.h"
60#include "zend_bitset.h"
61#include "zend_mmap.h"
62#include "zend_portability.h"
63#include <signal.h>
64
65#ifdef HAVE_UNISTD_H
66# include <unistd.h>
67#endif
68
69#ifdef ZEND_WIN32
70# include <wincrypt.h>
71# include <process.h>
72# include "win32/winutil.h"
73# define getpid _getpid
74typedef int pid_t;
75#endif
76
77#include <stdio.h>
78#include <stdlib.h>
79#include <string.h>
80
81#include <sys/types.h>
82#include <sys/stat.h>
83#include <limits.h>
84#include <fcntl.h>
85#include <errno.h>
86#ifdef __SANITIZE_ADDRESS__
87# include <sanitizer/asan_interface.h>
88#endif
89
90#ifndef _WIN32
91# include <sys/mman.h>
92# ifndef MAP_ANON
93# ifdef MAP_ANONYMOUS
94# define MAP_ANON MAP_ANONYMOUS
95# endif
96# endif
97# ifndef MAP_FAILED
98# define MAP_FAILED ((void*)-1)
99# endif
100# ifndef MAP_POPULATE
101# define MAP_POPULATE 0
102# endif
103# if defined(_SC_PAGESIZE) || defined(_SC_PAGE_SIZE)
104# define REAL_PAGE_SIZE _real_page_size
105static size_t _real_page_size = ZEND_MM_PAGE_SIZE;
106# endif
107# ifdef MAP_ALIGNED_SUPER
108# define MAP_HUGETLB MAP_ALIGNED_SUPER
109# endif
110#endif
111
112#ifndef REAL_PAGE_SIZE
113# define REAL_PAGE_SIZE ZEND_MM_PAGE_SIZE
114#endif
115
116/* NetBSD has an mremap() function with a signature that is incompatible with Linux (WTF?),
117 * so pretend it doesn't exist. */
118#ifndef __linux__
119# undef HAVE_MREMAP
120#endif
121
122#ifndef __APPLE__
123# define ZEND_MM_FD -1
124#else
125# include <mach/vm_statistics.h>
126/* macOS allows tracking anonymous pages via vmmap per TAG id.
127 * Userland applications are allowed to use tags from 240 to 255.
128 */
129# define ZEND_MM_FD VM_MAKE_TAG(250U)
130#endif
131
132#ifndef ZEND_MM_STAT
133# define ZEND_MM_STAT 1 /* track current and peak memory usage */
134#endif
135#ifndef ZEND_MM_LIMIT
136# define ZEND_MM_LIMIT 1 /* support for user-defined memory limit */
137#endif
138#ifndef ZEND_MM_CUSTOM
139# define ZEND_MM_CUSTOM 1 /* support for custom memory allocator */
140 /* USE_ZEND_ALLOC=0 may switch to system malloc() */
141#endif
142#ifndef ZEND_MM_STORAGE
143# define ZEND_MM_STORAGE 1 /* support for custom memory storage */
144#endif
145#ifndef ZEND_MM_ERROR
146# define ZEND_MM_ERROR 1 /* report system errors */
147#endif
148#ifndef ZEND_MM_HEAP_PROTECTION
149# define ZEND_MM_HEAP_PROTECTION 1 /* protect heap against corruptions */
150#endif
151
152#if ZEND_MM_HEAP_PROTECTION
153/* Define ZEND_MM_MIN_USEABLE_BIN_SIZE to the size of two pointers */
154# if UINTPTR_MAX == UINT64_MAX
155# define ZEND_MM_MIN_USEABLE_BIN_SIZE 16
156# elif UINTPTR_MAX == UINT32_MAX
157# define ZEND_MM_MIN_USEABLE_BIN_SIZE 8
158# else
159# error
160# endif
161# if ZEND_MM_MIN_USEABLE_BIN_SIZE < ZEND_MM_MIN_SMALL_SIZE
162# error
163# endif
164#else /* ZEND_MM_HEAP_PROTECTION */
165# define ZEND_MM_MIN_USEABLE_BIN_SIZE ZEND_MM_MIN_SMALL_SIZE
166#endif /* ZEND_MM_HEAP_PROTECTION */
167
168#ifndef ZEND_MM_CHECK
169# define ZEND_MM_CHECK(condition, message) do { \
170 if (UNEXPECTED(!(condition))) { \
171 zend_mm_panic(message); \
172 } \
173 } while (0)
174#endif
175
176typedef uint32_t zend_mm_page_info; /* 4-byte integer */
177typedef zend_ulong zend_mm_bitset; /* 4-byte or 8-byte integer */
178
179#define ZEND_MM_ALIGNED_OFFSET(size, alignment) \
180 (((size_t)(size)) & ((alignment) - 1))
181#define ZEND_MM_ALIGNED_BASE(size, alignment) \
182 (((size_t)(size)) & ~((alignment) - 1))
183#define ZEND_MM_SIZE_TO_NUM(size, alignment) \
184 (((size_t)(size) + ((alignment) - 1)) / (alignment))
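/*
 * [Editor's illustration - not part of the original source] With a 4096-byte
 * alignment the three macros above behave as follows:
 *   ZEND_MM_ALIGNED_OFFSET(0x123456, 4096) == 0x456     (offset within the page)
 *   ZEND_MM_ALIGNED_BASE(0x123456, 4096)   == 0x123000  (start of the page)
 *   ZEND_MM_SIZE_TO_NUM(8193, 4096)        == 3         (pages needed, rounded up)
 */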
185
186#define ZEND_MM_BITSET_LEN (sizeof(zend_mm_bitset) * 8) /* 32 or 64 */
187#define ZEND_MM_PAGE_MAP_LEN (ZEND_MM_PAGES / ZEND_MM_BITSET_LEN) /* 16 or 8 */
188
190
191#define ZEND_MM_IS_FRUN 0x00000000
192#define ZEND_MM_IS_LRUN 0x40000000
193#define ZEND_MM_IS_SRUN 0x80000000
194
195#define ZEND_MM_LRUN_PAGES_MASK 0x000003ff
196#define ZEND_MM_LRUN_PAGES_OFFSET 0
197
198#define ZEND_MM_SRUN_BIN_NUM_MASK 0x0000001f
199#define ZEND_MM_SRUN_BIN_NUM_OFFSET 0
200
201#define ZEND_MM_SRUN_FREE_COUNTER_MASK 0x01ff0000
202#define ZEND_MM_SRUN_FREE_COUNTER_OFFSET 16
203
204#define ZEND_MM_NRUN_OFFSET_MASK 0x01ff0000
205#define ZEND_MM_NRUN_OFFSET_OFFSET 16
206
207#define ZEND_MM_LRUN_PAGES(info) (((info) & ZEND_MM_LRUN_PAGES_MASK) >> ZEND_MM_LRUN_PAGES_OFFSET)
208#define ZEND_MM_SRUN_BIN_NUM(info) (((info) & ZEND_MM_SRUN_BIN_NUM_MASK) >> ZEND_MM_SRUN_BIN_NUM_OFFSET)
209#define ZEND_MM_SRUN_FREE_COUNTER(info) (((info) & ZEND_MM_SRUN_FREE_COUNTER_MASK) >> ZEND_MM_SRUN_FREE_COUNTER_OFFSET)
210#define ZEND_MM_NRUN_OFFSET(info) (((info) & ZEND_MM_NRUN_OFFSET_MASK) >> ZEND_MM_NRUN_OFFSET_OFFSET)
211
212#define ZEND_MM_FRUN() ZEND_MM_IS_FRUN
213#define ZEND_MM_LRUN(count) (ZEND_MM_IS_LRUN | ((count) << ZEND_MM_LRUN_PAGES_OFFSET))
214#define ZEND_MM_SRUN(bin_num) (ZEND_MM_IS_SRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET))
215#define ZEND_MM_SRUN_EX(bin_num, count) (ZEND_MM_IS_SRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET) | ((count) << ZEND_MM_SRUN_FREE_COUNTER_OFFSET))
216#define ZEND_MM_NRUN(bin_num, offset) (ZEND_MM_IS_SRUN | ZEND_MM_IS_LRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET) | ((offset) << ZEND_MM_NRUN_OFFSET_OFFSET))
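/*
 * [Editor's illustration - not part of the original source] Examples of encoded
 * map[] entries: ZEND_MM_LRUN(5) == 0x40000005 marks the first page of a large
 * run spanning 5 pages; ZEND_MM_SRUN_EX(3, 12) == 0x800c0003 marks the first
 * page of a small run for bin #3 with 12 elements currently free; and
 * ZEND_MM_NRUN(3, 2) marks a page of that bin located 2 pages after the first
 * page of its run.
 */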
217
218#define ZEND_MM_BINS 30
219
220#if UINTPTR_MAX == UINT64_MAX
221# define BSWAPPTR(u) ZEND_BYTES_SWAP64(u)
222#else
223# define BSWAPPTR(u) ZEND_BYTES_SWAP32(u)
224#endif
225
231
232static bool zend_mm_use_huge_pages = false;
233
234/*
235 * Memory is retrieved from the OS in chunks of fixed size (2MB).
236 * Inside a chunk it is managed in pages of fixed size (4096B),
237 * so each chunk consists of 512 pages.
238 * The first page of each chunk is reserved for the chunk header.
239 * It contains service information about all pages.
240 *
241 * free_pages - current number of free pages in this chunk
242 *
243 * free_tail - number of contiguous free pages at the end of the chunk
244 *
245 * free_map - bitset (a bit for each page). The bit is set if the corresponding
246 * page is allocated. The allocator for "large sizes" may easily find a
247 * free page (or a run of contiguous pages) by searching for zero
248 * bits.
249 *
250 * map - contains service information for each page (32 bits for each
251 * page).
252 * usage:
253 * (2 bits)
254 * FRUN - free page,
255 * LRUN - first page of "large" allocation
256 * SRUN - first page of a bin used for "small" allocation
257 *
258 * lrun_pages:
259 * (10 bits) number of allocated pages
260 *
261 * srun_bin_num:
262 * (5 bits) bin number (e.g. 0 for sizes 0-8, 1 for 9-16,
263 * 2 for 17-24, 3 for 25-32, etc.) see zend_alloc_sizes.h
264 */
265
267#if ZEND_MM_CUSTOM
269#endif
270#if ZEND_MM_STORAGE
272#endif
273#if ZEND_MM_STAT
274 size_t size; /* current memory usage */
275 size_t peak; /* peak memory usage */
276#endif
277 uintptr_t shadow_key; /* free slot shadow ptr xor key */
278 zend_mm_free_slot *free_slot[ZEND_MM_BINS]; /* free lists for small sizes */
279#if ZEND_MM_STAT || ZEND_MM_LIMIT
280 size_t real_size; /* current size of allocated pages */
281#endif
282#if ZEND_MM_STAT
283 size_t real_peak; /* peak size of allocated pages */
284#endif
285#if ZEND_MM_LIMIT
286 size_t limit; /* memory limit */
287 int overflow; /* memory overflow flag */
288#endif
289
290 zend_mm_huge_list *huge_list; /* list of huge allocated blocks */
291
293 zend_mm_chunk *cached_chunks; /* list of unused chunks */
294 int chunks_count; /* number of allocated chunks */
295 int peak_chunks_count; /* peak number of allocated chunks for current request */
296 int cached_chunks_count; /* number of cached chunks */
297 double avg_chunks_count; /* average number of chunks allocated per request */
298 int last_chunks_delete_boundary; /* number of chunks after last deletion */
299 int last_chunks_delete_count; /* number of deletion over the last boundary */
300#if ZEND_MM_CUSTOM
301 struct {
304 void *(*_realloc)(void*, size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
305 size_t (*_gc)(void);
306 void (*_shutdown)(bool full, bool silent);
309#endif
310 pid_t pid;
312};
313
318 uint32_t free_pages; /* number of free pages */
319 uint32_t free_tail; /* number of free pages at the end of chunk */
320 uint32_t num;
321 char reserve[64 - (sizeof(void*) * 3 + sizeof(uint32_t) * 3)];
322 zend_mm_heap heap_slot; /* used only in main chunk */
323 zend_mm_page_map free_map; /* 512 bits or 64 bytes */
324 zend_mm_page_info map[ZEND_MM_PAGES]; /* 2 KB = 512 * 4 */
325};
326
330
331/*
332 * bin - one or a few contiguous pages (up to 8) used for allocation of
333 * a particular "small size".
334 */
337};
338
342
344 void *ptr;
345 size_t size;
347#if ZEND_DEBUG
348 zend_mm_debug_info dbg;
349#endif
350};
351
352#define ZEND_MM_PAGE_ADDR(chunk, page_num) \
353 ((void*)(((zend_mm_page*)(chunk)) + (page_num)))
354
355#define _BIN_DATA_SIZE(num, size, elements, pages, x, y) size,
356static const uint32_t bin_data_size[] = {
358};
359
360#define _BIN_DATA_ELEMENTS(num, size, elements, pages, x, y) elements,
361static const uint32_t bin_elements[] = {
363};
364
365#define _BIN_DATA_PAGES(num, size, elements, pages, x, y) pages,
366static const uint32_t bin_pages[] = {
368};
369
370#if ZEND_DEBUG
371ZEND_COLD void zend_debug_alloc_output(char *format, ...)
372{
373 char output_buf[256];
374 va_list args;
375
376 va_start(args, format);
377 vsprintf(output_buf, format, args);
378 va_end(args);
379
380#ifdef ZEND_WIN32
381 OutputDebugString(output_buf);
382#else
383 fprintf(stderr, "%s", output_buf);
384#endif
385}
386#endif
387
388static ZEND_COLD ZEND_NORETURN void zend_mm_panic(const char *message)
389{
390 fprintf(stderr, "%s\n", message);
391/* See http://support.microsoft.com/kb/190351 */
392#ifdef ZEND_WIN32
393 fflush(stderr);
394#endif
395#if ZEND_DEBUG && defined(HAVE_KILL) && defined(HAVE_GETPID)
396 kill(getpid(), SIGSEGV);
397#endif
398 abort();
399}
400
401static ZEND_COLD ZEND_NORETURN void zend_mm_safe_error(zend_mm_heap *heap,
402 const char *format,
403 size_t limit,
404#if ZEND_DEBUG
405 const char *filename,
406 uint32_t lineno,
407#endif
408 size_t size)
409{
410
411 heap->overflow = 1;
412 zend_try {
414 format,
415 limit,
416#if ZEND_DEBUG
417 filename,
418 lineno,
419#endif
420 size);
421 } zend_catch {
422 } zend_end_try();
423 heap->overflow = 0;
424 zend_bailout();
425 exit(1);
426}
427
428#ifdef _WIN32
429static void stderr_last_error(char *msg)
430{
431 DWORD err = GetLastError();
433
434 if (!buf[0]) {
435 fprintf(stderr, "\n%s: [0x%08lx]\n", msg, err);
436 }
437 else {
438 fprintf(stderr, "\n%s: [0x%08lx] %s\n", msg, err, buf);
439 }
440
442}
443#endif
444
445/*****************/
446/* OS Allocation */
447/*****************/
448
449static void zend_mm_munmap(void *addr, size_t size)
450{
451#ifdef _WIN32
452 if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
454 if (GetLastError() != ERROR_INVALID_ADDRESS) {
455#if ZEND_MM_ERROR
456 stderr_last_error("VirtualFree() failed");
457#endif
458 return;
459 }
460 SetLastError(0);
461
462 MEMORY_BASIC_INFORMATION mbi;
463 if (VirtualQuery(addr, &mbi, sizeof(mbi)) == 0) {
464#if ZEND_MM_ERROR
465 stderr_last_error("VirtualQuery() failed");
466#endif
467 return;
468 }
469 addr = mbi.AllocationBase;
470
471 if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
472#if ZEND_MM_ERROR
473 stderr_last_error("VirtualFree() failed");
474#endif
475 }
476 }
477#else
478 if (munmap(addr, size) != 0) {
479#if ZEND_MM_ERROR
480 fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
481#endif
482 }
483#endif
484}
485
486#ifndef HAVE_MREMAP
487static void *zend_mm_mmap_fixed(void *addr, size_t size)
488{
489#ifdef _WIN32
490 void *ptr = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
491
492 if (ptr == NULL) {
494 if (GetLastError() != ERROR_INVALID_ADDRESS) {
495#if ZEND_MM_ERROR
496 stderr_last_error("VirtualAlloc() fixed failed");
497#endif
498 }
499 SetLastError(0);
500 return NULL;
501 }
502 ZEND_ASSERT(ptr == addr);
503 return ptr;
504#else
505 int flags = MAP_PRIVATE | MAP_ANON;
506#if defined(MAP_EXCL)
507 flags |= MAP_FIXED | MAP_EXCL;
508#elif defined(MAP_TRYFIXED)
509 flags |= MAP_TRYFIXED;
510#endif
511 /* MAP_FIXED leads to discarding of the old mapping, so it can't be used. */
512 void *ptr = mmap(addr, size, PROT_READ | PROT_WRITE, flags /*| MAP_POPULATE | MAP_HUGETLB*/, ZEND_MM_FD, 0);
513
514 if (ptr == MAP_FAILED) {
515#if ZEND_MM_ERROR && !defined(MAP_EXCL) && !defined(MAP_TRYFIXED)
516 fprintf(stderr, "\nmmap() fixed failed: [%d] %s\n", errno, strerror(errno));
517#endif
518 return NULL;
519 } else if (ptr != addr) {
520 zend_mm_munmap(ptr, size);
521 return NULL;
522 }
523 return ptr;
524#endif
525}
526#endif
527
528static void *zend_mm_mmap(size_t size)
529{
530#ifdef _WIN32
531 void *ptr = VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
532
533 if (ptr == NULL) {
534#if ZEND_MM_ERROR
535 stderr_last_error("VirtualAlloc() failed");
536#endif
537 return NULL;
538 }
539 return ptr;
540#else
541 void *ptr;
542
543#if defined(MAP_HUGETLB) || defined(VM_FLAGS_SUPERPAGE_SIZE_2MB)
544 if (zend_mm_use_huge_pages && size == ZEND_MM_CHUNK_SIZE) {
545 int fd = -1;
546 int mflags = MAP_PRIVATE | MAP_ANON;
547#if defined(MAP_HUGETLB)
548 mflags |= MAP_HUGETLB;
549#else
550 fd = VM_FLAGS_SUPERPAGE_SIZE_2MB;
551#endif
552 ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, mflags, fd, 0);
553 if (ptr != MAP_FAILED) {
554 zend_mmap_set_name(ptr, size, "zend_alloc");
555 return ptr;
556 }
557 }
558#endif
559
560 ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, ZEND_MM_FD, 0);
561
562 if (ptr == MAP_FAILED) {
563#if ZEND_MM_ERROR
564 fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno));
565#endif
566 return NULL;
567 }
568 zend_mmap_set_name(ptr, size, "zend_alloc");
569 return ptr;
570#endif
571}
572
573/***********/
574/* Bitmask */
575/***********/
576
577/* number of trailing set (1) bits */
578ZEND_ATTRIBUTE_CONST static zend_always_inline int zend_mm_bitset_nts(zend_mm_bitset bitset)
579{
580#if (defined(__GNUC__) || __has_builtin(__builtin_ctzl)) && SIZEOF_ZEND_LONG == SIZEOF_LONG && defined(PHP_HAVE_BUILTIN_CTZL)
581 return __builtin_ctzl(~bitset);
582#elif (defined(__GNUC__) || __has_builtin(__builtin_ctzll)) && defined(PHP_HAVE_BUILTIN_CTZLL)
583 return __builtin_ctzll(~bitset);
584#elif defined(_WIN32)
585 unsigned long index;
586
587#if defined(_WIN64)
588 if (!BitScanForward64(&index, ~bitset)) {
589#else
590 if (!BitScanForward(&index, ~bitset)) {
591#endif
592 /* undefined behavior */
593 return 32;
594 }
595
596 return (int)index;
597#else
598 int n;
599
600 if (bitset == (zend_mm_bitset)-1) return ZEND_MM_BITSET_LEN;
601
602 n = 0;
603#if SIZEOF_ZEND_LONG == 8
604 if (sizeof(zend_mm_bitset) == 8) {
605 if ((bitset & 0xffffffff) == 0xffffffff) {n += 32; bitset = bitset >> Z_UL(32);}
606 }
607#endif
608 if ((bitset & 0x0000ffff) == 0x0000ffff) {n += 16; bitset = bitset >> 16;}
609 if ((bitset & 0x000000ff) == 0x000000ff) {n += 8; bitset = bitset >> 8;}
610 if ((bitset & 0x0000000f) == 0x0000000f) {n += 4; bitset = bitset >> 4;}
611 if ((bitset & 0x00000003) == 0x00000003) {n += 2; bitset = bitset >> 2;}
612 return n + (bitset & 1);
613#endif
614}
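/*
 * [Editor's illustration - not part of the original source] Typical use when
 * scanning a chunk's free_map for the first free page; variable names are
 * hypothetical.
 */
#if 0
	zend_mm_bitset word = chunk->free_map[0];   /* e.g. low bits ...0010111 */
	if (word != (zend_mm_bitset)-1) {
		/* three trailing 1 (allocated) bits -> page index 3 is the first free page */
		int first_free_page = zend_mm_bitset_nts(word);
	}
#endif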
615
616static zend_always_inline int zend_mm_bitset_is_set(zend_mm_bitset *bitset, int bit)
617{
618 return ZEND_BIT_TEST(bitset, bit);
619}
620
621static zend_always_inline void zend_mm_bitset_set_bit(zend_mm_bitset *bitset, int bit)
622{
623 bitset[bit / ZEND_MM_BITSET_LEN] |= (Z_UL(1) << (bit & (ZEND_MM_BITSET_LEN-1)));
624}
625
626static zend_always_inline void zend_mm_bitset_reset_bit(zend_mm_bitset *bitset, int bit)
627{
628 bitset[bit / ZEND_MM_BITSET_LEN] &= ~(Z_UL(1) << (bit & (ZEND_MM_BITSET_LEN-1)));
629}
630
631static zend_always_inline void zend_mm_bitset_set_range(zend_mm_bitset *bitset, int start, int len)
632{
633 if (len == 1) {
634 zend_mm_bitset_set_bit(bitset, start);
635 } else {
637 int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
638 int bit = start & (ZEND_MM_BITSET_LEN - 1);
639 zend_mm_bitset tmp;
640
641 if (pos != end) {
642 /* set bits from "bit" to ZEND_MM_BITSET_LEN-1 */
643 tmp = (zend_mm_bitset)-1 << bit;
644 bitset[pos++] |= tmp;
645 while (pos != end) {
646 /* set all bits */
647 bitset[pos++] = (zend_mm_bitset)-1;
648 }
649 end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
650 /* set bits from "0" to "end" */
651 tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
652 bitset[pos] |= tmp;
653 } else {
654 end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
655 /* set bits from "bit" to "end" */
656 tmp = (zend_mm_bitset)-1 << bit;
657 tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
658 bitset[pos] |= tmp;
659 }
660 }
661}
662
663static zend_always_inline void zend_mm_bitset_reset_range(zend_mm_bitset *bitset, int start, int len)
664{
665 if (len == 1) {
666 zend_mm_bitset_reset_bit(bitset, start);
667 } else {
669 int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
670 int bit = start & (ZEND_MM_BITSET_LEN - 1);
671 zend_mm_bitset tmp;
672
673 if (pos != end) {
674 /* reset bits from "bit" to ZEND_MM_BITSET_LEN-1 */
675 tmp = ~((Z_UL(1) << bit) - 1);
676 bitset[pos++] &= ~tmp;
677 while (pos != end) {
678 /* reset all bits */
679 bitset[pos++] = 0;
680 }
681 end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
682 /* reset bits from "0" to "end" */
683 tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
684 bitset[pos] &= ~tmp;
685 } else {
686 end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
687 /* reset bits from "bit" to "end" */
688 tmp = (zend_mm_bitset)-1 << bit;
689 tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
690 bitset[pos] &= ~tmp;
691 }
692 }
693}
694
695static zend_always_inline int zend_mm_bitset_is_free_range(zend_mm_bitset *bitset, int start, int len)
696{
697 if (len == 1) {
698 return !zend_mm_bitset_is_set(bitset, start);
699 } else {
701 int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
702 int bit = start & (ZEND_MM_BITSET_LEN - 1);
703 zend_mm_bitset tmp;
704
705 if (pos != end) {
706 /* check bits from "bit" to ZEND_MM_BITSET_LEN-1 */
707 tmp = (zend_mm_bitset)-1 << bit;
708 if ((bitset[pos++] & tmp) != 0) {
709 return 0;
710 }
711 while (pos != end) {
712 /* check all bits */
713 if (bitset[pos++] != 0) {
714 return 0;
715 }
716 }
717 end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
718 /* check bits from "0" to "end" */
719 tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
720 return (bitset[pos] & tmp) == 0;
721 } else {
722 end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
723 /* check bits from "bit" to "end" */
724 tmp = (zend_mm_bitset)-1 << bit;
725 tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
726 return (bitset[pos] & tmp) == 0;
727 }
728 }
729}
730
731/**********/
732/* Chunks */
733/**********/
734
735static zend_always_inline void zend_mm_hugepage(void* ptr, size_t size)
736{
737#if defined(MADV_HUGEPAGE)
738 (void)madvise(ptr, size, MADV_HUGEPAGE);
739#elif defined(HAVE_MEMCNTL)
740 struct memcntl_mha m = {.mha_cmd = MHA_MAPSIZE_VA, .mha_pagesize = ZEND_MM_CHUNK_SIZE, .mha_flags = 0};
741 (void)memcntl(ptr, size, MC_HAT_ADVISE, (char *)&m, 0, 0);
742#elif !defined(VM_FLAGS_SUPERPAGE_SIZE_2MB) && !defined(MAP_ALIGNED_SUPER)
743 zend_error_noreturn(E_ERROR, "huge_pages: thp unsupported on this platform");
744#endif
745}
746
747static void *zend_mm_chunk_alloc_int(size_t size, size_t alignment)
748{
749 void *ptr = zend_mm_mmap(size);
750
751 if (ptr == NULL) {
752 return NULL;
753 } else if (ZEND_MM_ALIGNED_OFFSET(ptr, alignment) == 0) {
754 if (zend_mm_use_huge_pages) {
755 zend_mm_hugepage(ptr, size);
756 }
757#ifdef __SANITIZE_ADDRESS__
758 ASAN_UNPOISON_MEMORY_REGION(ptr, size);
759#endif
760 return ptr;
761 } else {
762 size_t offset;
763
764 /* chunk has to be aligned */
765 zend_mm_munmap(ptr, size);
766 ptr = zend_mm_mmap(size + alignment - REAL_PAGE_SIZE);
767#ifdef _WIN32
768 offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
769 if (offset != 0) {
770 offset = alignment - offset;
771 }
772 zend_mm_munmap(ptr, size + alignment - REAL_PAGE_SIZE);
773 ptr = zend_mm_mmap_fixed((void*)((char*)ptr + offset), size);
774 if (ptr == NULL) { // fix GH-9650, fixed addr range is not free
775 ptr = zend_mm_mmap(size + alignment - REAL_PAGE_SIZE);
776 if (ptr == NULL) {
777 return NULL;
778 }
779 offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
780 if (offset != 0) {
781 ptr = (void*)((char*)ptr + alignment - offset);
782 }
783 }
784 return ptr;
785#else
786 offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
787 if (offset != 0) {
788 offset = alignment - offset;
789 zend_mm_munmap(ptr, offset);
790 ptr = (char*)ptr + offset;
791 alignment -= offset;
792 }
793 if (alignment > REAL_PAGE_SIZE) {
794 zend_mm_munmap((char*)ptr + size, alignment - REAL_PAGE_SIZE);
795 }
796 if (zend_mm_use_huge_pages) {
797 zend_mm_hugepage(ptr, size);
798 }
799# ifdef __SANITIZE_ADDRESS__
800 ASAN_UNPOISON_MEMORY_REGION(ptr, size);
801# endif
802#endif
803 return ptr;
804 }
805}
806
807static void *zend_mm_chunk_alloc(zend_mm_heap *heap, size_t size, size_t alignment)
808{
809#if ZEND_MM_STORAGE
810 if (UNEXPECTED(heap->storage)) {
811 void *ptr = heap->storage->handlers.chunk_alloc(heap->storage, size, alignment);
812 ZEND_ASSERT(((uintptr_t)((char*)ptr + (alignment-1)) & (alignment-1)) == (uintptr_t)ptr);
813 return ptr;
814 }
815#endif
816 return zend_mm_chunk_alloc_int(size, alignment);
817}
818
819static void zend_mm_chunk_free(zend_mm_heap *heap, void *addr, size_t size)
820{
821#if ZEND_MM_STORAGE
822 if (UNEXPECTED(heap->storage)) {
823 heap->storage->handlers.chunk_free(heap->storage, addr, size);
824 return;
825 }
826#endif
827 zend_mm_munmap(addr, size);
828}
829
830static int zend_mm_chunk_truncate(zend_mm_heap *heap, void *addr, size_t old_size, size_t new_size)
831{
832#if ZEND_MM_STORAGE
833 if (UNEXPECTED(heap->storage)) {
834 if (heap->storage->handlers.chunk_truncate) {
835 return heap->storage->handlers.chunk_truncate(heap->storage, addr, old_size, new_size);
836 } else {
837 return 0;
838 }
839 }
840#endif
841#ifndef _WIN32
842 zend_mm_munmap((char*)addr + new_size, old_size - new_size);
843 return 1;
844#else
845 return 0;
846#endif
847}
848
849static int zend_mm_chunk_extend(zend_mm_heap *heap, void *addr, size_t old_size, size_t new_size)
850{
851#if ZEND_MM_STORAGE
852 if (UNEXPECTED(heap->storage)) {
853 if (heap->storage->handlers.chunk_extend) {
854 return heap->storage->handlers.chunk_extend(heap->storage, addr, old_size, new_size);
855 } else {
856 return 0;
857 }
858 }
859#endif
860#ifdef HAVE_MREMAP
861 /* We don't use MREMAP_MAYMOVE due to alignment requirements. */
862 void *ptr = mremap(addr, old_size, new_size, 0);
863 if (ptr == MAP_FAILED) {
864 return 0;
865 }
866 /* Sanity check: The mapping shouldn't have moved. */
867 ZEND_ASSERT(ptr == addr);
868 return 1;
869#elif !defined(_WIN32)
870 return (zend_mm_mmap_fixed((char*)addr + old_size, new_size - old_size) != NULL);
871#else
872 return 0;
873#endif
874}
875
876static zend_always_inline void zend_mm_chunk_init(zend_mm_heap *heap, zend_mm_chunk *chunk)
877{
878 chunk->heap = heap;
879 chunk->next = heap->main_chunk;
880 chunk->prev = heap->main_chunk->prev;
881 chunk->prev->next = chunk;
882 chunk->next->prev = chunk;
883 /* mark first pages as allocated */
884 chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
885 chunk->free_tail = ZEND_MM_FIRST_PAGE;
886 /* younger chunks have a bigger number */
887 chunk->num = chunk->prev->num + 1;
888 /* mark first pages as allocated */
889 chunk->free_map[0] = (1L << ZEND_MM_FIRST_PAGE) - 1;
890 chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
891}
892
893/***********************/
894/* Huge Runs (forward) */
895/***********************/
896
897static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
898static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
899static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
900
901#if ZEND_DEBUG
902static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
903#else
904static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
905#endif
906
907/**************/
908/* Large Runs */
909/**************/
910
911#if ZEND_DEBUG
912static void *zend_mm_alloc_pages(zend_mm_heap *heap, uint32_t pages_count, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
913#else
914static void *zend_mm_alloc_pages(zend_mm_heap *heap, uint32_t pages_count ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
915#endif
916{
917 zend_mm_chunk *chunk = heap->main_chunk;
918 uint32_t page_num, len;
919 int steps = 0;
920
921 while (1) {
922 if (UNEXPECTED(chunk->free_pages < pages_count)) {
923 goto not_found;
924#if 0
925 } else if (UNEXPECTED(chunk->free_pages + chunk->free_tail == ZEND_MM_PAGES)) {
926 if (UNEXPECTED(ZEND_MM_PAGES - chunk->free_tail < pages_count)) {
927 goto not_found;
928 } else {
929 page_num = chunk->free_tail;
930 goto found;
931 }
932 } else if (0) {
933 /* First-Fit Search */
934 int free_tail = chunk->free_tail;
935 zend_mm_bitset *bitset = chunk->free_map;
936 zend_mm_bitset tmp = *(bitset++);
937 int i = 0;
938
939 while (1) {
940 /* skip allocated blocks */
941 while (tmp == (zend_mm_bitset)-1) {
943 if (i == ZEND_MM_PAGES) {
944 goto not_found;
945 }
946 tmp = *(bitset++);
947 }
948 /* find first 0 bit */
949 page_num = i + zend_mm_bitset_nts(tmp);
950 /* reset bits from 0 to "bit" */
951 tmp &= tmp + 1;
952 /* skip free blocks */
953 while (tmp == 0) {
955 len = i - page_num;
956 if (len >= pages_count) {
957 goto found;
958 } else if (i >= free_tail) {
959 goto not_found;
960 }
961 tmp = *(bitset++);
962 }
963 /* find first 1 bit */
964 len = (i + zend_ulong_ntz(tmp)) - page_num;
965 if (len >= pages_count) {
966 goto found;
967 }
968 /* set bits from 0 to "bit" */
969 tmp |= tmp - 1;
970 }
971#endif
972 } else {
973 /* Best-Fit Search */
974 int best = -1;
975 uint32_t best_len = ZEND_MM_PAGES;
976 uint32_t free_tail = chunk->free_tail;
977 zend_mm_bitset *bitset = chunk->free_map;
978 zend_mm_bitset tmp = *(bitset++);
979 uint32_t i = 0;
980
981 while (1) {
982 /* skip allocated blocks */
983 while (tmp == (zend_mm_bitset)-1) {
985 if (i == ZEND_MM_PAGES) {
986 if (best > 0) {
987 page_num = best;
988 goto found;
989 } else {
990 goto not_found;
991 }
992 }
993 tmp = *(bitset++);
994 }
995 /* find first 0 bit */
996 page_num = i + zend_mm_bitset_nts(tmp);
997 /* reset bits from 0 to "bit" */
998 tmp &= tmp + 1;
999 /* skip free blocks */
1000 while (tmp == 0) {
1001 i += ZEND_MM_BITSET_LEN;
1002 if (i >= free_tail || i == ZEND_MM_PAGES) {
1003 len = ZEND_MM_PAGES - page_num;
1004 if (len >= pages_count && len < best_len) {
1005 chunk->free_tail = page_num + pages_count;
1006 goto found;
1007 } else {
1008 /* set accurate value */
1009 chunk->free_tail = page_num;
1010 if (best > 0) {
1011 page_num = best;
1012 goto found;
1013 } else {
1014 goto not_found;
1015 }
1016 }
1017 }
1018 tmp = *(bitset++);
1019 }
1020 /* find first 1 bit */
1021 len = i + zend_ulong_ntz(tmp) - page_num;
1022 if (len >= pages_count) {
1023 if (len == pages_count) {
1024 goto found;
1025 } else if (len < best_len) {
1026 best_len = len;
1027 best = page_num;
1028 }
1029 }
1030 /* set bits from 0 to "bit" */
1031 tmp |= tmp - 1;
1032 }
1033 }
1034
1035not_found:
1036 if (chunk->next == heap->main_chunk) {
1037get_chunk:
1038 if (heap->cached_chunks) {
1039 heap->cached_chunks_count--;
1040 chunk = heap->cached_chunks;
1041 heap->cached_chunks = chunk->next;
1042 } else {
1043#if ZEND_MM_LIMIT
1044 if (UNEXPECTED(ZEND_MM_CHUNK_SIZE > heap->limit - heap->real_size)) {
1045 if (zend_mm_gc(heap)) {
1046 goto get_chunk;
1047 } else if (heap->overflow == 0) {
1048#if ZEND_DEBUG
1049 zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
1050#else
1051 zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, ZEND_MM_PAGE_SIZE * pages_count);
1052#endif
1053 return NULL;
1054 }
1055 }
1056#endif
1057 chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(heap, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
1058 if (UNEXPECTED(chunk == NULL)) {
1059 /* insufficient memory */
1060 if (zend_mm_gc(heap) &&
1061 (chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(heap, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE)) != NULL) {
1062 /* pass */
1063 } else {
1064#if !ZEND_MM_LIMIT
1065 zend_mm_safe_error(heap, "Out of memory");
1066#elif ZEND_DEBUG
1067 zend_mm_safe_error(heap, "Out of memory (allocated %zu bytes) at %s:%d (tried to allocate %zu bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
1068#else
1069 zend_mm_safe_error(heap, "Out of memory (allocated %zu bytes) (tried to allocate %zu bytes)", heap->real_size, ZEND_MM_PAGE_SIZE * pages_count);
1070#endif
1071 return NULL;
1072 }
1073 }
1074#if ZEND_MM_STAT
1075 do {
1076 size_t size = heap->real_size + ZEND_MM_CHUNK_SIZE;
1077 size_t peak = MAX(heap->real_peak, size);
1078 heap->real_size = size;
1079 heap->real_peak = peak;
1080 } while (0);
1081#elif ZEND_MM_LIMIT
1083
1084#endif
1085 }
1086 heap->chunks_count++;
1087 if (heap->chunks_count > heap->peak_chunks_count) {
1088 heap->peak_chunks_count = heap->chunks_count;
1089 }
1090 zend_mm_chunk_init(heap, chunk);
1091 page_num = ZEND_MM_FIRST_PAGE;
1093 goto found;
1094 } else {
1095 chunk = chunk->next;
1096 steps++;
1097 }
1098 }
1099
1100found:
1101 if (steps > 2 && pages_count < 8) {
1102 ZEND_MM_CHECK(chunk->next->prev == chunk, "zend_mm_heap corrupted");
1103 ZEND_MM_CHECK(chunk->prev->next == chunk, "zend_mm_heap corrupted");
1104
1105 /* move chunk into the head of the linked-list */
1106 chunk->prev->next = chunk->next;
1107 chunk->next->prev = chunk->prev;
1108 chunk->next = heap->main_chunk->next;
1109 chunk->prev = heap->main_chunk;
1110 chunk->prev->next = chunk;
1111 chunk->next->prev = chunk;
1112 }
1113 /* mark run as allocated */
1114 chunk->free_pages -= pages_count;
1115 zend_mm_bitset_set_range(chunk->free_map, page_num, pages_count);
1116 chunk->map[page_num] = ZEND_MM_LRUN(pages_count);
1117 if (page_num == chunk->free_tail) {
1118 chunk->free_tail = page_num + pages_count;
1119 }
1120 return ZEND_MM_PAGE_ADDR(chunk, page_num);
1121}
1122
1123static zend_always_inline void *zend_mm_alloc_large_ex(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1124{
1125 int pages_count = (int)ZEND_MM_SIZE_TO_NUM(size, ZEND_MM_PAGE_SIZE);
1126#if ZEND_DEBUG
1127 void *ptr = zend_mm_alloc_pages(heap, pages_count, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1128#else
1129 void *ptr = zend_mm_alloc_pages(heap, pages_count ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1130#endif
1131#if ZEND_MM_STAT
1132 do {
1133 size_t size = heap->size + pages_count * ZEND_MM_PAGE_SIZE;
1134 size_t peak = MAX(heap->peak, size);
1135 heap->size = size;
1136 heap->peak = peak;
1137 } while (0);
1138#endif
1139 return ptr;
1140}
1141
1142static zend_never_inline void *zend_mm_alloc_large(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1143{
1144 return zend_mm_alloc_large_ex(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1145}
1146
1147static zend_always_inline void zend_mm_delete_chunk(zend_mm_heap *heap, zend_mm_chunk *chunk)
1148{
1149 ZEND_MM_CHECK(chunk->next->prev == chunk, "zend_mm_heap corrupted");
1150 ZEND_MM_CHECK(chunk->prev->next == chunk, "zend_mm_heap corrupted");
1151
1152 chunk->next->prev = chunk->prev;
1153 chunk->prev->next = chunk->next;
1154 heap->chunks_count--;
1155 if (heap->chunks_count + heap->cached_chunks_count < heap->avg_chunks_count + 0.1
1156 || (heap->chunks_count == heap->last_chunks_delete_boundary
1157 && heap->last_chunks_delete_count >= 4)) {
1158 /* delay deletion */
1159 heap->cached_chunks_count++;
1160 chunk->next = heap->cached_chunks;
1161 heap->cached_chunks = chunk;
1162 } else {
1163#if ZEND_MM_STAT || ZEND_MM_LIMIT
1165#endif
1166 if (!heap->cached_chunks) {
1167 if (heap->chunks_count != heap->last_chunks_delete_boundary) {
1169 heap->last_chunks_delete_count = 0;
1170 } else {
1172 }
1173 }
1174 if (!heap->cached_chunks || chunk->num > heap->cached_chunks->num) {
1175 zend_mm_chunk_free(heap, chunk, ZEND_MM_CHUNK_SIZE);
1176 } else {
1177//TODO: select the best chunk to delete???
1178 chunk->next = heap->cached_chunks->next;
1179 zend_mm_chunk_free(heap, heap->cached_chunks, ZEND_MM_CHUNK_SIZE);
1180 heap->cached_chunks = chunk;
1181 }
1182 }
1183}
1184
1185static zend_always_inline void zend_mm_free_pages_ex(zend_mm_heap *heap, zend_mm_chunk *chunk, uint32_t page_num, uint32_t pages_count, int free_chunk)
1186{
1187 chunk->free_pages += pages_count;
1188 zend_mm_bitset_reset_range(chunk->free_map, page_num, pages_count);
1189 chunk->map[page_num] = 0;
1190 if (chunk->free_tail == page_num + pages_count) {
1191 /* this setting may not be accurate */
1192 chunk->free_tail = page_num;
1193 }
1194 if (free_chunk && chunk != heap->main_chunk && chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE) {
1195 zend_mm_delete_chunk(heap, chunk);
1196 }
1197}
1198
1199static zend_never_inline void zend_mm_free_pages(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
1200{
1201 zend_mm_free_pages_ex(heap, chunk, page_num, pages_count, 1);
1202}
1203
1204static zend_always_inline void zend_mm_free_large(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
1205{
1206#if ZEND_MM_STAT
1207 heap->size -= pages_count * ZEND_MM_PAGE_SIZE;
1208#endif
1209 zend_mm_free_pages(heap, chunk, page_num, pages_count);
1210}
1211
1212/**************/
1213/* Small Runs */
1214/**************/
1215
1216/* higher set bit number (0->N/A, 1->1, 2->2, 4->3, 8->4, 127->7, 128->8 etc) */
1217static zend_always_inline int zend_mm_small_size_to_bit(int size)
1218{
1219#if (defined(__GNUC__) || __has_builtin(__builtin_clz)) && defined(PHP_HAVE_BUILTIN_CLZ)
1220 return (__builtin_clz(size) ^ 0x1f) + 1;
1221#elif defined(_WIN32)
1222 unsigned long index;
1223
1224 if (!BitScanReverse(&index, (unsigned long)size)) {
1225 /* undefined behavior */
1226 return 64;
1227 }
1228
1229 return (((31 - (int)index) ^ 0x1f) + 1);
1230#else
1231 int n = 16;
1232 if (size <= 0x00ff) {n -= 8; size = size << 8;}
1233 if (size <= 0x0fff) {n -= 4; size = size << 4;}
1234 if (size <= 0x3fff) {n -= 2; size = size << 2;}
1235 if (size <= 0x7fff) {n -= 1;}
1236 return n;
1237#endif
1238}
1239
1240#ifndef MAX
1241# define MAX(a, b) (((a) > (b)) ? (a) : (b))
1242#endif
1243
1244#ifndef MIN
1245# define MIN(a, b) (((a) < (b)) ? (a) : (b))
1246#endif
1247
1248static zend_always_inline int zend_mm_small_size_to_bin(size_t size)
1249{
1250#if 0
1251 int n;
1252 /*0, 1, 2, 3, 4, 5, 6, 7, 8, 9 10, 11, 12*/
1253 static const int f1[] = { 3, 3, 3, 3, 3, 3, 3, 4, 5, 6, 7, 8, 9};
1254 static const int f2[] = { 0, 0, 0, 0, 0, 0, 0, 4, 8, 12, 16, 20, 24};
1255
1256 if (UNEXPECTED(size <= 2)) return 0;
1257 n = zend_mm_small_size_to_bit(size - 1);
1258 return ((size-1) >> f1[n]) + f2[n];
1259#else
1260 unsigned int t1, t2;
1261
1262 if (size <= 64) {
1263 /* we need to support size == 0 ... */
1264 return (size - !!size) >> 3;
1265 } else {
1266 t1 = size - 1;
1267 t2 = zend_mm_small_size_to_bit(t1) - 3;
1268 t1 = t1 >> t2;
1269 t2 = t2 - 3;
1270 t2 = t2 << 2;
1271 return (int)(t1 + t2);
1272 }
1273#endif
1274}
1275
1276#define ZEND_MM_SMALL_SIZE_TO_BIN(size) zend_mm_small_size_to_bin(size)
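/*
 * [Editor's worked example - not part of the original source] A request of
 * 100 bytes takes the size > 64 branch above: t1 = 99, whose highest set bit is
 * bit 6, so zend_mm_small_size_to_bit() returns 7 and t2 = 7 - 3 = 4;
 * t1 >> 4 == 6 and ((4 - 3) << 2) == 4, giving bin 10. bin_data_size[10] is
 * 112, the nearest predefined small size >= 100 (see zend_alloc_sizes.h).
 */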
1277
1278#if ZEND_MM_HEAP_PROTECTION
1279/* We keep track of free slots by organizing them in a linked list, with the
1280 * first word of every free slot being a pointer to the next one.
1281 *
1282 * In order to frustrate corruptions, we check the consistency of these pointers
1283 * before dereference by comparing them with a shadow.
1284 *
1285 * The shadow is a copy of the pointer, stored at the end of the slot. It is
1286 * XOR'ed with a random key, and converted to big-endian so that smaller
1287 * corruptions affect the most significant bytes, which has a high chance of
1288 * resulting in an invalid address instead of pointing to an adjacent slot.
1289 */
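/*
 * [Editor's note - not part of the original source] Concretely: when a slot is
 * pushed onto a free list, its "next" pointer is stored twice - verbatim in the
 * first word of the slot, and XOR'ed with heap->shadow_key (and byte-swapped on
 * little-endian builds) in the last word. When the slot is popped and the plain
 * copy is non-NULL, the shadow must decode back to the same address, otherwise
 * zend_mm_panic("zend_mm_heap corrupted") is called.
 */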
1290
1291#define ZEND_MM_FREE_SLOT_PTR_SHADOW(free_slot, bin_num) \
1292 *((zend_mm_free_slot**)((char*)(free_slot) + bin_data_size[(bin_num)] - sizeof(zend_mm_free_slot*)))
1293
1294static zend_always_inline zend_mm_free_slot* zend_mm_encode_free_slot(const zend_mm_heap *heap, const zend_mm_free_slot *slot)
1295{
1296#ifdef WORDS_BIGENDIAN
1297 return (zend_mm_free_slot*)(((uintptr_t)slot) ^ heap->shadow_key);
1298#else
1299 return (zend_mm_free_slot*)(BSWAPPTR((uintptr_t)slot) ^ heap->shadow_key);
1300#endif
1301}
1302
1303static zend_always_inline zend_mm_free_slot* zend_mm_decode_free_slot(zend_mm_heap *heap, zend_mm_free_slot *slot)
1304{
1305#ifdef WORDS_BIGENDIAN
1306 return (zend_mm_free_slot*)((uintptr_t)slot ^ heap->shadow_key);
1307#else
1308 return (zend_mm_free_slot*)(BSWAPPTR((uintptr_t)slot ^ heap->shadow_key));
1309#endif
1310}
1311
1312static zend_always_inline void zend_mm_set_next_free_slot(zend_mm_heap *heap, uint32_t bin_num, zend_mm_free_slot *slot, zend_mm_free_slot *next)
1313{
1314 ZEND_ASSERT(bin_data_size[bin_num] >= ZEND_MM_MIN_USEABLE_BIN_SIZE);
1315
1316 slot->next_free_slot = next;
1317 ZEND_MM_FREE_SLOT_PTR_SHADOW(slot, bin_num) = zend_mm_encode_free_slot(heap, next);
1318}
1319
1320static zend_always_inline zend_mm_free_slot *zend_mm_get_next_free_slot(zend_mm_heap *heap, uint32_t bin_num, zend_mm_free_slot* slot)
1321{
1322 zend_mm_free_slot *next = slot->next_free_slot;
1323 if (EXPECTED(next != NULL)) {
1324 zend_mm_free_slot *shadow = ZEND_MM_FREE_SLOT_PTR_SHADOW(slot, bin_num);
1325 if (UNEXPECTED(next != zend_mm_decode_free_slot(heap, shadow))) {
1326 zend_mm_panic("zend_mm_heap corrupted");
1327 }
1328 }
1329 return (zend_mm_free_slot*)next;
1330}
1331
1332#else /* ZEND_MM_HEAP_PROTECTION */
1333# define zend_mm_set_next_free_slot(heap, bin_num, slot, next) do { \
1334 (slot)->next_free_slot = (next); \
1335 } while (0)
1336# define zend_mm_get_next_free_slot(heap, bin_num, slot) (slot)->next_free_slot
1337#endif /* ZEND_MM_HEAP_PROTECTION */
1338
1339static zend_never_inline void *zend_mm_alloc_small_slow(zend_mm_heap *heap, uint32_t bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1340{
1341 zend_mm_chunk *chunk;
1342 int page_num;
1343 zend_mm_bin *bin;
1345
1346#if ZEND_DEBUG
1347 bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num], bin_data_size[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1348#else
1349 bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1350#endif
1351 if (UNEXPECTED(bin == NULL)) {
1352 /* insufficient memory */
1353 return NULL;
1354 }
1355
1358 chunk->map[page_num] = ZEND_MM_SRUN(bin_num);
1359 if (bin_pages[bin_num] > 1) {
1360 uint32_t i = 1;
1361
1362 do {
1363 chunk->map[page_num+i] = ZEND_MM_NRUN(bin_num, i);
1364 i++;
1365 } while (i < bin_pages[bin_num]);
1366 }
1367
1368 /* create a linked list of elements from 1 to last */
1369 end = (zend_mm_free_slot*)((char*)bin + (bin_data_size[bin_num] * (bin_elements[bin_num] - 1)));
1370 heap->free_slot[bin_num] = p = (zend_mm_free_slot*)((char*)bin + bin_data_size[bin_num]);
1371 do {
1372 zend_mm_set_next_free_slot(heap, bin_num, p, (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]));
1373#if ZEND_DEBUG
1374 do {
1375 zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1376 dbg->size = 0;
1377 } while (0);
1378#endif
1379 p = (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]);
1380 } while (p != end);
1381
1382 /* terminate list using NULL */
1383 p->next_free_slot = NULL;
1384#if ZEND_DEBUG
1385 do {
1386 zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1387 dbg->size = 0;
1388 } while (0);
1389#endif
1390
1391 /* return first element */
1392 return bin;
1393}
1394
1395static zend_always_inline void *zend_mm_alloc_small(zend_mm_heap *heap, int bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1396{
1397 ZEND_ASSERT(bin_data_size[bin_num] >= ZEND_MM_MIN_USEABLE_BIN_SIZE);
1398
1399#if ZEND_MM_STAT
1400 do {
1401 size_t size = heap->size + bin_data_size[bin_num];
1402 size_t peak = MAX(heap->peak, size);
1403 heap->size = size;
1404 heap->peak = peak;
1405 } while (0);
1406#endif
1407
1408 if (EXPECTED(heap->free_slot[bin_num] != NULL)) {
1409 zend_mm_free_slot *p = heap->free_slot[bin_num];
1410 heap->free_slot[bin_num] = zend_mm_get_next_free_slot(heap, bin_num, p);
1411 return p;
1412 } else {
1413 return zend_mm_alloc_small_slow(heap, bin_num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1414 }
1415}
1416
1417static zend_always_inline void zend_mm_free_small(zend_mm_heap *heap, void *ptr, int bin_num)
1418{
1419 ZEND_ASSERT(bin_data_size[bin_num] >= ZEND_MM_MIN_USEABLE_BIN_SIZE);
1420
1422
1423#if ZEND_MM_STAT
1424 heap->size -= bin_data_size[bin_num];
1425#endif
1426
1427#if ZEND_DEBUG
1428 do {
1429 zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1430 dbg->size = 0;
1431 } while (0);
1432#endif
1433
1435 zend_mm_set_next_free_slot(heap, bin_num, p, heap->free_slot[bin_num]);
1436 heap->free_slot[bin_num] = p;
1437}
1438
1439/********/
1440/* Heap */
1441/********/
1442
1443#if ZEND_DEBUG
1444static zend_always_inline zend_mm_debug_info *zend_mm_get_debug_info(zend_mm_heap *heap, void *ptr)
1445{
1446 size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1447 zend_mm_chunk *chunk;
1448 int page_num;
1449 zend_mm_page_info info;
1450
1451 ZEND_MM_CHECK(page_offset != 0, "zend_mm_heap corrupted");
1452 chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1453 page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1454 info = chunk->map[page_num];
1455 ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1456 if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
1457 int bin_num = ZEND_MM_SRUN_BIN_NUM(info);
1458 return (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1459 } else /* if (info & ZEND_MM_IS_LRUN) */ {
1460 int pages_count = ZEND_MM_LRUN_PAGES(info);
1461
1462 return (zend_mm_debug_info*)((char*)ptr + ZEND_MM_PAGE_SIZE * pages_count - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1463 }
1464}
1465#endif
1466
1467static zend_always_inline void *zend_mm_alloc_heap(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1468{
1469 void *ptr;
1470#if ZEND_MM_HEAP_PROTECTION
1473 }
1474#endif /* ZEND_MM_HEAP_PROTECTION */
1475#if ZEND_DEBUG
1476 size_t real_size = size;
1477 zend_mm_debug_info *dbg;
1478
1479 /* special handling for zero-size allocation */
1480 size = MAX(size, 1);
1481 size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
1482 if (UNEXPECTED(size < real_size)) {
1483 zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu + %zu)", ZEND_MM_ALIGNED_SIZE(real_size), ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1484 return NULL;
1485 }
1486#endif
1489#if ZEND_DEBUG
1490 dbg = zend_mm_get_debug_info(heap, ptr);
1491 dbg->size = real_size;
1492 dbg->filename = __zend_filename;
1493 dbg->orig_filename = __zend_orig_filename;
1494 dbg->lineno = __zend_lineno;
1495 dbg->orig_lineno = __zend_orig_lineno;
1496#endif
1497 return ptr;
1498 } else if (EXPECTED(size <= ZEND_MM_MAX_LARGE_SIZE)) {
1499 ptr = zend_mm_alloc_large(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1500#if ZEND_DEBUG
1501 dbg = zend_mm_get_debug_info(heap, ptr);
1502 dbg->size = real_size;
1503 dbg->filename = __zend_filename;
1504 dbg->orig_filename = __zend_orig_filename;
1505 dbg->lineno = __zend_lineno;
1506 dbg->orig_lineno = __zend_orig_lineno;
1507#endif
1508 return ptr;
1509 } else {
1510#if ZEND_DEBUG
1511 size = real_size;
1512#endif
1513 return zend_mm_alloc_huge(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1514 }
1515}
1516
1517static zend_always_inline void zend_mm_free_heap(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1518{
1519 size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1520
1521 if (UNEXPECTED(page_offset == 0)) {
1522 if (ptr != NULL) {
1524 }
1525 } else {
1526 zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1527 int page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1528 zend_mm_page_info info = chunk->map[page_num];
1529
1530 ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1531 if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
1532 zend_mm_free_small(heap, ptr, ZEND_MM_SRUN_BIN_NUM(info));
1533 } else /* if (info & ZEND_MM_IS_LRUN) */ {
1534 int pages_count = ZEND_MM_LRUN_PAGES(info);
1535
1536 ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
1537 zend_mm_free_large(heap, chunk, page_num, pages_count);
1538 }
1539 }
1540}
1541
1542static size_t zend_mm_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1543{
1544 size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1545
1546 if (UNEXPECTED(page_offset == 0)) {
1547 return zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1548 } else {
1549 zend_mm_chunk *chunk;
1550#if 0 && ZEND_DEBUG
1551 zend_mm_debug_info *dbg = zend_mm_get_debug_info(heap, ptr);
1552 return dbg->size;
1553#else
1554 int page_num;
1555 zend_mm_page_info info;
1556
1557 chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1558 page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1559 info = chunk->map[page_num];
1560 ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1561 if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
1562 return bin_data_size[ZEND_MM_SRUN_BIN_NUM(info)];
1563 } else /* if (info & ZEND_MM_IS_LARGE_RUN) */ {
1565 }
1566#endif
1567 }
1568}
1569
1570static zend_never_inline void *zend_mm_realloc_slow(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1571{
1572 void *ret;
1573
1574#if ZEND_MM_STAT
1575 do {
1576 size_t orig_peak = heap->peak;
1577#endif
1579 memcpy(ret, ptr, copy_size);
1581#if ZEND_MM_STAT
1582 heap->peak = MAX(orig_peak, heap->size);
1583 } while (0);
1584#endif
1585 return ret;
1586}
1587
1588static zend_never_inline void *zend_mm_realloc_huge(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1589{
1590 size_t old_size;
1591 size_t new_size;
1592#if ZEND_DEBUG
1593 size_t real_size;
1594#endif
1595
1596 old_size = zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1597#if ZEND_DEBUG
1598 real_size = size;
1599 size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
1600#endif
1602#if ZEND_DEBUG
1603 size = real_size;
1604#endif
1605#ifdef ZEND_WIN32
1606 /* On Windows we don't have the ability to extend huge blocks in place.
1607 * We allocate them with 2MB size granularity to avoid many
1608 * reallocations when they are extended by small pieces.
1609 */
1611#else
1613#endif
1614 if (new_size == old_size) {
1615#if ZEND_DEBUG
1616 zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1617#else
1618 zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1619#endif
1620 return ptr;
1621 } else if (new_size < old_size) {
1622 /* unmap tail */
1623 if (zend_mm_chunk_truncate(heap, ptr, old_size, new_size)) {
1624#if ZEND_MM_STAT || ZEND_MM_LIMIT
1625 heap->real_size -= old_size - new_size;
1626#endif
1627#if ZEND_MM_STAT
1628 heap->size -= old_size - new_size;
1629#endif
1630#if ZEND_DEBUG
1631 zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1632#else
1633 zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1634#endif
1635 return ptr;
1636 }
1637 } else /* if (new_size > old_size) */ {
1638#if ZEND_MM_LIMIT
1639 if (UNEXPECTED(new_size - old_size > heap->limit - heap->real_size)) {
1640 if (zend_mm_gc(heap) && new_size - old_size <= heap->limit - heap->real_size) {
1641 /* pass */
1642 } else if (heap->overflow == 0) {
1643#if ZEND_DEBUG
1644 zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
1645#else
1646 zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, size);
1647#endif
1648 return NULL;
1649 }
1650 }
1651#endif
1652 /* try to map tail right after this block */
1653 if (zend_mm_chunk_extend(heap, ptr, old_size, new_size)) {
1654#if ZEND_MM_STAT || ZEND_MM_LIMIT
1655 heap->real_size += new_size - old_size;
1656#endif
1657#if ZEND_MM_STAT
1658 heap->real_peak = MAX(heap->real_peak, heap->real_size);
1659 heap->size += new_size - old_size;
1660 heap->peak = MAX(heap->peak, heap->size);
1661#endif
1662#if ZEND_DEBUG
1663 zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1664#else
1665 zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1666#endif
1667 return ptr;
1668 }
1669 }
1670 }
1671
1672 return zend_mm_realloc_slow(heap, ptr, size, MIN(old_size, copy_size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1673}
1674
1675static zend_always_inline void *zend_mm_realloc_heap(zend_mm_heap *heap, void *ptr, size_t size, bool use_copy_size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1676{
1677 size_t page_offset;
1678 size_t old_size;
1679 size_t new_size;
1680 void *ret;
1681#if ZEND_DEBUG
1682 zend_mm_debug_info *dbg;
1683#endif
1684
1685 page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1686 if (UNEXPECTED(page_offset == 0)) {
1687 if (EXPECTED(ptr == NULL)) {
1689 } else {
1690 return zend_mm_realloc_huge(heap, ptr, size, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1691 }
1692 } else {
1693 zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1694 int page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1695 zend_mm_page_info info = chunk->map[page_num];
1696#if ZEND_MM_HEAP_PROTECTION
1699 }
1700#endif /* ZEND_MM_HEAP_PROTECTION */
1701#if ZEND_DEBUG
1702 size_t real_size = size;
1703
1704 size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
1705#endif
1706
1707 ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1708 if (info & ZEND_MM_IS_SRUN) {
1709 int old_bin_num = ZEND_MM_SRUN_BIN_NUM(info);
1710
1711 do {
1712 old_size = bin_data_size[old_bin_num];
1713
1714 /* Check if requested size fits into current bin */
1715 if (size <= old_size) {
1716 /* Check if truncation is necessary */
1717 if (old_bin_num > 0 && size < bin_data_size[old_bin_num - 1]) {
1718 /* truncation */
1720 copy_size = use_copy_size ? MIN(size, copy_size) : size;
1721 memcpy(ret, ptr, copy_size);
1722 zend_mm_free_small(heap, ptr, old_bin_num);
1723 } else {
1724 /* reallocation in-place */
1725 ret = ptr;
1726 }
1727 } else if (size <= ZEND_MM_MAX_SMALL_SIZE) {
1728 /* small extension */
1729
1730#if ZEND_MM_STAT
1731 do {
1732 size_t orig_peak = heap->peak;
1733#endif
1735 copy_size = use_copy_size ? MIN(old_size, copy_size) : old_size;
1736 memcpy(ret, ptr, copy_size);
1737 zend_mm_free_small(heap, ptr, old_bin_num);
1738#if ZEND_MM_STAT
1739 heap->peak = MAX(orig_peak, heap->size);
1740 } while (0);
1741#endif
1742 } else {
1743 /* slow reallocation */
1744 break;
1745 }
1746
1747#if ZEND_DEBUG
1748 dbg = zend_mm_get_debug_info(heap, ret);
1749 dbg->size = real_size;
1750 dbg->filename = __zend_filename;
1751 dbg->orig_filename = __zend_orig_filename;
1752 dbg->lineno = __zend_lineno;
1753 dbg->orig_lineno = __zend_orig_lineno;
1754#endif
1755 return ret;
1756 } while (0);
1757
1758 } else /* if (info & ZEND_MM_IS_LARGE_RUN) */ {
1759 ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
1760 old_size = ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
1763 if (new_size == old_size) {
1764#if ZEND_DEBUG
1765 dbg = zend_mm_get_debug_info(heap, ptr);
1766 dbg->size = real_size;
1767 dbg->filename = __zend_filename;
1768 dbg->orig_filename = __zend_orig_filename;
1769 dbg->lineno = __zend_lineno;
1770 dbg->orig_lineno = __zend_orig_lineno;
1771#endif
1772 return ptr;
1773 } else if (new_size < old_size) {
1774 /* free tail pages */
1775 int new_pages_count = (int)(new_size / ZEND_MM_PAGE_SIZE);
1776 int rest_pages_count = (int)((old_size - new_size) / ZEND_MM_PAGE_SIZE);
1777
1778#if ZEND_MM_STAT
1779 heap->size -= rest_pages_count * ZEND_MM_PAGE_SIZE;
1780#endif
1781 chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
1782 chunk->free_pages += rest_pages_count;
1783 zend_mm_bitset_reset_range(chunk->free_map, page_num + new_pages_count, rest_pages_count);
1784#if ZEND_DEBUG
1785 dbg = zend_mm_get_debug_info(heap, ptr);
1786 dbg->size = real_size;
1787 dbg->filename = __zend_filename;
1788 dbg->orig_filename = __zend_orig_filename;
1789 dbg->lineno = __zend_lineno;
1790 dbg->orig_lineno = __zend_orig_lineno;
1791#endif
1792 return ptr;
1793 } else /* if (new_size > old_size) */ {
1794 int new_pages_count = (int)(new_size / ZEND_MM_PAGE_SIZE);
1795 int old_pages_count = (int)(old_size / ZEND_MM_PAGE_SIZE);
1796
1797 /* try to allocate tail pages after this block */
1798 if (page_num + new_pages_count <= ZEND_MM_PAGES &&
1799 zend_mm_bitset_is_free_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count)) {
1800#if ZEND_MM_STAT
1801 do {
1802 size_t size = heap->size + (new_size - old_size);
1803 size_t peak = MAX(heap->peak, size);
1804 heap->size = size;
1805 heap->peak = peak;
1806 } while (0);
1807#endif
1808 chunk->free_pages -= new_pages_count - old_pages_count;
1809 zend_mm_bitset_set_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count);
1810 chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
1811#if ZEND_DEBUG
1812 dbg = zend_mm_get_debug_info(heap, ptr);
1813 dbg->size = real_size;
1814 dbg->filename = __zend_filename;
1815 dbg->orig_filename = __zend_orig_filename;
1816 dbg->lineno = __zend_lineno;
1817 dbg->orig_lineno = __zend_orig_lineno;
1818#endif
1819 return ptr;
1820 }
1821 }
1822 }
1823 }
1824#if ZEND_DEBUG
1825 size = real_size;
1826#endif
1827 }
1828
1829 copy_size = MIN(old_size, copy_size);
1830 return zend_mm_realloc_slow(heap, ptr, size, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1831}
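/*
 * In short, zend_mm_realloc_heap() tries the cheapest strategy first: a small
 * block stays in place when the new size still maps to the same bin, moves to
 * a smaller or larger bin with a copy otherwise, a large run is shrunk or
 * grown in place when the neighbouring pages allow it, and everything else
 * falls through to zend_mm_realloc_slow(). For example, with the predefined
 * small-bin sizes (..., 24, 32, 40, 48, ...), shrinking a 40-byte-bin block to
 * 36 bytes returns the same pointer, while shrinking it to 24 bytes allocates
 * from the 24-byte bin, copies the data and frees the old slot.
 */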
1832
1833/*********************/
1834/* Huge Runs (again) */
1835/*********************/
1836
1837#if ZEND_DEBUG
1838static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1839#else
1840static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1841#endif
1842{
1843 zend_mm_huge_list *list = (zend_mm_huge_list*)zend_mm_alloc_heap(heap, sizeof(zend_mm_huge_list) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1844 list->ptr = ptr;
1845 list->size = size;
1846 list->next = heap->huge_list;
1847#if ZEND_DEBUG
1848 list->dbg.size = dbg_size;
1849 list->dbg.filename = __zend_filename;
1850 list->dbg.orig_filename = __zend_orig_filename;
1851 list->dbg.lineno = __zend_lineno;
1852 list->dbg.orig_lineno = __zend_orig_lineno;
1853#endif
1854 heap->huge_list = list;
1855}
1856
1857static size_t zend_mm_del_huge_block(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1858{
1859 zend_mm_huge_list *prev = NULL;
1860 zend_mm_huge_list *list = heap->huge_list;
1861 while (list != NULL) {
1862 if (list->ptr == ptr) {
1863 size_t size;
1864
1865 if (prev) {
1866 prev->next = list->next;
1867 } else {
1868 heap->huge_list = list->next;
1869 }
1870 size = list->size;
1871 zend_mm_free_heap(heap, list ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1872 return size;
1873 }
1874 prev = list;
1875 list = list->next;
1876 }
1877 ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
1878 return 0;
1879}
1880
1881static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1882{
1883 zend_mm_huge_list *list = heap->huge_list;
1884 while (list != NULL) {
1885 if (list->ptr == ptr) {
1886 return list->size;
1887 }
1888 list = list->next;
1889 }
1890 ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
1891 return 0;
1892}
1893
1894#if ZEND_DEBUG
1895static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1896#else
1897static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1898#endif
1899{
1900 zend_mm_huge_list *list = heap->huge_list;
1901 while (list != NULL) {
1902 if (list->ptr == ptr) {
1903 list->size = size;
1904#if ZEND_DEBUG
1905 list->dbg.size = dbg_size;
1906 list->dbg.filename = __zend_filename;
1907 list->dbg.orig_filename = __zend_orig_filename;
1908 list->dbg.lineno = __zend_lineno;
1909 list->dbg.orig_lineno = __zend_orig_lineno;
1910#endif
1911 return;
1912 }
1913 list = list->next;
1914 }
1915}
1916
1917static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1918{
1919#ifdef ZEND_WIN32
1920 /* On Windows we don't have the ability to extend huge blocks in-place.
1921 * We allocate them with 2MB size granularity to avoid many
1922 * reallocations when they are extended by small pieces.
1923 */
1924 size_t alignment = MAX(REAL_PAGE_SIZE, ZEND_MM_CHUNK_SIZE);
1925#else
1926 size_t alignment = REAL_PAGE_SIZE;
1927#endif
1928 size_t new_size = ZEND_MM_ALIGNED_SIZE_EX(size, alignment);
1929 void *ptr;
1930
1931 if (UNEXPECTED(new_size < size)) {
1932 zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu + %zu)", size, alignment);
1933 }
1934
1935#if ZEND_MM_LIMIT
1936 if (UNEXPECTED(new_size > heap->limit - heap->real_size)) {
1937 if (zend_mm_gc(heap) && new_size <= heap->limit - heap->real_size) {
1938 /* pass */
1939 } else if (heap->overflow == 0) {
1940#if ZEND_DEBUG
1941 zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
1942#else
1943 zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, size);
1944#endif
1945 return NULL;
1946 }
1947 }
1948#endif
1949 ptr = zend_mm_chunk_alloc(heap, new_size, ZEND_MM_CHUNK_SIZE);
1950 if (UNEXPECTED(ptr == NULL)) {
1951 /* insufficient memory */
1952 if (zend_mm_gc(heap) &&
1953 (ptr = zend_mm_chunk_alloc(heap, new_size, ZEND_MM_CHUNK_SIZE)) != NULL) {
1954 /* pass */
1955 } else {
1956#if !ZEND_MM_LIMIT
1957 zend_mm_safe_error(heap, "Out of memory");
1958#elif ZEND_DEBUG
1959 zend_mm_safe_error(heap, "Out of memory (allocated %zu bytes) at %s:%d (tried to allocate %zu bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
1960#else
1961 zend_mm_safe_error(heap, "Out of memory (allocated %zu bytes) (tried to allocate %zu bytes)", heap->real_size, size);
1962#endif
1963 return NULL;
1964 }
1965 }
1966#if ZEND_DEBUG
1967 zend_mm_add_huge_block(heap, ptr, new_size, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1968#else
1969 zend_mm_add_huge_block(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1970#endif
1971#if ZEND_MM_STAT
1972 do {
1973 size_t size = heap->real_size + new_size;
1974 size_t peak = MAX(heap->real_peak, size);
1975 heap->real_size = size;
1976 heap->real_peak = peak;
1977 } while (0);
1978 do {
1979 size_t size = heap->size + new_size;
1980 size_t peak = MAX(heap->peak, size);
1981 heap->size = size;
1982 heap->peak = peak;
1983 } while (0);
1984#elif ZEND_MM_LIMIT
1985 heap->real_size += new_size;
1986#endif
1987 return ptr;
1988}
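/*
 * Example of the rounding above (assuming 4 KB pages and the default 2 MB
 * chunk size): a huge request of 2,100,000 bytes is rounded up to 2,101,248
 * bytes (513 pages) on most platforms, but to 4 MB (the next multiple of the
 * chunk size) on Windows, where huge blocks cannot be extended in place.
 */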
1989
1990static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1991{
1992 size_t size;
1993
1994 ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE) == 0, "zend_mm_heap corrupted");
1995 size = zend_mm_del_huge_block(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1996 zend_mm_chunk_free(heap, ptr, size);
1997#if ZEND_MM_STAT || ZEND_MM_LIMIT
1998 heap->real_size -= size;
1999#endif
2000#if ZEND_MM_STAT
2001 heap->size -= size;
2002#endif
2003}
2004
2005/******************/
2006/* Initialization */
2007/******************/
2008
2009static void zend_mm_refresh_key(zend_mm_heap *heap)
2010{
2011 zend_random_bytes_insecure(&heap->rand_state, &heap->shadow_key, sizeof(heap->shadow_key));
2012}
2013
2014static void zend_mm_init_key(zend_mm_heap *heap)
2015{
2016 memset(&heap->rand_state, 0, sizeof(heap->rand_state));
2017 zend_mm_refresh_key(heap);
2018}
2019
2020static zend_mm_heap *zend_mm_init(void)
2021{
2022 zend_mm_chunk *chunk = (zend_mm_chunk*)zend_mm_chunk_alloc_int(ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
2023 zend_mm_heap *heap;
2024
2025 if (UNEXPECTED(chunk == NULL)) {
2026#if ZEND_MM_ERROR
2027 fprintf(stderr, "Can't initialize heap\n");
2028#endif
2029 return NULL;
2030 }
2031 heap = &chunk->heap_slot;
2032 chunk->heap = heap;
2033 chunk->next = chunk;
2034 chunk->prev = chunk;
2035 chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
2036 chunk->free_tail = ZEND_MM_FIRST_PAGE;
2037 chunk->num = 0;
2038 chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
2039 chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
2040 heap->main_chunk = chunk;
2041 heap->cached_chunks = NULL;
2042 heap->chunks_count = 1;
2043 heap->peak_chunks_count = 1;
2044 heap->cached_chunks_count = 0;
2045 heap->avg_chunks_count = 1.0;
2046 heap->last_chunks_delete_boundary = 0;
2047 heap->last_chunks_delete_count = 0;
2048#if ZEND_MM_STAT || ZEND_MM_LIMIT
2049 heap->real_size = 0;
2050#endif
2051#if ZEND_MM_STAT
2052 heap->real_peak = 0;
2053 heap->size = 0;
2054 heap->peak = 0;
2055#endif
2056 zend_mm_init_key(heap);
2057#if ZEND_MM_LIMIT
2058 heap->limit = (size_t)Z_L(-1) >> 1;
2059 heap->overflow = 0;
2060#endif
2061#if ZEND_MM_CUSTOM
2062 heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_NONE;
2063#endif
2064#if ZEND_MM_STORAGE
2065 heap->storage = NULL;
2066#endif
2067 heap->huge_list = NULL;
2068 heap->pid = getpid();
2069 return heap;
2070}
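/*
 * Note that the heap itself lives in the reserved first page of its first
 * chunk (chunk->heap_slot), so no separate allocation is needed. With the
 * default ZEND_MM_FIRST_PAGE of 1, (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1 == 0x1
 * marks only page 0 (the chunk header page) as used in free_map, and map[0]
 * records it as a one-page large run.
 */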
2071
2072ZEND_API size_t zend_mm_gc(zend_mm_heap *heap)
2073{
2074 zend_mm_free_slot *p, *q;
2075 zend_mm_chunk *chunk;
2076 size_t page_offset;
2077 int page_num;
2078 zend_mm_page_info info;
2079 uint32_t i, free_counter;
2080 bool has_free_pages;
2081 size_t collected = 0;
2082
2083#if ZEND_MM_CUSTOM
2084 if (heap->use_custom_heap) {
2085 size_t (*gc)(void) = heap->custom_heap._gc;
2086 if (gc) {
2087 return gc();
2088 }
2089 return 0;
2090 }
2091#endif
2092
2093 for (i = 0; i < ZEND_MM_BINS; i++) {
2094 has_free_pages = false;
2095 p = heap->free_slot[i];
2096 while (p != NULL) {
2097 chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(p, ZEND_MM_CHUNK_SIZE);
2098 ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
2099 page_offset = ZEND_MM_ALIGNED_OFFSET(p, ZEND_MM_CHUNK_SIZE);
2100 ZEND_ASSERT(page_offset != 0);
2101 page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
2102 info = chunk->map[page_num];
2104 if (info & ZEND_MM_IS_LRUN) {
2105 page_num -= ZEND_MM_NRUN_OFFSET(info);
2106 info = chunk->map[page_num];
2108 ZEND_ASSERT(!(info & ZEND_MM_IS_LRUN));
2109 }
2111 free_counter = ZEND_MM_SRUN_FREE_COUNTER(info) + 1;
2112 if (free_counter == bin_elements[i]) {
2113 has_free_pages = true;
2114 }
2115 chunk->map[page_num] = ZEND_MM_SRUN_EX(i, free_counter);
2116 p = zend_mm_get_next_free_slot(heap, i, p);
2117 }
2118
2119 if (!has_free_pages) {
2120 continue;
2121 }
2122
2123 q = (zend_mm_free_slot*)&heap->free_slot[i];
2124 p = q->next_free_slot;
2125 while (p != NULL) {
2126 chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(p, ZEND_MM_CHUNK_SIZE);
2127 ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
2128 page_offset = ZEND_MM_ALIGNED_OFFSET(p, ZEND_MM_CHUNK_SIZE);
2129 ZEND_ASSERT(page_offset != 0);
2130 page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
2131 info = chunk->map[page_num];
2133 if (info & ZEND_MM_IS_LRUN) {
2134 page_num -= ZEND_MM_NRUN_OFFSET(info);
2135 info = chunk->map[page_num];
2137 ZEND_ASSERT(!(info & ZEND_MM_IS_LRUN));
2138 }
2140 if (ZEND_MM_SRUN_FREE_COUNTER(info) == bin_elements[i]) {
2141 /* remove from cache */
2142 p = zend_mm_get_next_free_slot(heap, i, p);
2143 if (q == (zend_mm_free_slot*)&heap->free_slot[i]) {
2144 q->next_free_slot = p;
2145 } else {
2146 zend_mm_set_next_free_slot(heap, i, q, p);
2147 }
2148 } else {
2149 q = p;
2150 if (q == (zend_mm_free_slot*)&heap->free_slot[i]) {
2151 p = q->next_free_slot;
2152 } else {
2153 p = zend_mm_get_next_free_slot(heap, i, q);
2154 }
2155 }
2156 }
2157 }
2158
2159 chunk = heap->main_chunk;
2160 do {
2161 i = ZEND_MM_FIRST_PAGE;
2162 while (i < chunk->free_tail) {
2163 if (zend_mm_bitset_is_set(chunk->free_map, i)) {
2164 info = chunk->map[i];
2165 if (info & ZEND_MM_IS_SRUN) {
2166 int bin_num = ZEND_MM_SRUN_BIN_NUM(info);
2167 int pages_count = bin_pages[bin_num];
2168
2169 if (ZEND_MM_SRUN_FREE_COUNTER(info) == bin_elements[bin_num]) {
2170 /* all elements are free */
2171 zend_mm_free_pages_ex(heap, chunk, i, pages_count, 0);
2172 collected += pages_count;
2173 } else {
2174 /* reset counter */
2175 chunk->map[i] = ZEND_MM_SRUN(bin_num);
2176 }
2177 i += bin_pages[bin_num];
2178 } else /* if (info & ZEND_MM_IS_LRUN) */ {
2179 i += ZEND_MM_LRUN_PAGES(info);
2180 }
2181 } else {
2182 i++;
2183 }
2184 }
2185 if (chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE) {
2186 zend_mm_chunk *next_chunk = chunk->next;
2187
2188 zend_mm_delete_chunk(heap, chunk);
2189 chunk = next_chunk;
2190 } else {
2191 chunk = chunk->next;
2192 }
2193 } while (chunk != heap->main_chunk);
2194
2195 return collected * ZEND_MM_PAGE_SIZE;
2196}
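/*
 * zend_mm_gc() works in phases: it first walks the per-bin free lists and
 * bumps the free counter of the small run each free element belongs to, then
 * unlinks from the free lists every element that sits in a completely free
 * run, and finally walks the page map of every chunk, releasing runs whose
 * free counter equals the bin's element count and deleting chunks that end up
 * fully empty. The return value is the number of freed pages multiplied by
 * ZEND_MM_PAGE_SIZE.
 */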
2197
2198#if ZEND_DEBUG
2199/******************/
2200/* Leak detection */
2201/******************/
2202
2203static zend_long zend_mm_find_leaks_small(zend_mm_chunk *p, uint32_t i, uint32_t j, zend_leak_info *leak)
2204{
2205 bool empty = true;
2206 zend_long count = 0;
2207 int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
2208 zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * (j + 1) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
2209
2210 while (j < bin_elements[bin_num]) {
2211 if (dbg->size != 0) {
2212 if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) {
2213 count++;
2214 dbg->size = 0;
2215 dbg->filename = NULL;
2216 dbg->lineno = 0;
2217 } else {
2218 empty = false;
2219 }
2220 }
2221 j++;
2222 dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]);
2223 }
2224 if (empty) {
2225 zend_mm_bitset_reset_range(p->free_map, i, bin_pages[bin_num]);
2226 }
2227 return count;
2228}
2229
2230static zend_long zend_mm_find_leaks(zend_mm_heap *heap, zend_mm_chunk *p, uint32_t i, zend_leak_info *leak)
2231{
2232 zend_long count = 0;
2233
2234 do {
2235 while (i < p->free_tail) {
2236 if (zend_mm_bitset_is_set(p->free_map, i)) {
2237 if (p->map[i] & ZEND_MM_IS_SRUN) {
2238 int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
2239 count += zend_mm_find_leaks_small(p, i, 0, leak);
2240 i += bin_pages[bin_num];
2241 } else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
2242 int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]);
2243 zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
2244
2245 if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) {
2246 count++;
2247 }
2248 zend_mm_bitset_reset_range(p->free_map, i, pages_count);
2249 i += pages_count;
2250 }
2251 } else {
2252 i++;
2253 }
2254 }
2255 p = p->next;
2256 i = ZEND_MM_FIRST_PAGE;
2257 } while (p != heap->main_chunk);
2258 return count;
2259}
2260
2261static zend_long zend_mm_find_leaks_huge(zend_mm_heap *heap, zend_mm_huge_list *list)
2262{
2263 zend_long count = 0;
2264 zend_mm_huge_list *prev = list;
2265 zend_mm_huge_list *p = list->next;
2266
2267 while (p) {
2268 if (p->dbg.filename == list->dbg.filename && p->dbg.lineno == list->dbg.lineno) {
2269 prev->next = p->next;
2270 zend_mm_chunk_free(heap, p->ptr, p->size);
2271 zend_mm_free_heap(heap, p, NULL, 0, NULL, 0);
2272 count++;
2273 } else {
2274 prev = p;
2275 }
2276 p = prev->next;
2277 }
2278
2279 return count;
2280}
2281
2282static void zend_mm_check_leaks(zend_mm_heap *heap)
2283{
2284 zend_mm_huge_list *list;
2285 zend_mm_chunk *p;
2286 zend_leak_info leak;
2287 zend_long repeated = 0;
2288 uint32_t total = 0;
2289 uint32_t i, j;
2290
2291 /* find leaked huge blocks and free them */
2292 list = heap->huge_list;
2293 while (list) {
2294 zend_mm_huge_list *q = list;
2295
2296 leak.addr = list->ptr;
2297 leak.size = list->dbg.size;
2298 leak.filename = list->dbg.filename;
2299 leak.orig_filename = list->dbg.orig_filename;
2300 leak.lineno = list->dbg.lineno;
2301 leak.orig_lineno = list->dbg.orig_lineno;
2302
2303 zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
2304 zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);
2305 repeated = zend_mm_find_leaks_huge(heap, list);
2306 total += 1 + repeated;
2307 if (repeated) {
2308 zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(uintptr_t)repeated);
2309 }
2310
2311 heap->huge_list = list = list->next;
2312 zend_mm_chunk_free(heap, q->ptr, q->size);
2313 zend_mm_free_heap(heap, q, NULL, 0, NULL, 0);
2314 }
2315
2316 /* for each chunk */
2317 p = heap->main_chunk;
2318 do {
2319 i = ZEND_MM_FIRST_PAGE;
2320 while (i < p->free_tail) {
2321 if (zend_mm_bitset_is_set(p->free_map, i)) {
2322 if (p->map[i] & ZEND_MM_IS_SRUN) {
2323 int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
2324 zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
2325
2326 j = 0;
2327 while (j < bin_elements[bin_num]) {
2328 if (dbg->size != 0) {
2329 leak.addr = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * j);
2330 leak.size = dbg->size;
2331 leak.filename = dbg->filename;
2332 leak.orig_filename = dbg->orig_filename;
2333 leak.lineno = dbg->lineno;
2334 leak.orig_lineno = dbg->orig_lineno;
2335
2336 zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
2337 zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);
2338
2339 dbg->size = 0;
2340 dbg->filename = NULL;
2341 dbg->lineno = 0;
2342
2343 repeated = zend_mm_find_leaks_small(p, i, j + 1, &leak) +
2344 zend_mm_find_leaks(heap, p, i + bin_pages[bin_num], &leak);
2345 total += 1 + repeated;
2346 if (repeated) {
2347 zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(uintptr_t)repeated);
2348 }
2349 }
2350 dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]);
2351 j++;
2352 }
2353 i += bin_pages[bin_num];
2354 } else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
2355 int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]);
2356 zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
2357
2358 leak.addr = (void*)((char*)p + ZEND_MM_PAGE_SIZE * i);
2359 leak.size = dbg->size;
2360 leak.filename = dbg->filename;
2361 leak.orig_filename = dbg->orig_filename;
2362 leak.lineno = dbg->lineno;
2363 leak.orig_lineno = dbg->orig_lineno;
2364
2365 zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
2366 zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);
2367
2368 zend_mm_bitset_reset_range(p->free_map, i, pages_count);
2369
2370 repeated = zend_mm_find_leaks(heap, p, i + pages_count, &leak);
2371 total += 1 + repeated;
2372 if (repeated) {
2373 zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(uintptr_t)repeated);
2374 }
2375 i += pages_count;
2376 }
2377 } else {
2378 i++;
2379 }
2380 }
2381 p = p->next;
2382 } while (p != heap->main_chunk);
2383 if (total) {
2384 zend_message_dispatcher(ZMSG_MEMORY_LEAKS_GRAND_TOTAL, &total);
2385 }
2386}
2387#endif
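/*
 * Leak reports are grouped by allocation site: the first block allocated at a
 * given filename/lineno pair is reported through ZMSG_MEMORY_LEAK_DETECTED,
 * further blocks from the same site only increase the repeat counter reported
 * through ZMSG_MEMORY_LEAK_REPEATED, and the grand total goes out as
 * ZMSG_MEMORY_LEAKS_GRAND_TOTAL.
 */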
2388
2389#if ZEND_MM_CUSTOM
2390static void *tracked_malloc(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
2391static void tracked_free_all(zend_mm_heap *heap);
2392#endif
2393
2394ZEND_API void zend_mm_shutdown(zend_mm_heap *heap, bool full, bool silent)
2395{
2396 zend_mm_chunk *p;
2397 zend_mm_huge_list *list;
2398
2399#if ZEND_MM_CUSTOM
2400 if (heap->use_custom_heap) {
2401 if (heap->custom_heap._malloc == tracked_malloc) {
2402 if (silent) {
2403 tracked_free_all(heap);
2404 }
2405 zend_hash_clean(heap->tracked_allocs);
2406 if (full) {
2407 zend_hash_destroy(heap->tracked_allocs);
2408 free(heap->tracked_allocs);
2409 /* Make sure the heap free below does not use tracked_free(). */
2410 heap->custom_heap._free = __zend_free;
2411 }
2412 heap->size = 0;
2413 }
2414
2415 void (*shutdown)(bool, bool) = heap->custom_heap._shutdown;
2416
2417 if (full) {
2418 free(heap);
2419 }
2420
2421 if (shutdown) {
2422 shutdown(full, silent);
2423 }
2424
2425 return;
2426 }
2427#endif
2428
2429#if ZEND_DEBUG
2430 if (!silent) {
2431 char *tmp = getenv("ZEND_ALLOC_PRINT_LEAKS");
2432 if (!tmp || ZEND_ATOL(tmp)) {
2433 zend_mm_check_leaks(heap);
2434 }
2435 }
2436#endif
2437
2438 /* free huge blocks */
2439 list = heap->huge_list;
2440 heap->huge_list = NULL;
2441 while (list) {
2442 zend_mm_huge_list *q = list;
2443 list = list->next;
2444 zend_mm_chunk_free(heap, q->ptr, q->size);
2445 }
2446
2447 /* move all chunks except the first one into the cache */
2448 p = heap->main_chunk->next;
2449 while (p != heap->main_chunk) {
2450 zend_mm_chunk *q = p->next;
2451 p->next = heap->cached_chunks;
2452 heap->cached_chunks = p;
2453 p = q;
2454 heap->chunks_count--;
2455 heap->cached_chunks_count++;
2456 }
2457
2458 if (full) {
2459 /* free all cached chunks */
2460 while (heap->cached_chunks) {
2461 p = heap->cached_chunks;
2462 heap->cached_chunks = p->next;
2463 zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
2464 }
2465 /* free the first chunk */
2466 zend_mm_chunk_free(heap, heap->main_chunk, ZEND_MM_CHUNK_SIZE);
2467 } else {
2468 /* free some cached chunks to keep average count */
2469 heap->avg_chunks_count = (heap->avg_chunks_count + (double)heap->peak_chunks_count) / 2.0;
2470 while ((double)heap->cached_chunks_count + 0.9 > heap->avg_chunks_count &&
2471 heap->cached_chunks) {
2472 p = heap->cached_chunks;
2473 heap->cached_chunks = p->next;
2474 zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
2475 heap->cached_chunks_count--;
2476 }
2477 /* clear cached chunks */
2478 p = heap->cached_chunks;
2479 while (p != NULL) {
2480 zend_mm_chunk *q = p->next;
2481 memset(p, 0, sizeof(zend_mm_chunk));
2482 p->next = q;
2483 p = q;
2484 }
2485
2486 /* reinitialize the first chunk and heap */
2487 p = heap->main_chunk;
2488 p->heap = &p->heap_slot;
2489 p->next = p;
2490 p->prev = p;
2491 p->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
2492 p->free_tail = ZEND_MM_FIRST_PAGE;
2493 p->num = 0;
2494
2495#if ZEND_MM_STAT
2496 heap->size = heap->peak = 0;
2497#endif
2498 memset(heap->free_slot, 0, sizeof(heap->free_slot));
2499#if ZEND_MM_STAT || ZEND_MM_LIMIT
2500 heap->real_size = (heap->cached_chunks_count + 1) * ZEND_MM_CHUNK_SIZE;
2501#endif
2502#if ZEND_MM_STAT
2503 heap->real_peak = (heap->cached_chunks_count + 1) * ZEND_MM_CHUNK_SIZE;
2504#endif
2505 heap->chunks_count = 1;
2506 heap->peak_chunks_count = 1;
2507 heap->last_chunks_delete_boundary = 0;
2508 heap->last_chunks_delete_count = 0;
2509
2510 memset(p->free_map, 0, sizeof(p->free_map) + sizeof(p->map));
2511 p->free_map[0] = (1L << ZEND_MM_FIRST_PAGE) - 1;
2512 p->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
2513
2514 pid_t pid = getpid();
2515 if (heap->pid != pid) {
2516 zend_mm_init_key(heap);
2517 heap->pid = pid;
2518 } else {
2519 zend_mm_refresh_key(heap);
2520 }
2521 }
2522}
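/*
 * Example of the cached-chunk heuristic above: if the request peaked at 5
 * chunks and the previous average was 1.0, the new average becomes 3.0 and
 * chunks are freed while cached_chunks_count + 0.9 > 3.0, i.e. at most two
 * chunks stay cached for the next request.
 */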
2523
2524/**************/
2525/* PUBLIC API */
2526/**************/
2527
2528ZEND_API void* ZEND_FASTCALL _zend_mm_alloc(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2529{
2530 return zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2531}
2532
2533ZEND_API void ZEND_FASTCALL _zend_mm_free(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2534{
2535 zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2536}
2537
2538void *ZEND_FASTCALL _zend_mm_realloc(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2539{
2540 return zend_mm_realloc_heap(heap, ptr, size, 0, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2541}
2542
2543void *ZEND_FASTCALL _zend_mm_realloc2(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2544{
2545 return zend_mm_realloc_heap(heap, ptr, size, 1, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2546}
2547
2548ZEND_API size_t ZEND_FASTCALL _zend_mm_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2549{
2550#if ZEND_MM_CUSTOM
2551 if (UNEXPECTED(heap->use_custom_heap)) {
2552 if (heap->custom_heap._malloc == tracked_malloc) {
2553 zend_ulong h = ((uintptr_t) ptr) >> ZEND_MM_ALIGNMENT_LOG2;
2554 zval *size_zv = zend_hash_index_find(heap->tracked_allocs, h);
2555 if (size_zv) {
2556 return Z_LVAL_P(size_zv);
2557 }
2558 }
2559 return 0;
2560 }
2561#endif
2562 return zend_mm_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2563}
2564
2565/**********************/
2566/* Allocation Manager */
2567/**********************/
2568
2569typedef struct _zend_alloc_globals {
2570 zend_mm_heap *mm_heap;
2571} zend_alloc_globals;
2572
2573#ifdef ZTS
2574static int alloc_globals_id;
2575static size_t alloc_globals_offset;
2576# define AG(v) ZEND_TSRMG_FAST(alloc_globals_offset, zend_alloc_globals *, v)
2577#else
2578# define AG(v) (alloc_globals.v)
2579static zend_alloc_globals alloc_globals;
2580#endif
2581
2582ZEND_API bool is_zend_mm(void)
2583{
2584#if ZEND_MM_CUSTOM
2585 return !AG(mm_heap)->use_custom_heap;
2586#else
2587 return 1;
2588#endif
2589}
2590
2591ZEND_API bool is_zend_ptr(const void *ptr)
2592{
2593#if ZEND_MM_CUSTOM
2594 if (AG(mm_heap)->use_custom_heap) {
2595 if (AG(mm_heap)->custom_heap._malloc == tracked_malloc) {
2596 zend_ulong h = ((uintptr_t) ptr) >> ZEND_MM_ALIGNMENT_LOG2;
2597 zval *size_zv = zend_hash_index_find(AG(mm_heap)->tracked_allocs, h);
2598 if (size_zv) {
2599 return 1;
2600 }
2601 }
2602 return 0;
2603 }
2604#endif
2605
2606 if (AG(mm_heap)->main_chunk) {
2607 zend_mm_chunk *chunk = AG(mm_heap)->main_chunk;
2608
2609 do {
2610 if (ptr >= (void*)chunk
2611 && ptr < (void*)((char*)chunk + ZEND_MM_CHUNK_SIZE)) {
2612 return 1;
2613 }
2614 chunk = chunk->next;
2615 } while (chunk != AG(mm_heap)->main_chunk);
2616 }
2617
2618 zend_mm_huge_list *block = AG(mm_heap)->huge_list;
2619 while (block) {
2620 if (ptr >= block->ptr
2621 && ptr < (void*)((char*)block->ptr + block->size)) {
2622 return 1;
2623 }
2624 block = block->next;
2625 }
2626
2627 return 0;
2628}
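/*
 * is_zend_ptr() is a coarse range check: it returns true when the pointer
 * lies anywhere inside a chunk owned by this heap or inside one of the
 * tracked huge blocks, not only when it is the exact start of a live
 * allocation.
 */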
2629
2630#if !ZEND_DEBUG && defined(HAVE_BUILTIN_CONSTANT_P)
2631#undef _emalloc
2632
2633#if ZEND_MM_CUSTOM
2634# define ZEND_MM_CUSTOM_ALLOCATOR(size) do { \
2635 if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
2636 return AG(mm_heap)->custom_heap._malloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
2637 } \
2638 } while (0)
2639# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr) do { \
2640 if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
2641 AG(mm_heap)->custom_heap._free(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
2642 return; \
2643 } \
2644 } while (0)
2645#else
2646# define ZEND_MM_CUSTOM_ALLOCATOR(size)
2647# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr)
2648#endif
2649
2650# define _ZEND_BIN_ALLOCATOR(_num, _size, _elements, _pages, _min_size, y) \
2651 ZEND_API void* ZEND_FASTCALL _emalloc_ ## _size(void) { \
2652 ZEND_MM_CUSTOM_ALLOCATOR(_size); \
2653 if (_size < _min_size) { \
2654 return _emalloc_ ## _min_size(); \
2655 } \
2656 return zend_mm_alloc_small(AG(mm_heap), _num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
2657 }
2658
2659ZEND_MM_BINS_INFO(_ZEND_BIN_ALLOCATOR, ZEND_MM_MIN_USEABLE_BIN_SIZE, y)
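/*
 * The macro above expands to one tiny entry point per predefined bin, e.g.
 * _emalloc_8(), _emalloc_16(), ... _emalloc_3072(); requests below
 * ZEND_MM_MIN_USEABLE_BIN_SIZE are forwarded to the smallest usable bin.
 * Roughly speaking, these are the size-specialized routines that emalloc()
 * calls are rewritten to when the requested size is a compile-time constant.
 */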
2660
2661ZEND_API void* ZEND_FASTCALL _emalloc_large(size_t size)
2662{
2663 ZEND_MM_CUSTOM_ALLOCATOR(size);
2664 return zend_mm_alloc_large_ex(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2665}
2666
2667ZEND_API void* ZEND_FASTCALL _emalloc_huge(size_t size)
2668{
2669 ZEND_MM_CUSTOM_ALLOCATOR(size);
2670 return zend_mm_alloc_huge(AG(mm_heap), size);
2671}
2672
2673#if ZEND_DEBUG
2674# define _ZEND_BIN_FREE(_num, _size, _elements, _pages, _min_size, y) \
2675 ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
2676 ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
2677 if (_size < _min_size) { \
2678 _efree_ ## _min_size(ptr); \
2679 return; \
2680 } \
2681 { \
2682 size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE); \
2683 zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
2684 int page_num = page_offset / ZEND_MM_PAGE_SIZE; \
2685 ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
2686 ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_SRUN); \
2687 ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(chunk->map[page_num]) == _num); \
2688 zend_mm_free_small(AG(mm_heap), ptr, _num); \
2689 } \
2690 }
2691#else
2692# define _ZEND_BIN_FREE(_num, _size, _elements, _pages, _min_size, y) \
2693 ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
2694 ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
2695 if (_size < _min_size) { \
2696 _efree_ ## _min_size(ptr); \
2697 return; \
2698 } \
2699 { \
2700 zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
2701 ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
2702 zend_mm_free_small(AG(mm_heap), ptr, _num); \
2703 } \
2704 }
2705#endif
2706
2707ZEND_MM_BINS_INFO(_ZEND_BIN_FREE, ZEND_MM_MIN_USEABLE_BIN_SIZE, y)
2708
2709ZEND_API void ZEND_FASTCALL _efree_large(void *ptr, size_t size)
2710{
2711 ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
2712 {
2713 size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
2714 zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
2715 int page_num = page_offset / ZEND_MM_PAGE_SIZE;
2716 uint32_t pages_count = ZEND_MM_SIZE_TO_NUM(size, ZEND_MM_PAGE_SIZE);
2717
2718 ZEND_MM_CHECK(chunk->heap == AG(mm_heap) && ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
2719 ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_LRUN);
2720 ZEND_ASSERT(ZEND_MM_LRUN_PAGES(chunk->map[page_num]) == pages_count);
2721 zend_mm_free_large(AG(mm_heap), chunk, page_num, pages_count);
2722 }
2723}
2724
2725ZEND_API void ZEND_FASTCALL _efree_huge(void *ptr, size_t size)
2726{
2727
2728 ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
2729 zend_mm_free_huge(AG(mm_heap), ptr);
2730}
2731#endif
2732
2733ZEND_API void* ZEND_FASTCALL _emalloc(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2734{
2735#if ZEND_MM_CUSTOM
2736 if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2737 return AG(mm_heap)->custom_heap._malloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
2738 }
2739#endif
2740 return zend_mm_alloc_heap(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2741}
2742
2743ZEND_API void ZEND_FASTCALL _efree(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2744{
2745#if ZEND_MM_CUSTOM
2746 if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2747 AG(mm_heap)->custom_heap._free(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2748 return;
2749 }
2750#endif
2751 zend_mm_free_heap(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2752}
2753
2754ZEND_API void* ZEND_FASTCALL _erealloc(void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2755{
2756#if ZEND_MM_CUSTOM
2757 if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2758 return AG(mm_heap)->custom_heap._realloc(ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2759 }
2760#endif
2761 return zend_mm_realloc_heap(AG(mm_heap), ptr, size, 0, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2762}
2763
2764ZEND_API void* ZEND_FASTCALL _erealloc2(void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2765{
2766#if ZEND_MM_CUSTOM
2767 if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2768 return AG(mm_heap)->custom_heap._realloc(ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2769 }
2770#endif
2771 return zend_mm_realloc_heap(AG(mm_heap), ptr, size, 1, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2772}
2773
2774ZEND_API size_t ZEND_FASTCALL _zend_mem_block_size(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2775{
2776 return _zend_mm_block_size(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2777}
2778
2779ZEND_API void* ZEND_FASTCALL _safe_emalloc(size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2780{
2781 return _emalloc(zend_safe_address_guarded(nmemb, size, offset) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2782}
2783
2784ZEND_API void* ZEND_FASTCALL _safe_malloc(size_t nmemb, size_t size, size_t offset)
2785{
2786 return pemalloc(zend_safe_address_guarded(nmemb, size, offset), 1);
2787}
2788
2789ZEND_API void* ZEND_FASTCALL _safe_erealloc(void *ptr, size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2790{
2791 return _erealloc(ptr, zend_safe_address_guarded(nmemb, size, offset) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2792}
2793
2794ZEND_API void* ZEND_FASTCALL _safe_realloc(void *ptr, size_t nmemb, size_t size, size_t offset)
2795{
2796 return perealloc(ptr, zend_safe_address_guarded(nmemb, size, offset), 1);
2797}
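/*
 * zend_safe_address_guarded(nmemb, size, offset) computes nmemb * size +
 * offset and raises a fatal error if the multiplication or addition would
 * overflow, so e.g. _safe_emalloc(count, element_size, 0) can never silently
 * wrap around to a small allocation.
 */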
2798
2799ZEND_API void* ZEND_FASTCALL _ecalloc(size_t nmemb, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2800{
2801 void *p;
2802
2803 size = zend_safe_address_guarded(nmemb, size, 0);
2805 memset(p, 0, size);
2806 return p;
2807}
2808
2809ZEND_API char* ZEND_FASTCALL _estrdup(const char *s ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2810{
2811 size_t length;
2812 char *p;
2813
2814 length = strlen(s);
2815 if (UNEXPECTED(length + 1 == 0)) {
2816 zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (1 * %zu + 1)", length);
2817 }
2819 memcpy(p, s, length+1);
2820 return p;
2821}
2822
2823ZEND_API char* ZEND_FASTCALL _estrndup(const char *s, size_t length ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2824{
2825 char *p;
2826
2827 if (UNEXPECTED(length + 1 == 0)) {
2828 zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (1 * %zu + 1)", length);
2829 }
2831 memcpy(p, s, length);
2832 p[length] = 0;
2833 return p;
2834}
2835
2836static ZEND_COLD ZEND_NORETURN void zend_out_of_memory(void);
2837
2838ZEND_API char* ZEND_FASTCALL zend_strndup(const char *s, size_t length)
2839{
2840 char *p;
2841
2842 if (UNEXPECTED(length + 1 == 0)) {
2843 zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (1 * %zu + 1)", length);
2844 }
2845 p = (char *) malloc(length + 1);
2846 if (UNEXPECTED(p == NULL)) {
2847 zend_out_of_memory();
2848 }
2849 if (EXPECTED(length)) {
2850 memcpy(p, s, length);
2851 }
2852 p[length] = 0;
2853 return p;
2854}
2855
2856ZEND_API zend_result zend_set_memory_limit(size_t memory_limit)
2857{
2858#if ZEND_MM_LIMIT
2859 zend_mm_heap *heap = AG(mm_heap);
2860
2861 if (UNEXPECTED(memory_limit < heap->real_size)) {
2862 if (memory_limit >= heap->real_size - heap->cached_chunks_count * ZEND_MM_CHUNK_SIZE) {
2863 /* free some cached chunks to fit into new memory limit */
2864 do {
2865 zend_mm_chunk *p = heap->cached_chunks;
2866 heap->cached_chunks = p->next;
2867 zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
2868 heap->cached_chunks_count--;
2869 heap->real_size -= ZEND_MM_CHUNK_SIZE;
2870 } while (memory_limit < heap->real_size);
2871 return SUCCESS;
2872 }
2873 return FAILURE;
2874 }
2875 AG(mm_heap)->limit = memory_limit;
2876#endif
2877 return SUCCESS;
2878}
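/*
 * Lowering the limit below the currently committed size only succeeds when
 * releasing cached (unused) chunks is enough to bring real_size under the new
 * limit; otherwise FAILURE is returned and the previous limit stays in
 * effect.
 */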
2879
2880ZEND_API bool zend_alloc_in_memory_limit_error_reporting(void)
2881{
2882#if ZEND_MM_LIMIT
2883 return AG(mm_heap)->overflow;
2884#else
2885 return false;
2886#endif
2887}
2888
2889ZEND_API size_t zend_memory_usage(bool real_usage)
2890{
2891#if ZEND_MM_STAT
2892 if (real_usage) {
2893 return AG(mm_heap)->real_size;
2894 } else {
2895 size_t usage = AG(mm_heap)->size;
2896 return usage;
2897 }
2898#endif
2899 return 0;
2900}
2901
2902ZEND_API size_t zend_memory_peak_usage(bool real_usage)
2903{
2904#if ZEND_MM_STAT
2905 if (real_usage) {
2906 return AG(mm_heap)->real_peak;
2907 } else {
2908 return AG(mm_heap)->peak;
2909 }
2910#endif
2911 return 0;
2912}
2913
2914ZEND_API void zend_memory_reset_peak_usage(void)
2915{
2916#if ZEND_MM_STAT
2917 AG(mm_heap)->real_peak = AG(mm_heap)->real_size;
2918 AG(mm_heap)->peak = AG(mm_heap)->size;
2919#endif
2920}
2921
2922ZEND_API void shutdown_memory_manager(bool silent, bool full_shutdown)
2923{
2924 zend_mm_shutdown(AG(mm_heap), full_shutdown, silent);
2925}
2926
2927static ZEND_COLD ZEND_NORETURN void zend_out_of_memory(void)
2928{
2929 fprintf(stderr, "Out of memory\n");
2930 exit(1);
2931}
2932
2933#if ZEND_MM_CUSTOM
2934static zend_always_inline void tracked_add(zend_mm_heap *heap, void *ptr, size_t size) {
2935 zval size_zv;
2936 zend_ulong h = ((uintptr_t) ptr) >> ZEND_MM_ALIGNMENT_LOG2;
2937 ZEND_ASSERT((void *) (uintptr_t) (h << ZEND_MM_ALIGNMENT_LOG2) == ptr);
2938 ZVAL_LONG(&size_zv, size);
2939 zend_hash_index_add_new(heap->tracked_allocs, h, &size_zv);
2940}
2941
2942static zend_always_inline zval *tracked_get_size_zv(zend_mm_heap *heap, void *ptr) {
2943 zend_ulong h = ((uintptr_t) ptr) >> ZEND_MM_ALIGNMENT_LOG2;
2944 zval *size_zv = zend_hash_index_find(heap->tracked_allocs, h);
2945 ZEND_ASSERT(size_zv && "Trying to free pointer not allocated through ZendMM");
2946 return size_zv;
2947}
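/*
 * Example of the key derivation used by the tracked allocator (assuming the
 * usual 8-byte alignment, i.e. ZEND_MM_ALIGNMENT_LOG2 == 3): a pointer with
 * the value 0x1008 is stored under hash key 0x1008 >> 3 == 0x201, and the
 * requested size is kept as a zval long in tracked_allocs.
 */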
2948
2949static zend_always_inline void tracked_check_limit(zend_mm_heap *heap, size_t add_size) {
2950 if (add_size > heap->limit - heap->size && !heap->overflow) {
2951#if ZEND_DEBUG
2952 zend_mm_safe_error(heap,
2953 "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)",
2954 heap->limit, "file", 0, add_size);
2955#else
2956 zend_mm_safe_error(heap,
2957 "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)",
2958 heap->limit, add_size);
2959#endif
2960 }
2961}
2962
2963static void *tracked_malloc(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2964{
2965 zend_mm_heap *heap = AG(mm_heap);
2966 tracked_check_limit(heap, size);
2967
2968 void *ptr = malloc(size);
2969 if (!ptr) {
2970 zend_out_of_memory();
2971 }
2972
2973 tracked_add(heap, ptr, size);
2974 heap->size += size;
2975 return ptr;
2976}
2977
2978static void tracked_free(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) {
2979 if (!ptr) {
2980 return;
2981 }
2982
2983 zend_mm_heap *heap = AG(mm_heap);
2984 zval *size_zv = tracked_get_size_zv(heap, ptr);
2985 heap->size -= Z_LVAL_P(size_zv);
2986 zend_hash_del_bucket(heap->tracked_allocs, (Bucket *) size_zv);
2987 free(ptr);
2988}
2989
2990static void *tracked_realloc(void *ptr, size_t new_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) {
2991 zend_mm_heap *heap = AG(mm_heap);
2992 zval *old_size_zv = NULL;
2993 size_t old_size = 0;
2994 if (ptr) {
2995 old_size_zv = tracked_get_size_zv(heap, ptr);
2996 old_size = Z_LVAL_P(old_size_zv);
2997 }
2998
2999 if (new_size > old_size) {
3000 tracked_check_limit(heap, new_size - old_size);
3001 }
3002
3003 /* Delete information about old allocation only after checking the memory limit. */
3004 if (old_size_zv) {
3005 zend_hash_del_bucket(heap->tracked_allocs, (Bucket *) old_size_zv);
3006 }
3007
3008 ptr = __zend_realloc(ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
3009 tracked_add(heap, ptr, new_size);
3010 heap->size += new_size - old_size;
3011 return ptr;
3012}
3013
3014static void tracked_free_all(zend_mm_heap *heap) {
3015 HashTable *tracked_allocs = heap->tracked_allocs;
3016 zend_ulong h;
3017 ZEND_HASH_FOREACH_NUM_KEY(tracked_allocs, h) {
3018 void *ptr = (void *) (uintptr_t) (h << ZEND_MM_ALIGNMENT_LOG2);
3019 free(ptr);
3020 } ZEND_HASH_FOREACH_END();
3021}
3022#endif
3023
3024static void alloc_globals_ctor(zend_alloc_globals *alloc_globals)
3025{
3026 char *tmp;
3027
3028#if ZEND_MM_CUSTOM
3029 tmp = getenv("USE_ZEND_ALLOC");
3030 if (tmp && !ZEND_ATOL(tmp)) {
3031 bool tracked = (tmp = getenv("USE_TRACKED_ALLOC")) && ZEND_ATOL(tmp);
3032 zend_mm_heap *mm_heap = alloc_globals->mm_heap = malloc(sizeof(zend_mm_heap));
3033 memset(mm_heap, 0, sizeof(zend_mm_heap));
3034 mm_heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_STD;
3035 mm_heap->limit = (size_t)Z_L(-1) >> 1;
3036 mm_heap->overflow = 0;
3037
3038 if (!tracked) {
3039 /* Use system allocator. */
3040 mm_heap->custom_heap._malloc = __zend_malloc;
3041 mm_heap->custom_heap._free = __zend_free;
3042 mm_heap->custom_heap._realloc = __zend_realloc;
3043 } else {
3044 /* Use system allocator and track allocations for auto-free. */
3045 mm_heap->custom_heap._malloc = tracked_malloc;
3046 mm_heap->custom_heap._free = tracked_free;
3047 mm_heap->custom_heap._realloc = tracked_realloc;
3048 mm_heap->tracked_allocs = malloc(sizeof(HashTable));
3049 zend_hash_init(mm_heap->tracked_allocs, 1024, NULL, NULL, 1);
3050 }
3051 return;
3052 }
3053#endif
3054
3055 tmp = getenv("USE_ZEND_ALLOC_HUGE_PAGES");
3056 if (tmp && ZEND_ATOL(tmp)) {
3057 zend_mm_use_huge_pages = true;
3058 }
3059 alloc_globals->mm_heap = zend_mm_init();
3060}
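/*
 * Summary of the environment switches handled above: USE_ZEND_ALLOC=0
 * replaces ZendMM with the system allocator (useful e.g. when running PHP
 * under valgrind); adding USE_TRACKED_ALLOC=1 keeps a hash of live
 * allocations so they can still be released automatically at shutdown; and
 * USE_ZEND_ALLOC_HUGE_PAGES=1 asks for huge pages when mapping chunks.
 */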
3061
3062#ifdef ZTS
3063static void alloc_globals_dtor(zend_alloc_globals *alloc_globals)
3064{
3065 zend_mm_shutdown(alloc_globals->mm_heap, 1, 1);
3066}
3067#endif
3068
3069ZEND_API void start_memory_manager(void)
3070{
3071#ifndef _WIN32
3072# if defined(_SC_PAGESIZE)
3073 REAL_PAGE_SIZE = sysconf(_SC_PAGESIZE);
3074# elif defined(_SC_PAGE_SIZE)
3075 REAL_PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
3076# endif
3077#endif
3078#ifdef ZTS
3079 ts_allocate_fast_id(&alloc_globals_id, &alloc_globals_offset, sizeof(zend_alloc_globals), (ts_allocate_ctor) alloc_globals_ctor, (ts_allocate_dtor) alloc_globals_dtor);
3080#else
3081 alloc_globals_ctor(&alloc_globals);
3082#endif
3083}
3084
3085ZEND_API zend_mm_heap *zend_mm_set_heap(zend_mm_heap *new_heap)
3086{
3087 zend_mm_heap *old_heap;
3088
3089 old_heap = AG(mm_heap);
3090 AG(mm_heap) = (zend_mm_heap*)new_heap;
3091 return (zend_mm_heap*)old_heap;
3092}
3093
3094ZEND_API zend_mm_heap *zend_mm_get_heap(void)
3095{
3096 return AG(mm_heap);
3097}
3098
3099ZEND_API bool zend_mm_is_custom_heap(zend_mm_heap *new_heap)
3100{
3101#if ZEND_MM_CUSTOM
3102 return AG(mm_heap)->use_custom_heap;
3103#else
3104 return 0;
3105#endif
3106}
3107
3108ZEND_API void zend_mm_set_custom_handlers(zend_mm_heap *heap,
3109 void* (*_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
3110 void (*_free)(void* ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
3111 void* (*_realloc)(void*, size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC))
3112{
3113#if ZEND_MM_CUSTOM
3114 zend_mm_set_custom_handlers_ex(heap, _malloc, _free, _realloc, NULL, NULL);
3115#endif
3116}
3117
3118ZEND_API void zend_mm_set_custom_handlers_ex(zend_mm_heap *heap,
3119 void* (*_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
3120 void (*_free)(void* ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
3121 void* (*_realloc)(void*, size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
3122 size_t (*_gc)(void),
3123 void (*_shutdown)(bool, bool))
3124{
3125#if ZEND_MM_CUSTOM
3126 zend_mm_heap *_heap = (zend_mm_heap*)heap;
3127
3128 if (!_malloc && !_free && !_realloc) {
3129 _heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_NONE;
3130 } else {
3131 _heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_STD;
3132 _heap->custom_heap._malloc = _malloc;
3133 _heap->custom_heap._free = _free;
3134 _heap->custom_heap._realloc = _realloc;
3135 _heap->custom_heap._gc = _gc;
3136 _heap->custom_heap._shutdown = _shutdown;
3137 }
3138#endif
3139}
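/*
 * A minimal sketch of installing custom handlers from an extension (the
 * my_* functions are hypothetical wrappers that must match the prototypes
 * above):
 *
 *   zend_mm_heap *heap = zend_mm_get_heap();
 *   zend_mm_set_custom_handlers_ex(heap, my_malloc, my_free, my_realloc,
 *                                  my_gc, my_shutdown);
 *
 * Passing NULL for _malloc, _free and _realloc switches the heap back to the
 * standard allocator.
 */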
3140
3141ZEND_API void zend_mm_get_custom_handlers(zend_mm_heap *heap,
3142 void* (**_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
3143 void (**_free)(void* ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
3144 void* (**_realloc)(void*, size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC))
3145{
3146#if ZEND_MM_CUSTOM
3147 zend_mm_get_custom_handlers_ex(heap, _malloc, _free, _realloc, NULL, NULL);
3148#endif
3149}
3150
3151ZEND_API void zend_mm_get_custom_handlers_ex(zend_mm_heap *heap,
3152 void* (**_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
3153 void (**_free)(void* ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
3154 void* (**_realloc)(void*, size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
3155 size_t (**_gc)(void),
3156 void (**_shutdown)(bool, bool))
3157{
3158#if ZEND_MM_CUSTOM
3159 zend_mm_heap *_heap = (zend_mm_heap*)heap;
3160
3161 if (heap->use_custom_heap) {
3162 *_malloc = _heap->custom_heap._malloc;
3163 *_free = _heap->custom_heap._free;
3164 *_realloc = _heap->custom_heap._realloc;
3165 if (_gc != NULL) {
3166 *_gc = _heap->custom_heap._gc;
3167 }
3168 if (_shutdown != NULL) {
3169 *_shutdown = _heap->custom_heap._shutdown;
3170 }
3171 } else {
3172 *_malloc = NULL;
3173 *_free = NULL;
3174 *_realloc = NULL;
3175 if (_gc != NULL) {
3176 *_gc = NULL;
3177 }
3178 if (_shutdown != NULL) {
3179 *_shutdown = NULL;
3180 }
3181 }
3182#else
3183 *_malloc = NULL;
3184 *_free = NULL;
3185 *_realloc = NULL;
3186 *_gc = NULL;
3187 *_shutdown = NULL;
3188#endif
3189}
3190
3191ZEND_API zend_mm_storage *zend_mm_get_storage(zend_mm_heap *heap)
3192{
3193#if ZEND_MM_STORAGE
3194 return heap->storage;
3195#else
3196 return NULL;
3197#endif
3198}
3199
3200ZEND_API zend_mm_heap *zend_mm_startup(void)
3201{
3202 return zend_mm_init();
3203}
3204
3205ZEND_API zend_mm_heap *zend_mm_startup_ex(const zend_mm_handlers *handlers, void *data, size_t data_size)
3206{
3207#if ZEND_MM_STORAGE
3208 zend_mm_storage tmp_storage, *storage;
3209 zend_mm_chunk *chunk;
3210 zend_mm_heap *heap;
3211
3212 memcpy((zend_mm_handlers*)&tmp_storage.handlers, handlers, sizeof(zend_mm_handlers));
3213 tmp_storage.data = data;
3214 chunk = (zend_mm_chunk*)handlers->chunk_alloc(&tmp_storage, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
3215 if (UNEXPECTED(chunk == NULL)) {
3216#if ZEND_MM_ERROR
3217 fprintf(stderr, "Can't initialize heap\n");
3218#endif
3219 return NULL;
3220 }
3221 heap = &chunk->heap_slot;
3222 chunk->heap = heap;
3223 chunk->next = chunk;
3224 chunk->prev = chunk;
3225 chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
3226 chunk->free_tail = ZEND_MM_FIRST_PAGE;
3227 chunk->num = 0;
3228 chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
3229 chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
3230 heap->main_chunk = chunk;
3231 heap->cached_chunks = NULL;
3232 heap->chunks_count = 1;
3233 heap->peak_chunks_count = 1;
3234 heap->cached_chunks_count = 0;
3235 heap->avg_chunks_count = 1.0;
3236 heap->last_chunks_delete_boundary = 0;
3237 heap->last_chunks_delete_count = 0;
3238#if ZEND_MM_STAT || ZEND_MM_LIMIT
3239 heap->real_size = 0;
3240#endif
3241#if ZEND_MM_STAT
3242 heap->real_peak = 0;
3243 heap->size = 0;
3244 heap->peak = 0;
3245#endif
3246 zend_mm_init_key(heap);
3247#if ZEND_MM_LIMIT
3248 heap->limit = (size_t)Z_L(-1) >> 1;
3249 heap->overflow = 0;
3250#endif
3251#if ZEND_MM_CUSTOM
3252 heap->use_custom_heap = 0;
3253#endif
3254 heap->storage = &tmp_storage;
3255 heap->huge_list = NULL;
3256 memset(heap->free_slot, 0, sizeof(heap->free_slot));
3257 storage = _zend_mm_alloc(heap, sizeof(zend_mm_storage) + data_size ZEND_FILE_LINE_CC ZEND_FILE_LINE_CC);
3258 if (!storage) {
3259 handlers->chunk_free(&tmp_storage, chunk, ZEND_MM_CHUNK_SIZE);
3260#if ZEND_MM_ERROR
3261 fprintf(stderr, "Can't initialize heap\n");
3262#endif
3263 return NULL;
3264 }
3265 memcpy(storage, &tmp_storage, sizeof(zend_mm_storage));
3266 if (data) {
3267 storage->data = (void*)(((char*)storage + sizeof(zend_mm_storage)));
3268 memcpy(storage->data, data, data_size);
3269 }
3270 heap->storage = storage;
3271 heap->pid = getpid();
3272 return heap;
3273#else
3274 return NULL;
3275#endif
3276}
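/*
 * zend_mm_startup_ex() builds a heap whose chunks come from caller-supplied
 * handlers. The zend_mm_storage descriptor (handlers plus optional user data)
 * is first kept on the stack, then copied into memory allocated from the new
 * heap itself, so it stays valid after this function returns.
 */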
3277
3278ZEND_API void * __zend_malloc(size_t len ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
3279{
3280 void *tmp = malloc(len);
3281 if (EXPECTED(tmp || !len)) {
3282 return tmp;
3283 }
3284 zend_out_of_memory();
3285}
3286
3287ZEND_API void * __zend_calloc(size_t nmemb, size_t len ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
3288{
3289 void *tmp;
3290
3291 len = zend_safe_address_guarded(nmemb, len, 0);
3293 memset(tmp, 0, len);
3294 return tmp;
3295}
3296
3297ZEND_API void * __zend_realloc(void *p, size_t len ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
3298{
3299 p = realloc(p, len);
3300 if (EXPECTED(p || !len)) {
3301 return p;
3302 }
3303 zend_out_of_memory();
3304}
3305
3306ZEND_API void __zend_free(void *p ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
3307{
3308 free(p);
3309 return;
3310}
3311
3312ZEND_API char * __zend_strdup(const char *s)
3313{
3314 char *tmp = strdup(s);
3315 if (EXPECTED(tmp)) {
3316 return tmp;
3317 }
3318 zend_out_of_memory();
3319}
3320
3321#ifdef ZTS
3322size_t zend_mm_globals_size(void)
3323{
3324 return sizeof(zend_alloc_globals);
3325}
3326#endif