/*
 * Copyright (c) 1999-2023 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#ifndef _MALLOC_MALLOC_H_
#define _MALLOC_MALLOC_H_

#include <TargetConditionals.h>
#include <malloc/_platform.h>
#include <Availability.h>
#include <os/availability.h>

#include <malloc/_ptrcheck.h>
__ptrcheck_abi_assume_single()

#if __has_feature(ptrauth_calls)
#include <ptrauth.h>

// Zone function pointer, type-diversified but not address-diversified (because
// the zone can be copied). Process-independent because the zone structure may
// be in the shared library cache.
#define MALLOC_ZONE_FN_PTR(fn) __ptrauth(ptrauth_key_process_independent_code, \
		0, ptrauth_string_discriminator("malloc_zone_fn." #fn)) fn

// Introspection function pointer, address- and type-diversified.
// Process-independent because the malloc_introspection_t structure that contains
// these pointers may be in the shared library cache.
#define MALLOC_INTROSPECT_FN_PTR(fn) __ptrauth(ptrauth_key_process_independent_code, \
		1, ptrauth_string_discriminator("malloc_introspect_fn." #fn)) fn

// Pointer to the introspection pointer table, type-diversified but not
// address-diversified (because the zone can be copied).
// Process-independent because the table pointer may be in the shared library cache.
#define MALLOC_INTROSPECT_TBL_PTR(ptr) __ptrauth(ptrauth_key_process_independent_data,\
		0, ptrauth_string_discriminator("malloc_introspect_tbl")) ptr

#endif	// __has_feature(ptrauth_calls)

#ifndef MALLOC_ZONE_FN_PTR
#define MALLOC_ZONE_FN_PTR(fn) fn
#define MALLOC_INTROSPECT_FN_PTR(fn) fn
#define MALLOC_INTROSPECT_TBL_PTR(ptr) ptr
#endif // MALLOC_ZONE_FN_PTR
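
// Note (illustrative, not from the original header): fields declared through
// these macros are assigned like ordinary function pointers; when pointer
// authentication is enabled, the compiler re-signs the stored value with the
// declared key and discriminator automatically. For example:
//
//	zone->free = my_free;	// re-signed to the "malloc_zone_fn.free" schema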

__BEGIN_DECLS

/*********  Typed zone functions        ************/

#if defined(__has_attribute) && __has_attribute(swift_name)
#define MALLOC_SWIFT_NAME(x) __attribute__((swift_name(#x)))
#else
#define MALLOC_SWIFT_NAME(x)
#endif // defined(__has_attribute) && __has_attribute(swift_name)

/*!
 * @constant MALLOC_ZONE_MALLOC_DEFAULT_ALIGN
 * Default alignment for malloc_type_zone_malloc_with_options
 */
#define MALLOC_ZONE_MALLOC_DEFAULT_ALIGN __SIZEOF_POINTER__

/*!
 * @enum malloc_zone_malloc_options_t
 *
 * @constant MALLOC_ZONE_MALLOC_OPTION_NONE
 * Empty placeholder option.
 *
 * @constant MALLOC_ZONE_MALLOC_OPTION_CLEAR
 * Zero out the allocated memory, similar to calloc().
 *
 * @constant MALLOC_ZONE_MALLOC_OPTION_CANONICAL_TAG
 * Under MTE, use a tag of zero (canonical) instead of a random value.
 */
typedef enum __enum_options : uint64_t {
	MALLOC_ZONE_MALLOC_OPTION_NONE = 0u,
	MALLOC_ZONE_MALLOC_OPTION_CLEAR MALLOC_SWIFT_NAME(clear) = 1u << 0,
	MALLOC_ZONE_MALLOC_OPTION_CANONICAL_TAG MALLOC_SWIFT_NAME(canonicalTag) = 1u << 1,
} malloc_zone_malloc_options_t;

/*!
 * @function malloc_type_zone_malloc_with_options
 *
 * Like the other functions declared in malloc/_malloc_type.h, this function
 * is not intended to be called directly, but is rather the rewrite target for
 * calls to malloc_zone_malloc_with_options when typed memory operations are
 * enabled.
 */
#if defined(__LP64__)
__API_AVAILABLE(macos(26.0), ios(26.0), tvos(26.0), watchos(26.0), visionos(26.0), driverkit(25.0))
void * __sized_by_or_null(size) malloc_type_zone_malloc_with_options(malloc_zone_t *zone, size_t alignment, size_t size, malloc_type_id_t type_id, malloc_zone_malloc_options_t opts) __result_use_check __alloc_align(2) __alloc_size(3);
#endif /* __LP64__ */

#if defined(_MALLOC_TYPE_MALLOC_IS_BACKDEPLOYING) && _MALLOC_TYPE_MALLOC_IS_BACKDEPLOYING
static void * __sized_by_or_null(size) __attribute__((always_inline)) malloc_type_zone_malloc_with_options_backdeploy(malloc_zone_t *zone, size_t alignment, size_t size, malloc_type_id_t type_id, malloc_zone_malloc_options_t opts) __result_use_check __alloc_align(2) __alloc_size(3);
#endif /* defined(_MALLOC_TYPE_MALLOC_IS_BACKDEPLOYING) && _MALLOC_TYPE_MALLOC_IS_BACKDEPLOYING */

// The remainder of these functions are declared in malloc/_malloc_type.h, and
// the backdeployment variant definitions are at the bottom of this file.

/*********	Type definitions	************/

/*
 * Only zone implementors should depend on the layout of this structure;
 * regular callers should use the access functions below.
 */
typedef struct _malloc_zone_t {
	void *reserved1;	/* RESERVED FOR CFAllocator DO NOT USE */
	void *reserved2;	/* RESERVED FOR CFAllocator DO NOT USE */

	/*
	 * Returns the size of a block or 0 if not in this zone; must be fast,
	 * especially for negative answers.
	 */
	size_t (* MALLOC_ZONE_FN_PTR(size))(struct _malloc_zone_t *zone,
			const void * __unsafe_indexable ptr);

	void * __sized_by_or_null(size) (* MALLOC_ZONE_FN_PTR(malloc))(
			struct _malloc_zone_t *zone, size_t size);

	/* Same as malloc, but block returned is set to zero */
	void * __sized_by_or_null(num_items * size) (* MALLOC_ZONE_FN_PTR(calloc))(
			struct _malloc_zone_t *zone, size_t num_items, size_t size);

	/* Same as malloc, but block returned is guaranteed to be page-aligned */
	void * __sized_by_or_null(size) (* MALLOC_ZONE_FN_PTR(valloc))(
			struct _malloc_zone_t *zone, size_t size);

	void (* MALLOC_ZONE_FN_PTR(free))(struct _malloc_zone_t *zone,
			void * __unsafe_indexable ptr);

	void * __sized_by_or_null(size) (* MALLOC_ZONE_FN_PTR(realloc))(
			struct _malloc_zone_t *zone, void * __unsafe_indexable ptr,
			size_t size);

	/* Zone is destroyed and all memory reclaimed */
	void (* MALLOC_ZONE_FN_PTR(destroy))(struct _malloc_zone_t *zone);

	const char * __null_terminated zone_name;

	/* Optional batch callbacks; these may be NULL */

	/*
	 * Given a size, returns pointers capable of holding that size; returns the
	 * number of pointers allocated (maybe 0 or less than num_requested)
	 */
	unsigned (* MALLOC_ZONE_FN_PTR(batch_malloc))(struct _malloc_zone_t *zone,
			size_t size,
			void * __unsafe_indexable * __counted_by(num_requested) results,
			unsigned num_requested);

	/*
	 * Frees all the pointers in to_be_freed; note that to_be_freed may be
	 * overwritten during the process
	 */
	void (* MALLOC_ZONE_FN_PTR(batch_free))(struct _malloc_zone_t *zone,
			void * __unsafe_indexable * __counted_by(num_to_be_freed) to_be_freed,
			unsigned num_to_be_freed);

	struct malloc_introspection_t * MALLOC_INTROSPECT_TBL_PTR(introspect);
	unsigned version;

	/* Aligned memory allocation. May be NULL. Present in version >= 5. */
	void * __sized_by_or_null(size) (* MALLOC_ZONE_FN_PTR(memalign))(
			struct _malloc_zone_t *zone, size_t alignment, size_t size);

	/*
	 * Free a pointer known to be in zone and known to have the given size.
	 * May be NULL. Present in version >= 6.
	 */
	void (* MALLOC_ZONE_FN_PTR(free_definite_size))(struct _malloc_zone_t *zone,
			void * __sized_by(size) ptr, size_t size);

	/*
	 * Empty out caches in the face of memory pressure. May be NULL.
	 * Present in version >= 8.
	 */
	size_t (* MALLOC_ZONE_FN_PTR(pressure_relief))(struct _malloc_zone_t *zone,
			size_t goal);

	/*
	 * Checks whether an address might belong to the zone. May be NULL. Present
	 * in version >= 10. False positives are allowed (e.g. the pointer was
	 * freed, or it's in zone space that has not yet been allocated). False
	 * negatives are not allowed.
	 */
	boolean_t (* MALLOC_ZONE_FN_PTR(claimed_address))(
			struct _malloc_zone_t *zone, void * __unsafe_indexable ptr);

	/*
	 * For libmalloc-internal zone 0 implementations only: try to free ptr,
	 * promising to call find_zone_and_free if it turns out not to belong to us.
	 * May be present in version >= 13.
	 */
	void (* MALLOC_ZONE_FN_PTR(try_free_default))(struct _malloc_zone_t *zone,
			void * __unsafe_indexable ptr);

	/*
	 * Memory allocation with an extensible binary flags option.
	 * Added in version >= 15.
	 */
	void * __sized_by_or_null(size) (* MALLOC_ZONE_FN_PTR(malloc_with_options))(
			struct _malloc_zone_t *zone, size_t align, size_t size,
			uint64_t options);

	/*
	 * Typed Memory Operations versions of zone functions. Present in
	 * version >= 16.
	 */

	void * __sized_by_or_null(size) (* MALLOC_ZONE_FN_PTR(malloc_type_malloc))(
			struct _malloc_zone_t *zone, size_t size, malloc_type_id_t type_id);

	void * __sized_by_or_null(count * size) (* MALLOC_ZONE_FN_PTR(malloc_type_calloc))(
			struct _malloc_zone_t *zone, size_t count, size_t size,
			malloc_type_id_t type_id);

	void * __sized_by_or_null(size) (* MALLOC_ZONE_FN_PTR(malloc_type_realloc))(
			struct _malloc_zone_t *zone, void * __unsafe_indexable ptr,
			size_t size, malloc_type_id_t type_id);

	void * __sized_by_or_null(size) (* MALLOC_ZONE_FN_PTR(malloc_type_memalign))(
			struct _malloc_zone_t *zone, size_t alignment, size_t size,
			malloc_type_id_t type_id);

	void * __sized_by_or_null(size) (* MALLOC_ZONE_FN_PTR(malloc_type_malloc_with_options))(
			struct _malloc_zone_t *zone, size_t align, size_t size,
			uint64_t options, malloc_type_id_t type_id);
} malloc_zone_t;
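
// Illustrative sketch (not part of this header's API surface): a minimal
// custom zone fills in the version-0 entry points, leaves the optional batch
// callbacks NULL, and registers itself with malloc_zone_register() (declared
// below). The my_* callbacks and my_introspect table are hypothetical.
//
//	static malloc_zone_t my_zone;	// zero-filled: optional callbacks stay NULL
//	my_zone.size = my_size;
//	my_zone.malloc = my_malloc;
//	my_zone.calloc = my_calloc;
//	my_zone.valloc = my_valloc;
//	my_zone.free = my_free;
//	my_zone.realloc = my_realloc;
//	my_zone.destroy = my_destroy;
//	my_zone.introspect = &my_introspect;
//	my_zone.version = 0;	// no members beyond the version-0 layout
//	malloc_set_zone_name(&my_zone, "my_zone");
//	malloc_zone_register(&my_zone);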

/*!
 * @enum malloc_type_callsite_flags_v0_t
 *
 * Information about where and how malloc was called
 *
 * @constant MALLOC_TYPE_CALLSITE_FLAGS_V0_FIXED_SIZE
 * Set in malloc_type_summary_v0_t if malloc was called with a fixed size.
 * Note that, at present, this bit is set at all callsites where the compiler
 * rewrites a call to malloc
 *
 * @constant MALLOC_TYPE_CALLSITE_FLAGS_V0_ARRAY
 * Set in malloc_type_summary_v0_t if the type being allocated is an array, e.g.
 * allocated via new[] or calloc(count, size)
 */
typedef enum {
	MALLOC_TYPE_CALLSITE_FLAGS_V0_NONE = 0,
	MALLOC_TYPE_CALLSITE_FLAGS_V0_FIXED_SIZE = 1 << 0,
	MALLOC_TYPE_CALLSITE_FLAGS_V0_ARRAY = 1 << 1,
} malloc_type_callsite_flags_v0_t;

/*!
 * @enum malloc_type_kind_v0_t
 *
 * @constant MALLOC_TYPE_KIND_V0_OTHER
 * Default allocation type, used for most calls to malloc
 *
 * @constant MALLOC_TYPE_KIND_V0_OBJC
 * Marks a type allocated by libobjc
 *
 * @constant MALLOC_TYPE_KIND_V0_SWIFT
 * Marks a type allocated by the Swift runtime
 *
 * @constant MALLOC_TYPE_KIND_V0_CXX
 * Marks a type allocated by the C++ runtime's operator new
 */
typedef enum {
	MALLOC_TYPE_KIND_V0_OTHER = 0,
	MALLOC_TYPE_KIND_V0_OBJC = 1,
	MALLOC_TYPE_KIND_V0_SWIFT = 2,
	MALLOC_TYPE_KIND_V0_CXX = 3
} malloc_type_kind_v0_t;

/*!
 * @struct malloc_type_layout_semantics_v0_t
 *
 * @field contains_data_pointer
 * True if the allocated type or any of its fields is a pointer
 * to a data type (i.e. the pointee contains no pointers)
 *
 * @field contains_struct_pointer
 * True if the allocated type or any of its fields is a pointer
 * to a struct or union
 *
 * @field contains_immutable_pointer
 * True if the allocated type or any of its fields is a const pointer
 *
 * @field contains_anonymous_pointer
 * True if the allocated type or any of its fields is a pointer
 * to something other than a struct or data type
 *
 * @field is_reference_counted
 * True if the allocated type is reference counted
 *
 * @field contains_generic_data
 * True if the allocated type or any of its fields is not a pointer
 */
typedef struct {
	bool contains_data_pointer : 1;
	bool contains_struct_pointer : 1;
	bool contains_immutable_pointer : 1;
	bool contains_anonymous_pointer : 1;
	bool is_reference_counted : 1;
	uint16_t reserved_0 : 3;
	bool contains_generic_data : 1;
	uint16_t reserved_1 : 7;
} malloc_type_layout_semantics_v0_t;

/*!
 * @struct malloc_type_summary_v0_t
 *
 * @field version
 * Versioning field of the type summary. Set to 0 for the current version. New
 * fields can be added where the reserved fields currently are without
 * incrementing the version, as long as they are non-breaking.
 *
 * @field callsite_flags
 * Details from the callsite of malloc inferred by the compiler
 *
 * @field type_kind
 * Details about the runtime making the allocation
 *
 * @field layout_semantics
 * Details about what kinds of data are contained in the type being allocated
 *
 * @discussion
 * The reserved fields should not be read from or written to, and may be
 * used for additional fields and information in future versions
 */
typedef struct {
	uint32_t version : 4;
	uint32_t reserved_0 : 2;
	malloc_type_callsite_flags_v0_t callsite_flags : 4;
	malloc_type_kind_v0_t type_kind : 2;
	uint32_t reserved_1 : 4;
	malloc_type_layout_semantics_v0_t layout_semantics;
} malloc_type_summary_v0_t;

/*!
 * @union malloc_type_descriptor_v0_t
 *
 * @field hash
 * Hash of the type layout of the allocated type, or, if type inference failed,
 * the hash of the callsite's file, line and column. The hash allows the
 * allocator to disambiguate between different types with the same summary, e.g.
 * types that have the same fields in different orders.
 *
 * @field summary
 * Details of the type being allocated
 *
 * @field type_id
 * Opaque type used for punning
 *
 * @discussion
 * Use malloc_type_descriptor_v0_t to decode the opaque malloc_type_id_t with
 * version == 0 into a malloc_type_summary_v0_t:
 *
 * <code>
 * malloc_type_descriptor_v0_t desc = (malloc_type_descriptor_v0_t){ .type_id = id };
 * </code>
 *
 * See LLVM documentation for more details
 */
typedef union {
	struct {
		uint32_t hash;
		malloc_type_summary_v0_t summary;
	};
	malloc_type_id_t type_id;
} malloc_type_descriptor_v0_t;
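
// Extending the @discussion above (illustrative; `id` stands in for a
// malloc_type_id_t received by a zone's malloc_type_* entry point):
//
//	malloc_type_descriptor_v0_t desc = (malloc_type_descriptor_v0_t){ .type_id = id };
//	if (desc.summary.version == 0 &&
//			desc.summary.layout_semantics.is_reference_counted) {
//		// e.g. segregate reference-counted types from generic data
//	}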

/*********	Creation and destruction	************/

extern malloc_zone_t *malloc_default_zone(void);
	/* The initial zone */

#if !TARGET_OS_EXCLAVECORE && !TARGET_OS_EXCLAVEKIT
extern malloc_zone_t *malloc_create_zone(vm_size_t start_size, unsigned flags);
	/* Creates a new zone with default behavior and registers it */

extern void malloc_destroy_zone(malloc_zone_t *zone);
	/* Destroys zone and everything it allocated */
#endif
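
// Illustrative round trip over the zone lifecycle (assumes a platform where
// malloc_create_zone/malloc_destroy_zone are available):
//
//	malloc_zone_t *zone = malloc_create_zone(0 /* start_size */, 0 /* flags */);
//	void *p = malloc_zone_malloc(zone, 128);
//	malloc_zone_free(zone, p);
//	malloc_destroy_zone(zone);	// reclaims everything the zone allocated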

/*********	Block creation and manipulation	************/

extern void * __sized_by_or_null(size) malloc_zone_malloc(malloc_zone_t *zone, size_t size) __alloc_size(2) _MALLOC_TYPED(malloc_type_zone_malloc, 2);
	/* Allocates a new pointer of size size; zone must be non-NULL */

/*!
 * @function malloc_zone_malloc_with_options
 *
 * @param zone
 * The malloc zone that should be used to serve the allocation. This parameter
 * may be NULL, in which case the default zone will be used.
 *
 * @param align
 * The minimum alignment of the requested allocation. This parameter must be
 * non-zero: either MALLOC_ZONE_MALLOC_DEFAULT_ALIGN to request default
 * alignment, or a power of 2 greater than sizeof(void *).
 *
 * @param size
 * The size, in bytes, of the requested allocation, which must be an integral
 * multiple of align. This requirement is relaxed slightly on OS versions
 * strictly newer than 26.0, where a non-multiple size is permitted if and only
 * if align is MALLOC_ZONE_MALLOC_DEFAULT_ALIGN. OS version 26.0 does not
 * implement this exception.
 *
 * @param options
 * A bitmask of options defining how the memory should be allocated. See the
 * available bit values in the malloc_zone_malloc_options_t enum definition.
 *
 * @result
 * A pointer to the newly allocated block of memory, or NULL if the allocation
 * failed.
 *
 * @discussion
 * This API does not use errno to signal information about the reason for its
 * success or failure, and makes no guarantees about preserving or setting its
 * value in any case.
 */
__API_AVAILABLE(macos(26.0), ios(26.0), tvos(26.0), watchos(26.0), visionos(26.0), driverkit(25.0))
extern void * __sized_by_or_null(size) malloc_zone_malloc_with_options(malloc_zone_t *zone, size_t align, size_t size, malloc_zone_malloc_options_t opts) __alloc_align(2) __alloc_size(3) _MALLOC_TYPED(malloc_type_zone_malloc_with_options, 3);
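
// Illustrative call obeying the constraints documented above: default
// alignment, zeroed memory, served from the default zone.
//
//	void *buf = malloc_zone_malloc_with_options(NULL /* default zone */,
//			MALLOC_ZONE_MALLOC_DEFAULT_ALIGN, 256,
//			MALLOC_ZONE_MALLOC_OPTION_CLEAR);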

extern void * __sized_by_or_null(num_items * size) malloc_zone_calloc(malloc_zone_t *zone, size_t num_items, size_t size) __alloc_size(2,3) _MALLOC_TYPED(malloc_type_zone_calloc, 3);
	/* Allocates a new pointer of size num_items * size; block is cleared; zone must be non-NULL */

extern void * __sized_by_or_null(size) malloc_zone_valloc(malloc_zone_t *zone, size_t size) __alloc_size(2) _MALLOC_TYPED(malloc_type_zone_valloc, 2);
	/* Allocates a new pointer of size size; zone must be non-NULL; Pointer is guaranteed to be page-aligned and block is cleared */

extern void malloc_zone_free(malloc_zone_t *zone, void * __unsafe_indexable ptr);
	/* Frees pointer in zone; zone must be non-NULL */

extern void * __sized_by_or_null(size) malloc_zone_realloc(malloc_zone_t *zone, void * __unsafe_indexable ptr, size_t size) __alloc_size(3) _MALLOC_TYPED(malloc_type_zone_realloc, 3);
	/* Enlarges block if necessary; zone must be non-NULL */

extern malloc_zone_t *malloc_zone_from_ptr(const void * __unsafe_indexable ptr);
	/* Returns the zone for a pointer, or NULL if not in any zone.
	The ptr must have been returned from a malloc or realloc call. */

extern size_t malloc_size(const void * __unsafe_indexable ptr);
	/* Returns size of given ptr, including any padding inserted by the allocator */

extern size_t malloc_good_size(size_t size);
	/* Returns number of bytes greater than or equal to size that can be allocated without padding */

extern void * __sized_by_or_null(size) malloc_zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) __alloc_align(2) __alloc_size(3) _MALLOC_TYPED(malloc_type_zone_memalign, 3) __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_0);
	/*
	 * Allocates a new pointer of size size whose address is an exact multiple of alignment.
	 * alignment must be a power of two and at least as large as sizeof(void *).
	 * zone must be non-NULL.
	 */
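
// Illustrative sequence over the block APIs above, using the default zone:
//
//	malloc_zone_t *zone = malloc_default_zone();
//	char *p = malloc_zone_calloc(zone, 4, 64);	// 256 zeroed bytes
//	p = malloc_zone_realloc(zone, p, 512);		// may move the block
//	size_t usable = malloc_size(p);			// >= 512, padding included
//	malloc_zone_free(malloc_zone_from_ptr(p), p);	// free in the owning zone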

/*********	Batch methods	************/

#if !TARGET_OS_EXCLAVECORE && !TARGET_OS_EXCLAVEKIT
extern unsigned malloc_zone_batch_malloc(malloc_zone_t *zone, size_t size, void * __unsafe_indexable * __counted_by(num_requested) results, unsigned num_requested);
	/* Allocates num blocks of the same size; Returns the number truly allocated (may be 0) */

extern void malloc_zone_batch_free(malloc_zone_t *zone, void * __unsafe_indexable * __counted_by(num) to_be_freed, unsigned num);
	/* Frees all the pointers in to_be_freed; note that to_be_freed may be overwritten during the process; This function will always free even if the zone has no batch callback */
#endif
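
// Illustrative batch usage: the zone may satisfy fewer than num_requested
// allocations, so only the first `got` entries are valid.
//
//	void *ptrs[16];
//	unsigned got = malloc_zone_batch_malloc(malloc_default_zone(), 48, ptrs, 16);
//	/* ... use ptrs[0 .. got-1] ... */
//	malloc_zone_batch_free(malloc_default_zone(), ptrs, got);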

/*********	Functions for libcache	************/

#if !TARGET_OS_EXCLAVECORE && !TARGET_OS_EXCLAVEKIT
extern malloc_zone_t *malloc_default_purgeable_zone(void) __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_0);
	/* Returns a pointer to the default purgeable_zone. */

extern void malloc_make_purgeable(void * __unsafe_indexable ptr) __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_0);
	/* Make an allocation from the purgeable zone purgeable if possible. */

extern int malloc_make_nonpurgeable(void * __unsafe_indexable ptr) __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_0);
	/* Makes an allocation from the purgeable zone nonpurgeable.
	 * Returns zero if the contents were not purged since the last
	 * call to malloc_make_purgeable, else returns non-zero. */
#endif
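
// Illustrative purgeable-memory protocol: mark a cache block purgeable while
// idle, then check whether it survived before reusing it.
//
//	malloc_zone_t *pz = malloc_default_purgeable_zone();
//	void *cache = malloc_zone_malloc(pz, 1 << 20);
//	malloc_make_purgeable(cache);		// contents may now be discarded
//	if (malloc_make_nonpurgeable(cache) != 0) {
//		/* contents were purged; regenerate before use */
//	}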

/*********	Functions for zone implementors	************/

#if !TARGET_OS_EXCLAVECORE && !TARGET_OS_EXCLAVEKIT
extern void malloc_zone_register(malloc_zone_t *zone);
	/* Registers a custom malloc zone; Should typically be called after a
	 * malloc_zone_t has been filled in with custom methods by a client.  See
	 * malloc_create_zone for creating additional malloc zones with the
	 * default allocation and free behavior. */

extern void malloc_zone_unregister(malloc_zone_t *zone);
	/* De-registers a zone;
	Should typically be called before calling the zone destruction routine */
#endif

extern void malloc_set_zone_name(malloc_zone_t *zone, const char * __null_terminated name);
	/* Sets the name of a zone */

extern const char *malloc_get_zone_name(malloc_zone_t *zone);
	/* Returns the name of a zone */

#if !TARGET_OS_EXCLAVECORE && !TARGET_OS_EXCLAVEKIT
size_t malloc_zone_pressure_relief(malloc_zone_t *zone, size_t goal) __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3);
	/* malloc_zone_pressure_relief() advises the malloc subsystem that the process is under memory pressure and
	 * that the subsystem should make its best effort towards releasing (i.e. munmap()-ing) "goal" bytes from "zone".
	 * If "goal" is passed as zero, the malloc subsystem will attempt to achieve maximal pressure relief in "zone".
	 * If "zone" is passed as NULL, all zones are examined for pressure relief opportunities.
	 * malloc_zone_pressure_relief() returns the number of bytes released.
	 */
#endif
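
// Illustrative response to a memory-pressure notification, per the comment
// above:
//
//	size_t released = malloc_zone_pressure_relief(NULL /* all zones */,
//			0 /* maximal relief */);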

typedef struct {
	vm_address_t	address;
	vm_size_t		size;
} vm_range_t;

typedef struct malloc_statistics_t {
	unsigned	blocks_in_use;
	size_t	size_in_use;
	size_t	max_size_in_use;	/* high water mark of touched memory */
	size_t	size_allocated;		/* reserved in memory */
} malloc_statistics_t;
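
// Illustrative summary across all zones via malloc_zone_statistics(), which
// is declared under "Debug helpers" below:
//
//	malloc_statistics_t stats;
//	malloc_zone_statistics(NULL /* sum all zones */, &stats);
//	malloc_printf("%u blocks, %y in use\n", stats.blocks_in_use, stats.size_in_use);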

typedef kern_return_t memory_reader_t(task_t remote_task, vm_address_t remote_address, vm_size_t size, void * __sized_by(size) *local_memory);
	/* Given a task, "reads" the memory at the given address and size;
	local_memory: set to a contiguous chunk of memory; validity of local_memory is assumed to be limited (until next call) */
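
// Illustrative memory_reader_t for the in-process case, where a "remote"
// address is directly dereferenceable (a true out-of-process reader would
// copy the range out of the target task instead):
//
//	static kern_return_t self_reader(task_t task, vm_address_t remote_address,
//			vm_size_t size, void **local_memory) {
//		*local_memory = (void *)remote_address;	// valid until the next call
//		return KERN_SUCCESS;
//	}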

#define MALLOC_PTR_IN_USE_RANGE_TYPE	1	/* for allocated pointers */
#define MALLOC_PTR_REGION_RANGE_TYPE	2	/* for region containing pointers */
#define MALLOC_ADMIN_REGION_RANGE_TYPE	4	/* for region used internally */
#define MALLOC_ZONE_SPECIFIC_FLAGS	0xff00	/* bits reserved for zone-specific purposes */

typedef void vm_range_recorder_t(task_t, void *, unsigned type, vm_range_t *, unsigned);
/* Given a task and context, "records" the specified addresses */

/* Print function for the print_task() operation. */
typedef void print_task_printer_t(const char * __null_terminated fmt, ...) __printflike(1,2);

typedef struct malloc_introspection_t {
	kern_return_t (* MALLOC_INTROSPECT_FN_PTR(enumerator))(task_t task, void *, unsigned type_mask, vm_address_t zone_address, memory_reader_t reader, vm_range_recorder_t recorder); /* enumerates all the malloc pointers in use */
	size_t	(* MALLOC_INTROSPECT_FN_PTR(good_size))(malloc_zone_t *zone, size_t size);
	boolean_t	(* MALLOC_INTROSPECT_FN_PTR(check))(malloc_zone_t *zone); /* Consistency checker */
	void	(* MALLOC_INTROSPECT_FN_PTR(print))(malloc_zone_t *zone, boolean_t verbose); /* Prints zone */
	void	(* MALLOC_INTROSPECT_FN_PTR(log))(malloc_zone_t *zone, void * __unsafe_indexable address); /* Enables logging of activity */
	void	(* MALLOC_INTROSPECT_FN_PTR(force_lock))(malloc_zone_t *zone); /* Forces locking zone */
	void	(* MALLOC_INTROSPECT_FN_PTR(force_unlock))(malloc_zone_t *zone); /* Forces unlocking zone */
	void	(* MALLOC_INTROSPECT_FN_PTR(statistics))(malloc_zone_t *zone, malloc_statistics_t *stats); /* Fills statistics */
	boolean_t	(* MALLOC_INTROSPECT_FN_PTR(zone_locked))(malloc_zone_t *zone); /* Are any zone locks held? */

	/* Discharge checking. Present in version >= 7. */
	boolean_t	(* MALLOC_INTROSPECT_FN_PTR(enable_discharge_checking))(malloc_zone_t *zone);
	void	(* MALLOC_INTROSPECT_FN_PTR(disable_discharge_checking))(malloc_zone_t *zone);
	void	(* MALLOC_INTROSPECT_FN_PTR(discharge))(malloc_zone_t *zone, void * __unsafe_indexable memory);
#ifdef __BLOCKS__
	void	(* MALLOC_INTROSPECT_FN_PTR(enumerate_discharged_pointers))(malloc_zone_t *zone, void (^report_discharged)(void *memory, void *info));
#else
	void	*enumerate_unavailable_without_blocks;
#endif /* __BLOCKS__ */
	void	(* MALLOC_INTROSPECT_FN_PTR(reinit_lock))(malloc_zone_t *zone); /* Reinitializes zone locks; called only from the atfork_child handler. Present in version >= 9. */
	void	(* MALLOC_INTROSPECT_FN_PTR(print_task))(task_t task, unsigned level, vm_address_t zone_address, memory_reader_t reader, print_task_printer_t printer); /* Debug print for another process. Present in version >= 11. */
	void	(* MALLOC_INTROSPECT_FN_PTR(task_statistics))(task_t task, vm_address_t zone_address, memory_reader_t reader, malloc_statistics_t *stats); /* Present in version >= 12. */
	unsigned	zone_type; /* Identifies the zone type. 0 means unknown/undefined zone type. Present in version >= 14. */
} malloc_introspection_t;

// The value of "level" when passed to print_task() that corresponds to
// verbose passed to print()
#define MALLOC_VERBOSE_PRINT_LEVEL	2

#if !TARGET_OS_EXCLAVECORE && !TARGET_OS_EXCLAVEKIT
extern void malloc_printf(const char * __null_terminated format, ...) __printflike(1,2);
	/* Convenience for logging errors and warnings;
	No allocation is performed during execution of this function;
	Only understands usual %p %d %s formats, and %y that expresses a number of bytes (5b,10KB,1MB...)
	*/
#endif
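
// Illustrative diagnostic using the %y byte-count conversion described above
// (the `name` and `released` variables are hypothetical):
//
//	malloc_printf("zone %s relieved %y under pressure\n", name, released);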

/*********	Functions for performance tools	************/

#if !TARGET_OS_EXCLAVECORE && !TARGET_OS_EXCLAVEKIT
extern kern_return_t malloc_get_all_zones(task_t task, memory_reader_t reader, vm_address_t * __single * __counted_by(*count) addresses, unsigned *count);
	/* Fills addresses and count with the addresses of the zones in task;
	Note that the validity of the addresses returned corresponds to the validity of the reader */
#endif

/*********	Debug helpers	************/

extern void malloc_zone_print_ptr_info(void * __unsafe_indexable ptr);
	/* Prints to stdout whether this pointer is in the malloc heap, its free status, and its size */

extern boolean_t malloc_zone_check(malloc_zone_t *zone);
	/* Checks zone is well formed; if !zone, checks all zones */

extern void malloc_zone_print(malloc_zone_t *zone, boolean_t verbose);
	/* Prints summary on zone; if !zone, prints all zones */

#if !TARGET_OS_EXCLAVECORE && !TARGET_OS_EXCLAVEKIT
extern void malloc_zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats);
	/* Fills statistics for zone; if !zone, sums up all zones */

extern void malloc_zone_log(malloc_zone_t *zone, void * __unsafe_indexable address);
	/* Controls logging of all activity; if !zone, for all zones;
	If address==0 nothing is logged;
	If address==-1 all activity is logged;
	Else only the activity regarding address is logged */
#endif

struct mstats {
	size_t	bytes_total;
	size_t	chunks_used;
	size_t	bytes_used;
	size_t	chunks_free;
	size_t	bytes_free;
};

#if !TARGET_OS_EXCLAVECORE && !TARGET_OS_EXCLAVEKIT
extern struct mstats mstats(void);

extern boolean_t malloc_zone_enable_discharge_checking(malloc_zone_t *zone) __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3);
	/* Increments the discharge checking enabled counter for a zone. Returns true if the zone supports checking, false if it does not. */

extern void malloc_zone_disable_discharge_checking(malloc_zone_t *zone) __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3);
	/* Decrements the discharge checking enabled counter for a zone. */

extern void malloc_zone_discharge(malloc_zone_t *zone, void * __unsafe_indexable memory) __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3);
	/* Registers memory that the programmer expects to be freed soon.
	zone may be NULL, in which case the zone is determined using malloc_zone_from_ptr().
	If discharge checking is off for the zone, this function is a no-op. */
#endif

#if !TARGET_OS_EXCLAVECORE && !TARGET_OS_EXCLAVEKIT
#ifdef __BLOCKS__
extern void malloc_zone_enumerate_discharged_pointers(malloc_zone_t *zone, void (^report_discharged)(void *memory, void *info)) __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3);
	/* Calls report_discharged for each block that was registered using malloc_zone_discharge() but has not yet been freed.
	info is used to provide zone-defined information about the memory block.
	If zone is NULL, then the enumeration covers all zones. */
#else
extern void malloc_zone_enumerate_discharged_pointers(malloc_zone_t *zone, void *) __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3);
#endif /* __BLOCKS__ */
#endif

/*********  Zone version summary ************/
// Version 0, but optional:
//   malloc_zone_t::batch_malloc
//   malloc_zone_t::batch_free
// Version 5:
//   malloc_zone_t::memalign
// Version 6:
//   malloc_zone_t::free_definite_size
// Version 7:
//   malloc_introspection_t::enable_discharge_checking
//   malloc_introspection_t::disable_discharge_checking
//   malloc_introspection_t::discharge
// Version 8:
//   malloc_zone_t::pressure_relief
// Version 9:
//   malloc_introspection_t::reinit_lock
// Version 10:
//   malloc_zone_t::claimed_address
// Version 11:
//   malloc_introspection_t::print_task
// Version 12:
//   malloc_introspection_t::task_statistics
// Version 13:
//   - malloc_zone_t::malloc and malloc_zone_t::calloc assume responsibility for
//     setting errno to ENOMEM on failure
//   - malloc_zone_t::try_free_default (libmalloc only, NULL otherwise)
// Version 14:
//   malloc_introspection_t::zone_type (mandatory, should be 0)
// Version 15:
//   malloc_zone_t::malloc_with_options (optional)
// Version 16:
//   malloc_zone_t::malloc_type_malloc (mandatory)
//   malloc_zone_t::malloc_type_calloc (mandatory)
//   malloc_zone_t::malloc_type_realloc (mandatory)
//   malloc_zone_t::malloc_type_memalign (mandatory)
//   malloc_zone_t::malloc_type_malloc_with_options (optional)

// Zone functions are optional unless specified otherwise above. Calling a zone
// function requires two checks:
//  * Check the zone version to ensure the zone struct is large enough to include the member.
//  * Check that the function pointer is not NULL.
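//
// For example (illustrative), guarding a call to the optional version >= 8
// pressure_relief entry point:
//
//	if (zone->version >= 8 && zone->pressure_relief != NULL) {
//		(void)zone->pressure_relief(zone, 0 /* maximal relief */);
//	}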

#if defined(_MALLOC_TYPE_MALLOC_IS_BACKDEPLOYING) && _MALLOC_TYPE_MALLOC_IS_BACKDEPLOYING
// Each shim below weak-imports the typed entry point and calls it when the
// running OS provides it, otherwise falling back to the untyped zone API.
static void * __sized_by_or_null(size) __attribute__((always_inline)) malloc_type_zone_malloc_backdeploy(malloc_zone_t *zone, size_t size, malloc_type_id_t type_id) __result_use_check __alloc_size(2) {
	__attribute__((weak_import)) void * __sized_by_or_null(size) malloc_type_zone_malloc(malloc_zone_t *zone, size_t size, malloc_type_id_t type_id) __result_use_check __alloc_size(2);
	__auto_type func = malloc_zone_malloc;
	if (malloc_type_zone_malloc) {
		return malloc_type_zone_malloc(zone, size, type_id);
	}
	return func(zone, size);
}

static void * __sized_by_or_null(size) __attribute__((always_inline)) malloc_type_zone_malloc_with_options_backdeploy(malloc_zone_t *zone, size_t alignment, size_t size, malloc_type_id_t type_id, malloc_zone_malloc_options_t opts) __result_use_check __alloc_align(2) __alloc_size(3) {
	__attribute__((weak_import)) void * __sized_by_or_null(size) malloc_type_zone_malloc_with_options(malloc_zone_t *zone, size_t alignment, size_t size, malloc_type_id_t type_id, malloc_zone_malloc_options_t opts) __result_use_check __alloc_align(2) __alloc_size(3);
	__auto_type func = malloc_zone_malloc_with_options;
	if (malloc_type_zone_malloc_with_options) {
		return malloc_type_zone_malloc_with_options(zone, alignment, size, type_id, opts);
	}
	return func(zone, alignment, size, opts);
}

static void * __sized_by_or_null(count * size) __attribute__((always_inline)) malloc_type_zone_calloc_backdeploy(malloc_zone_t *zone, size_t count, size_t size, malloc_type_id_t type_id) __result_use_check __alloc_size(2,3) {
	__attribute__((weak_import)) void * __sized_by_or_null(count * size) malloc_type_zone_calloc(malloc_zone_t *zone, size_t count, size_t size, malloc_type_id_t type_id) __result_use_check __alloc_size(2,3);
	__auto_type func = malloc_zone_calloc;
	if (malloc_type_zone_calloc) {
		return malloc_type_zone_calloc(zone, count, size, type_id);
	}
	return func(zone, count, size);
}

static void __attribute__((always_inline)) malloc_type_zone_free_backdeploy(malloc_zone_t *zone, void * __unsafe_indexable ptr, malloc_type_id_t type_id) {
	__attribute__((weak_import)) void malloc_type_zone_free(malloc_zone_t *zone, void * __unsafe_indexable ptr, malloc_type_id_t type_id);
	__auto_type func = malloc_zone_free;
	if (malloc_type_zone_free) {
		malloc_type_zone_free(zone, ptr, type_id);
	} else {
		func(zone, ptr);
	}
}

static void * __sized_by_or_null(size) __attribute__((always_inline)) malloc_type_zone_realloc_backdeploy(malloc_zone_t *zone, void * __unsafe_indexable ptr, size_t size, malloc_type_id_t type_id) __result_use_check __alloc_size(3) {
	__attribute__((weak_import)) void * __sized_by_or_null(size) malloc_type_zone_realloc(malloc_zone_t *zone, void * __unsafe_indexable ptr, size_t size, malloc_type_id_t type_id) __result_use_check __alloc_size(3);
	__auto_type func = malloc_zone_realloc;
	if (malloc_type_zone_realloc) {
		return malloc_type_zone_realloc(zone, ptr, size, type_id);
	}
	return func(zone, ptr, size);
}

static void *__sized_by_or_null(size) __attribute__((always_inline)) malloc_type_zone_valloc_backdeploy(malloc_zone_t *zone, size_t size, malloc_type_id_t type_id) __result_use_check __alloc_size(2) {
	__attribute__((weak_import)) void *__sized_by_or_null(size) malloc_type_zone_valloc(malloc_zone_t *zone, size_t size, malloc_type_id_t type_id) __result_use_check __alloc_size(2);
	__auto_type func = malloc_zone_valloc;
	if (malloc_type_zone_valloc) {
		return malloc_type_zone_valloc(zone, size, type_id);
	}
	return func(zone, size);
}

static void *__sized_by_or_null(size) __attribute__((always_inline)) malloc_type_zone_memalign_backdeploy(malloc_zone_t *zone, size_t alignment, size_t size, malloc_type_id_t type_id) __result_use_check __alloc_align(2) __alloc_size(3) {
	__attribute__((weak_import)) void *__sized_by_or_null(size) malloc_type_zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size, malloc_type_id_t type_id) __result_use_check __alloc_align(2) __alloc_size(3);
	__auto_type func = malloc_zone_memalign;
	if (malloc_type_zone_memalign) {
		return malloc_type_zone_memalign(zone, alignment, size, type_id);
	}
	return func(zone, alignment, size);
}
#endif // defined(_MALLOC_TYPE_MALLOC_IS_BACKDEPLOYING) && _MALLOC_TYPE_MALLOC_IS_BACKDEPLOYING

__END_DECLS

#endif /* _MALLOC_MALLOC_H_ */