/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1987, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005, 2009 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)malloc.h	8.5 (Berkeley) 5/3/95
 */

#ifndef _SYS_MALLOC_H_
#define	_SYS_MALLOC_H_

#ifndef _STANDALONE
#include <sys/param.h>
#ifdef _KERNEL
#include <sys/systm.h>
#endif
#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
#include <machine/_limits.h>

#define	MINALLOCSIZE	UMA_SMALLEST_UNIT

/*
 * Flags to memory allocation functions.
 */
#define	M_NOWAIT	0x0001		/* do not block */
#define	M_WAITOK	0x0002		/* ok to block */
#define	M_NORECLAIM	0x0080		/* do not reclaim after failure */
#define	M_ZERO		0x0100		/* bzero the allocation */
#define	M_NOVM		0x0200		/* don't ask VM for pages */
#define	M_USE_RESERVE	0x0400		/* can alloc out of reserve memory */
#define	M_NODUMP	0x0800		/* don't dump pages in this allocation */
#define	M_FIRSTFIT	0x1000		/* only for vmem, fast fit */
#define	M_BESTFIT	0x2000		/* only for vmem, low fragmentation */
#define	M_EXEC		0x4000		/* allocate executable space */
#define	M_NEXTFIT	0x8000		/* only for vmem, follow cursor */

#define	M_VERSION	2020110501

/*
 * Two malloc type structures are present: malloc_type, which is used by a
 * type owner to declare the type, and malloc_type_internal, which holds
 * malloc-owned statistics and other ABI-sensitive fields, such as the set of
 * malloc statistics indexed by the compile-time MAXCPU constant.
 * Applications should avoid introducing dependence on the allocator private
 * data layout and size.
 *
 * The malloc_type ks_next field is protected by malloc_mtx.  Other fields in
 * malloc_type are static after initialization and so require no
 * synchronization.
 *
 * Statistics in malloc_type_stats are written only when holding a critical
 * section and running on the CPU associated with the index into the stat
 * array, but are read lock-free, resulting in possible (minor) races, which
 * the monitoring app should take into account.
 */
struct malloc_type_stats {
	uint64_t	mts_memalloced;	/* Bytes allocated on CPU. */
	uint64_t	mts_memfreed;	/* Bytes freed on CPU. */
	uint64_t	mts_numallocs;	/* Number of allocations on CPU. */
	uint64_t	mts_numfrees;	/* Number of frees on CPU. */
	uint64_t	mts_size;	/* Bitmask of sizes allocated on CPU. */
	uint64_t	_mts_reserved1;	/* Reserved field. */
	uint64_t	_mts_reserved2;	/* Reserved field. */
	uint64_t	_mts_reserved3;	/* Reserved field. */
};

_Static_assert(sizeof(struct malloc_type_stats) == 64,
    "allocations come from pcpu_zone_64");

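/*
 * Illustrative sketch (not part of this header's ABI): a per-CPU statistics
 * update as described above.  The writer enters a critical section and
 * touches only the slot belonging to the current CPU; readers walk all
 * slots without locks and accept slightly stale values.  The zpcpu_get()
 * accessor is assumed here because mti_stats points into a per-CPU (pcpu)
 * zone in the current implementation; this is an example, not the
 * authoritative code.
 *
 *	critical_enter();
 *	mtsp = zpcpu_get(mtp->ks_mti.mti_stats);
 *	mtsp->mts_memalloced += size;
 *	mtsp->mts_numallocs++;
 *	critical_exit();
 */
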
/*
 * Index definitions for the mti_probes[] array.
 */
#define	DTMALLOC_PROBE_MALLOC		0
#define	DTMALLOC_PROBE_FREE		1
#define	DTMALLOC_PROBE_MAX		2

struct malloc_type_internal {
	uint32_t	mti_probes[DTMALLOC_PROBE_MAX];
					/* DTrace probe ID array. */
	u_char		mti_zone;
	struct malloc_type_stats	*mti_stats;
	u_long		mti_spare[8];
};

/*
 * Public data structure describing a malloc type.
 */
struct malloc_type {
	struct malloc_type *ks_next;	/* Next in global chain. */
	u_long		 ks_version;	/* Detect programmer error. */
	const char	*ks_shortdesc;	/* Printable type name. */
	struct malloc_type_internal ks_mti;
};

/*
 * Statistics structure headers for user space.  The kern.malloc sysctl
 * exposes a structure stream consisting of a stream header, then a series of
 * malloc type headers and statistics structures (quantity maxcpus).  For
 * convenience, the kernel will provide the current value of maxcpus at the
 * head of the stream.
 */
#define	MALLOC_TYPE_STREAM_VERSION	0x00000001
struct malloc_type_stream_header {
	uint32_t	mtsh_version;	/* Stream format version. */
	uint32_t	mtsh_maxcpus;	/* Value of MAXCPU for stream. */
	uint32_t	mtsh_count;	/* Number of records. */
	uint32_t	_mtsh_pad;	/* Pad/reserved field. */
};

#define	MALLOC_MAX_NAME	32
struct malloc_type_header {
	char				mth_name[MALLOC_MAX_NAME];
};

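/*
 * Example (sketch): a userspace consumer of the kern.malloc sysctl.  The
 * stream is one malloc_type_stream_header followed, for each of the
 * mtsh_count types, by a malloc_type_header and mtsh_maxcpus copies of
 * malloc_type_stats.  Here buf is assumed to hold the raw bytes returned
 * by sysctlbyname("kern.malloc", ...); libmemstat(3) is the supported
 * interface, and this is illustration only.
 *
 *	struct malloc_type_stream_header *hdr = (void *)buf;
 *	char *p = buf + sizeof(*hdr);
 *	for (uint32_t i = 0; i < hdr->mtsh_count; i++) {
 *		struct malloc_type_header *mth = (void *)p;
 *		struct malloc_type_stats *mts = (void *)(p + sizeof(*mth));
 *		uint64_t allocs = 0;
 *		for (uint32_t c = 0; c < hdr->mtsh_maxcpus; c++)
 *			allocs += mts[c].mts_numallocs;
 *		printf("%-16s %ju\n", mth->mth_name, (uintmax_t)allocs);
 *		p += sizeof(*mth) + hdr->mtsh_maxcpus * sizeof(*mts);
 *	}
 */
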
#ifdef _KERNEL
#define	MALLOC_DEFINE(type, shortdesc, longdesc)			\
	struct malloc_type type[1] = {					\
		{							\
			.ks_next = NULL,				\
			.ks_version = M_VERSION,			\
			.ks_shortdesc = shortdesc,			\
		}							\
	};								\
	SYSINIT(type##_init, SI_SUB_KMEM, SI_ORDER_THIRD, malloc_init,	\
	    type);							\
	SYSUNINIT(type##_uninit, SI_SUB_KMEM, SI_ORDER_ANY,		\
	    malloc_uninit, type)

#define	MALLOC_DECLARE(type) \
	extern struct malloc_type type[1]

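/*
 * Typical usage (sketch): a subsystem defines its own malloc type in one
 * .c file and declares it in a header for other files.  The type name and
 * descriptions below are made up for illustration.
 *
 *	MALLOC_DEFINE(M_FOODEV, "foodev", "foo device softc and buffers");
 *
 *	sc = malloc(sizeof(*sc), M_FOODEV, M_WAITOK | M_ZERO);
 *	...
 *	free(sc, M_FOODEV);
 *
 * In a shared header:
 *
 *	MALLOC_DECLARE(M_FOODEV);
 */
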
MALLOC_DECLARE(M_CACHE);
MALLOC_DECLARE(M_DEVBUF);
MALLOC_DECLARE(M_TEMP);

/*
 * XXX this should be declared in <sys/uio.h>, but that tends to fail
 * because <sys/uio.h> is included in a header before the source file
 * has a chance to include <sys/malloc.h> to get MALLOC_DECLARE() defined.
 */
MALLOC_DECLARE(M_IOV);

struct domainset;
extern struct mtx malloc_mtx;

/*
 * Function type used when iterating over the list of malloc types.
 */
typedef void malloc_type_list_func_t(struct malloc_type *, void *);

void	contigfree(void *addr, unsigned long size, struct malloc_type *type);
void	*contigmalloc(unsigned long size, struct malloc_type *type, int flags,
	    vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
	    vm_paddr_t boundary) __malloc_like __result_use_check
	    __alloc_size(1) __alloc_align(6);
void	*contigmalloc_domainset(unsigned long size, struct malloc_type *type,
	    struct domainset *ds, int flags, vm_paddr_t low, vm_paddr_t high,
	    unsigned long alignment, vm_paddr_t boundary)
	    __malloc_like __result_use_check __alloc_size(1) __alloc_align(7);
void	free(void *addr, struct malloc_type *type);
void	zfree(void *addr, struct malloc_type *type);
void	*malloc(size_t size, struct malloc_type *type, int flags) __malloc_like
	    __result_use_check __alloc_size(1);
/*
 * Try to optimize malloc(..., ..., M_ZERO) allocations by doing zeroing in
 * place if the size is known at compilation time.
 *
 * Passing the flag down requires malloc to blindly zero the entire object.
 * In practice a lot of the zeroing can be avoided if most of the object
 * gets explicitly initialized after the allocation. Letting the compiler
 * zero in place gives it the opportunity to take advantage of this state.
 *
 * Note that the operation is only applicable if both flags and size are
 * known at compilation time. If M_ZERO is passed but M_WAITOK is not, the
 * allocation can fail and a NULL check is needed. However, if M_WAITOK is
 * passed we know the allocation must succeed and the check can be elided.
 *
 *	_malloc_item = malloc(_size, type, (flags) &~ M_ZERO);
 *	if (((flags) & M_WAITOK) != 0 || _malloc_item != NULL)
 *		bzero(_malloc_item, _size);
 *
 * If the flag is set, the compiler knows the left side is always true,
 * therefore the entire statement is true and the callsite is:
 *
 *	_malloc_item = malloc(_size, type, (flags) &~ M_ZERO);
 *	bzero(_malloc_item, _size);
 *
 * If the flag is not set, the compiler knows the left side is always false
 * and the NULL check is needed, therefore the callsite is:
 *
 *	_malloc_item = malloc(_size, type, (flags) &~ M_ZERO);
 *	if (_malloc_item != NULL)
 *		bzero(_malloc_item, _size);
 *
 * The implementation is a macro because of what appears to be a clang 6 bug:
 * an inline function variant ended up being compiled to a mere malloc call
 * regardless of argument. gcc generates expected code (like the above).
 */
#define	malloc(size, type, flags) ({					\
	void *_malloc_item;						\
	size_t _size = (size);						\
	if (__builtin_constant_p(size) && __builtin_constant_p(flags) &&\
	    ((flags) & M_ZERO) != 0) {					\
		_malloc_item = malloc(_size, type, (flags) &~ M_ZERO);	\
		if (((flags) & M_WAITOK) != 0 ||			\
		    __predict_true(_malloc_item != NULL))		\
			memset(_malloc_item, 0, _size);			\
	} else {							\
		_malloc_item = malloc(_size, type, flags);		\
	}								\
	_malloc_item;							\
})

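/*
 * Usage note (illustrative): a call such as
 *
 *	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
 *
 * has both size and flags known at compile time, so the macro above
 * rewrites it into a non-zeroing malloc() followed by an inline memset(),
 * which the compiler can then merge with the caller's own field
 * initialization.  The sc pointer is a placeholder for whatever object
 * the caller actually allocates.
 */
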
void	*malloc_domainset(size_t size, struct malloc_type *type,
	    struct domainset *ds, int flags) __malloc_like __result_use_check
	    __alloc_size(1);
void	*mallocarray(size_t nmemb, size_t size, struct malloc_type *type,
	    int flags) __malloc_like __result_use_check
	    __alloc_size2(1, 2);
void	*mallocarray_domainset(size_t nmemb, size_t size, struct malloc_type *type,
	    struct domainset *ds, int flags) __malloc_like __result_use_check
	    __alloc_size2(1, 2);
void	*malloc_exec(size_t size, struct malloc_type *type, int flags) __malloc_like
	    __result_use_check __alloc_size(1);
void	*malloc_domainset_exec(size_t size, struct malloc_type *type,
	    struct domainset *ds, int flags) __malloc_like __result_use_check
	    __alloc_size(1);
void	malloc_init(void *);
void	malloc_type_allocated(struct malloc_type *type, unsigned long size);
void	malloc_type_freed(struct malloc_type *type, unsigned long size);
void	malloc_type_list(malloc_type_list_func_t *, void *);
void	malloc_uninit(void *);
size_t	malloc_size(size_t);
size_t	malloc_usable_size(const void *);
void	*realloc(void *addr, size_t size, struct malloc_type *type, int flags)
	    __result_use_check __alloc_size(2);
void	*reallocf(void *addr, size_t size, struct malloc_type *type, int flags)
	    __result_use_check __alloc_size(2);
void	*malloc_aligned(size_t size, size_t align, struct malloc_type *type,
	    int flags) __malloc_like __result_use_check __alloc_size(1);
void	*malloc_domainset_aligned(size_t size, size_t align,
	    struct malloc_type *mtp, struct domainset *ds, int flags)
	    __malloc_like __result_use_check __alloc_size(1);

struct malloc_type *malloc_desc2type(const char *desc);

/*
 * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
 * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
 */
#define	MUL_NO_OVERFLOW		(1UL << (sizeof(size_t) * 8 / 2))
static inline bool
WOULD_OVERFLOW(size_t nmemb, size_t size)
{

	return ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    nmemb > 0 && __SIZE_T_MAX / nmemb < size);
}
#undef MUL_NO_OVERFLOW
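
/*
 * Example (sketch): a caller-side overflow guard before computing a
 * count * size allocation by hand; mallocarray() performs the same kind of
 * check internally.  The names below are placeholders.
 *
 *	if (WOULD_OVERFLOW(nitems, itemsize))
 *		return (ENOMEM);
 *	buf = malloc(nitems * itemsize, M_TEMP, M_WAITOK);
 */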
#endif /* _KERNEL */

#else
/*
 * The native stand malloc / free interface we're mapping to
 */
extern void Free(void *p, const char *file, int line);
extern void *Malloc(size_t bytes, const char *file, int line);

/*
 * Minimal standalone malloc implementation / environment. None of the
 * flags mean anything and there's no need to declare malloc types.
 * Define the simple alloc / free routines in terms of Malloc and
 * Free. None of the kernel features that this stuff disables are needed.
 */
#define	M_WAITOK 1
#define	M_ZERO 0
#define	M_NOWAIT 2
#define	MALLOC_DECLARE(x)

#define	kmem_zalloc(size, flags) ({					\
	void *p = Malloc((size), __FILE__, __LINE__);			\
	if (p == NULL && ((flags) & M_WAITOK) != 0)			\
		panic("Could not malloc %zu bytes with M_WAITOK from %s line %d", \
		    (size_t)size, __FILE__, __LINE__);			\
	p;								\
})

#define	kmem_free(p, size)	Free(p, __FILE__, __LINE__)

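/*
 * Illustrative standalone usage (names are placeholders): allocations in
 * the loader map directly onto Malloc()/Free(), so the flags are accepted
 * but ignored apart from the M_WAITOK panic check above.
 *
 *	buf = kmem_zalloc(len, M_WAITOK);
 *	...
 *	kmem_free(buf, len);
 */
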
/*
 * ZFS mem.h define that's the OpenZFS porting layer way of saying
 * M_WAITOK. Given the above, it will also be a nop.
 */
#define	KM_SLEEP	M_WAITOK
#define	KM_NOSLEEP	M_NOWAIT
#endif /* _STANDALONE */
#endif /* !_SYS_MALLOC_H_ */