#ifndef MALLOC_META_H
#define MALLOC_META_H

#include <stdint.h>
#include <errno.h>
#include <limits.h>
#include "glue.h"

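// table of slot sizes per size class, in UNIT granules; a classed
// group's slot stride is UNIT*size_classes[sc].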
__attribute__((__visibility__("hidden")))
extern const uint16_t size_classes[];

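// requests above this size are not served from size-classed groups;
// they get their own individually mmapped single-slot group.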
#define MMAP_THRESHOLD 131052

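// UNIT is the granule in which slot offsets and strides are measured;
// IB is the size of the in-band header preceding each allocation.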
#define UNIT 16
#define IB 4

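// a group is the storage for up to 32 slots. its one-UNIT header
// points back at the owning meta; active_idx is the highest slot
// index made available for allocation so far.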
struct group {
	struct meta *meta;
	unsigned char active_idx:5;
	char pad[UNIT - sizeof(struct meta *) - 1];
	unsigned char storage[];
};

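// out-of-band metadata for one group. prev/next link the meta into a
// circular queue; avail_mask/freed_mask track slots open to allocate
// and slots freed but not yet reactivated; last_idx is the highest
// slot index; freeable is clear for groups whose storage must not be
// returned; sizeclass is 63 for individually mmapped groups; maplen
// is the group's mapping length in pages, or 0 when its storage is
// nested inside another allocation.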
struct meta {
	struct meta *prev, *next;
	struct group *mem;
	volatile int avail_mask, freed_mask;
	uintptr_t last_idx:5;
	uintptr_t freeable:1;
	uintptr_t sizeclass:6;
	uintptr_t maplen:8*sizeof(uintptr_t)-12;
};

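// metas live in page-aligned meta_area blocks; check holds the
// per-process secret that get_meta verifies to reject forged or
// corrupted pointers.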
struct meta_area {
	uint64_t check;
	struct meta_area *next;
	int nslots;
	struct meta slots[];
};

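// global allocator state: the canary secret, meta allocation
// bookkeeping, per-class lists of active groups and usage counts,
// and the unmap/bounce history used to damp mapping churn.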
struct malloc_context {
	uint64_t secret;
#ifndef PAGESIZE
	size_t pagesize;
#endif
	int init_done;
	unsigned mmap_counter;
	struct meta *free_meta_head;
	struct meta *avail_meta;
	size_t avail_meta_count, avail_meta_area_count, meta_alloc_shift;
	struct meta_area *meta_area_head, *meta_area_tail;
	unsigned char *avail_meta_areas;
	struct meta *active[48];
	size_t usage_by_class[48];
	uint8_t unmap_seq[32], bounces[32];
	uint8_t seq;
	uintptr_t brk;
};

__attribute__((__visibility__("hidden")))
extern struct malloc_context ctx;

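// page size: compile-time constant on archs that fix it, otherwise
// the value determined at init time.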
#ifdef PAGESIZE
#define PGSZ PAGESIZE
#else
#define PGSZ ctx.pagesize
#endif

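// out-of-line helpers: alloc_meta obtains a fresh meta object,
// allocating a new meta_area if needed; is_allzero reports whether
// an allocation's memory is known to be all-zero already (letting
// calloc skip the memset).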
__attribute__((__visibility__("hidden")))
struct meta *alloc_meta(void);

__attribute__((__visibility__("hidden")))
int is_allzero(void *);

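// insert m at the tail of the circular doubly-linked list *phead.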
static inline void queue(struct meta **phead, struct meta *m)
{
	assert(!m->next);
	assert(!m->prev);
	if (*phead) {
		struct meta *head = *phead;
		m->next = head;
		m->prev = head->prev;
		m->next->prev = m->prev->next = m;
	} else {
		m->prev = m->next = m;
		*phead = m;
	}
}

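// unlink m from its list, advancing or clearing *phead as needed.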
static inline void dequeue(struct meta **phead, struct meta *m)
{
	if (m->next != m) {
		m->prev->next = m->next;
		m->next->prev = m->prev;
		if (*phead == m) *phead = m->next;
	} else {
		*phead = 0;
	}
	m->prev = m->next = 0;
}

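// pop and return the head of the list, or 0 if it is empty.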
static inline struct meta *dequeue_head(struct meta **phead)
{
	struct meta *m = *phead;
	if (m) dequeue(phead, m);
	return m;
}

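// clear a meta object and return it to the free-meta list for reuse.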
static inline void free_meta(struct meta *m)
{
	*m = (struct meta){0};
	queue(&ctx.free_meta_head, m);
}

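// make the freed slots up through active_idx allocatable again:
// atomically claim them out of freed_mask, then publish them as
// avail_mask. e.g. active_idx==3, freed_mask==0xb yields
// avail_mask==0xb.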
static inline uint32_t activate_group(struct meta *m)
{
	assert(!m->avail_mask);
	uint32_t mask, act = (2u<<m->mem->active_idx)-1;
	do mask = m->freed_mask;
	while (a_cas(&m->freed_mask, mask, mask&~act)!=mask);
	return m->avail_mask = mask & act;
}

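// the slot index lives in the low 5 bits of the byte at p[-3].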
static inline int get_slot_index(const unsigned char *p)
{
	return p[-3] & 31;
}

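// recover and validate the meta for an allocation: the 16-bit slot
// offset (in UNITs) at p-2, or the 32-bit offset at p-8 when the
// p[-4] flag byte is set, leads back to the group header. the
// header's meta must point back at this group, the slot must be
// in-bounds and in use, the containing meta_area must carry the
// secret, and the offset must be consistent with the size class;
// any mismatch fails an assert.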
static inline struct meta *get_meta(const unsigned char *p)
{
	assert(!((uintptr_t)p & 15));
	int offset = *(const uint16_t *)(p - 2);
	int index = get_slot_index(p);
	if (p[-4]) {
		assert(!offset);
		offset = *(uint32_t *)(p - 8);
		assert(offset > 0xffff);
	}
	const struct group *base = (const void *)(p - UNIT*offset - UNIT);
	const struct meta *meta = base->meta;
	assert(meta->mem == base);
	assert(index <= meta->last_idx);
	assert(!(meta->avail_mask & (1u<<index)));
	assert(!(meta->freed_mask & (1u<<index)));
	const struct meta_area *area = (void *)((uintptr_t)meta & -4096);
	assert(area->check == ctx.secret);
	if (meta->sizeclass < 48) {
		assert(offset >= size_classes[meta->sizeclass]*index);
		assert(offset < size_classes[meta->sizeclass]*(index+1));
	} else {
		assert(meta->sizeclass == 63);
	}
	if (meta->maplen) {
		assert(offset <= meta->maplen*4096UL/UNIT - 1);
	}
	return (struct meta *)meta;
}

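// nominal size is the slot's usable span minus the reserved tail.
// reserved counts 0-4 are kept in the top 3 bits of p[-3]; the
// value 5 means the real count is in a 32-bit footer at end-4. the
// zero byte at end-reserved and the slot's overflow byte at *end
// serve as overflow checks.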
static inline size_t get_nominal_size(const unsigned char *p, const unsigned char *end)
{
	size_t reserved = p[-3] >> 5;
	if (reserved >= 5) {
		assert(reserved == 5);
		reserved = *(const uint32_t *)(end-4);
		assert(reserved >= 5);
		assert(!end[-5]);
	}
	assert(reserved <= end-p);
	assert(!*(end-reserved));
	// also check the slot's overflow byte
	assert(!*end);
	return end-reserved-p;
}

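// distance from one slot to the next. a single-slot mmapped group's
// stride is the whole mapping minus the one-UNIT group header;
// otherwise it comes from the size-class table.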
static inline size_t get_stride(const struct meta *g)
{
	if (!g->last_idx && g->maplen) {
		return g->maplen*4096UL - UNIT;
	} else {
		return UNIT*size_classes[g->sizeclass];
	}
}

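// record the nominal size n by storing the reserved tail space in
// the top 3 bits of p[-3], spilling counts of 5 or more into a
// 32-bit footer, and planting the zero check byte consumed by
// get_nominal_size.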
static inline void set_size(unsigned char *p, unsigned char *end, size_t n)
{
	int reserved = end-p-n;
	if (reserved) end[-reserved] = 0;
	if (reserved >= 5) {
		*(uint32_t *)(end-4) = reserved;
		end[-5] = 0;
		reserved = 5;
	}
	p[-3] = (p[-3]&31) + (reserved<<5);
}

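// lay down the in-band header for slot idx holding an n-byte
// allocation, possibly starting it a cycled number of UNITs into
// the slot, bounded by the slack the slot has to spare.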
static inline void *enframe(struct meta *g, int idx, size_t n, int ctr)
{
	size_t stride = get_stride(g);
	size_t slack = (stride-IB-n)/UNIT;
	unsigned char *p = g->mem->storage + stride*idx;
	unsigned char *end = p+stride-IB;
	// cycle offset within slot to increase interval to address
	// reuse, facilitate trapping double-free.
	int off = (p[-3] ? *(uint16_t *)(p-2) + 1 : ctr) & 255;
	assert(!p[-4]);
	if (off > slack) {
		size_t m = slack;
		m |= m>>1; m |= m>>2; m |= m>>4;
		off &= m;
		if (off > slack) off -= slack+1;
		assert(off <= slack);
	}
	if (off) {
		// store offset in unused header at offset zero
		// if enframing at non-zero offset.
		*(uint16_t *)(p-2) = off;
		p[-3] = 7<<5;
		p += UNIT*off;
		// for nonzero offset there is no permanent check
		// byte, so make one.
		p[-4] = 0;
	}
	*(uint16_t *)(p-2) = (size_t)(p-g->mem->storage)/UNIT;
	p[-3] = idx;
	set_size(p, end, n);
	return p;
}

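// map a request size to a size-class index. sizes needing at most
// 10 UNITs including the IB header map directly (the class index is
// one less than the UNIT count, e.g. n==40 needs 3 UNITs, class 2);
// above that, the leading-zero count picks a coarse band of four
// classes and the table comparisons refine it.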
static inline int size_to_class(size_t n)
{
	n = (n+IB-1)>>4;
	if (n<10) return n;
	n++;
	int i = (28-a_clz_32(n))*4 + 8;
	if (n>size_classes[i+1]) i+=2;
	if (n>size_classes[i]) i++;
	return i;
}

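// reject sizes so large that adding headers and page rounding could
// overflow; sets errno like a failed allocation.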
static inline int size_overflows(size_t n)
{
	if (n >= SIZE_MAX/2 - 4096) {
		errno = ENOMEM;
		return 1;
	}
	return 0;
}

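// churn damping: seq is a coarse epoch counter, unmap_seq remembers
// the epoch at which a class last lost a group, and bounces counts
// how often a class needed a new group shortly after losing one, so
// callers can avoid unmapping classes that would just bounce back.

// advance the epoch, resetting the unmap history on wraparound.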
static inline void step_seq(void)
{
	if (ctx.seq==255) {
		for (int i=0; i<32; i++) ctx.unmap_seq[i] = 0;
		ctx.seq = 1;
	} else {
		ctx.seq++;
	}
}

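// stamp the current epoch for class sc (tracked for classes 7-38).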
static inline void record_seq(int sc)
{
	if (sc-7U < 32) ctx.unmap_seq[sc-7] = ctx.seq;
}

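// if class sc needs a group within 10 epochs of losing one, count a
// bounce; once the count would hit 100 it jumps to a sticky 150.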
static inline void account_bounce(int sc)
{
	if (sc-7U < 32) {
		int seq = ctx.unmap_seq[sc-7];
		if (seq && ctx.seq-seq < 10) {
			if (ctx.bounces[sc-7]+1 < 100)
				ctx.bounces[sc-7]++;
			else
				ctx.bounces[sc-7] = 150;
		}
	}
}

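// forgive one recorded bounce for class sc.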
static inline void decay_bounces(int sc)
{
	if (sc-7U < 32 && ctx.bounces[sc-7])
		ctx.bounces[sc-7]--;
}

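// a class that has accumulated 100+ bounces is considered bouncing.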
static inline int is_bouncing(int sc)
{
	return (sc-7U < 32 && ctx.bounces[sc-7] >= 100);
}

#endif