/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012, 2013 Konstantin Belousov <kib@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef __MACHINE_COUNTER_H__
#define __MACHINE_COUNTER_H__

#include <sys/pcpu.h>
#ifdef INVARIANTS
#include <sys/proc.h>
#endif

#define	EARLY_COUNTER	&__pcpu[0].pc_early_dummy_counter

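/*
 * Illustrative sketch (not part of this header): consumers do not use
 * these MD primitives directly, but the machine-independent counter(9)
 * interface built on top of them, roughly:
 *
 *	counter_u64_t cnt;
 *
 *	cnt = counter_u64_alloc(M_WAITOK);
 *	counter_u64_add(cnt, 1);
 *	printf("%ju\n", (uintmax_t)counter_u64_fetch(cnt));
 *	counter_u64_zero(cnt);
 *	counter_u64_free(cnt);
 */
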
#ifdef __powerpc64__

#define	counter_enter()	do {} while (0)
#define	counter_exit()	do {} while (0)

#ifdef IN_SUBR_COUNTER_C
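/*
 * Descriptive note: a counter is a per-CPU (UMA pcpu zone) allocation;
 * CPU "cpu"'s slot lives UMA_PCPU_ALLOC_SIZE * cpu bytes past the base
 * pointer, so a fetch simply sums every CPU's slot.
 */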
static inline uint64_t
counter_u64_read_one(uint64_t *p, int cpu)
{

	return (*(uint64_t *)((char *)p + UMA_PCPU_ALLOC_SIZE * cpu));
}

static inline uint64_t
counter_u64_fetch_inline(uint64_t *p)
{
	uint64_t r;
	int i;

	r = 0;
	CPU_FOREACH(i)
		r += counter_u64_read_one((uint64_t *)p, i);

	return (r);
}

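/*
 * Descriptive note: zeroing is done with an smp_rendezvous(9) that runs
 * counter_u64_zero_one_cpu() on every CPU, so each CPU clears only its
 * own slot.
 */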
static void
counter_u64_zero_one_cpu(void *arg)
{

	*((uint64_t *)((char *)arg + UMA_PCPU_ALLOC_SIZE *
	    PCPU_GET(cpuid))) = 0;
}

static inline void
counter_u64_zero_inline(counter_u64_t c)
{

	smp_rendezvous(smp_no_rendezvous_barrier, counter_u64_zero_one_cpu,
	    smp_no_rendezvous_barrier, c);
}
#endif

#define	counter_u64_add_protected(c, i)	counter_u64_add(c, i)

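/*
 * Descriptive note: the increment below is a restartable atomic update of
 * the current CPU's slot.  SPRG0 holds the per-CPU (pcpu) base pointer, so
 * "mfsprg %0, 0" plus the counter's offset from __pcpu[0] addresses this
 * CPU's copy; if the ldarx/stdcx. reservation is lost (for example across
 * an interrupt or a migration), stdcx. fails and the sequence retries from
 * the mfsprg, so no explicit preemption disabling is needed.
 */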
static inline void
counter_u64_add(counter_u64_t c, int64_t inc)
{
	uint64_t ccpu, old;

	__asm __volatile("\n"
	    "1:\n\t"
	    "mfsprg	%0, 0\n\t"
	    "ldarx	%1, %0, %2\n\t"
	    "add	%1, %1, %3\n\t"
	    "stdcx.	%1, %0, %2\n\t"
	    "bne-	1b"
	    : "=&b" (ccpu), "=&r" (old)
	    : "r" ((char *)c - (char *)&__pcpu[0]), "r" (inc)
	    : "cr0", "memory");
}

#else	/* !64bit */

#define	counter_enter()	critical_enter()
#define	counter_exit()	critical_exit()

#ifdef IN_SUBR_COUNTER_C
/* XXXKIB non-atomic 64bit read */
static inline uint64_t
counter_u64_read_one(uint64_t *p, int cpu)
{

	return (*(uint64_t *)((char *)p + UMA_PCPU_ALLOC_SIZE * cpu));
}

static inline uint64_t
counter_u64_fetch_inline(uint64_t *p)
{
	uint64_t r;
	int i;

	r = 0;
	for (i = 0; i < mp_ncpus; i++)
		r += counter_u64_read_one((uint64_t *)p, i);

	return (r);
}

/* XXXKIB non-atomic 64bit store, might interrupt increment */
static void
counter_u64_zero_one_cpu(void *arg)
{

	*((uint64_t *)((char *)arg + UMA_PCPU_ALLOC_SIZE *
	    PCPU_GET(cpuid))) = 0;
}

static inline void
counter_u64_zero_inline(counter_u64_t c)
{

	smp_rendezvous(smp_no_rendezvous_barrier, counter_u64_zero_one_cpu,
	    smp_no_rendezvous_barrier, c);
}
#endif

#define	counter_u64_add_protected(c, inc)	do {	\
	CRITICAL_ASSERT(curthread);			\
	*(uint64_t *)zpcpu_get(c) += (inc);		\
} while (0)

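/*
 * Descriptive note: 64-bit memory operations are not atomic here (see the
 * XXXKIB notes above), so the add is wrapped in a critical section via
 * counter_enter()/counter_exit() to keep the non-atomic read-modify-write
 * of this CPU's slot from being preempted mid-update.
 */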
static inline void
counter_u64_add(counter_u64_t c, int64_t inc)
{

	counter_enter();
	counter_u64_add_protected(c, inc);
	counter_exit();
}

#endif	/* 64bit */

#endif	/* ! __MACHINE_COUNTER_H__ */