  1/*	$NetBSD: nvmereg.h,v 1.19 2022/10/12 20:50:43 andvar Exp $	*/
  2/*	$OpenBSD: nvmereg.h,v 1.10 2016/04/14 11:18:32 dlg Exp $ */
  3
  4/*
  5 * Copyright (c) 2014 David Gwynne <dlg@openbsd.org>
  6 *
  7 * Permission to use, copy, modify, and distribute this software for any
  8 * purpose with or without fee is hereby granted, provided that the above
  9 * copyright notice and this permission notice appear in all copies.
 10 *
 11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 18 */
 19
#ifndef	__NVMEREG_H__
#define	__NVMEREG_H__

/*
 * Compile-time size assertion for the wire-format structures below.
 * Kernel builds use __CTASSERT(); standalone consumers may predefine
 * NVME_CTASSERT before including this header to supply their own.
 */
#ifndef	NVME_CTASSERT
#define	NVME_CTASSERT(x, s)	__CTASSERT(x)
#endif
 26
/*
 * Controller register offsets (in BAR0 register space) and field
 * extractors.  The _r argument is the raw register value.
 */
#define NVME_CAP	0x0000	/* Controller Capabilities */
#define  NVME_CAP_MPSMAX(_r)	(12 + (((_r) >> 52) & 0xf)) /* shift */
#define  NVME_CAP_MPSMIN(_r)	(12 + (((_r) >> 48) & 0xf)) /* shift */
#define  NVME_CAP_CSS(_r)	(((_r) >> 37) & 0x7f)	/* command sets supported */
#define  NVME_CAP_CSS_NVM	__BIT(0)
#define  NVME_CAP_NSSRS(_r)	ISSET((_r), __BIT(36))	/* subsystem reset supported */
#define  NVME_CAP_DSTRD(_r)	__BIT(2 + (((_r) >> 32) & 0xf)) /* bytes */
#define  NVME_CAP_TO(_r)	(500 * (((_r) >> 24) & 0xff)) /* ms */
#define  NVME_CAP_AMS(_r)	(((_r) >> 17) & 0x3)	/* arbitration mechanisms */
#define  NVME_CAP_AMS_WRR	__BIT(0)	/* weighted round-robin w/ urgent */
#define  NVME_CAP_AMS_VENDOR	__BIT(1)	/* vendor specific */
#define  NVME_CAP_CQR(_r)	ISSET((_r), __BIT(16))	/* contiguous queues required */
#define  NVME_CAP_MQES(_r)	(((_r) & 0xffff) + 1)	/* field is a 0's based value */
#define NVME_CAP_LO	0x0000	/* low 32 bits of CAP, for 32-bit access */
#define NVME_CAP_HI	0x0004	/* high 32 bits of CAP */
#define NVME_VS		0x0008	/* Version */
#define  NVME_VS_MJR(_r)	(((_r) >> 16) & 0xffff)	/* major */
#define  NVME_VS_MNR(_r)	(((_r) >> 8) & 0xff)	/* minor */
#define  NVME_VS_TER(_r)	((_r) & 0xff)		/* tertiary */
#define NVME_INTMS	0x000c	/* Interrupt Mask Set */
#define NVME_INTMC	0x0010	/* Interrupt Mask Clear */
 48#define NVME_CC		0x0014	/* Controller Configuration */
 49#define  NVME_CC_IOCQES(_v)	(((_v) & 0xf) << 20)
 50#define  NVME_CC_IOCQES_MASK	NVME_CC_IOCQES(0xf)
 51#define  NVME_CC_IOCQES_R(_v)	(((_v) >> 20) & 0xf)
 52#define  NVME_CC_IOSQES(_v)	(((_v) & 0xf) << 16)
 53#define  NVME_CC_IOSQES_MASK	NVME_CC_IOSQES(0xf)
 54#define  NVME_CC_IOSQES_R(_v)	(((_v) >> 16) & 0xf)
 55#define  NVME_CC_SHN(_v)	(((_v) & 0x3) << 14)
 56#define  NVME_CC_SHN_MASK	NVME_CC_SHN(0x3)
 57#define  NVME_CC_SHN_R(_v)	(((_v) >> 15) & 0x3)
 58#define  NVME_CC_SHN_NONE	0
 59#define  NVME_CC_SHN_NORMAL	1
 60#define  NVME_CC_SHN_ABRUPT	2
 61#define  NVME_CC_AMS(_v)	(((_v) & 0x7) << 11)
 62#define  NVME_CC_AMS_MASK	NVME_CC_AMS(0x7)
 63#define  NVME_CC_AMS_R(_v)	(((_v) >> 11) & 0xf)
 64#define  NVME_CC_AMS_RR		0 /* round-robin */
 65#define  NVME_CC_AMS_WRR_U	1 /* weighted round-robin w/ urgent */
 66#define  NVME_CC_AMS_VENDOR	7 /* vendor */
 67#define  NVME_CC_MPS(_v)	((((_v) - 12) & 0xf) << 7)
 68#define  NVME_CC_MPS_MASK	(0xf << 7)
 69#define  NVME_CC_MPS_R(_v)	(12 + (((_v) >> 7) & 0xf))
 70#define  NVME_CC_CSS(_v)	(((_v) & 0x7) << 4)
 71#define  NVME_CC_CSS_MASK	NVME_CC_CSS(0x7)
 72#define  NVME_CC_CSS_R(_v)	(((_v) >> 4) & 0x7)
 73#define  NVME_CC_CSS_NVM	0
 74#define  NVME_CC_EN		__BIT(0)
 75#define NVME_CSTS	0x001c	/* Controller Status */
 76#define  NVME_CSTS_SHST_MASK	(0x3 << 2)
 77#define  NVME_CSTS_SHST_NONE	(0x0 << 2) /* normal operation */
 78#define  NVME_CSTS_SHST_WAIT	(0x1 << 2) /* shutdown processing occurring */
 79#define  NVME_CSTS_SHST_DONE	(0x2 << 2) /* shutdown processing complete */
 80#define  NVME_CSTS_CFS		__BIT(1)
 81#define  NVME_CSTS_RDY		__BIT(0)
 82#define NVME_NSSR	0x0020	/* NVM Subsystem Reset (Optional) */
 83#define NVME_AQA	0x0024	/* Admin Queue Attributes */
 84				/* Admin Completion Queue Size */
 85#define  NVME_AQA_ACQS(_v)	(((_v) - 1) << 16)
 86#define  NVME_AQA_ACQS_R(_v)	((_v >> 16) & (__BIT(12) - 1))
 87				/* Admin Submission Queue Size */
 88#define  NVME_AQA_ASQS(_v)	(((_v) - 1) << 0)
 89#define  NVME_AQA_ASQS_R(_v)	(_v & (__BIT(12) - 1))
 90#define NVME_ASQ	0x0028	/* Admin Submission Queue Base Address */
 91#define NVME_ACQ	0x0030	/* Admin Completion Queue Base Address */
 92
#define NVME_ADMIN_Q		0	/* queue id of the admin queue pair */
/*
 * Doorbell register offsets: _q is the queue id, _s the doorbell stride
 * in bytes (see NVME_CAP_DSTRD).  SQ tail and CQ head doorbells for a
 * queue pair are adjacent, starting at offset 0x1000.
 */
/* Submission Queue Tail Doorbell */
#define NVME_SQTDBL(_q, _s)	(0x1000 + (2 * (_q) + 0) * (_s))
/* Completion Queue Head Doorbell */
#define NVME_CQHDBL(_q, _s)	(0x1000 + (2 * (_q) + 1) * (_s))
 98
/* Generic SGL (Scatter Gather List) descriptor; only the id is common. */
struct nvme_sge {
	uint8_t		id;		/* descriptor type identifier */
	uint8_t		_reserved[15];
} __packed __aligned(8);
NVME_CTASSERT(sizeof(struct nvme_sge) == 16, "bad size for nvme_sge");
104
/* SGL Data Block descriptor: a plain (address, length) data segment. */
struct nvme_sge_data {
	uint8_t		id;		/* descriptor type identifier */
	uint8_t		_reserved[3];

	uint32_t	length;		/* segment length in bytes */

	uint64_t	address;	/* segment base address */
} __packed __aligned(8);
NVME_CTASSERT(sizeof(struct nvme_sge_data) == 16, "bad size for nvme_sge_data");
114
/* SGL Bit Bucket descriptor: same layout as a data block descriptor. */
struct nvme_sge_bit_bucket {
	uint8_t		id;		/* descriptor type identifier */
	uint8_t		_reserved[3];

	uint32_t	length;		/* number of bytes to discard */

	uint64_t	address;
} __packed __aligned(8);
NVME_CTASSERT(sizeof(struct nvme_sge_bit_bucket) == 16, "bad size for nvme_sge_bit_bucket");
124
/* Generic Submission Queue Entry: 64-byte command as seen by hardware. */
struct nvme_sqe {
	uint8_t		opcode;
	uint8_t		flags;		/* CDW0 flags (fused op / PSDT) */
	uint16_t	cid;		/* command identifier */

	uint32_t	nsid;		/* namespace identifier */

	uint8_t		_reserved[8];

	uint64_t	mptr;		/* metadata pointer */

	union {
		uint64_t	prp[2];	/* PRP entries 1 and 2 */
		struct nvme_sge	sge;	/* or a single SGL descriptor */
	} entry;

	/* command specific dwords 10-15 */
	uint32_t	cdw10;
	uint32_t	cdw11;
	uint32_t	cdw12;
	uint32_t	cdw13;
	uint32_t	cdw14;
	uint32_t	cdw15;
} __packed __aligned(8);
NVME_CTASSERT(sizeof(struct nvme_sqe) == 64, "bad size for nvme_sqe");
149
/*
 * SQE layout for the Create I/O Submission/Completion Queue admin
 * commands; qflags carries SQ- or CQ-specific bits depending on opcode.
 */
struct nvme_sqe_q {
	uint8_t		opcode;
	uint8_t		flags;
	uint16_t	cid;		/* command identifier */

	uint8_t		_reserved1[20];

	uint64_t	prp1;		/* physical base address of the queue */

	uint8_t		_reserved2[8];

	uint16_t	qid;		/* id of the queue being created */
	uint16_t	qsize;		/* queue size (0's based) */

	uint8_t		qflags;
#define NVM_SQE_SQ_QPRIO_URG	(0x0 << 1)
#define NVM_SQE_SQ_QPRIO_HI	(0x1 << 1)
#define NVM_SQE_SQ_QPRIO_MED	(0x2 << 1)
#define NVM_SQE_SQ_QPRIO_LOW	(0x3 << 1)
#define NVM_SQE_CQ_IEN		__BIT(1)	/* CQ: interrupts enabled */
#define NVM_SQE_Q_PC		__BIT(0)	/* physically contiguous */
	uint8_t		_reserved3;
	uint16_t	cqid; /* XXX interrupt vector for cq */

	uint8_t		_reserved4[16];
} __packed __aligned(8);
NVME_CTASSERT(sizeof(struct nvme_sqe_q) == 64, "bad size for nvme_sqe_q");
177
/* SQE layout for NVM I/O commands addressing an LBA range (read/write). */
struct nvme_sqe_io {
	uint8_t		opcode;
	uint8_t		flags;
	uint16_t	cid;		/* command identifier */

	uint32_t	nsid;		/* namespace identifier */

	uint8_t		_reserved[8];

	uint64_t	mptr;		/* metadata pointer */

	union {
		uint64_t	prp[2];	/* PRP entries 1 and 2 */
		struct nvme_sge	sge;	/* or a single SGL descriptor */
	} entry;

	uint64_t	slba;	/* Starting LBA */

	uint16_t	nlb;	/* Number of Logical Blocks (0's based) */
	uint16_t	ioflags;
#define NVM_SQE_IO_LR	__BIT(15)	/* Limited Retry */
#define NVM_SQE_IO_FUA	__BIT(14)	/* Force Unit Access (bypass cache) */

	uint8_t		dsm;	/* Dataset Management */
#define NVM_SQE_IO_INCOMP	__BIT(7)	/* Incompressible */
#define NVM_SQE_IO_SEQ		__BIT(6)	/* Sequential request */
#define NVM_SQE_IO_LAT_MASK	__BITS(4, 5)	/* Access Latency */
#define  NVM_SQE_IO_LAT_NONE	0		/* Latency: none */
#define  NVM_SQE_IO_LAT_IDLE	__BIT(4)	/* Latency: idle */
#define  NVM_SQE_IO_LAT_NORMAL	__BIT(5)	/* Latency: normal */
#define  NVM_SQE_IO_LAT_LOW	__BITS(4, 5)	/* Latency: low */
#define NVM_SQE_IO_FREQ_MASK	__BITS(0, 3)	/* Access Frequency */
#define  NVM_SQE_IO_FREQ_TYPICAL	0x1	/* Typical */
#define  NVM_SQE_IO_FREQ_INFR_INFW	0x2	/* Infrequent read and writes */
#define  NVM_SQE_IO_FREQ_FRR_INFW	0x3	/* Frequent read, inf. writes */
#define  NVM_SQE_IO_FREQ_INFR_FRW	0x4	/* Inf. read, freq. writes */
#define  NVM_SQE_IO_FREQ_FRR_FRW	0x5	/* Freq. read and writes */
#define  NVM_SQE_IO_FREQ_ONCE		0x6	/* One time i/o operation */
/* Extra Access Frequency bits for read operations */
#define  NVM_SQE_IO_FREQ_SPEC		0x7	/* Speculative read - prefetch */
#define  NVM_SQE_IO_FREQ_OVERWRITE	0x8	/* Will be overwritten soon */
	uint8_t		_reserved2[3];

	uint32_t	eilbrt;	/* Expected Initial Logical Block
				   Reference Tag */

	uint16_t	elbat;	/* Expected Logical Block
				   Application Tag */
	uint16_t	elbatm;	/* Expected Logical Block
				   Application Tag Mask */
} __packed __aligned(8);
NVME_CTASSERT(sizeof(struct nvme_sqe_io) == 64, "bad size for nvme_sqe_io");
230
/*
 * Completion Queue Entry (16 bytes).  The status code (SC) values below
 * are only meaningful within their status code type (SCT) group: note
 * the generic, command-specific and media-error groups reuse numeric
 * values (e.g. 0x80), so always check NVME_CQE_SCT() first.
 */
struct nvme_cqe {
	uint32_t	cdw0;		/* command specific result */

	uint32_t	_reserved;

	uint16_t	sqhd; /* SQ Head Pointer */
	uint16_t	sqid; /* SQ Identifier */

	uint16_t	cid; /* Command Identifier */
	uint16_t	flags;
#define NVME_CQE_DNR		__BIT(15)	/* do not retry */
#define NVME_CQE_M		__BIT(14)	/* more status in log page */
#define NVME_CQE_SCT_MASK	__BITS(9, 11)
#define NVME_CQE_SCT(_f)	((_f) & NVME_CQE_SCT_MASK)
#define  NVME_CQE_SCT_GENERIC		(0x00 << 9)
#define  NVME_CQE_SCT_COMMAND		(0x01 << 9)
#define  NVME_CQE_SCT_MEDIAERR		(0x02 << 9)
#define  NVME_CQE_SCT_VENDOR		(0x07 << 9)
#define NVME_CQE_SC_MASK	__BITS(1, 8)
#define NVME_CQE_SC(_f)		((_f) & NVME_CQE_SC_MASK)
/* generic command status codes */
#define  NVME_CQE_SC_SUCCESS		(0x00 << 1)
#define  NVME_CQE_SC_INVALID_OPCODE	(0x01 << 1)
#define  NVME_CQE_SC_INVALID_FIELD	(0x02 << 1)
#define  NVME_CQE_SC_CID_CONFLICT	(0x03 << 1)
#define  NVME_CQE_SC_DATA_XFER_ERR	(0x04 << 1)
#define  NVME_CQE_SC_ABRT_BY_NO_PWR	(0x05 << 1)
#define  NVME_CQE_SC_INTERNAL_DEV_ERR	(0x06 << 1)
#define  NVME_CQE_SC_CMD_ABRT_REQD	(0x07 << 1)
#define  NVME_CQE_SC_CMD_ABDR_SQ_DEL	(0x08 << 1)
#define  NVME_CQE_SC_CMD_ABDR_FUSE_ERR	(0x09 << 1)
#define  NVME_CQE_SC_CMD_ABDR_FUSE_MISS	(0x0a << 1)
#define  NVME_CQE_SC_INVALID_NS		(0x0b << 1)
#define  NVME_CQE_SC_CMD_SEQ_ERR	(0x0c << 1)
#define  NVME_CQE_SC_INVALID_LAST_SGL	(0x0d << 1)
#define  NVME_CQE_SC_INVALID_NUM_SGL	(0x0e << 1)
#define  NVME_CQE_SC_DATA_SGL_LEN	(0x0f << 1)
#define  NVME_CQE_SC_MDATA_SGL_LEN	(0x10 << 1)
#define  NVME_CQE_SC_SGL_TYPE_INVALID	(0x11 << 1)
#define  NVME_CQE_SC_LBA_RANGE		(0x80 << 1)
#define  NVME_CQE_SC_CAP_EXCEEDED	(0x81 << 1)
#define  NVME_CQE_SC_NS_NOT_RDY		(0x82 << 1)
#define  NVME_CQE_SC_RSV_CONFLICT	(0x83 << 1)
/* command specific status codes */
#define  NVME_CQE_SC_CQE_INVALID	(0x00 << 1)
#define  NVME_CQE_SC_INVALID_QID	(0x01 << 1)
#define  NVME_CQE_SC_MAX_Q_SIZE		(0x02 << 1)
#define  NVME_CQE_SC_ABORT_LIMIT	(0x03 << 1)
#define  NVME_CQE_SC_ASYNC_EV_REQ_LIMIT	(0x05 << 1)
#define  NVME_CQE_SC_INVALID_FW_SLOT	(0x06 << 1)
#define  NVME_CQE_SC_INVALID_FW_IMAGE	(0x07 << 1)
#define  NVME_CQE_SC_INVALID_INT_VEC	(0x08 << 1)
#define  NVME_CQE_SC_INVALID_LOG_PAGE	(0x09 << 1)
#define  NVME_CQE_SC_INVALID_FORMAT	(0x0a << 1)
#define  NVME_CQE_SC_FW_REQ_CNV_RESET	(0x0b << 1)
#define  NVME_CQE_SC_FW_REQ_NVM_RESET	(0x10 << 1)
#define  NVME_CQE_SC_FW_REQ_RESET	(0x11 << 1)
#define  NVME_CQE_SC_FW_MAX_TIME_VIO	(0x12 << 1)
#define  NVME_CQE_SC_FW_PROHIBIT	(0x13 << 1)
#define  NVME_CQE_SC_OVERLAP_RANGE	(0x14 << 1)
#define  NVME_CQE_SC_CONFLICT_ATTRS	(0x80 << 1)
#define  NVME_CQE_SC_INVALID_PROT_INFO	(0x81 << 1)
#define  NVME_CQE_SC_ATT_WR_TO_RO_PAGE	(0x82 << 1)
/* media error status codes */
#define  NVME_CQE_SC_WRITE_FAULTS	(0x80 << 1)
#define  NVME_CQE_SC_UNRECV_READ_ERR	(0x81 << 1)
#define  NVME_CQE_SC_GUARD_CHECK_ERR	(0x82 << 1)
#define  NVME_CQE_SC_APPL_TAG_CHECK_ERR	(0x83 << 1)
#define  NVME_CQE_SC_REF_TAG_CHECK_ERR	(0x84 << 1)
#define  NVME_CQE_SC_CMP_FAIL		(0x85 << 1)
#define  NVME_CQE_SC_ACCESS_DENIED	(0x86 << 1)
#define NVME_CQE_PHASE		__BIT(0)
} __packed __aligned(8);
NVME_CTASSERT(sizeof(struct nvme_cqe) == 16, "bad size for nvme_cqe");
305
/* Admin command set opcodes. */
#define NVM_ADMIN_DEL_IOSQ	0x00 /* Delete I/O Submission Queue */
#define NVM_ADMIN_ADD_IOSQ	0x01 /* Create I/O Submission Queue */
#define NVM_ADMIN_GET_LOG_PG	0x02 /* Get Log Page */
#define NVM_ADMIN_DEL_IOCQ	0x04 /* Delete I/O Completion Queue */
#define NVM_ADMIN_ADD_IOCQ	0x05 /* Create I/O Completion Queue */
#define NVM_ADMIN_IDENTIFY	0x06 /* Identify */
#define NVM_ADMIN_ABORT		0x08 /* Abort */
#define NVM_ADMIN_SET_FEATURES	0x09 /* Set Features */
#define NVM_ADMIN_GET_FEATURES	0x0a /* Get Features */
#define NVM_ADMIN_ASYNC_EV_REQ	0x0c /* Asynchronous Event Request */
#define NVM_ADMIN_NS_MANAGEMENT	0x0d /* Namespace Management */
/* 0x0e-0x0f - reserved */
#define NVM_ADMIN_FW_COMMIT	0x10 /* Firmware Commit */
#define NVM_ADMIN_FW_DOWNLOAD	0x11 /* Firmware Image Download */
#define NVM_ADMIN_DEV_SELFTEST	0x14 /* Device Self Test */
#define NVM_ADMIN_NS_ATTACHMENT	0x15 /* Namespace Attachment */
#define NVM_ADMIN_KEEP_ALIVE	0x18 /* Keep Alive */
#define NVM_ADMIN_DIRECTIVE_SND	0x19 /* Directive Send */
#define NVM_ADMIN_DIRECTIVE_RCV	0x1a /* Directive Receive */
#define NVM_ADMIN_VIRT_MGMT	0x1c /* Virtualization Management */
#define NVM_ADMIN_NVME_MI_SEND	0x1d /* NVMe-MI Send */
#define NVM_ADMIN_NVME_MI_RECV	0x1e /* NVMe-MI Receive */
#define NVM_ADMIN_DOORBELL_BC	0x7c /* Doorbell Buffer Config */
#define NVM_ADMIN_FORMAT_NVM	0x80 /* Format NVM */
#define NVM_ADMIN_SECURITY_SND	0x81 /* Security Send */
#define NVM_ADMIN_SECURITY_RCV	0x82 /* Security Receive */
#define NVM_ADMIN_SANITIZE	0x84 /* Sanitize */
333
/* NVM (I/O) command set opcodes. */
#define NVM_CMD_FLUSH		0x00 /* Flush */
#define NVM_CMD_WRITE		0x01 /* Write */
#define NVM_CMD_READ		0x02 /* Read */
#define NVM_CMD_WR_UNCOR	0x04 /* Write Uncorrectable */
#define NVM_CMD_COMPARE		0x05 /* Compare */
/* 0x06-0x07 - reserved */
#define NVM_CMD_WRITE_ZEROES	0x08 /* Write Zeroes */
#define NVM_CMD_DSM		0x09 /* Dataset Management */
342
/*
 * Features for GET/SET FEATURES.
 * Note: two entries use the historical NVM_FEATURE_ prefix and one
 * misspells "persistence"; the names are kept for source compatibility.
 */
/* 0x00 - reserved */
#define NVM_FEAT_ARBITRATION			0x01
#define NVM_FEAT_POWER_MANAGEMENT		0x02
#define NVM_FEAT_LBA_RANGE_TYPE			0x03
#define NVM_FEAT_TEMPERATURE_THRESHOLD		0x04
#define NVM_FEAT_ERROR_RECOVERY			0x05
#define NVM_FEATURE_VOLATILE_WRITE_CACHE	0x06	/* optional */
#define NVM_FEATURE_NUMBER_OF_QUEUES		0x07	/* mandatory */
#define NVM_FEAT_INTERRUPT_COALESCING		0x08
#define NVM_FEAT_INTERRUPT_VECTOR_CONFIGURATION 0x09
#define NVM_FEAT_WRITE_ATOMICITY		0x0a
#define NVM_FEAT_ASYNC_EVENT_CONFIGURATION	0x0b
#define NVM_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION 0x0c
#define NVM_FEAT_HOST_MEMORY_BUFFER		0x0d
#define NVM_FEAT_TIMESTAMP			0x0e
#define NVM_FEAT_KEEP_ALIVE_TIMER		0x0f
#define NVM_FEAT_HOST_CONTROLLED_THERMAL_MGMT	0x10
#define NVM_FEAT_NON_OP_POWER_STATE_CONFIG	0x11
/* 0x12-0x77 - reserved */
/* 0x78-0x7f - NVMe Management Interface */
#define NVM_FEAT_SOFTWARE_PROGRESS_MARKER	0x80
#define NVM_FEAT_HOST_IDENTIFIER		0x81
#define NVM_FEAT_RESERVATION_NOTIFICATION_MASK	0x82
#define NVM_FEAT_RESERVATION_PERSISTANCE	0x83	/* sic */
/* 0x84-0xBF - command set specific (reserved) */
/* 0xC0-0xFF - vendor specific */

#define NVM_SET_FEATURES_SV		__BIT(31)	/* Persist */

#define NVM_VOLATILE_WRITE_CACHE_WCE	__BIT(0) 	/* Write Cache Enable */
374
/* Power State Descriptor Data (32 bytes each, 32 per Identify data). */
struct nvm_identify_psd {
	uint16_t	mp;		/* Max Power */
	uint8_t		_reserved1;
	uint8_t		flags;
#define	NVME_PSD_NOPS		__BIT(1)	/* non-operational state */
#define	NVME_PSD_MPS		__BIT(0)	/* max power scale */

	/* NOTE(review): enlat/exlat are presumably in microseconds per the
	 * NVMe spec — confirm before using for timeouts. */
	uint32_t	enlat;		/* Entry Latency */

	uint32_t	exlat;		/* Exit Latency */

	uint8_t		rrt;		/* Relative Read Throughput */
#define	NVME_PSD_RRT_MASK	__BITS(0, 4)
	uint8_t		rrl;		/* Relative Read Latency */
#define	NVME_PSD_RRL_MASK	__BITS(0, 4)
	uint8_t		rwt;		/* Relative Write Throughput */
#define	NVME_PSD_RWT_MASK	__BITS(0, 4)
	uint8_t		rwl;		/* Relative Write Latency */
#define	NVME_PSD_RWL_MASK	__BITS(0, 4)

	uint16_t	idlp;		/* Idle Power */
	uint8_t		ips;		/* Idle Power Scale */
#define	NVME_PSD_IPS_MASK	__BITS(0, 1)
	uint8_t		_reserved2;
	uint16_t	actp;		/* Active Power */
	uint16_t	ap;		/* Active Power Workload/Scale */
#define	NVME_PSD_APW_MASK	__BITS(0, 2)
#define	NVME_PSD_APS_MASK	__BITS(6, 7)

	uint8_t		_reserved[8];
} __packed __aligned(8);
NVME_CTASSERT(sizeof(struct nvm_identify_psd) == 32, "bad size for nvm_identify_psd");
408
/*
 * Identify Controller data (returned by NVM_ADMIN_IDENTIFY), 4096 bytes.
 * NOTE(review): sn/mn/fr appear to be fixed-width ASCII fields, not
 * NUL-terminated strings — confirm against the spec before printing.
 */
struct nvm_identify_controller {
	/* Controller Capabilities and Features */

	uint16_t	vid;		/* PCI Vendor ID */
	uint16_t	ssvid;		/* PCI Subsystem Vendor ID */

	uint8_t		sn[20];		/* Serial Number */
	uint8_t		mn[40];		/* Model Number */
	uint8_t		fr[8];		/* Firmware Revision */

	uint8_t		rab;		/* Recommended Arbitration Burst */
	uint8_t		ieee[3];	/* IEEE OUI Identifier */

	uint8_t		cmic;		/* Controller Multi-Path I/O and
					   Namespace Sharing Capabilities */
	uint8_t		mdts;		/* Maximum Data Transfer Size */

	uint16_t	cntlid;		/* Controller ID */
	uint32_t	ver;		/* Version */

	uint32_t	rtd3r;		/* RTD3 Resume Latency */
	uint32_t	rtd3e;		/* RTD3 Enter Latency */

	uint32_t	oaes;		/* Optional Asynchronous Events Supported */
	uint32_t	ctrattr;	/* Controller Attributes */

	uint8_t		_reserved1[12];

	uint8_t		fguid[16];	/* FRU Globally Unique Identifier */

	uint8_t		_reserved2[128];

	/* Admin Command Set Attributes & Optional Controller Capabilities */

	uint16_t	oacs;		/* Optional Admin Command Support */
#define	NVME_ID_CTRLR_OACS_DOORBELL_BC	__BIT(8)
#define	NVME_ID_CTRLR_OACS_VIRT_MGMT	__BIT(7)
#define	NVME_ID_CTRLR_OACS_NVME_MI	__BIT(6)
#define	NVME_ID_CTRLR_OACS_DIRECTIVES	__BIT(5)
#define	NVME_ID_CTRLR_OACS_DEV_SELFTEST	__BIT(4)
#define	NVME_ID_CTRLR_OACS_NS		__BIT(3)
#define	NVME_ID_CTRLR_OACS_FW		__BIT(2)
#define	NVME_ID_CTRLR_OACS_FORMAT	__BIT(1)
#define	NVME_ID_CTRLR_OACS_SECURITY	__BIT(0)
	uint8_t		acl;		/* Abort Command Limit */
	uint8_t		aerl;		/* Asynchronous Event Request Limit */

	uint8_t		frmw;		/* Firmware Updates */
#define	NVME_ID_CTRLR_FRMW_NOREQ_RESET	__BIT(4)
#define	NVME_ID_CTRLR_FRMW_NSLOT	__BITS(1, 3)
#define	NVME_ID_CTRLR_FRMW_SLOT1_RO	__BIT(0)
	uint8_t		lpa;		/* Log Page Attributes */
#define	NVME_ID_CTRLR_LPA_CMD_EFFECT	__BIT(1)
#define	NVME_ID_CTRLR_LPA_NS_SMART	__BIT(0)
	uint8_t		elpe;		/* Error Log Page Entries */
	uint8_t		npss;		/* Number of Power States Support */

	uint8_t		avscc;		/* Admin Vendor Specific Command
					   Configuration */
	uint8_t		apsta;		/* Autonomous Power State Transition
					   Attributes */
#define	NVME_ID_CTRLR_APSTA_PRESENT	__BIT(0)

	uint16_t	wctemp;		/* Warning Composite Temperature
					   Threshold */
	uint16_t	cctemp;		/* Critical Composite Temperature
					   Threshold */

	uint16_t	mtfa;		/* Maximum Time for Firmware Activation */

	uint32_t	hmpre;		/* Host Memory Buffer Preferred Size */
	uint32_t	hmmin;		/* Host Memory Buffer Minimum Size */

	struct {
		uint64_t	tnvmcap[2];	/* total capacity, 128-bit */
		uint64_t	unvmcap[2];	/* unallocated capacity, 128-bit */
	} __packed untncap;		/* Name space capabilities:
					   if NVME_ID_CTRLR_OACS_NS,
					   report tnvmcap and unvmcap */

	uint32_t	rpmbs;		/* Replay Protected Memory Block Support */

	uint16_t	edstt;		/* Extended Device Self-test Time */
	uint8_t		dsto;		/* Device Self-test Options */

	uint8_t		fwug;		/* Firmware Update Granularity */

	uint16_t	kas;		/* Keep Alive Support */

	uint16_t	hctma;		/* Host Controlled Thermal Management
					   Attributes */
	uint16_t	mntmt;		/* Minimum Thermal Management Temperature */
	uint16_t	mxtmt;		/* Maximum Thermal Management Temperature */

	uint32_t	sanicap;	/* Sanitize Capabilities */

	uint8_t		_reserved3[180];

	/* NVM Command Set Attributes */

	uint8_t		sqes;		/* Submission Queue Entry Size */
#define	NVME_ID_CTRLR_SQES_MAX		__BITS(4, 7)
#define	NVME_ID_CTRLR_SQES_MIN		__BITS(0, 3)
	uint8_t		cqes;		/* Completion Queue Entry Size */
#define	NVME_ID_CTRLR_CQES_MAX		__BITS(4, 7)
#define	NVME_ID_CTRLR_CQES_MIN		__BITS(0, 3)

	uint16_t	maxcmd;		/* Maximum Outstanding Commands */

	uint32_t	nn;		/* Number of Namespaces */

	uint16_t	oncs;		/* Optional NVM Command Support */
#define	NVME_ID_CTRLR_ONCS_TIMESTAMP	__BIT(6)
#define	NVME_ID_CTRLR_ONCS_RESERVATION	__BIT(5)
#define	NVME_ID_CTRLR_ONCS_SET_FEATURES	__BIT(4)
#define	NVME_ID_CTRLR_ONCS_WRITE_ZERO	__BIT(3)
#define	NVME_ID_CTRLR_ONCS_DSM		__BIT(2)
#define	NVME_ID_CTRLR_ONCS_WRITE_UNC	__BIT(1)
#define	NVME_ID_CTRLR_ONCS_COMPARE	__BIT(0)
	uint16_t	fuses;		/* Fused Operation Support */

	uint8_t		fna;		/* Format NVM Attributes */
#define	NVME_ID_CTRLR_FNA_CRYPTO_ERASE	__BIT(2)
#define	NVME_ID_CTRLR_FNA_ERASE_ALL	__BIT(1)
#define	NVME_ID_CTRLR_FNA_FORMAT_ALL	__BIT(0)
	uint8_t		vwc;		/* Volatile Write Cache */
#define	NVME_ID_CTRLR_VWC_PRESENT	__BIT(0)
	uint16_t	awun;		/* Atomic Write Unit Normal */
	uint16_t	awupf;		/* Atomic Write Unit Power Fail */

	uint8_t		nvscc;		/* NVM Vendor Specific Command */
	uint8_t		_reserved4[1];

	uint16_t	acwu;		/* Atomic Compare & Write Unit */
	uint8_t		_reserved5[2];

	uint32_t	sgls;		/* SGL Support */

	uint8_t		_reserved6[228];

	uint8_t		subnqn[256];	/* NVM Subsystem NVMe Qualified Name */

	uint8_t		_reserved7[768];

	uint8_t		_reserved8[256]; /* NVMe over Fabrics specification */

	struct nvm_identify_psd psd[32]; /* Power State Descriptors */

	uint8_t		vs[1024];	/* Vendor Specific */
} __packed __aligned(8);
NVME_CTASSERT(sizeof(struct nvm_identify_controller) == 4096, "bad size for nvm_identify_controller");
560
/* LBA Format data: one entry of the lbaf[] table in Identify Namespace. */
struct nvm_namespace_format {
	uint16_t	ms;		/* Metadata Size */
	uint8_t		lbads;		/* LBA Data Size (log2 of bytes) */
	uint8_t		rp;		/* Relative Performance */
} __packed __aligned(4);
NVME_CTASSERT(sizeof(struct nvm_namespace_format) == 4, "bad size for nvm_namespace_format");
567
/* Identify Namespace data (returned by NVM_ADMIN_IDENTIFY), 4096 bytes. */
struct nvm_identify_namespace {
	uint64_t	nsze;		/* Namespace Size (in logical blocks) */

	uint64_t	ncap;		/* Namespace Capacity */

	uint64_t	nuse;		/* Namespace Utilization */

	uint8_t		nsfeat;		/* Namespace Features */
#define	NVME_ID_NS_NSFEAT_LOGICAL_BLK_ERR	__BIT(2)
#define	NVME_ID_NS_NSFEAT_NS			__BIT(1)
#define	NVME_ID_NS_NSFEAT_THIN_PROV		__BIT(0)
	uint8_t		nlbaf;		/* Number of LBA Formats */
	uint8_t		flbas;		/* Formatted LBA Size */
#define NVME_ID_NS_FLBAS(_f)			((_f) & 0x0f)	/* lbaf[] index */
#define NVME_ID_NS_FLBAS_MD			0x10	/* metadata at end of LBA */
	uint8_t		mc;		/* Metadata Capabilities */
	uint8_t		dpc;		/* End-to-end Data Protection
					   Capabilities */
	uint8_t		dps;		/* End-to-end Data Protection Type
					   Settings */
#define	NVME_ID_NS_DPS_MD_START			__BIT(3)
#define	NVME_ID_NS_DPS_PIT(_f)			((_f) & 0x7)

	uint8_t		nmic;		/* Namespace Multi-path I/O and Namespace
					   Sharing Capabilities */

	uint8_t		rescap;		/* Reservation Capabilities */

	uint8_t		fpi;		/* Format Progress Indicator */

	uint8_t		dlfeat;		/* Deallocate Logical Block Features */

	uint16_t	nawun;		/* Namespace Atomic Write Unit Normal  */
	uint16_t	nawupf;		/* Namespace Atomic Write Unit Power Fail */
	uint16_t	nacwu;		/* Namespace Atomic Compare & Write Unit */
	uint16_t	nabsn;		/* Namespace Atomic Boundary Size Normal */
	uint16_t	nabo;		/* Namespace Atomic Boundary Offset */
	uint16_t	nabspf;		/* Namespace Atomic Boundary Size Power
					   Fail */
	uint16_t	noiob;		/* Namespace Optimal IO Boundary */

	uint8_t		nvmcap[16];	/* NVM Capacity (128-bit, bytes) */

	uint8_t		_reserved1[40];	/* bytes 64-103: Reserved */

	uint8_t		nguid[16];	/* Namespace Globally Unique Identifier */
	uint8_t		eui64[8];	/* IEEE Extended Unique Identifier */

	struct nvm_namespace_format
			lbaf[16];	/* LBA Format Support */

	uint8_t		_reserved2[192];

	uint8_t		vs[3712];	/* Vendor Specific */
} __packed __aligned(8);
NVME_CTASSERT(sizeof(struct nvm_identify_namespace) == 4096, "bad size for nvm_identify_namespace");
624
625#endif	/* __NVMEREG_H__ */