1/*-
2 * Data structures and definitions for CAM Control Blocks (CCBs).
3 *
4 * SPDX-License-Identifier: BSD-2-Clause
5 *
6 * Copyright (c) 1997, 1998 Justin T. Gibbs.
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions, and the following disclaimer,
14 * without modification, immediately at the beginning of the file.
15 * 2. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
22 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31#ifndef _CAM_CAM_CCB_H
32#define _CAM_CAM_CCB_H 1
33
34#include <sys/queue.h>
35#include <sys/cdefs.h>
36#include <sys/time.h>
37#include <sys/limits.h>
38#ifndef _KERNEL
39#include <sys/callout.h>
40#endif
41#include <cam/cam_debug.h>
42#include <cam/scsi/scsi_all.h>
43#include <cam/ata/ata_all.h>
44#include <cam/nvme/nvme_all.h>
45#include <cam/mmc/mmc_all.h>
46
47/* General allocation length definitions for CCB structures */
48#define IOCDBLEN CAM_MAX_CDBLEN /* Space for CDB bytes/pointer */
49#define VUHBALEN 14 /* Vendor Unique HBA length */
50#define SIM_IDLEN 16 /* ASCII string len for SIM ID */
51#define HBA_IDLEN 16 /* ASCII string len for HBA ID */
52#define DEV_IDLEN 16 /* ASCII string len for device names */
53#define CCB_PERIPH_PRIV_SIZE 2 /* size of peripheral private area */
54#define CCB_SIM_PRIV_SIZE 2 /* size of sim private area */
55
56/* Struct definitions for CAM control blocks */
57
58/* Common CCB header */
59
60/* CCB memory allocation flags */
61typedef enum {
62 CAM_CCB_FROM_UMA = 0x00000001,/* CCB from a periph UMA zone */
63} ccb_alloc_flags;
64
65/* CAM CCB flags */
66typedef enum {
67 CAM_CDB_POINTER = 0x00000001,/* The CDB field is a pointer */
68 CAM_unused1 = 0x00000002,
69 CAM_unused2 = 0x00000004,
70 CAM_NEGOTIATE = 0x00000008,/*
71 * Perform transport negotiation
72 * with this command.
73 */
74 CAM_DATA_ISPHYS = 0x00000010,/* Data type with physical addrs */
75 CAM_DIS_AUTOSENSE = 0x00000020,/* Disable autosense feature */
76 CAM_DIR_BOTH = 0x00000000,/* Data direction (00:IN/OUT) */
77 CAM_DIR_IN = 0x00000040,/* Data direction (01:DATA IN) */
78 CAM_DIR_OUT = 0x00000080,/* Data direction (10:DATA OUT) */
79 CAM_DIR_NONE = 0x000000C0,/* Data direction (11:no data) */
80 CAM_DIR_MASK = 0x000000C0,/* Data direction Mask */
81 CAM_DATA_VADDR = 0x00000000,/* Data type (000:Virtual) */
82 CAM_DATA_PADDR = 0x00000010,/* Data type (001:Physical) */
83 CAM_DATA_SG = 0x00040000,/* Data type (010:sglist) */
84 CAM_DATA_SG_PADDR = 0x00040010,/* Data type (011:sglist phys) */
85 CAM_DATA_BIO = 0x00200000,/* Data type (100:bio) */
86 CAM_DATA_MASK = 0x00240010,/* Data type mask */
87 CAM_unused3 = 0x00000100,
88 CAM_unused4 = 0x00000200,
89 CAM_DEV_QFRZDIS = 0x00000400,/* Disable DEV Q freezing */
90 CAM_DEV_QFREEZE = 0x00000800,/* Freeze DEV Q on execution */
91 CAM_HIGH_POWER = 0x00001000,/* Command takes a lot of power */
92 CAM_SENSE_PTR = 0x00002000,/* Sense data is a pointer */
93 CAM_SENSE_PHYS = 0x00004000,/* Sense pointer is physical addr*/
94 CAM_TAG_ACTION_VALID = 0x00008000,/* Use the tag action in this ccb*/
95 CAM_PASS_ERR_RECOVER = 0x00010000,/* Pass driver does err. recovery*/
96 CAM_DIS_DISCONNECT = 0x00020000,/* Disable disconnect */
97 CAM_unused5 = 0x00080000,
98 CAM_unused6 = 0x00100000,
	CAM_CDB_PHYS		= 0x00400000,/* CDB pointer is physical */
100 CAM_unused7 = 0x00800000,
101
102/* Phase cognizant mode flags */
103 CAM_unused8 = 0x01000000,
104 CAM_unused9 = 0x02000000,
105 CAM_unused10 = 0x04000000,
106 CAM_unused11 = 0x08000000,
107 CAM_unused12 = 0x10000000,
108 CAM_unused13 = 0x20000000,
109 CAM_unused14 = 0x40000000,
110
111/* Host target Mode flags */
112 CAM_SEND_SENSE = 0x08000000,/* Send sense data with status */
113 CAM_unused15 = 0x10000000,
114 CAM_unused16 = 0x20000000,
115 CAM_SEND_STATUS = 0x40000000,/* Send status after data phase */
116
117 CAM_UNLOCKED = 0x80000000 /* Call callback without lock. */
118} ccb_flags;
119
120typedef enum {
121 CAM_USER_DATA_ADDR = 0x00000002,/* Userspace data pointers */
122 CAM_SG_FORMAT_IOVEC = 0x00000004,/* iovec instead of busdma S/G*/
123 CAM_UNMAPPED_BUF = 0x00000008 /* use unmapped I/O */
124} ccb_xflags;
125
126/* XPT Opcodes for xpt_action */
127typedef enum {
128/* Function code flags are bits greater than 0xff */
129 XPT_FC_QUEUED = 0x100,
130 /* Non-immediate function code */
131 XPT_FC_USER_CCB = 0x200,
132 XPT_FC_XPT_ONLY = 0x400,
133 /* Only for the transport layer device */
134 XPT_FC_DEV_QUEUED = 0x800 | XPT_FC_QUEUED,
135 /* Passes through the device queues */
136/* Common function commands: 0x00->0x0F */
137 XPT_NOOP = 0x00,
138 /* Execute Nothing */
139 XPT_SCSI_IO = 0x01 | XPT_FC_DEV_QUEUED,
140 /* Execute the requested I/O operation */
141 XPT_GDEV_TYPE = 0x02,
142 /* Get type information for specified device */
143 XPT_GDEVLIST = 0x03,
144 /* Get a list of peripheral devices */
145 XPT_PATH_INQ = 0x04,
146 /* Path routing inquiry */
147 XPT_REL_SIMQ = 0x05,
148 /* Release a frozen device queue */
149 XPT_SASYNC_CB = 0x06,
150 /* Set Asynchronous Callback Parameters */
151 XPT_SDEV_TYPE = 0x07,
152 /* Set device type information */
153 XPT_SCAN_BUS = 0x08 | XPT_FC_QUEUED | XPT_FC_USER_CCB
154 | XPT_FC_XPT_ONLY,
155 /* (Re)Scan the SCSI Bus */
156 XPT_DEV_MATCH = 0x09 | XPT_FC_XPT_ONLY,
157 /* Get EDT entries matching the given pattern */
158 XPT_DEBUG = 0x0a,
159 /* Turn on debugging for a bus, target or lun */
160 XPT_PATH_STATS = 0x0b,
161 /* Path statistics (error counts, etc.) */
162 XPT_GDEV_STATS = 0x0c,
163 /* Device statistics (error counts, etc.) */
164 XPT_DEV_ADVINFO = 0x0e,
165 /* Get/Set Device advanced information */
166 XPT_ASYNC = 0x0f | XPT_FC_QUEUED | XPT_FC_USER_CCB
167 | XPT_FC_XPT_ONLY,
168 /* Asynchronous event */
169/* SCSI Control Functions: 0x10->0x1F */
170 XPT_ABORT = 0x10,
171 /* Abort the specified CCB */
172 XPT_RESET_BUS = 0x11 | XPT_FC_XPT_ONLY,
173 /* Reset the specified SCSI bus */
174 XPT_RESET_DEV = 0x12 | XPT_FC_DEV_QUEUED,
175 /* Bus Device Reset the specified SCSI device */
176 XPT_TERM_IO = 0x13,
177 /* Terminate the I/O process */
178 XPT_SCAN_LUN = 0x14 | XPT_FC_QUEUED | XPT_FC_USER_CCB
179 | XPT_FC_XPT_ONLY,
180 /* Scan Logical Unit */
181 XPT_GET_TRAN_SETTINGS = 0x15,
182 /*
183 * Get default/user transfer settings
184 * for the target
185 */
186 XPT_SET_TRAN_SETTINGS = 0x16,
187 /*
188 * Set transfer rate/width
189 * negotiation settings
190 */
191 XPT_CALC_GEOMETRY = 0x17,
192 /*
193 * Calculate the geometry parameters for
				 * a device given the sector size and
195 * volume size.
196 */
197 XPT_ATA_IO = 0x18 | XPT_FC_DEV_QUEUED,
198 /* Execute the requested ATA I/O operation */
199
200 XPT_GET_SIM_KNOB_OLD = 0x18, /* Compat only */
201
202 XPT_SET_SIM_KNOB = 0x19,
203 /*
204 * Set SIM specific knob values.
205 */
206
207 XPT_GET_SIM_KNOB = 0x1a,
208 /*
209 * Get SIM specific knob values.
210 */
211
212 XPT_SMP_IO = 0x1b | XPT_FC_DEV_QUEUED,
213 /* Serial Management Protocol */
214
215 XPT_NVME_IO = 0x1c | XPT_FC_DEV_QUEUED,
216 /* Execute the requested NVMe I/O operation */
217
218 XPT_MMC_IO = 0x1d | XPT_FC_DEV_QUEUED,
219 /* Placeholder for MMC / SD / SDIO I/O stuff */
220
221 XPT_SCAN_TGT = 0x1e | XPT_FC_QUEUED | XPT_FC_USER_CCB
222 | XPT_FC_XPT_ONLY,
223 /* Scan Target */
224
225 XPT_NVME_ADMIN = 0x1f | XPT_FC_DEV_QUEUED,
226 /* Execute the requested NVMe Admin operation */
227
228/* HBA engine commands 0x20->0x2F */
229 XPT_ENG_INQ = 0x20 | XPT_FC_XPT_ONLY,
230 /* HBA engine feature inquiry */
231 XPT_ENG_EXEC = 0x21 | XPT_FC_DEV_QUEUED,
232 /* HBA execute engine request */
233
234/* Target mode commands: 0x30->0x3F */
235 XPT_EN_LUN = 0x30,
236 /* Enable LUN as a target */
237 XPT_TARGET_IO = 0x31 | XPT_FC_DEV_QUEUED,
238 /* Execute target I/O request */
239 XPT_ACCEPT_TARGET_IO = 0x32 | XPT_FC_QUEUED | XPT_FC_USER_CCB,
240 /* Accept Host Target Mode CDB */
241 XPT_CONT_TARGET_IO = 0x33 | XPT_FC_DEV_QUEUED,
242 /* Continue Host Target I/O Connection */
243 XPT_IMMED_NOTIFY = 0x34 | XPT_FC_QUEUED | XPT_FC_USER_CCB,
244 /* Notify Host Target driver of event (obsolete) */
245 XPT_NOTIFY_ACK = 0x35,
246 /* Acknowledgement of event (obsolete) */
247 XPT_IMMEDIATE_NOTIFY = 0x36 | XPT_FC_QUEUED | XPT_FC_USER_CCB,
248 /* Notify Host Target driver of event */
249 XPT_NOTIFY_ACKNOWLEDGE = 0x37 | XPT_FC_QUEUED | XPT_FC_USER_CCB,
250 /* Acknowledgement of event */
251 XPT_REPROBE_LUN = 0x38 | XPT_FC_QUEUED | XPT_FC_USER_CCB,
252 /* Query device capacity and notify GEOM */
253
254 XPT_MMC_SET_TRAN_SETTINGS = 0x40 | XPT_FC_DEV_QUEUED,
255 XPT_MMC_GET_TRAN_SETTINGS = 0x41 | XPT_FC_DEV_QUEUED,
256
257/* Vendor Unique codes: 0x80->0x8F */
258 XPT_VUNIQUE = 0x80
259} xpt_opcode;
260
261#define XPT_FC_GROUP_MASK 0xF0
262#define XPT_FC_GROUP(op) ((op) & XPT_FC_GROUP_MASK)
263#define XPT_FC_GROUP_COMMON 0x00
264#define XPT_FC_GROUP_SCSI_CONTROL 0x10
265#define XPT_FC_GROUP_HBA_ENGINE 0x20
266#define XPT_FC_GROUP_TMODE 0x30
267#define XPT_FC_GROUP_VENDOR_UNIQUE 0x80
268
269#define XPT_FC_IS_DEV_QUEUED(ccb) \
270 (((ccb)->ccb_h.func_code & XPT_FC_DEV_QUEUED) == XPT_FC_DEV_QUEUED)
271#define XPT_FC_IS_QUEUED(ccb) \
272 (((ccb)->ccb_h.func_code & XPT_FC_QUEUED) != 0)
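
/*
 * Example (an illustrative sketch only, not part of the CAM API): a SIM
 * action routine might use the function code groups and the queued versus
 * immediate distinction above when dispatching a CCB.  "softc", "start_io"
 * and "handle_immediate" are hypothetical names.
 *
 *	switch (XPT_FC_GROUP(ccb->ccb_h.func_code)) {
 *	case XPT_FC_GROUP_COMMON:
 *	case XPT_FC_GROUP_SCSI_CONTROL:
 *		if (XPT_FC_IS_QUEUED(ccb))
 *			start_io(softc, ccb);
 *		else
 *			handle_immediate(softc, ccb);
 *		break;
 *	default:
 *		ccb->ccb_h.status = CAM_REQ_INVALID;
 *		xpt_done(ccb);
 *		break;
 *	}
 */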
273
274typedef enum {
275 PROTO_UNKNOWN,
276 PROTO_UNSPECIFIED,
277 PROTO_SCSI, /* Small Computer System Interface */
278 PROTO_ATA, /* AT Attachment */
279 PROTO_ATAPI, /* AT Attachment Packetized Interface */
280 PROTO_SATAPM, /* SATA Port Multiplier */
281 PROTO_SEMB, /* SATA Enclosure Management Bridge */
282 PROTO_NVME, /* NVME */
283 PROTO_MMCSD, /* MMC, SD, SDIO */
284} cam_proto;
285
286typedef enum {
287 XPORT_UNKNOWN,
288 XPORT_UNSPECIFIED,
289 XPORT_SPI, /* SCSI Parallel Interface */
	XPORT_FC,	/* Fibre Channel */
291 XPORT_SSA, /* Serial Storage Architecture */
292 XPORT_USB, /* Universal Serial Bus */
293 XPORT_PPB, /* Parallel Port Bus */
294 XPORT_ATA, /* AT Attachment */
295 XPORT_SAS, /* Serial Attached SCSI */
296 XPORT_SATA, /* Serial AT Attachment */
297 XPORT_ISCSI, /* iSCSI */
298 XPORT_SRP, /* SCSI RDMA Protocol */
299 XPORT_NVME, /* NVMe over PCIe */
300 XPORT_MMCSD, /* MMC, SD, SDIO card */
301} cam_xport;
302
303#define XPORT_IS_NVME(t) ((t) == XPORT_NVME)
304#define XPORT_IS_ATA(t) ((t) == XPORT_ATA || (t) == XPORT_SATA)
305#define XPORT_IS_SCSI(t) ((t) != XPORT_UNKNOWN && \
306 (t) != XPORT_UNSPECIFIED && \
307 !XPORT_IS_ATA(t) && !XPORT_IS_NVME(t))
308#define XPORT_DEVSTAT_TYPE(t) (XPORT_IS_ATA(t) ? DEVSTAT_TYPE_IF_IDE : \
309 XPORT_IS_SCSI(t) ? DEVSTAT_TYPE_IF_SCSI : \
310 DEVSTAT_TYPE_IF_OTHER)
311
312#define PROTO_VERSION_UNKNOWN (UINT_MAX - 1)
313#define PROTO_VERSION_UNSPECIFIED UINT_MAX
314#define XPORT_VERSION_UNKNOWN (UINT_MAX - 1)
315#define XPORT_VERSION_UNSPECIFIED UINT_MAX
316
317typedef union {
318 LIST_ENTRY(ccb_hdr) le;
319 SLIST_ENTRY(ccb_hdr) sle;
320 TAILQ_ENTRY(ccb_hdr) tqe;
321 STAILQ_ENTRY(ccb_hdr) stqe;
322} camq_entry;
323
324typedef union {
325 void *ptr;
326 u_long field;
327 uint8_t bytes[sizeof(uintptr_t)];
328} ccb_priv_entry;
329
330typedef union {
331 ccb_priv_entry entries[CCB_PERIPH_PRIV_SIZE];
332 uint8_t bytes[CCB_PERIPH_PRIV_SIZE * sizeof(ccb_priv_entry)];
333} ccb_ppriv_area;
334
335typedef union {
336 ccb_priv_entry entries[CCB_SIM_PRIV_SIZE];
337 uint8_t bytes[CCB_SIM_PRIV_SIZE * sizeof(ccb_priv_entry)];
338} ccb_spriv_area;
339
340typedef struct {
341 struct timeval *etime;
342 uintptr_t sim_data;
343 uintptr_t periph_data;
344} ccb_qos_area;
345
346struct ccb_hdr {
347 cam_pinfo pinfo; /* Info for priority scheduling */
348 camq_entry xpt_links; /* For chaining in the XPT layer */
349 camq_entry sim_links; /* For chaining in the SIM layer */
350 camq_entry periph_links; /* For chaining in the type driver */
351#if BYTE_ORDER == LITTLE_ENDIAN
352 uint16_t retry_count;
353 uint16_t alloc_flags; /* ccb_alloc_flags */
354#else
355 uint16_t alloc_flags; /* ccb_alloc_flags */
356 uint16_t retry_count;
357#endif
358 void (*cbfcnp)(struct cam_periph *, union ccb *);
359 /* Callback on completion function */
360 xpt_opcode func_code; /* XPT function code */
361 uint32_t status; /* Status returned by CAM subsystem */
362 struct cam_path *path; /* Compiled path for this ccb */
363 path_id_t path_id; /* Path ID for the request */
364 target_id_t target_id; /* Target device ID */
365 lun_id_t target_lun; /* Target LUN number */
366 uint32_t flags; /* ccb_flags */
367 uint32_t xflags; /* Extended flags */
368 ccb_ppriv_area periph_priv;
369 ccb_spriv_area sim_priv;
370 ccb_qos_area qos;
371 uint32_t timeout; /* Hard timeout value in mseconds */
372 struct timeval softtimeout; /* Soft timeout value in sec + usec */
373};
374
375/* Get Device Information CCB */
376struct ccb_getdev {
377 struct ccb_hdr ccb_h;
378 cam_proto protocol;
379 struct scsi_inquiry_data inq_data;
380 struct ata_params ident_data;
381 uint8_t serial_num[252];
382 uint8_t inq_flags;
383 uint8_t serial_num_len;
384 void *padding[2];
385};
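
/*
 * A minimal, illustrative sketch (not a definitive recipe) of issuing an
 * XPT_GDEV_TYPE request from a peripheral driver, assuming the
 * xpt_setup_ccb()/xpt_action() interfaces declared in cam_xpt.h.  "periph"
 * is a hypothetical struct cam_periph pointer.
 *
 *	struct ccb_getdev cgd;
 *
 *	memset(&cgd, 0, sizeof(cgd));
 *	xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
 *	cgd.ccb_h.func_code = XPT_GDEV_TYPE;
 *	xpt_action((union ccb *)&cgd);
 *	if (cam_ccb_status((union ccb *)&cgd) == CAM_REQ_CMP)
 *		examine cgd.protocol, cgd.inq_data or cgd.ident_data
 */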
386
387/* Device Statistics CCB */
388struct ccb_getdevstats {
389 struct ccb_hdr ccb_h;
390 int dev_openings; /* Space left for more work on device*/
391 int dev_active; /* Transactions running on the device */
392 int allocated; /* CCBs allocated for the device */
393 int queued; /* CCBs queued to be sent to the device */
394 int held; /*
395 * CCBs held by peripheral drivers
396 * for this device
397 */
398 int maxtags; /*
399 * Boundary conditions for number of
400 * tagged operations
401 */
402 int mintags;
403 struct timeval last_reset; /* Time of last bus reset/loop init */
404};
405
406typedef enum {
407 CAM_GDEVLIST_LAST_DEVICE,
408 CAM_GDEVLIST_LIST_CHANGED,
409 CAM_GDEVLIST_MORE_DEVS,
410 CAM_GDEVLIST_ERROR
411} ccb_getdevlist_status_e;
412
413struct ccb_getdevlist {
414 struct ccb_hdr ccb_h;
415 char periph_name[DEV_IDLEN];
416 uint32_t unit_number;
417 unsigned int generation;
418 uint32_t index;
419 ccb_getdevlist_status_e status;
420};
421
422typedef enum {
423 PERIPH_MATCH_ANY = 0x000,
424 PERIPH_MATCH_PATH = 0x001,
425 PERIPH_MATCH_TARGET = 0x002,
426 PERIPH_MATCH_LUN = 0x004,
427 PERIPH_MATCH_NAME = 0x008,
428 PERIPH_MATCH_UNIT = 0x010,
429} periph_pattern_flags;
430
431struct periph_match_pattern {
432 char periph_name[DEV_IDLEN];
433 uint32_t unit_number;
434 path_id_t path_id;
435 target_id_t target_id;
436 lun_id_t target_lun;
437 periph_pattern_flags flags;
438};
439
440typedef enum {
441 DEV_MATCH_ANY = 0x000,
442 DEV_MATCH_PATH = 0x001,
443 DEV_MATCH_TARGET = 0x002,
444 DEV_MATCH_LUN = 0x004,
445 DEV_MATCH_INQUIRY = 0x008,
446 DEV_MATCH_DEVID = 0x010,
447} dev_pattern_flags;
448
449struct device_id_match_pattern {
450 uint8_t id_len;
451 uint8_t id[256];
452};
453
454struct device_match_pattern {
455 path_id_t path_id;
456 target_id_t target_id;
457 lun_id_t target_lun;
458 dev_pattern_flags flags;
459 union {
460 struct scsi_static_inquiry_pattern inq_pat;
461 struct device_id_match_pattern devid_pat;
462 } data;
463};
464
465typedef enum {
466 BUS_MATCH_ANY = 0x000,
467 BUS_MATCH_PATH = 0x001,
468 BUS_MATCH_NAME = 0x002,
469 BUS_MATCH_UNIT = 0x004,
470 BUS_MATCH_BUS_ID = 0x008,
471} bus_pattern_flags;
472
473struct bus_match_pattern {
474 path_id_t path_id;
475 char dev_name[DEV_IDLEN];
476 uint32_t unit_number;
477 uint32_t bus_id;
478 bus_pattern_flags flags;
479};
480
481union match_pattern {
482 struct periph_match_pattern periph_pattern;
483 struct device_match_pattern device_pattern;
484 struct bus_match_pattern bus_pattern;
485};
486
487typedef enum {
488 DEV_MATCH_PERIPH,
489 DEV_MATCH_DEVICE,
490 DEV_MATCH_BUS
491} dev_match_type;
492
493struct dev_match_pattern {
494 dev_match_type type;
495 union match_pattern pattern;
496};
497
498struct periph_match_result {
499 char periph_name[DEV_IDLEN];
500 uint32_t unit_number;
501 path_id_t path_id;
502 target_id_t target_id;
503 lun_id_t target_lun;
504};
505
506typedef enum {
507 DEV_RESULT_NOFLAG = 0x00,
508 DEV_RESULT_UNCONFIGURED = 0x01
509} dev_result_flags;
510
511struct device_match_result {
512 path_id_t path_id;
513 target_id_t target_id;
514 lun_id_t target_lun;
515 cam_proto protocol;
516 struct scsi_inquiry_data inq_data;
517 struct ata_params ident_data;
518 dev_result_flags flags;
519};
520
521struct bus_match_result {
522 path_id_t path_id;
523 char dev_name[DEV_IDLEN];
524 uint32_t unit_number;
525 uint32_t bus_id;
526};
527
528union match_result {
529 struct periph_match_result periph_result;
530 struct device_match_result device_result;
531 struct bus_match_result bus_result;
532};
533
534struct dev_match_result {
535 dev_match_type type;
536 union match_result result;
537};
538
539typedef enum {
540 CAM_DEV_MATCH_LAST,
541 CAM_DEV_MATCH_MORE,
542 CAM_DEV_MATCH_LIST_CHANGED,
543 CAM_DEV_MATCH_SIZE_ERROR,
544 CAM_DEV_MATCH_ERROR
545} ccb_dev_match_status;
546
547typedef enum {
548 CAM_DEV_POS_NONE = 0x000,
549 CAM_DEV_POS_BUS = 0x001,
550 CAM_DEV_POS_TARGET = 0x002,
551 CAM_DEV_POS_DEVICE = 0x004,
552 CAM_DEV_POS_PERIPH = 0x008,
553 CAM_DEV_POS_PDPTR = 0x010,
554 CAM_DEV_POS_TYPEMASK = 0xf00,
555 CAM_DEV_POS_EDT = 0x100,
556 CAM_DEV_POS_PDRV = 0x200
557} dev_pos_type;
558
559struct ccb_dm_cookie {
560 void *bus;
561 void *target;
562 void *device;
563 void *periph;
564 void *pdrv;
565};
566
567struct ccb_dev_position {
568 u_int generations[4];
569#define CAM_BUS_GENERATION 0x00
570#define CAM_TARGET_GENERATION 0x01
571#define CAM_DEV_GENERATION 0x02
572#define CAM_PERIPH_GENERATION 0x03
573 dev_pos_type position_type;
574 struct ccb_dm_cookie cookie;
575};
576
577struct ccb_dev_match {
578 struct ccb_hdr ccb_h;
579 ccb_dev_match_status status;
580 uint32_t num_patterns;
581 uint32_t pattern_buf_len;
582 struct dev_match_pattern *patterns;
583 uint32_t num_matches;
584 uint32_t match_buf_len;
585 struct dev_match_result *matches;
586 struct ccb_dev_position pos;
587};
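
/*
 * An illustrative sketch of an XPT_DEV_MATCH request as a userland tool
 * such as camcontrol(8) might build it; the CCB is submitted through the
 * CAMIOCOMMAND ioctl on the xpt(4) device.  With no patterns supplied,
 * every bus, device and peripheral is returned.  Buffer sizes here are
 * arbitrary.
 *
 *	struct ccb_dev_match cdm;
 *	struct dev_match_result matches[64];
 *
 *	memset(&cdm, 0, sizeof(cdm));
 *	cdm.ccb_h.func_code = XPT_DEV_MATCH;
 *	cdm.ccb_h.path_id = CAM_XPT_PATH_ID;
 *	cdm.ccb_h.target_id = CAM_TARGET_WILDCARD;
 *	cdm.ccb_h.target_lun = CAM_LUN_WILDCARD;
 *	cdm.num_patterns = 0;
 *	cdm.match_buf_len = sizeof(matches);
 *	cdm.matches = matches;
 *	cdm.num_matches = 0;
 *	submit with ioctl(fd, CAMIOCOMMAND, &cdm), walk cdm.matches, and
 *	repeat while cdm.status == CAM_DEV_MATCH_MORE (see xpt(4)).
 */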
588
589/*
590 * Definitions for the path inquiry CCB fields.
591 */
592#define CAM_VERSION 0x1a /* Hex value for current version */
593
594typedef enum {
595 PI_MDP_ABLE = 0x80, /* Supports MDP message */
596 PI_WIDE_32 = 0x40, /* Supports 32 bit wide SCSI */
597 PI_WIDE_16 = 0x20, /* Supports 16 bit wide SCSI */
598 PI_SDTR_ABLE = 0x10, /* Supports SDTR message */
599 PI_LINKED_CDB = 0x08, /* Supports linked CDBs */
600 PI_SATAPM = 0x04, /* Supports SATA PM */
601 PI_TAG_ABLE = 0x02, /* Supports tag queue messages */
602 PI_SOFT_RST = 0x01 /* Supports soft reset alternative */
603} pi_inqflag;
604
605typedef enum {
606 PIT_PROCESSOR = 0x80, /* Target mode processor mode */
607 PIT_PHASE = 0x40, /* Target mode phase cog. mode */
608 PIT_DISCONNECT = 0x20, /* Disconnects supported in target mode */
609 PIT_TERM_IO = 0x10, /* Terminate I/O message supported in TM */
610 PIT_GRP_6 = 0x08, /* Group 6 commands supported */
611 PIT_GRP_7 = 0x04 /* Group 7 commands supported */
612} pi_tmflag;
613
614typedef enum {
615 PIM_ATA_EXT = 0x200,/* ATA requests can understand ata_ext requests */
616 PIM_EXTLUNS = 0x100,/* 64bit extended LUNs supported */
617 PIM_SCANHILO = 0x80, /* Bus scans from high ID to low ID */
	PIM_NOREMOVE	= 0x40,	/* Removable devices not included in scan */
619 PIM_NOINITIATOR = 0x20, /* Initiator role not supported. */
620 PIM_NOBUSRESET = 0x10, /* User has disabled initial BUS RESET */
621 PIM_NO_6_BYTE = 0x08, /* Do not send 6-byte commands */
622 PIM_SEQSCAN = 0x04, /* Do bus scans sequentially, not in parallel */
623 PIM_UNMAPPED = 0x02,
624 PIM_NOSCAN = 0x01 /* SIM does its own scanning */
625} pi_miscflag;
626
627/* Path Inquiry CCB */
628struct ccb_pathinq_settings_spi {
629 uint8_t ppr_options;
630};
631
632struct ccb_pathinq_settings_fc {
633 uint64_t wwnn; /* world wide node name */
634 uint64_t wwpn; /* world wide port name */
635 uint32_t port; /* 24 bit port id, if known */
636 uint32_t bitrate; /* Mbps */
637};
638
639struct ccb_pathinq_settings_sas {
640 uint32_t bitrate; /* Mbps */
641};
642
643#define NVME_DEV_NAME_LEN 52
644struct ccb_pathinq_settings_nvme {
645 uint32_t nsid; /* Namespace ID for this path */
646 uint32_t domain;
647 uint8_t bus;
648 uint8_t slot;
649 uint8_t function;
650 uint8_t extra;
651 char dev_name[NVME_DEV_NAME_LEN]; /* nvme controller dev name for this device */
652};
653_Static_assert(sizeof(struct ccb_pathinq_settings_nvme) == 64,
654 "ccb_pathinq_settings_nvme too big");
655
656#define PATHINQ_SETTINGS_SIZE 128
657
658struct ccb_pathinq {
659 struct ccb_hdr ccb_h;
660 uint8_t version_num; /* Version number for the SIM/HBA */
661 uint8_t hba_inquiry; /* Mimic of INQ byte 7 for the HBA */
662 uint16_t target_sprt; /* Flags for target mode support */
663 uint32_t hba_misc; /* Misc HBA features */
664 uint16_t hba_eng_cnt; /* HBA engine count */
665 /* Vendor Unique capabilities */
666 uint8_t vuhba_flags[VUHBALEN];
667 uint32_t max_target; /* Maximum supported Target */
668 uint32_t max_lun; /* Maximum supported Lun */
669 uint32_t async_flags; /* Installed Async handlers */
670 path_id_t hpath_id; /* Highest Path ID in the subsystem */
671 target_id_t initiator_id; /* ID of the HBA on the SCSI bus */
672 char sim_vid[SIM_IDLEN]; /* Vendor ID of the SIM */
673 char hba_vid[HBA_IDLEN]; /* Vendor ID of the HBA */
674 char dev_name[DEV_IDLEN];/* Device name for SIM */
675 uint32_t unit_number; /* Unit number for SIM */
676 uint32_t bus_id; /* Bus ID for SIM */
677 uint32_t base_transfer_speed;/* Base bus speed in KB/sec */
678 cam_proto protocol;
679 u_int protocol_version;
680 cam_xport transport;
681 u_int transport_version;
682 union {
683 struct ccb_pathinq_settings_spi spi;
684 struct ccb_pathinq_settings_fc fc;
685 struct ccb_pathinq_settings_sas sas;
686 struct ccb_pathinq_settings_nvme nvme;
687 char ccb_pathinq_settings_opaque[PATHINQ_SETTINGS_SIZE];
688 } xport_specific;
689 u_int maxio; /* Max supported I/O size, in bytes. */
690 uint16_t hba_vendor; /* HBA vendor ID */
691 uint16_t hba_device; /* HBA device ID */
692 uint16_t hba_subvendor; /* HBA subvendor ID */
693 uint16_t hba_subdevice; /* HBA subdevice ID */
694};
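
/*
 * XPT_PATH_INQ is an immediate request; a driver typically fills in only
 * the header and reads the results straight out of the CCB.  A hedged
 * sketch, where "path" is a hypothetical struct cam_path pointer:
 *
 *	struct ccb_pathinq cpi;
 *
 *	xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL);
 *	cpi.ccb_h.func_code = XPT_PATH_INQ;
 *	xpt_action((union ccb *)&cpi);
 *	if (cam_ccb_status((union ccb *)&cpi) == CAM_REQ_CMP)
 *		use cpi.maxio, cpi.hba_misc, cpi.transport, ...
 */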
695
696/* Path Statistics CCB */
697struct ccb_pathstats {
698 struct ccb_hdr ccb_h;
699 struct timeval last_reset; /* Time of last bus reset/loop init */
700};
701
702typedef enum {
703 SMP_FLAG_NONE = 0x00,
704 SMP_FLAG_REQ_SG = 0x01,
705 SMP_FLAG_RSP_SG = 0x02
706} ccb_smp_pass_flags;
707
708/*
709 * Serial Management Protocol CCB
710 * XXX Currently the semantics for this CCB are that it is executed either
711 * by the addressed device, or that device's parent (i.e. an expander for
712 * any device on an expander) if the addressed device doesn't support SMP.
713 * Later, once we have the ability to probe SMP-only devices and put them
714 * in CAM's topology, the CCB will only be executed by the addressed device
715 * if possible.
716 */
717struct ccb_smpio {
718 struct ccb_hdr ccb_h;
719 uint8_t *smp_request;
720 int smp_request_len;
721 uint16_t smp_request_sglist_cnt;
722 uint8_t *smp_response;
723 int smp_response_len;
724 uint16_t smp_response_sglist_cnt;
725 ccb_smp_pass_flags flags;
726};
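
/*
 * An illustrative sketch of filling this CCB with the cam_fill_smpio()
 * helper defined later in this file.  The direction must be CAM_DIR_BOTH,
 * since both a request and a response frame are transferred; "smpdone",
 * "request" and "response" are hypothetical names.
 *
 *	cam_fill_smpio(&ccb->smpio, 1, smpdone, CAM_DIR_BOTH,
 *	    request, request_len, response, response_len, 5 * 1000);
 *	ccb->smpio.flags = SMP_FLAG_NONE;
 *	xpt_action(ccb);
 */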
727
728typedef union {
729 uint8_t *sense_ptr; /*
730 * Pointer to storage
731 * for sense information
732 */
733 /* Storage Area for sense information */
734 struct scsi_sense_data sense_buf;
735} sense_t;
736
737typedef union {
738 uint8_t *cdb_ptr; /* Pointer to the CDB bytes to send */
739 /* Area for the CDB send */
740 uint8_t cdb_bytes[IOCDBLEN];
741} cdb_t;
742
743/*
744 * SCSI I/O Request CCB used for the XPT_SCSI_IO and XPT_CONT_TARGET_IO
745 * function codes.
746 */
747struct ccb_scsiio {
748 struct ccb_hdr ccb_h;
749 union ccb *next_ccb; /* Ptr for next CCB for action */
750 uint8_t *req_map; /* Ptr to mapping info */
751 uint8_t *data_ptr; /* Ptr to the data buf/SG list */
752 uint32_t dxfer_len; /* Data transfer length */
753 /* Autosense storage */
754 struct scsi_sense_data sense_data;
755 uint8_t sense_len; /* Number of bytes to autosense */
756 uint8_t cdb_len; /* Number of bytes for the CDB */
757 uint16_t sglist_cnt; /* Number of SG list entries */
758 uint8_t scsi_status; /* Returned SCSI status */
759 uint8_t sense_resid; /* Autosense resid length: 2's comp */
760 uint32_t resid; /* Transfer residual length: 2's comp */
761 cdb_t cdb_io; /* Union for CDB bytes/pointer */
762 uint8_t *msg_ptr; /* Pointer to the message buffer */
763 uint16_t msg_len; /* Number of bytes for the Message */
764 uint8_t tag_action; /* What to do for tag queueing */
765 /*
766 * The tag action should be either the define below (to send a
767 * non-tagged transaction) or one of the defined scsi tag messages
768 * from scsi_message.h.
769 */
770#define CAM_TAG_ACTION_NONE 0x00
771 uint8_t priority; /* Command priority for SIMPLE tag */
	u_int	   tag_id;		/* tag id from initiator (target mode) */
773 u_int init_id; /* initiator id of who selected */
774#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
775 struct bio *bio; /* Associated bio */
776#endif
777};
778
779static __inline uint8_t *
780scsiio_cdb_ptr(struct ccb_scsiio *ccb)
781{
782 return ((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
783 ccb->cdb_io.cdb_ptr : ccb->cdb_io.cdb_bytes);
784}
785
786/*
787 * ATA I/O Request CCB used for the XPT_ATA_IO function code.
788 */
789struct ccb_ataio {
790 struct ccb_hdr ccb_h;
791 union ccb *next_ccb; /* Ptr for next CCB for action */
792 struct ata_cmd cmd; /* ATA command register set */
793 struct ata_res res; /* ATA result register set */
794 uint8_t *data_ptr; /* Ptr to the data buf/SG list */
795 uint32_t dxfer_len; /* Data transfer length */
796 uint32_t resid; /* Transfer residual length: 2's comp */
797 uint8_t ata_flags; /* Flags for the rest of the buffer */
798#define ATA_FLAG_AUX 0x1
799#define ATA_FLAG_ICC 0x2
800 uint8_t icc; /* Isochronous Command Completion */
801 uint32_t aux;
802 uint32_t unused;
803};
804
805/*
806 * MMC I/O Request CCB used for the XPT_MMC_IO function code.
807 */
808struct ccb_mmcio {
809 struct ccb_hdr ccb_h;
810 union ccb *next_ccb; /* Ptr for next CCB for action */
811 struct mmc_command cmd;
812 struct mmc_command stop;
813};
814
815struct ccb_accept_tio {
816 struct ccb_hdr ccb_h;
817 cdb_t cdb_io; /* Union for CDB bytes/pointer */
818 uint8_t cdb_len; /* Number of bytes for the CDB */
819 uint8_t tag_action; /* What to do for tag queueing */
820 uint8_t sense_len; /* Number of bytes of Sense Data */
821 uint8_t priority; /* Command priority for SIMPLE tag */
	u_int	   tag_id;		/* tag id from initiator (target mode) */
823 u_int init_id; /* initiator id of who selected */
824 struct scsi_sense_data sense_data;
825};
826
827static __inline uint8_t *
828atio_cdb_ptr(struct ccb_accept_tio *ccb)
829{
830 return ((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
831 ccb->cdb_io.cdb_ptr : ccb->cdb_io.cdb_bytes);
832}
833
834/* Release SIM Queue */
835struct ccb_relsim {
836 struct ccb_hdr ccb_h;
837 uint32_t release_flags;
838#define RELSIM_ADJUST_OPENINGS 0x01
839#define RELSIM_RELEASE_AFTER_TIMEOUT 0x02
840#define RELSIM_RELEASE_AFTER_CMDCMPLT 0x04
841#define RELSIM_RELEASE_AFTER_QEMPTY 0x08
842 uint32_t openings;
843 uint32_t release_timeout; /* Abstract argument. */
844 uint32_t qfrozen_cnt;
845};
846
847/*
848 * NVMe I/O Request CCB used for the XPT_NVME_IO and XPT_NVME_ADMIN function codes.
849 */
850struct ccb_nvmeio {
851 struct ccb_hdr ccb_h;
852 union ccb *next_ccb; /* Ptr for next CCB for action */
853 struct nvme_command cmd; /* NVME command, per NVME standard */
854 struct nvme_completion cpl; /* NVME completion, per NVME standard */
855 uint8_t *data_ptr; /* Ptr to the data buf/SG list */
856 uint32_t dxfer_len; /* Data transfer length */
857 uint16_t sglist_cnt; /* Number of SG list entries */
858 uint16_t unused; /* padding for removed uint32_t */
859};
860
861/*
862 * Definitions for the asynchronous callback CCB fields.
863 */
864typedef enum {
865 AC_UNIT_ATTENTION = 0x4000,/* Device reported UNIT ATTENTION */
	AC_ADVINFO_CHANGED	= 0x2000,/* Advanced info might have changed */
867 AC_CONTRACT = 0x1000,/* A contractual callback */
868 AC_GETDEV_CHANGED = 0x800,/* Getdev info might have changed */
869 AC_INQ_CHANGED = 0x400,/* Inquiry info might have changed */
870 AC_TRANSFER_NEG = 0x200,/* New transfer settings in effect */
871 AC_LOST_DEVICE = 0x100,/* A device went away */
872 AC_FOUND_DEVICE = 0x080,/* A new device was found */
873 AC_PATH_DEREGISTERED = 0x040,/* A path has de-registered */
874 AC_PATH_REGISTERED = 0x020,/* A new path has been registered */
875 AC_SENT_BDR = 0x010,/* A BDR message was sent to target */
876 AC_SCSI_AEN = 0x008,/* A SCSI AEN has been received */
877 AC_UNSOL_RESEL = 0x002,/* Unsolicited reselection occurred */
878 AC_BUS_RESET = 0x001 /* A SCSI bus reset occurred */
879} ac_code;
880
881typedef void ac_callback_t (void *softc, uint32_t code,
882 struct cam_path *path, void *args);
883
884/*
885 * Generic Asynchronous callbacks.
886 *
 * Generic arguments are passed back and are then interpreted according to a
 * per-system contract number.
889 */
890#define AC_CONTRACT_DATA_MAX (128 - sizeof (uint64_t))
891struct ac_contract {
892 uint64_t contract_number;
893 uint8_t contract_data[AC_CONTRACT_DATA_MAX];
894};
895
896#define AC_CONTRACT_DEV_CHG 1
897struct ac_device_changed {
898 uint64_t wwpn;
899 uint32_t port;
900 target_id_t target;
901 uint8_t arrived;
902};
903
904/* Set Asynchronous Callback CCB */
905struct ccb_setasync {
906 struct ccb_hdr ccb_h;
907 uint32_t event_enable; /* Async Event enables */
908 ac_callback_t *callback;
909 void *callback_arg;
910};
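
/*
 * Kernel consumers normally register for async events through
 * xpt_register_async(), which builds and dispatches this CCB on their
 * behalf.  The underlying request looks roughly like the sketch below;
 * "my_async_cb", "softc" and "path" are hypothetical.
 *
 *	struct ccb_setasync csa;
 *
 *	xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
 *	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
 *	csa.callback = my_async_cb;
 *	csa.callback_arg = softc;
 *	xpt_action((union ccb *)&csa);
 */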
911
912/* Set Device Type CCB */
913struct ccb_setdev {
914 struct ccb_hdr ccb_h;
915 uint8_t dev_type; /* Value for dev type field in EDT */
916};
917
918/* SCSI Control Functions */
919
920/* Abort XPT request CCB */
921struct ccb_abort {
922 struct ccb_hdr ccb_h;
923 union ccb *abort_ccb; /* Pointer to CCB to abort */
924};
925
926/* Reset SCSI Bus CCB */
927struct ccb_resetbus {
928 struct ccb_hdr ccb_h;
929};
930
931/* Reset SCSI Device CCB */
932struct ccb_resetdev {
933 struct ccb_hdr ccb_h;
934};
935
936/* Terminate I/O Process Request CCB */
937struct ccb_termio {
938 struct ccb_hdr ccb_h;
939 union ccb *termio_ccb; /* Pointer to CCB to terminate */
940};
941
942typedef enum {
943 CTS_TYPE_CURRENT_SETTINGS,
944 CTS_TYPE_USER_SETTINGS
945} cts_type;
946
947struct ccb_trans_settings_scsi
948{
949 u_int valid; /* Which fields to honor */
950#define CTS_SCSI_VALID_TQ 0x01
951 u_int flags;
952#define CTS_SCSI_FLAGS_TAG_ENB 0x01
953};
954
955struct ccb_trans_settings_ata
956{
957 u_int valid; /* Which fields to honor */
958#define CTS_ATA_VALID_TQ 0x01
959 u_int flags;
960#define CTS_ATA_FLAGS_TAG_ENB 0x01
961};
962
963struct ccb_trans_settings_spi
964{
965 u_int valid; /* Which fields to honor */
966#define CTS_SPI_VALID_SYNC_RATE 0x01
967#define CTS_SPI_VALID_SYNC_OFFSET 0x02
968#define CTS_SPI_VALID_BUS_WIDTH 0x04
969#define CTS_SPI_VALID_DISC 0x08
970#define CTS_SPI_VALID_PPR_OPTIONS 0x10
971 u_int flags;
972#define CTS_SPI_FLAGS_DISC_ENB 0x01
973 u_int sync_period;
974 u_int sync_offset;
975 u_int bus_width;
976 u_int ppr_options;
977};
978
979struct ccb_trans_settings_fc {
980 u_int valid; /* Which fields to honor */
981#define CTS_FC_VALID_WWNN 0x8000
982#define CTS_FC_VALID_WWPN 0x4000
983#define CTS_FC_VALID_PORT 0x2000
984#define CTS_FC_VALID_SPEED 0x1000
985 uint64_t wwnn; /* world wide node name */
986 uint64_t wwpn; /* world wide port name */
987 uint32_t port; /* 24 bit port id, if known */
988 uint32_t bitrate; /* Mbps */
989};
990
991struct ccb_trans_settings_sas {
992 u_int valid; /* Which fields to honor */
993#define CTS_SAS_VALID_SPEED 0x1000
994 uint32_t bitrate; /* Mbps */
995};
996
997struct ccb_trans_settings_pata {
998 u_int valid; /* Which fields to honor */
999#define CTS_ATA_VALID_MODE 0x01
1000#define CTS_ATA_VALID_BYTECOUNT 0x02
1001#define CTS_ATA_VALID_ATAPI 0x20
1002#define CTS_ATA_VALID_CAPS 0x40
1003 int mode; /* Mode */
1004 u_int bytecount; /* Length of PIO transaction */
1005 u_int atapi; /* Length of ATAPI CDB */
1006 u_int caps; /* Device and host SATA caps. */
1007#define CTS_ATA_CAPS_H 0x0000ffff
1008#define CTS_ATA_CAPS_H_DMA48 0x00000001 /* 48-bit DMA */
1009#define CTS_ATA_CAPS_D 0xffff0000
1010};
1011
1012struct ccb_trans_settings_sata {
1013 u_int valid; /* Which fields to honor */
1014#define CTS_SATA_VALID_MODE 0x01
1015#define CTS_SATA_VALID_BYTECOUNT 0x02
1016#define CTS_SATA_VALID_REVISION 0x04
1017#define CTS_SATA_VALID_PM 0x08
1018#define CTS_SATA_VALID_TAGS 0x10
1019#define CTS_SATA_VALID_ATAPI 0x20
1020#define CTS_SATA_VALID_CAPS 0x40
1021 int mode; /* Legacy PATA mode */
1022 u_int bytecount; /* Length of PIO transaction */
1023 int revision; /* SATA revision */
1024 u_int pm_present; /* PM is present (XPT->SIM) */
1025 u_int tags; /* Number of allowed tags */
1026 u_int atapi; /* Length of ATAPI CDB */
1027 u_int caps; /* Device and host SATA caps. */
1028#define CTS_SATA_CAPS_H 0x0000ffff
1029#define CTS_SATA_CAPS_H_PMREQ 0x00000001
1030#define CTS_SATA_CAPS_H_APST 0x00000002
1031#define CTS_SATA_CAPS_H_DMAAA 0x00000010 /* Auto-activation */
1032#define CTS_SATA_CAPS_H_AN 0x00000020 /* Async. notification */
1033#define CTS_SATA_CAPS_D 0xffff0000
1034#define CTS_SATA_CAPS_D_PMREQ 0x00010000
1035#define CTS_SATA_CAPS_D_APST 0x00020000
1036};
1037
1038struct ccb_trans_settings_nvme
1039{
1040 u_int valid; /* Which fields to honor */
1041#define CTS_NVME_VALID_SPEC 0x01
1042#define CTS_NVME_VALID_CAPS 0x02
1043#define CTS_NVME_VALID_LINK 0x04
1044 uint32_t spec; /* NVMe spec implemented -- same as vs register */
	uint32_t	max_xfer;	/* Max transfer size (0 -> unlimited) */
1046 uint32_t caps;
1047 uint8_t lanes; /* Number of PCIe lanes */
1048 uint8_t speed; /* PCIe generation for each lane */
	uint8_t		max_lanes;	/* Maximum number of PCIe lanes */
	uint8_t		max_speed;	/* Maximum PCIe generation per lane */
1051};
1052
1053#include <cam/mmc/mmc_bus.h>
1054struct ccb_trans_settings_mmc {
1055 struct mmc_ios ios;
1056#define MMC_CLK (1 << 1)
1057#define MMC_VDD (1 << 2)
1058#define MMC_CS (1 << 3)
1059#define MMC_BW (1 << 4)
1060#define MMC_PM (1 << 5)
1061#define MMC_BT (1 << 6)
1062#define MMC_BM (1 << 7)
1063#define MMC_VCCQ (1 << 8)
1064 uint32_t ios_valid;
/* The following is used only for GET_TRAN_SETTINGS */
1066 uint32_t host_ocr;
1067 int host_f_min;
1068 int host_f_max;
1069/* Copied from sys/dev/mmc/bridge.h */
1070#define MMC_CAP_4_BIT_DATA (1 << 0) /* Can do 4-bit data transfers */
1071#define MMC_CAP_8_BIT_DATA (1 << 1) /* Can do 8-bit data transfers */
1072#define MMC_CAP_HSPEED (1 << 2) /* Can do High Speed transfers */
1073#define MMC_CAP_BOOT_NOACC (1 << 4) /* Cannot access boot partitions */
1074#define MMC_CAP_WAIT_WHILE_BUSY (1 << 5) /* Host waits for busy responses */
1075#define MMC_CAP_UHS_SDR12 (1 << 6) /* Can do UHS SDR12 */
1076#define MMC_CAP_UHS_SDR25 (1 << 7) /* Can do UHS SDR25 */
1077#define MMC_CAP_UHS_SDR50 (1 << 8) /* Can do UHS SDR50 */
1078#define MMC_CAP_UHS_SDR104 (1 << 9) /* Can do UHS SDR104 */
1079#define MMC_CAP_UHS_DDR50 (1 << 10) /* Can do UHS DDR50 */
1080#define MMC_CAP_MMC_DDR52_120 (1 << 11) /* Can do eMMC DDR52 at 1.2 V */
1081#define MMC_CAP_MMC_DDR52_180 (1 << 12) /* Can do eMMC DDR52 at 1.8 V */
1082#define MMC_CAP_MMC_DDR52 (MMC_CAP_MMC_DDR52_120 | MMC_CAP_MMC_DDR52_180)
1083#define MMC_CAP_MMC_HS200_120 (1 << 13) /* Can do eMMC HS200 at 1.2 V */
1084#define MMC_CAP_MMC_HS200_180 (1 << 14) /* Can do eMMC HS200 at 1.8 V */
1085#define MMC_CAP_MMC_HS200 (MMC_CAP_MMC_HS200_120| MMC_CAP_MMC_HS200_180)
1086#define MMC_CAP_MMC_HS400_120 (1 << 15) /* Can do eMMC HS400 at 1.2 V */
1087#define MMC_CAP_MMC_HS400_180 (1 << 16) /* Can do eMMC HS400 at 1.8 V */
1088#define MMC_CAP_MMC_HS400 (MMC_CAP_MMC_HS400_120 | MMC_CAP_MMC_HS400_180)
1089#define MMC_CAP_MMC_HSX00_120 (MMC_CAP_MMC_HS200_120 | MMC_CAP_MMC_HS400_120)
1090#define MMC_CAP_MMC_ENH_STROBE (1 << 17) /* Can do eMMC Enhanced Strobe */
1091#define MMC_CAP_SIGNALING_120 (1 << 18) /* Can do signaling at 1.2 V */
1092#define MMC_CAP_SIGNALING_180 (1 << 19) /* Can do signaling at 1.8 V */
1093#define MMC_CAP_SIGNALING_330 (1 << 20) /* Can do signaling at 3.3 V */
1094#define MMC_CAP_DRIVER_TYPE_A (1 << 21) /* Can do Driver Type A */
1095#define MMC_CAP_DRIVER_TYPE_C (1 << 22) /* Can do Driver Type C */
1096#define MMC_CAP_DRIVER_TYPE_D (1 << 23) /* Can do Driver Type D */
1097
1098 uint32_t host_caps;
1099 uint32_t host_max_data;
1100};
1101
1102/* Get/Set transfer rate/width/disconnection/tag queueing settings */
1103struct ccb_trans_settings {
1104 struct ccb_hdr ccb_h;
1105 cts_type type; /* Current or User settings */
1106 cam_proto protocol;
1107 u_int protocol_version;
1108 cam_xport transport;
1109 u_int transport_version;
1110 union {
1111 u_int valid; /* Which fields to honor */
1112 struct ccb_trans_settings_ata ata;
1113 struct ccb_trans_settings_scsi scsi;
1114 struct ccb_trans_settings_nvme nvme;
1115 struct ccb_trans_settings_mmc mmc;
1116 } proto_specific;
1117 union {
1118 u_int valid; /* Which fields to honor */
1119 struct ccb_trans_settings_spi spi;
1120 struct ccb_trans_settings_fc fc;
1121 struct ccb_trans_settings_sas sas;
1122 struct ccb_trans_settings_pata ata;
1123 struct ccb_trans_settings_sata sata;
1124 struct ccb_trans_settings_nvme nvme;
1125 } xport_specific;
1126};
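
/*
 * An illustrative sketch of reading the currently negotiated settings.
 * The per-protocol and per-transport unions are meaningful only for the
 * fields whose bits are set in the corresponding "valid" word; "path" is
 * hypothetical.
 *
 *	struct ccb_trans_settings cts;
 *
 *	memset(&cts, 0, sizeof(cts));
 *	xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NORMAL);
 *	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 *	cts.type = CTS_TYPE_CURRENT_SETTINGS;
 *	xpt_action((union ccb *)&cts);
 *	if (cam_ccb_status((union ccb *)&cts) == CAM_REQ_CMP &&
 *	    cts.transport == XPORT_SATA &&
 *	    (cts.xport_specific.sata.valid & CTS_SATA_VALID_REVISION) != 0)
 *		use cts.xport_specific.sata.revision
 */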
1127
1128/*
1129 * Calculate the geometry parameters for a device
 * given the block size and volume size in blocks.
1131 */
1132struct ccb_calc_geometry {
1133 struct ccb_hdr ccb_h;
1134 uint32_t block_size;
1135 uint64_t volume_size;
1136 uint32_t cylinders;
1137 uint8_t heads;
1138 uint8_t secs_per_track;
1139};
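
/*
 * A hedged sketch of the usual XPT_CALC_GEOMETRY flow: the caller supplies
 * block_size and volume_size, and the transport (or cam_calc_geometry(),
 * declared below) fills in the fictitious cylinders/heads/sectors values.
 * "path" and "sectors" are hypothetical.
 *
 *	struct ccb_calc_geometry ccg;
 *
 *	memset(&ccg, 0, sizeof(ccg));
 *	xpt_setup_ccb(&ccg.ccb_h, path, CAM_PRIORITY_NORMAL);
 *	ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
 *	ccg.block_size = 512;
 *	ccg.volume_size = sectors;
 *	xpt_action((union ccb *)&ccg);
 *	on success ccg.cylinders, ccg.heads and ccg.secs_per_track are valid
 */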
1140
1141/*
1142 * Set or get SIM (and transport) specific knobs
1143 */
1144
1145#define KNOB_VALID_ADDRESS 0x1
1146#define KNOB_VALID_ROLE 0x2
1147
1148#define KNOB_ROLE_NONE 0x0
1149#define KNOB_ROLE_INITIATOR 0x1
1150#define KNOB_ROLE_TARGET 0x2
1151#define KNOB_ROLE_BOTH 0x3
1152
1153struct ccb_sim_knob_settings_spi {
1154 u_int valid;
1155 u_int initiator_id;
1156 u_int role;
1157};
1158
1159struct ccb_sim_knob_settings_fc {
1160 u_int valid;
1161 uint64_t wwnn; /* world wide node name */
1162 uint64_t wwpn; /* world wide port name */
1163 u_int role;
1164};
1165
1166struct ccb_sim_knob_settings_sas {
1167 u_int valid;
1168 uint64_t wwnn; /* world wide node name */
1169 u_int role;
1170};
1171#define KNOB_SETTINGS_SIZE 128
1172
1173struct ccb_sim_knob {
1174 struct ccb_hdr ccb_h;
1175 union {
1176 u_int valid; /* Which fields to honor */
1177 struct ccb_sim_knob_settings_spi spi;
1178 struct ccb_sim_knob_settings_fc fc;
1179 struct ccb_sim_knob_settings_sas sas;
1180 char pad[KNOB_SETTINGS_SIZE];
1181 } xport_specific;
1182};
1183
1184/*
1185 * Rescan the given bus, or bus/target/lun
1186 */
1187struct ccb_rescan {
1188 struct ccb_hdr ccb_h;
1189 cam_flags flags;
1190};
1191
1192/*
1193 * Turn on debugging for the given bus, bus/target, or bus/target/lun.
1194 */
1195struct ccb_debug {
1196 struct ccb_hdr ccb_h;
1197 cam_debug_flags flags;
1198};
1199
1200/* Target mode structures. */
1201
1202struct ccb_en_lun {
1203 struct ccb_hdr ccb_h;
1204 uint16_t grp6_len; /* Group 6 VU CDB length */
1205 uint16_t grp7_len; /* Group 7 VU CDB length */
1206 uint8_t enable;
1207};
1208
1209/* old, barely used immediate notify, binary compatibility */
1210struct ccb_immed_notify {
1211 struct ccb_hdr ccb_h;
1212 struct scsi_sense_data sense_data;
1213 uint8_t sense_len; /* Number of bytes in sense buffer */
1214 uint8_t initiator_id; /* Id of initiator that selected */
1215 uint8_t message_args[7]; /* Message Arguments */
1216};
1217
1218struct ccb_notify_ack {
1219 struct ccb_hdr ccb_h;
1220 uint16_t seq_id; /* Sequence identifier */
1221 uint8_t event; /* Event flags */
1222};
1223
1224struct ccb_immediate_notify {
1225 struct ccb_hdr ccb_h;
1226 u_int tag_id; /* Tag for immediate notify */
1227 u_int seq_id; /* Tag for target of notify */
1228 u_int initiator_id; /* Initiator Identifier */
1229 u_int arg; /* Function specific */
1230};
1231
1232struct ccb_notify_acknowledge {
1233 struct ccb_hdr ccb_h;
1234 u_int tag_id; /* Tag for immediate notify */
	u_int	seq_id;		/* Tag for target of notify */
1236 u_int initiator_id; /* Initiator Identifier */
1237 u_int arg; /* Response information */
1238 /*
1239 * Lower byte of arg is one of RESPONSE CODE values defined below
1240 * (subset of response codes from SPL-4 and FCP-4 specifications),
1241 * upper 3 bytes is code-specific ADDITIONAL RESPONSE INFORMATION.
1242 */
1243#define CAM_RSP_TMF_COMPLETE 0x00
1244#define CAM_RSP_TMF_REJECTED 0x04
1245#define CAM_RSP_TMF_FAILED 0x05
1246#define CAM_RSP_TMF_SUCCEEDED 0x08
1247#define CAM_RSP_TMF_INCORRECT_LUN 0x09
1248};
1249
1250/* HBA engine structures. */
1251
1252typedef enum {
1253 EIT_BUFFER, /* Engine type: buffer memory */
1254 EIT_LOSSLESS, /* Engine type: lossless compression */
1255 EIT_LOSSY, /* Engine type: lossy compression */
1256 EIT_ENCRYPT /* Engine type: encryption */
1257} ei_type;
1258
1259typedef enum {
1260 EAD_VUNIQUE, /* Engine algorithm ID: vendor unique */
1261 EAD_LZ1V1, /* Engine algorithm ID: LZ1 var.1 */
1262 EAD_LZ2V1, /* Engine algorithm ID: LZ2 var.1 */
1263 EAD_LZ2V2 /* Engine algorithm ID: LZ2 var.2 */
1264} ei_algo;
1265
1266struct ccb_eng_inq {
1267 struct ccb_hdr ccb_h;
1268 uint16_t eng_num; /* The engine number for this inquiry */
1269 ei_type eng_type; /* Returned engine type */
1270 ei_algo eng_algo; /* Returned engine algorithm type */
1271 uint32_t eng_memeory; /* Returned engine memory size */
1272};
1273
1274struct ccb_eng_exec { /* This structure must match SCSIIO size */
1275 struct ccb_hdr ccb_h;
1276 uint8_t *pdrv_ptr; /* Ptr used by the peripheral driver */
1277 uint8_t *req_map; /* Ptr for mapping info on the req. */
1278 uint8_t *data_ptr; /* Pointer to the data buf/SG list */
1279 uint32_t dxfer_len; /* Data transfer length */
1280 uint8_t *engdata_ptr; /* Pointer to the engine buffer data */
1281 uint16_t sglist_cnt; /* Num of scatter gather list entries */
1282 uint32_t dmax_len; /* Destination data maximum length */
1283 uint32_t dest_len; /* Destination data length */
1284 int32_t src_resid; /* Source residual length: 2's comp */
1285 uint32_t timeout; /* Timeout value */
1286 uint16_t eng_num; /* Engine number for this request */
1287 uint16_t vu_flags; /* Vendor Unique flags */
1288};
1289
1290/*
1291 * Definitions for the timeout field in the SCSI I/O CCB.
1292 */
1293#define CAM_TIME_DEFAULT 0x00000000 /* Use SIM default value */
1294#define CAM_TIME_INFINITY 0xFFFFFFFF /* Infinite timeout */
1295
1296#define CAM_SUCCESS 0 /* For signaling general success */
1297
1298#define XPT_CCB_INVALID -1 /* for signaling a bad CCB to free */
1299
1300/*
1301 * CCB for working with advanced device information. This operates in a fashion
1302 * similar to XPT_GDEV_TYPE. Specify the target in ccb_h, the buffer
1303 * type requested, and provide a buffer size/buffer to write to. If the
1304 * buffer is too small, provsiz will be larger than bufsiz.
1305 */
1306struct ccb_dev_advinfo {
1307 struct ccb_hdr ccb_h;
1308 uint32_t flags;
1309#define CDAI_FLAG_NONE 0x0 /* No flags set */
1310#define CDAI_FLAG_STORE 0x1 /* If set, action becomes store */
1311 uint32_t buftype; /* IN: Type of data being requested */
1312 /* NB: buftype is interpreted on a per-transport basis */
1313#define CDAI_TYPE_SCSI_DEVID 1
1314#define CDAI_TYPE_SERIAL_NUM 2
1315#define CDAI_TYPE_PHYS_PATH 3
1316#define CDAI_TYPE_RCAPLONG 4
1317#define CDAI_TYPE_EXT_INQ 5
1318#define CDAI_TYPE_NVME_CNTRL 6 /* NVMe Identify Controller data */
1319#define CDAI_TYPE_NVME_NS 7 /* NVMe Identify Namespace data */
1320#define CDAI_TYPE_MMC_PARAMS 8 /* MMC/SD ident */
1321 off_t bufsiz; /* IN: Size of external buffer */
1322#define CAM_SCSI_DEVID_MAXLEN 65536 /* length in buffer is an uint16_t */
1323 off_t provsiz; /* OUT: Size required/used */
1324 uint8_t *buf; /* IN/OUT: Buffer for requested data */
1325};
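
/*
 * An illustrative sketch of fetching the serial number string through
 * XPT_DEV_ADVINFO; the same pattern, with CDAI_FLAG_STORE set, is used to
 * store attributes such as the physical path.  "path" is hypothetical.
 *
 *	struct ccb_dev_advinfo cdai;
 *	char serial[64];
 *
 *	memset(&cdai, 0, sizeof(cdai));
 *	xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
 *	cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
 *	cdai.flags = CDAI_FLAG_NONE;
 *	cdai.buftype = CDAI_TYPE_SERIAL_NUM;
 *	cdai.bufsiz = sizeof(serial);
 *	cdai.buf = (uint8_t *)serial;
 *	xpt_action((union ccb *)&cdai);
 *	if (cam_ccb_status((union ccb *)&cdai) == CAM_REQ_CMP &&
 *	    cdai.provsiz <= cdai.bufsiz)
 *		serial now holds cdai.provsiz bytes of data
 */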
1326
1327/*
1328 * CCB for sending async events
1329 */
1330struct ccb_async {
1331 struct ccb_hdr ccb_h;
1332 uint32_t async_code;
1333 off_t async_arg_size;
1334 void *async_arg_ptr;
1335};
1336
1337/*
1338 * Union of all CCB types for kernel space allocation. This union should
 * never be used for manipulating CCBs - its only use is the allocation and
 * deallocation of raw CCB space; it is the return type of xpt_ccb_alloc and
 * the argument to xpt_ccb_free.
1342 */
1343union ccb {
1344 struct ccb_hdr ccb_h; /* For convenience */
1345 struct ccb_scsiio csio;
1346 struct ccb_getdev cgd;
1347 struct ccb_getdevlist cgdl;
1348 struct ccb_pathinq cpi;
1349 struct ccb_relsim crs;
1350 struct ccb_setasync csa;
1351 struct ccb_setdev csd;
1352 struct ccb_pathstats cpis;
1353 struct ccb_getdevstats cgds;
1354 struct ccb_dev_match cdm;
1355 struct ccb_trans_settings cts;
1356 struct ccb_calc_geometry ccg;
1357 struct ccb_sim_knob knob;
1358 struct ccb_abort cab;
1359 struct ccb_resetbus crb;
1360 struct ccb_resetdev crd;
1361 struct ccb_termio tio;
1362 struct ccb_accept_tio atio;
1363 struct ccb_scsiio ctio;
1364 struct ccb_en_lun cel;
1365 struct ccb_immed_notify cin;
1366 struct ccb_notify_ack cna;
1367 struct ccb_immediate_notify cin1;
1368 struct ccb_notify_acknowledge cna2;
1369 struct ccb_eng_inq cei;
1370 struct ccb_eng_exec cee;
1371 struct ccb_smpio smpio;
1372 struct ccb_rescan crcn;
1373 struct ccb_debug cdbg;
1374 struct ccb_ataio ataio;
1375 struct ccb_dev_advinfo cdai;
1376 struct ccb_async casync;
1377 struct ccb_nvmeio nvmeio;
1378 struct ccb_mmcio mmcio;
1379};
1380
1381#define CCB_CLEAR_ALL_EXCEPT_HDR(ccbp) \
1382 bzero((char *)(ccbp) + sizeof((ccbp)->ccb_h), \
1383 sizeof(*(ccbp)) - sizeof((ccbp)->ccb_h))
1384
1385__BEGIN_DECLS
1386static __inline void
1387cam_fill_csio(struct ccb_scsiio *csio, uint32_t retries,
1388 void (*cbfcnp)(struct cam_periph *, union ccb *),
1389 uint32_t flags, uint8_t tag_action,
1390 uint8_t *data_ptr, uint32_t dxfer_len,
1391 uint8_t sense_len, uint8_t cdb_len,
1392 uint32_t timeout)
1393{
1394 csio->ccb_h.func_code = XPT_SCSI_IO;
1395 csio->ccb_h.flags = flags;
1396 csio->ccb_h.xflags = 0;
1397 csio->ccb_h.retry_count = retries;
1398 csio->ccb_h.cbfcnp = cbfcnp;
1399 csio->ccb_h.timeout = timeout;
1400 csio->data_ptr = data_ptr;
1401 csio->dxfer_len = dxfer_len;
1402 csio->sense_len = sense_len;
1403 csio->cdb_len = cdb_len;
1404 csio->tag_action = tag_action;
1405 csio->priority = 0;
1406#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
1407 csio->bio = NULL;
1408#endif
1409}
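
/*
 * An illustrative sketch of using cam_fill_csio() for a TEST UNIT READY.
 * The scsi_*() CDB builders in scsi_all.h wrap this same pattern and are
 * normally preferred; "ccb" and "mydone" are hypothetical names.
 *
 *	struct scsi_test_unit_ready *tur;
 *
 *	cam_fill_csio(&ccb->csio, 1, mydone, CAM_DIR_NONE,
 *	    MSG_SIMPLE_Q_TAG, NULL, 0, SSD_FULL_SIZE,
 *	    sizeof(struct scsi_test_unit_ready), 5 * 1000);
 *	tur = (struct scsi_test_unit_ready *)&ccb->csio.cdb_io.cdb_bytes;
 *	bzero(tur, sizeof(*tur));
 *	tur->opcode = TEST_UNIT_READY;
 *	xpt_action(ccb);
 */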
1410
1411static __inline void
1412cam_fill_ctio(struct ccb_scsiio *csio, uint32_t retries,
1413 void (*cbfcnp)(struct cam_periph *, union ccb *),
1414 uint32_t flags, u_int tag_action, u_int tag_id,
1415 u_int init_id, u_int scsi_status, uint8_t *data_ptr,
1416 uint32_t dxfer_len, uint32_t timeout)
1417{
1418 csio->ccb_h.func_code = XPT_CONT_TARGET_IO;
1419 csio->ccb_h.flags = flags;
1420 csio->ccb_h.xflags = 0;
1421 csio->ccb_h.retry_count = retries;
1422 csio->ccb_h.cbfcnp = cbfcnp;
1423 csio->ccb_h.timeout = timeout;
1424 csio->data_ptr = data_ptr;
1425 csio->dxfer_len = dxfer_len;
1426 csio->scsi_status = scsi_status;
1427 csio->tag_action = tag_action;
1428 csio->priority = 0;
1429 csio->tag_id = tag_id;
1430 csio->init_id = init_id;
1431}
1432
1433static __inline void
1434cam_fill_ataio(struct ccb_ataio *ataio, uint32_t retries,
1435 void (*cbfcnp)(struct cam_periph *, union ccb *),
1436 uint32_t flags, u_int tag_action __unused,
1437 uint8_t *data_ptr, uint32_t dxfer_len,
1438 uint32_t timeout)
1439{
1440 ataio->ccb_h.func_code = XPT_ATA_IO;
1441 ataio->ccb_h.flags = flags;
1442 ataio->ccb_h.retry_count = retries;
1443 ataio->ccb_h.cbfcnp = cbfcnp;
1444 ataio->ccb_h.timeout = timeout;
1445 ataio->data_ptr = data_ptr;
1446 ataio->dxfer_len = dxfer_len;
1447 ataio->ata_flags = 0;
1448}
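
/*
 * An illustrative sketch of using cam_fill_ataio() together with the
 * ata_48bit_cmd() register-set builder from ata_all.h to issue a 48-bit
 * FLUSH CACHE; "ccb" and "mydone" are hypothetical.
 *
 *	cam_fill_ataio(&ccb->ataio, 1, mydone, CAM_DIR_NONE, 0,
 *	    NULL, 0, 30 * 1000);
 *	ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0);
 *	xpt_action(ccb);
 */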
1449
1450static __inline void
1451cam_fill_smpio(struct ccb_smpio *smpio, uint32_t retries,
1452 void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags,
1453 uint8_t *smp_request, int smp_request_len,
1454 uint8_t *smp_response, int smp_response_len,
1455 uint32_t timeout)
1456{
1457#ifdef _KERNEL
1458 KASSERT((flags & CAM_DIR_MASK) == CAM_DIR_BOTH,
1459 ("direction != CAM_DIR_BOTH"));
1460 KASSERT((smp_request != NULL) && (smp_response != NULL),
1461 ("need valid request and response buffers"));
1462 KASSERT((smp_request_len != 0) && (smp_response_len != 0),
1463 ("need non-zero request and response lengths"));
1464#endif /*_KERNEL*/
1465 smpio->ccb_h.func_code = XPT_SMP_IO;
1466 smpio->ccb_h.flags = flags;
1467 smpio->ccb_h.retry_count = retries;
1468 smpio->ccb_h.cbfcnp = cbfcnp;
1469 smpio->ccb_h.timeout = timeout;
1470 smpio->smp_request = smp_request;
1471 smpio->smp_request_len = smp_request_len;
1472 smpio->smp_response = smp_response;
1473 smpio->smp_response_len = smp_response_len;
1474}
1475
1476static __inline void
1477cam_fill_mmcio(struct ccb_mmcio *mmcio, uint32_t retries,
1478 void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags,
1479 uint32_t mmc_opcode, uint32_t mmc_arg, uint32_t mmc_flags,
1480 struct mmc_data *mmc_d,
1481 uint32_t timeout)
1482{
1483 mmcio->ccb_h.func_code = XPT_MMC_IO;
1484 mmcio->ccb_h.flags = flags;
1485 mmcio->ccb_h.retry_count = retries;
1486 mmcio->ccb_h.cbfcnp = cbfcnp;
1487 mmcio->ccb_h.timeout = timeout;
1488 mmcio->cmd.opcode = mmc_opcode;
1489 mmcio->cmd.arg = mmc_arg;
1490 mmcio->cmd.flags = mmc_flags;
1491 mmcio->stop.opcode = 0;
1492 mmcio->stop.arg = 0;
1493 mmcio->stop.flags = 0;
1494 if (mmc_d != NULL) {
1495 mmcio->cmd.data = mmc_d;
1496 } else
1497 mmcio->cmd.data = NULL;
1498 mmcio->cmd.resp[0] = 0;
1499 mmcio->cmd.resp[1] = 0;
1500 mmcio->cmd.resp[2] = 0;
1501 mmcio->cmd.resp[3] = 0;
1502}
1503
1504static __inline void
1505cam_set_ccbstatus(union ccb *ccb, cam_status status)
1506{
1507 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1508 ccb->ccb_h.status |= status;
1509}
1510
1511static __inline cam_status
1512cam_ccb_status(union ccb *ccb)
1513{
1514 return ((cam_status)(ccb->ccb_h.status & CAM_STATUS_MASK));
1515}
1516
1517static inline bool
1518cam_ccb_success(union ccb *ccb)
1519{
1520 return (cam_ccb_status(ccb) == CAM_REQ_CMP);
1521}
1522
1523void cam_calc_geometry(struct ccb_calc_geometry *ccg, int extended);
1524
1525static __inline void
1526cam_fill_nvmeio(struct ccb_nvmeio *nvmeio, uint32_t retries,
1527 void (*cbfcnp)(struct cam_periph *, union ccb *),
1528 uint32_t flags, uint8_t *data_ptr, uint32_t dxfer_len,
1529 uint32_t timeout)
1530{
1531 nvmeio->ccb_h.func_code = XPT_NVME_IO;
1532 nvmeio->ccb_h.flags = flags;
1533 nvmeio->ccb_h.retry_count = retries;
1534 nvmeio->ccb_h.cbfcnp = cbfcnp;
1535 nvmeio->ccb_h.timeout = timeout;
1536 nvmeio->data_ptr = data_ptr;
1537 nvmeio->dxfer_len = dxfer_len;
1538}
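
/*
 * An illustrative sketch of a namespace FLUSH built on cam_fill_nvmeio().
 * The command fields follow struct nvme_command from <dev/nvme/nvme.h>
 * (byte-order handling omitted here), and "ccb", "mydone" and "nsid" are
 * hypothetical.
 *
 *	cam_fill_nvmeio(&ccb->nvmeio, 1, mydone, CAM_DIR_NONE,
 *	    NULL, 0, 30 * 1000);
 *	memset(&ccb->nvmeio.cmd, 0, sizeof(ccb->nvmeio.cmd));
 *	ccb->nvmeio.cmd.opc = NVME_OPC_FLUSH;
 *	ccb->nvmeio.cmd.nsid = nsid;
 *	xpt_action(ccb);
 */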
1539
1540static __inline void
1541cam_fill_nvmeadmin(struct ccb_nvmeio *nvmeio, uint32_t retries,
1542 void (*cbfcnp)(struct cam_periph *, union ccb *),
1543 uint32_t flags, uint8_t *data_ptr, uint32_t dxfer_len,
1544 uint32_t timeout)
1545{
1546 nvmeio->ccb_h.func_code = XPT_NVME_ADMIN;
1547 nvmeio->ccb_h.flags = flags;
1548 nvmeio->ccb_h.retry_count = retries;
1549 nvmeio->ccb_h.cbfcnp = cbfcnp;
1550 nvmeio->ccb_h.timeout = timeout;
1551 nvmeio->data_ptr = data_ptr;
1552 nvmeio->dxfer_len = dxfer_len;
1553}
1554__END_DECLS
1555
1556#endif /* _CAM_CAM_CCB_H */