nvme

Posted at 2023-02-23

nvme_queue

struct request

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;
	struct blk_mq_hw_ctx *mq_hctx;

	blk_opf_t cmd_flags;		/* op and common flags */
	req_flags_t rq_flags;

	int tag;
	int internal_tag;

	unsigned int timeout;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	union {
		struct list_head queuelist;
		struct request *rq_next;
	};

	struct block_device *part;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
	/* Time that the first bio started allocating this request. */
	u64 alloc_time_ns;
#endif
	/* Time that this request was allocated for this IO. */
	u64 start_time_ns;
	/* Time that I/O was submitted to the device. */
	u64 io_start_time_ns;

#ifdef CONFIG_BLK_WBT
	unsigned short wbt_flags;
#endif
	/*
	 * rq sectors used for blk stats. It has the same value
	 * with blk_rq_sectors(rq), except that it never be zeroed
	 * by completion.
	 */
	unsigned short stats_sectors;

	/*
	 * Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;
	unsigned short nr_integrity_segments;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx *crypt_ctx;
	struct blk_crypto_keyslot *crypt_keyslot;
#endif

	enum mq_rq_state state;
	atomic_t ref;

	unsigned long deadline;

	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list. The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	union {
		struct hlist_node hash;	/* merge hash */
		struct llist_node ipi_list;
	};

	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. special_vec must
	 * only be used if RQF_SPECIAL_PAYLOAD is set, and those cannot be
	 * insert into an IO scheduler.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		struct bio_vec special_vec;
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.
	 */
	struct {
		struct io_cq		*icq;
		void			*priv[2];
	} elv;

	struct {
		unsigned int		seq;
		rq_end_io_fn		*saved_end_io;
	} flush;

	u64 fifo_time;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;
};
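
As the comment says, __data_len and __sector must never be read directly; drivers go through the blk-mq accessor helpers instead. A minimal sketch (helper names recalled from include/linux/blk-mq.h, so treat the exact set as an assumption) that logs a request without touching those fields:

#include <linux/blk-mq.h>

static void example_log_rq(struct request *rq)
{
	pr_info("rq: op=%u pos=%llu bytes=%u segs=%u\n",
		(unsigned int)req_op(rq),		/* derived from cmd_flags */
		(unsigned long long)blk_rq_pos(rq),	/* reads __sector */
		blk_rq_bytes(rq),			/* reads __data_len */
		rq->nr_phys_segments);
}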

struct blk_mq_tags

/*
 * Tag address space map.
 */
struct blk_mq_tags {
	unsigned int nr_tags;
	unsigned int nr_reserved_tags;
	unsigned int active_queues;

	struct sbitmap_queue bitmap_tags;
	struct sbitmap_queue breserved_tags;

	struct request **rqs;
	struct request **static_rqs;
	struct list_head page_list;

	/*
	 * used to clear request reference in rqs[] before freeing one
	 * request pool
	 */
	spinlock_t lock;
};
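
The rqs[] array above is what a completion path uses to turn a tag back into its struct request. A hedged sketch of that lookup (the in-tree helper that does this is blk_mq_tag_to_rq(); example_tag_to_rq is only an illustrative name):

#include <linux/blk-mq.h>

static struct request *example_tag_to_rq(struct blk_mq_tags *tags,
					 unsigned int tag)
{
	if (tag >= tags->nr_tags)
		return NULL;
	return tags->rqs[tag];	/* NULL if no request is in flight for this tag */
}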

struct blk_mq_tag_set

/**
 * struct blk_mq_tag_set - tag set that can be shared between request queues
 * @ops:	   Pointers to functions that implement block driver behavior.
 * @map:	   One or more ctx -> hctx mappings. One map exists for each
 *		   hardware queue type (enum hctx_type) that the driver wishes
 *		   to support. There are no restrictions on maps being of the
 *		   same size, and it's perfectly legal to share maps between
 *		   types.
 * @nr_maps:	   Number of elements in the @map array. A number in the range
 *		   [1, HCTX_MAX_TYPES].
 * @nr_hw_queues:  Number of hardware queues supported by the block driver that
 *		   owns this data structure.
 * @queue_depth:   Number of tags per hardware queue, reserved tags included.
 * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
 *		   allocations.
 * @cmd_size:	   Number of additional bytes to allocate per request. The block
 *		   driver owns these additional bytes.
 * @numa_node:	   NUMA node the storage adapter has been connected to.
 * @timeout:	   Request processing timeout in jiffies.
 * @flags:	   Zero or more BLK_MQ_F_* flags.
 * @driver_data:   Pointer to data owned by the block driver that created this
 *		   tag set.
 * @tags:	   Tag sets. One tag set per hardware queue. Has @nr_hw_queues
 *		   elements.
 * @shared_tags:
 *		   Shared set of tags. Has @nr_hw_queues elements. If set,
 *		   shared by all @tags.
 * @tag_list_lock: Serializes tag_list accesses.
 * @tag_list:	   List of the request queues that use this tag set. See also
 *		   request_queue.tag_set_list.
 * @srcu:	   Use as lock when type of the request queue is blocking
 *		   (BLK_MQ_F_BLOCKING).
 */
struct blk_mq_tag_set {
	const struct blk_mq_ops	*ops;
	struct blk_mq_queue_map	map[HCTX_MAX_TYPES];
	unsigned int		nr_maps;
	unsigned int		nr_hw_queues;
	unsigned int		queue_depth;
	unsigned int		reserved_tags;
	unsigned int		cmd_size;
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;
	void			*driver_data;

	struct blk_mq_tags	**tags;

	struct blk_mq_tags	*shared_tags;

	struct mutex		tag_list_lock;
	struct list_head	tag_list;
	struct srcu_struct	*srcu;
};
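
A hedged sketch of how a block driver typically fills this structure before registering its queues; example_mq_ops and struct example_cmd stand in for the driver's own blk_mq_ops and per-request payload, and the numbers are arbitrary:

#include <linux/blk-mq.h>

struct example_cmd {				/* per-request driver data (@cmd_size) */
	int dummy;
};

static const struct blk_mq_ops example_mq_ops;	/* queue_rq() etc. would go here */

static struct blk_mq_tag_set example_tagset;

static int example_init_tagset(void)
{
	memset(&example_tagset, 0, sizeof(example_tagset));
	example_tagset.ops = &example_mq_ops;
	example_tagset.nr_hw_queues = 4;			/* one hctx per hardware queue */
	example_tagset.queue_depth = 1024;			/* tags per hardware queue */
	example_tagset.cmd_size = sizeof(struct example_cmd);
	example_tagset.numa_node = NUMA_NO_NODE;
	example_tagset.timeout = 30 * HZ;

	return blk_mq_alloc_tag_set(&example_tagset);		/* allocates @tags / @static_rqs */
}

The request queues themselves are then created against this set (for example with blk_mq_alloc_disk()), which is how several queues end up sharing one tag space.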

struct nvme_dev

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct nvme_queue *queues;
	struct blk_mq_tag_set tagset;
	struct blk_mq_tag_set admin_tagset;
	u32 __iomem *dbs;
	struct device *dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	unsigned online_queues;
	unsigned max_qid;
	unsigned io_queues[HCTX_MAX_TYPES];
	unsigned int num_vecs;
	u32 q_depth;
	int io_sqes;
	u32 db_stride;
	void __iomem *bar;
	unsigned long bar_mapped_size;
	struct mutex shutdown_lock;
	bool subsystem;
	u64 cmb_size;
	bool cmb_use_sqes;
	u32 cmbsz;
	u32 cmbloc;
	struct nvme_ctrl ctrl;
	u32 last_ps;
	bool hmb;
	struct sg_table *hmb_sgt;

	mempool_t *iod_mempool;
	mempool_t *iod_meta_mempool;

	/* shadow doorbell buffer support: */
	__le32 *dbbuf_dbs;
	dma_addr_t dbbuf_dbs_dma_addr;
	__le32 *dbbuf_eis;
	dma_addr_t dbbuf_eis_dma_addr;

	/* host memory buffer support: */
	u64 host_mem_size;
	u32 nr_host_mem_descs;
	u32 host_mem_descs_size;
	dma_addr_t host_mem_descs_dma;
	struct nvme_host_mem_buf_desc *host_mem_descs;
	void **host_mem_desc_bufs;
	unsigned int nr_allocated_queues;
	unsigned int nr_write_queues;
	unsigned int nr_poll_queues;
};

struct nvme_queue

/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct nvme_dev *dev;
	spinlock_t sq_lock;
	void *sq_cmds;
	 /* only used for poll queues: */
	spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
	struct nvme_completion *cqes;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	u32 __iomem *q_db;
	u32 q_depth;
	u16 cq_vector;
	u16 sq_tail;
	u16 last_sq_tail;
	u16 cq_head;
	u16 qid;
	u8 cq_phase;
	u8 sqes;
	unsigned long flags;
#define NVMEQ_ENABLED		0
#define NVMEQ_SQ_CMB		1
#define NVMEQ_DELETE_ERROR	2
#define NVMEQ_POLLED		3
	__le32 *dbbuf_sq_db;
	__le32 *dbbuf_cq_db;
	__le32 *dbbuf_sq_ei;
	__le32 *dbbuf_cq_ei;
	struct completion delete_done;
};
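
q_db above points at this queue's submission queue tail doorbell inside BAR0; the driver derives it from dev->dbs, the queue id and db_stride. Per the NVMe specification the doorbells start at offset 0x1000 and are spaced by the stride CAP.DSTRD, so the offset math looks like this standalone sketch (userspace C, for illustration only):

#include <stdint.h>
#include <stdio.h>

/* SQ y tail doorbell: 0x1000 + (2y)     * (4 << CAP.DSTRD)
 * CQ y head doorbell: 0x1000 + (2y + 1) * (4 << CAP.DSTRD) */
static uint64_t sq_tail_db_off(unsigned int qid, unsigned int dstrd)
{
	return 0x1000 + (2ULL * qid) * (4ULL << dstrd);
}

static uint64_t cq_head_db_off(unsigned int qid, unsigned int dstrd)
{
	return 0x1000 + (2ULL * qid + 1) * (4ULL << dstrd);
}

int main(void)
{
	/* DSTRD = 0, i.e. 4-byte stride, as show-regs reports further below */
	printf("admin SQ tail: 0x%llx, admin CQ head: 0x%llx\n",
	       (unsigned long long)sq_tail_db_off(0, 0),
	       (unsigned long long)cq_head_db_off(0, 0));
	printf("I/O q1 SQ tail: 0x%llx, I/O q1 CQ head: 0x%llx\n",
	       (unsigned long long)sq_tail_db_off(1, 0),
	       (unsigned long long)cq_head_db_off(1, 0));
	return 0;
}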

cid

/*
 * nvme command_id is constructed as such:
 * | xxxx | xxxxxxxxxxxx |
 *   gen    request tag
 */
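
A standalone sketch of that packing: the upper 4 bits carry a generation counter and the lower 12 bits the blk-mq request tag, so a completion can be matched to its request and a stale or corrupted command id can be detected (userspace C for illustration, not the kernel's exact macros):

#include <stdint.h>
#include <stdio.h>

static uint16_t cid_pack(uint8_t gen, uint16_t tag)
{
	return (uint16_t)((gen & 0xf) << 12) | (tag & 0xfff);
}

static uint8_t  cid_gen(uint16_t cid) { return cid >> 12; }
static uint16_t cid_tag(uint16_t cid) { return cid & 0xfff; }

int main(void)
{
	uint16_t cid = cid_pack(0x3, 0x04a);

	printf("cid=0x%04x gen=%u tag=%u\n", cid, cid_gen(cid), cid_tag(cid));
	return 0;
}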

nvm set

multipath

CMIC

Controller Multi-Path I/O and Namespace Sharing Capabilities (CMIC): This field
specifies multi-path I/O and namespace sharing capabilities of the controller and NVM
subsystem.
Bits 7:4 are reserved.
Bit 3 if set to ‘1’, then the NVM subsystem supports Asymmetric Namespace Access
Reporting (refer to section 8.1). If cleared to ‘0’, then the NVM subsystem does not
support Asymmetric Namespace Access Reporting.
Bit 2 if set to ‘1’, then the controller is associated with an SR-IOV Virtual Function. If
cleared to ‘0’, then the controller is associated with a PCI Function or a Fabrics
connection.
Bit 1 if set to ‘1’, then the NVM subsystem may contain two or more controllers. If cleared
to ‘0’, then the NVM subsystem contains only a single controller. As described in section
2.4.1, an NVM subsystem that contains multiple controllers may be used by multiple
hosts, or may provide multiple paths for a single host.
Bit 0 if set to ‘1’, then the NVM subsystem may contain more than one NVM subsystem
port. If cleared to ‘0’, then the NVM subsystem contains only a single NVM subsystem
port.

CMIC

The Controller Multi-Path I/O and Namespace Sharing Capabilities (CMIC) structure, which specifies the multi-path I/O and namespace sharing capabilities of the controller and the NVM subsystem.

CMIC.MultiPCIePorts

Bit 0 of the CMIC structure.
If this value is set to 1, the NVM subsystem may contain two or more physical PCI Express ports. If the value is cleared to 0, the NVM subsystem contains a single PCI Express port.

CMIC.MultiControllers

Bit 1 of the CMIC structure.
If this value is set to 1, the NVM subsystem may contain two or more controllers. If the value is cleared to 0, the NVM subsystem contains a single controller.

CMIC.SRIOV

Bit 2 of the CMIC structure.
If this value is set to 1, the controller is associated with a Single Root I/O Virtualization (SR-IOV) Virtual Function. If the value is cleared to 0, the controller is associated with a PCI Function.

CMIC.ANAR

Bit 3 of the CMIC structure.
If this value is set to 1, the NVM subsystem supports Asymmetric Namespace Access Reporting. If the value is cleared to 0, the NVM subsystem does not support Asymmetric Namespace Access Reporting.

CMIC.Reserved

Bits 7:4 of the CMIC structure are reserved.

NMIC

Namespace Multi-path I/O and Namespace Sharing Capabilities (NMIC): This
field specifies multi-path I/O and namespace sharing capabilities of the
namespace.
Bits 7:1 are reserved.
Bit 0: If set to ‘1’, then the namespace may be attached to two or more controllers
in the NVM subsystem concurrently (i.e., may be a shared namespace). If cleared
to ‘0’, then the namespace is a private namespace and is able to be attached to
only one controller at a time.

NMIC

The Namespace Multi-path I/O and Namespace Sharing Capabilities (NMIC) structure, which contains the field that specifies the multi-path I/O and namespace sharing capabilities of the namespace.

NMIC.SharedNameSpace

Bit 0 of the NMIC structure indicates whether the namespace is a shared namespace.
If this value is set to 1, the namespace may be accessible from two or more controllers in the NVM subsystem. If the value is cleared to 0, the namespace is a private namespace and can be accessed only by the controller that returned this namespace data structure.
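
A small sketch that decodes both fields described above. The byte offsets in the comments (CMIC at byte 76 of the Identify Controller data, NMIC at byte 30 of the Identify Namespace data) are my reading of the spec, so verify them against the revision you target:

#include <stdint.h>
#include <stdio.h>

static void decode_cmic(uint8_t cmic)	/* assumed: byte 76 of Identify Controller */
{
	printf("CMIC.MultiPCIePorts  : %u\n", cmic & 0x1);
	printf("CMIC.MultiControllers: %u\n", (cmic >> 1) & 0x1);
	printf("CMIC.SRIOV           : %u\n", (cmic >> 2) & 0x1);
	printf("CMIC.ANAR            : %u\n", (cmic >> 3) & 0x1);
}

static void decode_nmic(uint8_t nmic)	/* assumed: byte 30 of Identify Namespace */
{
	printf("NMIC.SharedNameSpace : %u\n", nmic & 0x1);
}

int main(void)
{
	decode_cmic(0x0b);	/* example: multiple ports, multiple controllers, ANA */
	decode_nmic(0x01);	/* example: shared namespace */
	return 0;
}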

hwmon

tokunori@tokunori-desktop:~$ ls /sys/class/nvme/nvme0/hwmon1/
device  name  power  subsystem  temp1_alarm  temp1_crit  temp1_input  temp1_label  temp1_max  temp1_min  temp2_input  temp2_label  temp2_max  temp2_min  temp3_input  temp3_label  temp3_max  temp3_min  uevent
tokunori@tokunori-desktop:~$ cat /sys/class/nvme/nvme0/hwmon1/*
cat: /sys/class/nvme/nvme0/hwmon1/device: Is a directory
nvme
cat: /sys/class/nvme/nvme0/hwmon1/power: Is a directory
cat: /sys/class/nvme/nvme0/hwmon1/subsystem: Is a directory
0
82850
38850
Composite
79850
-273150
38850
Sensor 1
65261850
-273150
46850
Sensor 2
65261850
-273150
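
The temp*_input values are in millidegrees Celsius, so 38850 above is 38.850 °C (and -273150 simply means that limit is not reported). A minimal sketch that reads the composite temperature; the hwmon index in the path is whatever this machine happened to get and may differ elsewhere:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/nvme/nvme0/hwmon1/temp1_input", "r");
	long milli;

	if (!f)
		return 1;
	if (fscanf(f, "%ld", &milli) != 1) {
		fclose(f);
		return 1;
	}
	printf("Composite: %.3f C\n", milli / 1000.0);
	fclose(f);
	return 0;
}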

module

tokunori@tokunori-desktop:~$ ls /sys/module/nvme
nvme/         nvme_auth/    nvme_core/    nvme_fabrics/ 
tokunori@tokunori-desktop:~$ ls /sys/module/nvme/
coresize    drivers/    holders/    initsize    initstate   notes/      parameters/ refcnt      sections/   srcversion  taint       uevent      version     
tokunori@tokunori-desktop:~$ ls /sys/module/nvme/parameters/
io_queue_depth  max_host_mem_size_mb  noacpi  poll_queues  sgl_threshold  use_cmb_sqes  use_threaded_interrupts  write_queues
tokunori@tokunori-desktop:~$ cat /sys/module/nvme/parameters/*
1024
128
N
0
32768
Y
0
0
tokunori@tokunori-desktop:~$ la /sys/module/nvme_core/parameters/
admin_timeout                apst_primary_timeout_ms        apst_secondary_timeout_ms  force_apst  iopolicy     multipath
apst_primary_latency_tol_us  apst_secondary_latency_tol_us  default_ps_max_latency_us  io_timeout  max_retries  shutdown_timeout
tokunori@tokunori-desktop:~$ cat /sys/module/nvme_core/parameters/*
60
15000
100
100000
2000
100000
N
30
numa
5
Y
5
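
Each of these files is created by a module_param() declaration in the driver source. A hedged sketch of the pattern (example_queue_depth is a made-up name; real nvme parameters such as io_queue_depth additionally use callback ops for range checking):

#include <linux/module.h>
#include <linux/moduleparam.h>

/* Shows up as /sys/module/<module>/parameters/example_queue_depth */
static unsigned int example_queue_depth = 1024;
module_param(example_queue_depth, uint, 0644);
MODULE_PARM_DESC(example_queue_depth, "illustrative queue depth parameter");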

Register

[2024-01-18 00:00:43.573] tokunori@tokunori-desktop:~$ lspci | grep Volatile
[2024-01-18 00:00:53.182] 03:00.0 Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller PM9A1/PM9A3/980PRO
[2024-01-18 00:00:53.198] 0d:00.0 Non-Volatile memory controller: Kingston Technology Company, Inc. Device 500a (rev 01)
[2024-01-18 00:00:53.198] 0f:00.0 Non-Volatile memory controller: Intel Corporation SSD 660P Series (rev 03)
tokunori@tokunori-desktop:~$ setpci -s 03:00.0 BASE_ADDRESS_0
fc810004
tokunori@tokunori-desktop:~$ sudo dd if=/dev/mem skip=16548096 count=1 bs=256 | hexdump
0000000 3fff 2803 0030 0000 0400 0001 0000 0000
0000010 0000 0000 0001 0046 0000 0000 0001 0000
0000020 0000 0000 001f 001f 6000 0588 0001 0000
0000030 7000 0588 0001 0000 0000 0000 0000 0000
0000040 0000 0000 0000 0000 0000 0000 0000 0000
*
1+0 records in
1+0 records out
256 bytes copied, 0.000121943 s, 2.1 MB/s
0000100
tokunori@tokunori-desktop:~$ cd nvme-cli
tokunori@tokunori-desktop:~/nvme-cli$ sudo .build/nvme show-regs /dev/nvme0
cap     : 3028033fff
version : 10400
cc      : 460001
csts    : 1
nssr    : 0
crto    : 0
intms   : 0
intmc   : 0
aqa     : 1f001f
asq     : 105886000
acq     : 105887000
cmbloc  : 0
cmbsz   : 0
bpinfo  : 0
bprsel  : 0
bpmbl   : 0
cmbmsc  : 0
cmbsts  : 0
pmrcap  : 0
pmrctl  : 0
pmrsts  : 100
pmrebs  : 0
pmrswtp : 0
pmrmscl : 0
pmrmscu : 0
tokunori@tokunori-desktop:~/nvme-cli$ sudo dd if=/dev/mem skip=1059078144 count=32 bs=4 | hexdump
0000000 3fff 2803 0030 0000 0400 0001 0000 0000
0000010 0000 0000 0001 0046 0000 0000 0001 0000
0000020 0000 0000 001f 001f 6000 0588 0001 0000
0000030 7000 0588 0001 0000 0000 0000 0000 0000
0000040 0000 0000 0000 0000 0000 0000 0000 0000
*
32+0 records in
32+0 records out
128 bytes copied, 0.000344517 s, 372 kB/s
0000080
tokunori@tokunori-desktop:~/nvme-cli$ sudo dd if=/dev/mem skip=132384768 count=1 bs=32 | hexdump
0000000 3fff 2803 0030 0000 0400 0001 0000 0000
0000010 0000 0000 0001 0046 0000 0000 0001 0000
1+0 records in
1+0 records out
32 bytes copied, 4.466e-05 s, 717 kB/s
0000020
tokunori@tokunori-desktop:~/nvme-cli$ sudo dd if=/dev/mem skip=132384768 count=1 bs=32 > bar0-32bytes.bin
1+0 records in
1+0 records out
32 bytes copied, 6.087e-05 s, 526 kB/s
tokunori@tokunori-desktop:~/nvme-cli$ sudo dd if=../data/bar0-32bytes-cc.en-0.bin of=/dev/mem seek=132384768 count=1 bs=32
1+0 records in
1+0 records out
32 bytes copied, 0.000223679 s, 143 kB/s
tokunori@tokunori-desktop:~/nvme-cli$ sudo dd if=/dev/mem skip=132384768 count=1 bs=32 | hexdump
0000000 3fff 2803 0030 0000 0400 0001 0000 0000
0000010 0000 0000 0000 0000 0000 0000 0000 0000
1+0 records in
1+0 records out
32 bytes copied, 4.3791e-05 s, 731 kB/s
0000020
tokunori@tokunori-desktop:~/nvme-cli$ sudo dmesg | tail -n 40
...
[ 4867.119989] nvme nvme0: I/O 4 QID 0 timeout, reset controller
[ 4867.218589] nvme nvme0: Shutdown timeout set to 8 seconds
[ 4867.224540] nvme nvme0: 32/0/0 default/read/poll queues
tokunori@tokunori-desktop:~/nvme-cli$ sudo dd if=../data/bar0-32bytes.bin of=/dev/mem seek=132384768 count=1 bs=32
1+0 records in
1+0 records out
32 bytes copied, 0.000216383 s, 148 kB/s
tokunori@tokunori-desktop:~/nvme-cli$ sudo dd if=/dev/mem skip=132384768 count=1 bs=32 | hexdump
0000000 3fff 2803 0030 0000 0400 0001 0000 0000
0000010 0000 0000 0001 0046 0000 0000 0001 0000
1+0 records in
1+0 records out
32 bytes copied, 6.9068e-05 s, 463 kB/s
0000020
tokunori@tokunori-desktop:~/nvme-cli$ sudo dmesg | tail -n 40
...
[ 4867.119989] nvme nvme0: I/O 4 QID 0 timeout, reset controller
[ 4867.218589] nvme nvme0: Shutdown timeout set to 8 seconds
[ 4867.224540] nvme nvme0: 32/0/0 default/read/poll queues
tokunori@tokunori-desktop:~/nvme-cli$
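
Instead of doing the offset arithmetic with dd against /dev/mem, the same registers can be read by mmap()ing BAR0. A hedged sketch (BAR0_PHYS is the address obtained from setpci above; CONFIG_STRICT_DEVMEM or IOMMU settings may block this on other machines):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define BAR0_PHYS 0xfc810000UL	/* from "setpci -s 03:00.0 BASE_ADDRESS_0" */

int main(void)
{
	int fd = open("/dev/mem", O_RDONLY | O_SYNC);
	volatile uint8_t *bar;

	if (fd < 0)
		return 1;
	bar = mmap(NULL, 0x1000, PROT_READ, MAP_SHARED, fd, BAR0_PHYS);
	if (bar == MAP_FAILED)
		return 1;

	printf("CAP: %#llx VS: %#x CC: %#x CSTS: %#x\n",
	       (unsigned long long)*(volatile uint64_t *)(bar + 0x00),
	       *(volatile uint32_t *)(bar + 0x08),
	       *(volatile uint32_t *)(bar + 0x14),
	       *(volatile uint32_t *)(bar + 0x1c));

	munmap((void *)bar, 0x1000);
	close(fd);
	return 0;
}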

Remove and rescan

root@tokunori-desktop:/home/tokunori/nvme-cli# echo 1 > /sys/bus/pci/devices/0000\:03\:00.0/remove
root@tokunori-desktop:/home/tokunori/nvme-cli# dmesg | tail -n 40
...
[ 4867.119989] nvme nvme0: I/O 4 QID 0 timeout, reset controller
[ 4867.218589] nvme nvme0: Shutdown timeout set to 8 seconds
[ 4867.224540] nvme nvme0: 32/0/0 default/read/poll queues
root@tokunori-desktop:/home/tokunori/nvme-cli# echo 1 > /sys/bus/pci/rescan
root@tokunori-desktop:/home/tokunori/nvme-cli# dmesg | tail -n 40
...
[ 4867.119989] nvme nvme0: I/O 4 QID 0 timeout, reset controller
[ 4867.218589] nvme nvme0: Shutdown timeout set to 8 seconds
[ 4867.224540] nvme nvme0: 32/0/0 default/read/poll queues
[ 5382.401564] pci 0000:03:00.0: [144d:a80a] type 00 class 0x010802
[ 5382.401608] pci 0000:03:00.0: reg 0x10: [mem 0xfc810000-0xfc813fff 64bit]
[ 5382.401680] pci 0000:03:00.0: reg 0x30: [mem 0xfc800000-0xfc80ffff pref]
[ 5382.425587] pci 0000:03:00.0: BAR 6: assigned [mem 0xfc800000-0xfc80ffff pref]
[ 5382.425598] pci 0000:03:00.0: BAR 0: assigned [mem 0xfc810000-0xfc813fff 64bit]
[ 5382.425839] nvme nvme0: pci function 0000:03:00.0
[ 5382.438761] nvme nvme0: Shutdown timeout set to 8 seconds
[ 5382.446595] nvme nvme0: 32/0/0 default/read/poll queues

Human readable

tokunori@tokunori-desktop:~/nvme-cli$ sudo .build/nvme show-regs /dev/nvme0 -H
cap     : 3028033fff
        Controller Ready With Media Support (CRWMS): Not Supported
        Controller Ready Independent of Media Support (CRIMS): Not Supported
        NVM Subsystem Shutdown Supported   (NSSS): Not Supported
        Controller Memory Buffer Supported (CMBS): The Controller Memory Buffer is Not Supported
        Persistent Memory Region Supported (PMRS): The Persistent Memory Region is Not Supported
        Memory Page Size Maximum         (MPSMAX): 4096 bytes
        Memory Page Size Minimum         (MPSMIN): 4096 bytes
        Controller Power Scope              (CPS): Not Reported
        Boot Partition Support              (BPS): No
        Command Sets Supported              (CSS): NVM command set is Supported
                                                   One or more I/O Command Sets are Not Supported
                                                   I/O Command Set is Supported
        NVM Subsystem Reset Supported     (NSSRS): Yes
        Doorbell Stride                   (DSTRD): 4 bytes
        Timeout                              (TO): 20000 ms
        Arbitration Mechanism Supported     (AMS): Weighted Round Robin with Urgent Priority Class is Not supported
        Contiguous Queues Required          (CQR): Yes
        Maximum Queue Entries Supported    (MQES): 16384

version : 10400
        NVMe specification 1.4

cc      : 460001
        Controller Ready Independent of Media Enable (CRIME): Disabled
        I/O Completion Queue Entry Size (IOCQES): 16 bytes
        I/O Submission Queue Entry Size (IOSQES): 64 bytes
        Shutdown Notification              (SHN): No notification; no effect
        Arbitration Mechanism Selected     (AMS): Round Robin
        Memory Page Size                   (MPS): 4096 bytes
        I/O Command Set Selected           (CSS): NVM Command Set
        Enable                              (EN): Yes

csts    : 1
        Processing Paused               (PP): No
        NVM Subsystem Reset Occurred (NSSRO): No
        Shutdown Status               (SHST): Normal operation (no shutdown has been requested)
        Controller Fatal Status        (CFS): False
        Ready                          (RDY): Yes

nssr    : 0
        NVM Subsystem Reset Control (NSSRC): 0

crto    : 0
        CRIMT                               : 0 secs
        CRWMT                               : 0 secs
intms   : 0
        Interrupt Vector Mask Set (IVMS): 0

intmc   : 0
        Interrupt Vector Mask Clear (IVMC): 0

aqa     : 1f001f
        Admin Completion Queue Size (ACQS): 32
        Admin Submission Queue Size (ASQS): 32

asq     : 16bf2f000
        Admin Submission Queue Base (ASQB): 16bf2f000

acq     : 77f7ba000
        Admin Completion Queue Base (ACQB): 77f7ba000

cmbloc  : 0
        Controller Memory Buffer feature is not supported

cmbsz   : 0
        Controller Memory Buffer feature is not supported

bpinfo  : 0
        Active Boot Partition ID      (ABPID): 0
        Boot Read Status                (BRS): No Boot Partition read operation requested
        Boot Partition Size            (BPSZ): 0
bprsel  : 0
        Boot Partition Identifier      (BPID): 0
        Boot Partition Read Offset    (BPROF): 0
        Boot Partition Read Size      (BPRSZ): 0
bpmbl   : 0
        Boot Partition Memory Buffer Base Address (BMBBA): 0
cmbmsc  : 0
        Controller Base Address         (CBA): 0
        Controller Memory Space Enable (CMSE): 0
        Capabilities Registers Enabled  (CRE): CMBLOC and CMBSZ registers are NOT enabled

cmbsts  : 0
        Controller Base Address Invalid (CBAI): 0

pmrcap  : 0
        Controller Memory Space Supported                   (CMSS): Referencing PMR with host supplied addresses is Not Supported
        Persistent Memory Region Timeout                   (PMRTO): 0
        Persistent Memory Region Write Barrier Mechanisms (PMRWBM): 0
        Persistent Memory Region Time Units                (PMRTU): PMR time unit is 500 milliseconds
        Base Indicator Register                              (BIR): 0
        Write Data Support                                   (WDS): Write data to the PMR is not supported
        Read Data Support                                    (RDS): Read data from the PMR is not supported
pmrctl  : 0
        Enable (EN): PMR is Disabled
pmrsts  : 100
        Controller Base Address Invalid (CBAI): 0
        Health Status                   (HSTS): Normal Operation
        Not Ready                       (NRDY): The Persistent Memory Region is Not Ready to process PCI Express memory read and write requests
        Error                            (ERR): 0
pmrebs  : 0
        PMR Elasticity Buffer Size Base  (PMRWBZ): 0
        Read Bypass Behavior                     : memory reads not conflicting with memory writes in the PMR Elasticity Buffer MAY bypass those memory writes
        PMR Elasticity Buffer Size Units (PMRSZU): Bytes
pmrswtp : 0
        PMR Sustained Write Throughput       (PMRSWTV): 0
        PMR Sustained Write Throughput Units (PMRSWTU): Bytes/second
pmrmscl : 0
        Controller Base Address         (CBA): 0
        Controller Memory Space Enable (CMSE): 0

pmrmscu : 0
        Controller Base Address         (CBA): 0
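
Decoding the cc value 0x460001 by hand gives the same fields the -H output prints above. A short sketch of the bit layout (EN bit 0, CSS bits 6:4, MPS bits 10:7, AMS bits 13:11, SHN bits 15:14, IOSQES bits 19:16, IOCQES bits 23:20):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cc = 0x460001;

	printf("EN     : %u\n", cc & 0x1);
	printf("CSS    : %u\n", (cc >> 4) & 0x7);
	printf("MPS    : %u (%u-byte pages)\n",
	       (cc >> 7) & 0xf, 1u << (12 + ((cc >> 7) & 0xf)));
	printf("AMS    : %u\n", (cc >> 11) & 0x7);
	printf("SHN    : %u\n", (cc >> 14) & 0x3);
	printf("IOSQES : %u (%u bytes)\n", (cc >> 16) & 0xf, 1u << ((cc >> 16) & 0xf));
	printf("IOCQES : %u (%u bytes)\n", (cc >> 20) & 0xf, 1u << ((cc >> 20) & 0xf));
	return 0;
}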

link

smartctl

tokunori@tokunori-desktop:~/linux$ sudo smartctl -a /dev/nvme0
[sudo] password for tokunori:
smartctl 7.3 2022-02-28 r5338 [x86_64-linux-6.4.0-rc7+] (local build)
Copyright (C) 2002-22, Bruce Allen, Christian Franke, www.smartmontools.org

=== START OF INFORMATION SECTION ===
Model Number:                       SAMSUNG MZQL2960HCJR-00B7C
Serial Number:                      S63WNE0R100201
Firmware Version:                   GDA53C2Q
PCI Vendor/Subsystem ID:            0x144d
IEEE OUI Identifier:                0x002538
Total NVM Capacity:                 960,197,124,096 [960 GB]
Unallocated NVM Capacity:           0
Controller ID:                      6
NVMe Version:                       1.4
Number of Namespaces:               1
Namespace 1 Size/Capacity:          960,197,124,096 [960 GB]
Namespace 1 Utilization:            61,440 [61.4 KB]
Namespace 1 Formatted LBA Size:     4096
Local Time is:                      Fri Sep 15 00:08:39 2023 JST
Firmware Updates (0x17):            3 Slots, Slot 1 R/O, no Reset required
Optional Admin Commands (0x005f):   Security Format Frmw_DL NS_Mngmt Self_Test MI_Snd/Rec
Optional NVM Commands (0x005f):     Comp Wr_Unc DS_Mngmt Wr_Zero Sav/Sel_Feat Timestmp
Log Page Attributes (0x0e):         Cmd_Eff_Lg Ext_Get_Lg Telmtry_Lg
Maximum Data Transfer Size:         512 Pages
Warning  Comp. Temp. Threshold:     80 Celsius
Critical Comp. Temp. Threshold:     83 Celsius
Namespace 1 Features (0x1a):        NA_Fields No_ID_Reuse NP_Fields

Supported Power States
St Op     Max   Active     Idle   RL RT WL WT  Ent_Lat  Ex_Lat
 0 +    15.00W       -        -    0  0  0  0        0       0
 1 +     8.00W       -        -    1  1  1  1        0       0

Supported LBA Sizes (NSID 0x1)
Id Fmt  Data  Metadt  Rel_Perf
 0 -     512       0         0
 1 +    4096       0         0

=== START OF SMART DATA SECTION ===
SMART overall-health self-assessment test result: PASSED

SMART/Health Information (NVMe Log 0x02)
Critical Warning:                   0x00
Temperature:                        43 Celsius
Available Spare:                    100%
Available Spare Threshold:          10%
Percentage Used:                    0%
Data Units Read:                    1,055 [540 MB]
Data Units Written:                 1 [512 KB]
Host Read Commands:                 26,274
Host Write Commands:                1
Controller Busy Time:               0
Power Cycles:                       328
Power On Hours:                     678
Unsafe Shutdowns:                   22
Media and Data Integrity Errors:    0
Error Information Log Entries:      0
Warning  Comp. Temperature Time:    0
Critical Comp. Temperature Time:    0
Temperature Sensor 1:               43 Celsius
Temperature Sensor 2:               52 Celsius

Error Information (NVMe Log 0x01, 16 of 64 entries)
No Errors Logged
tokunori@tokunori-desktop:~/linux$ sudo smartctl -a /dev/nvme1
smartctl 7.3 2022-02-28 r5338 [x86_64-linux-6.4.0-rc7+] (local build)
Copyright (C) 2002-22, Bruce Allen, Christian Franke, www.smartmontools.org

=== START OF INFORMATION SECTION ===
Model Number:                       KINGSTON SEDC1000BM8240G
Serial Number:                      50026B76862F98CA
Firmware Version:                   ECEK22.3
PCI Vendor/Subsystem ID:            0x2646
IEEE OUI Identifier:                0x0026b7
Total NVM Capacity:                 240,057,409,536 [240 GB]
Unallocated NVM Capacity:           0
Controller ID:                      1
NVMe Version:                       1.3
Number of Namespaces:               1
Namespace 1 Size/Capacity:          240,057,409,536 [240 GB]
Namespace 1 Formatted LBA Size:     4096
Namespace 1 IEEE EUI-64:            0026b7 6862f98ca0
Local Time is:                      Fri Sep 15 00:08:45 2023 JST
Firmware Updates (0x12):            1 Slot, no Reset required
Optional Admin Commands (0x001f):   Security Format Frmw_DL NS_Mngmt Self_Test
Optional NVM Commands (0x0054):     DS_Mngmt Sav/Sel_Feat Timestmp
Log Page Attributes (0x0a):         Cmd_Eff_Lg Telmtry_Lg
Maximum Data Transfer Size:         512 Pages
Warning  Comp. Temp. Threshold:     75 Celsius
Critical Comp. Temp. Threshold:     80 Celsius

Supported Power States
St Op     Max   Active     Idle   RL RT WL WT  Ent_Lat  Ex_Lat
 0 +     6.80W       -        -    0  0  0  0        0       0

Supported LBA Sizes (NSID 0x1)
Id Fmt  Data  Metadt  Rel_Perf
 0 -     512       0         2
 1 +    4096       0         1

=== START OF SMART DATA SECTION ===
SMART overall-health self-assessment test result: PASSED

SMART/Health Information (NVMe Log 0x02)
Critical Warning:                   0x00
Temperature:                        41 Celsius
Available Spare:                    100%
Available Spare Threshold:          10%
Percentage Used:                    0%
Data Units Read:                    5,295 [2.71 GB]
Data Units Written:                 2,098 [1.07 GB]
Host Read Commands:                 550,363
Host Write Commands:                8,194
Controller Busy Time:               0
Power Cycles:                       362
Power On Hours:                     732
Unsafe Shutdowns:                   25
Media and Data Integrity Errors:    0
Error Information Log Entries:      9,074
Warning  Comp. Temperature Time:    0
Critical Comp. Temperature Time:    0

Error Information (NVMe Log 0x01, 16 of 63 entries)
Num   ErrCount  SQId   CmdId  Status  PELoc          LBA  NSID    VS
  0       9074     0  0x0000  0x4004  0x028            0     0     -
  1       9073     0  0x0017  0x4004      -            0     0     -
tokunori@tokunori-desktop:~/linux$ sudo smartctl -a /dev/nvme2
smartctl 7.3 2022-02-28 r5338 [x86_64-linux-6.4.0-rc7+] (local build)
Copyright (C) 2002-22, Bruce Allen, Christian Franke, www.smartmontools.org

=== START OF INFORMATION SECTION ===
Model Number:                       INTEL SSDPEKNW010T8
Serial Number:                      BTNH922508PS1P0B
Firmware Version:                   002C
PCI Vendor/Subsystem ID:            0x8086
IEEE OUI Identifier:                0x5cd2e4
Controller ID:                      1
NVMe Version:                       1.3
Number of Namespaces:               1
Namespace 1 Size/Capacity:          1,024,209,543,168 [1.02 TB]
Namespace 1 Formatted LBA Size:     512
Local Time is:                      Fri Sep 15 00:08:47 2023 JST
Firmware Updates (0x14):            2 Slots, no Reset required
Optional Admin Commands (0x0017):   Security Format Frmw_DL Self_Test
Optional NVM Commands (0x005f):     Comp Wr_Unc DS_Mngmt Wr_Zero Sav/Sel_Feat Timestmp
Log Page Attributes (0x0f):         S/H_per_NS Cmd_Eff_Lg Ext_Get_Lg Telmtry_Lg
Maximum Data Transfer Size:         32 Pages
Warning  Comp. Temp. Threshold:     77 Celsius
Critical Comp. Temp. Threshold:     80 Celsius

Supported Power States
St Op     Max   Active     Idle   RL RT WL WT  Ent_Lat  Ex_Lat
 0 +     4.00W       -        -    0  0  0  0        0       0
 1 +     3.00W       -        -    1  1  1  1        0       0
 2 +     2.20W       -        -    2  2  2  2        0       0
 3 -   0.0300W       -        -    3  3  3  3     5000    5000
 4 -   0.0040W       -        -    4  4  4  4     5000    9000

Supported LBA Sizes (NSID 0x1)
Id Fmt  Data  Metadt  Rel_Perf
 0 +     512       0         0

=== START OF SMART DATA SECTION ===
SMART overall-health self-assessment test result: PASSED

SMART/Health Information (NVMe Log 0x02)
Critical Warning:                   0x00
Temperature:                        27 Celsius
Available Spare:                    100%
Available Spare Threshold:          10%
Percentage Used:                    1%
Data Units Read:                    20,237,038 [10.3 TB]
Data Units Written:                 15,225,081 [7.79 TB]
Host Read Commands:                 967,264,118
Host Write Commands:                133,626,542
Controller Busy Time:               2,881
Power Cycles:                       2,326
Power On Hours:                     4,313
Unsafe Shutdowns:                   228
Media and Data Integrity Errors:    0
Error Information Log Entries:      2
Warning  Comp. Temperature Time:    0
Critical Comp. Temperature Time:    0

Error Information (NVMe Log 0x01, 16 of 256 entries)
No Errors Logged

nvmeof

scripts/setup.sh

root@tokunori-desktop:/home/tokunori/spdk# scripts/setup.sh status
Hugepages
node     hugesize     free /  total
node0   1048576kB        0 /      0
node0      2048kB        0 /      0

Type     BDF             Vendor Device NUMA    Driver           Device     Block devices
NVMe     0000:03:00.0    144d   a80a   0       nvme             nvme0      nvme0n1
NVMe     0000:0d:00.0    2646   500a   0       nvme             nvme1      nvme1n1
NVMe     0000:0f:00.0    8086   f1a8   0       nvme             nvme2      nvme2n1
root@tokunori-desktop:/home/tokunori/spdk# scripts/setup.sh
0000:0f:00.0 (8086 f1a8): Active devices: mount@nvme2n1:nvme2n1p2,mount@nvme2n1:nvme2n1p3, so not binding PCI dev
0000:0d:00.0 (2646 500a): nvme -> uio_pci_generic
0000:03:00.0 (144d a80a): nvme -> uio_pci_generic
root@tokunori-desktop:/home/tokunori/spdk# scripts/setup.sh status
Hugepages
node     hugesize     free /  total
node0   1048576kB        0 /      0
node0      2048kB     1024 /   1024

Type     BDF             Vendor Device NUMA    Driver           Device     Block devices
NVMe     0000:03:00.0    144d   a80a   0       uio_pci_generic  -          -
NVMe     0000:0d:00.0    2646   500a   0       uio_pci_generic  -          -
NVMe     0000:0f:00.0    8086   f1a8   0       nvme             nvme2      nvme2n1

./build/bin/nvmf_tgt

root@tokunori-desktop:/home/tokunori/spdk# ./build/bin/nvmf_tgt
[2023-06-24 10:27:37.570976] Starting SPDK v23.09-pre git sha1 568e4802a / DPDK 23.03.0 initialization...
[2023-06-24 10:27:37.571048] [ DPDK EAL parameters: nvmf --no-shconf -c 0x1 --huge-unlink --log-level=lib.eal:6 --log-level=lib.cryptodev:5 --log-level=user1:6 --iova-mode=pa --base-virtaddr=0x200000000000 --match-allocations --file-prefix=spdk_pid3899 ]
TELEMETRY: No legacy callbacks, legacy socket not created
[2023-06-24 10:27:37.685472] app.c: 767:spdk_app_start: *NOTICE*: Total cores available: 1
[2023-06-24 10:27:37.710889] reactor.c: 937:reactor_run: *NOTICE*: Reactor started on core 0
[2023-06-24 10:27:37.732998] accel_sw.c: 605:sw_accel_module_init: *NOTICE*: Accel framework software module initialized.
root@tokunori-desktop:/home/tokunori/spdk# ./build/bin/nvmf_tgt
[2023-06-24 10:27:37.570976] Starting SPDK v23.09-pre git sha1 568e4802a / DPDK 23.03.0 initialization...
[2023-06-24 10:27:37.571048] [ DPDK EAL parameters: nvmf --no-shconf -c 0x1 --huge-unlink --log-level=lib.eal:6 --log-level=lib.cryptodev:5 --log-level=user1:6 --iova-mode=pa --base-virtaddr=0x200000000000 --match-allocations --file-prefix=spdk_pid3899 ]
TELEMETRY: No legacy callbacks, legacy socket not created
[2023-06-24 10:27:37.685472] app.c: 767:spdk_app_start: *NOTICE*: Total cores available: 1
[2023-06-24 10:27:37.710889] reactor.c: 937:reactor_run: *NOTICE*: Reactor started on core 0
[2023-06-24 10:27:37.732998] accel_sw.c: 605:sw_accel_module_init: *NOTICE*: Accel framework software module initialized.
[2023-06-24 10:39:33.447339] nvmf.c: 677:spdk_nvmf_tgt_listen_ext: *ERROR*: Unable to find TCP transport. The transport must be created first also make sure it is properly registered.
[2023-06-24 10:51:05.723132] subsystem.c: 228:spdk_nvmf_subsystem_create: *ERROR*: Subsystem NQN 'nqn.2016-06.io.spdk:cnode1' already exists
[2023-06-24 10:51:05.723167] nvmf_rpc.c: 418:rpc_nvmf_create_subsystem: *ERROR*: Unable to create subsystem nqn.2016-06.io.spdk:cnode1
[2023-06-24 10:54:29.899471] nvmf.c: 677:spdk_nvmf_tgt_listen_ext: *ERROR*: Unable to find TCP transport. The transport must be created first also make sure it is properly registered.
[2023-06-24 10:58:26.057381] nvmf_rpc.c: 882:rpc_nvmf_subsystem_add_listener: *ERROR*: Unable to find subsystem with NQN nqn.1994-11.com.samsung:nvme:PM9A3:2.5-inch:S63WNE0R100201
[2023-06-24 10:59:16.649572] nvmf_rpc.c: 882:rpc_nvmf_subsystem_add_listener: *ERROR*: Unable to find subsystem with NQN nqn.1994-11.com.samsung:nvme:PM9A3:2.5-inch:S63WNE0R100201
[2023-06-24 11:03:04.825889] nvmf_rpc.c: 882:rpc_nvmf_subsystem_add_listener: *ERROR*: Unable to find subsystem with NQN nqn.2014-08.org.nvmexpress:uuid:fdc08b78-bf35-413a-9e30-44531ae17f48
[2023-06-24 11:04:06.905882] subsystem.c: 228:spdk_nvmf_subsystem_create: *ERROR*: Subsystem NQN 'nqn.2016-06.io.spdk:cnode1' already exists
[2023-06-24 11:04:06.905924] nvmf_rpc.c: 418:rpc_nvmf_create_subsystem: *ERROR*: Unable to create subsystem nqn.2016-06.io.spdk:cnode1
[2023-06-24 11:07:20.709657] nvmf_rpc.c: 882:rpc_nvmf_subsystem_add_listener: *ERROR*: Unable to find subsystem with NQN nqn.2014-08.org.nvmexpress:uuid:fdc08b78-bf35-413a-9e30-44531ae17f48
[2023-06-24 11:09:26.793500] nvmf.c: 677:spdk_nvmf_tgt_listen_ext: *ERROR*: Unable to find TCP transport. The transport must be created first also make sure it is properly registered.
[2023-06-24 11:09:29.929501] nvmf.c: 677:spdk_nvmf_tgt_listen_ext: *ERROR*: Unable to find TCP transport. The transport must be created first also make sure it is properly registered.
[2023-06-24 11:10:32.645414] nvmf.c: 677:spdk_nvmf_tgt_listen_ext: *ERROR*: Unable to find TCP transport. The transport must be created first also make sure it is properly registered.
[2023-06-24 11:11:48.774556] tcp.c: 655:nvmf_tcp_create: *NOTICE*: *** TCP Transport Init ***
[2023-06-24 11:11:50.126944] tcp.c: 943:nvmf_tcp_listen: *NOTICE*: *** NVMe/TCP Target Listening on 192.168.1.14 port 4420 ***
[2023-06-24 11:12:01.126752] subsystem.c:1209:spdk_nvmf_subsystem_listener_allowed: *WARNING*: Allowing connection to discovery subsystem on TCP/192.168.1.14/4420, even though this listener was not added to the discovery subsystem.  This behavior is deprecated and will be removed in a future release.

libaio.so.1: cannot open shared object file: No such file or directory

root@tokunori-desktop:/home/tokunori/spdk# ./build/bin/nvmf_tgt
./build/bin/nvmf_tgt: error while loading shared libraries: libaio.so.1: cannot open shared object file: No such file or directory

ln -s /usr/lib/x86_64-linux-gnu/libaio.so.1t64 /usr/lib/x86_64-linux-gnu/libaio.so.1
root@tokunori-desktop:/home/tokunori/spdk# ls /usr/lib/x86_64-linux-gnu/libaio.so.1t64
libaio.so.1t64      libaio.so.1t64.0.2  
root@tokunori-desktop:/home/tokunori/spdk# ls /usr/lib/x86_64-linux-gnu/libaio.so.1t64
/usr/lib/x86_64-linux-gnu/libaio.so.1t64
root@tokunori-desktop:/home/tokunori/spdk# ln -s /usr/lib/x86_64-linux-gnu/libaio.so.1t64 /usr/lib/x86_64-linux-gnu/libaio.so.1
root@tokunori-desktop:/home/tokunori/spdk# ls /usr/lib/x86_64-linux-gnu/libaio.so.1
libaio.so.1         libaio.so.1t64      libaio.so.1t64.0.2  
root@tokunori-desktop:/home/tokunori/spdk# ls -la /usr/lib/x86_64-linux-gnu/libaio.so.1*
lrwxrwxrwx 1 root root    40 May 12 01:27 /usr/lib/x86_64-linux-gnu/libaio.so.1 -> /usr/lib/x86_64-linux-gnu/libaio.so.1t64
lrwxrwxrwx 1 root root    18 Mar 28  2024 /usr/lib/x86_64-linux-gnu/libaio.so.1t64 -> libaio.so.1t64.0.2
-rw-r--r-- 1 root root 14336 Mar 28  2024 /usr/lib/x86_64-linux-gnu/libaio.so.1t64.0.2
root@tokunori-desktop:/home/tokunori/spdk# 

scripts/rpc.py

root@tokunori-desktop:/home/tokunori# cd spdk/
root@tokunori-desktop:/home/tokunori/spdk# scripts/rpc.py bdev_nvme_attach_controller -b NVMe0 -a 0000:03:00.0 -t pcie
NVMe0n1
root@tokunori-desktop:/home/tokunori/spdk# scripts/rpc.py bdev_nvme_get_controllers
[
  {
    "name": "NVMe0",
    "ctrlrs": [
      {
        "state": "enabled",
        "trid": {
          "trtype": "PCIe",
          "traddr": "0000:03:00.0"
        },
        "cntlid": 6,
        "host": {
          "nqn": "nqn.2014-08.org.nvmexpress:uuid:fdc08b78-bf35-413a-9e30-44531ae17f48",
          "addr": "",
          "svcid": ""
        }
      }
    ]
  }
]
root@tokunori-desktop:/home/tokunori/spdk# scripts/rpc.py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001 -d SPDK_Controller1
root@tokunori-desktop:/home/tokunori/spdk# scripts/rpc.py bdev_get_bdevs | grep "name"
    "name": "NVMe0n1",
    "product_name": "NVMe disk",
root@tokunori-desktop:/home/tokunori/spdk# scripts/rpc.py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 NVMe0n1
root@tokunori-desktop:/home/tokunori/spdk# ifconfig
...
wlp6s0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.1.14  netmask 255.255.255.0  broadcast 192.168.1.255
        inet6 fe80::3a2b:3216:ccf9:a89d  prefixlen 64  scopeid 0x20<link>
        inet6 240b:10:2720:5500:faad:601:61dd:a844  prefixlen 64  scopeid 0x0<global>
        inet6 240b:10:2720:5500:f020:f531:8c9e:c912  prefixlen 64  scopeid 0x0<global>
        ether 14:f6:d8:42:8a:6f  txqueuelen 1000  (Ethernet)
        RX packets 27463  bytes 22139115 (22.1 MB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 16456  bytes 4780844 (4.7 MB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

root@tokunori-desktop:/home/tokunori/spdk# scripts/rpc.py nvmf_create_transport -t tcp
root@tokunori-desktop:/home/tokunori/spdk# scripts/rpc.py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t tcp -a 192.168.1.14 -s 4420

nvme discover

root@tokunori-desktop:/home/tokunori# nvme discover -t tcp -a 192.168.1.14 -s 4420
Segmentation fault (core dumped)
root@tokunori-desktop:/home/tokunori# nvme discover -t tcp -a 192.168.1.14 -s 4420

Discovery Log Number of Records 1, Generation counter 1
=====Discovery Log Entry 0======
trtype:  tcp
adrfam:  ipv4
subtype: nvme subsystem
treq:    not required
portid:  0
trsvcid: 4420
subnqn:  nqn.2016-06.io.spdk:cnode1
traddr:  192.168.1.14
eflags:  none
sectype: none
root@tokunori-desktop:/home/tokunori# nvme connect -t tcp -n "nqn.2016-06.io.spdk:cnode1" -a 192.168.1.14 -s 4420
Segmentation fault (core dumped)
root@tokunori-desktop:/home/tokunori# nvme list
Node                  Generic               SN                   Model                                    Namespace  Usage                      Format           FW Rev
--------------------- --------------------- -------------------- ---------------------------------------- ---------- -------------------------- ---------------- --------
/dev/nvme2n1          /dev/ng2n1            BTNH922508PS1P0B     INTEL SSDPEKNW010T8                      0x1          1.02  TB /   1.02  TB    512   B +  0 B   002C
/dev/nvme0n1          /dev/ng0n1            SPDK00000000000001   SPDK_Controller1                         0x1        960.20  GB / 960.20  GB      4 KiB +  0 B   23.09
root@tokunori-desktop:/home/tokunori# nvme disconnect-all
root@tokunori-desktop:/home/tokunori# nvme list
Node                  Generic               SN                   Model                                    Namespace  Usage                      Format           FW Rev
--------------------- --------------------- -------------------- ---------------------------------------- ---------- -------------------------- ---------------- --------
/dev/nvme2n1          /dev/ng2n1            BTNH922508PS1P0B     INTEL SSDPEKNW010T8                      0x1          1.02  TB /   1.02  TB    512   B +  0 B   002C
tokunori@tokunori-desktop:~/nvme-cli$ sudo .build/nvme list
Node                  Generic               SN                   Model                                    Namespace  Usage                      Format           FW Rev
--------------------- --------------------- -------------------- ---------------------------------------- ---------- -------------------------- ---------------- --------
/dev/nvme0n1          /dev/ng0n1            BTNH922508PS1P0B     INTEL SSDPEKNW010T8                      0x1          1.02  TB /   1.02  TB    512   B +  0 B   002C
tokunori@tokunori-desktop:~/nvme-cli$ sudo .build/nvme discover -t tcp -a 192.168.1.14 -s 4420

Discovery Log Number of Records 1, Generation counter 1
=====Discovery Log Entry 0======
trtype:  tcp
adrfam:  ipv4
subtype: nvme subsystem
treq:    not required
portid:  0
trsvcid: 4420
subnqn:  nqn.2016-06.io.spdk:cnode1
traddr:  192.168.1.14
eflags:  none
sectype: none
tokunori@tokunori-desktop:~/nvme-cli$ sudo .build/nvme connect -t tcp -n "nqn.2016-06.io.spdk:cnode1" -a 192.168.1.14 -s 4420
tokunori@tokunori-desktop:~/nvme-cli$ sudo .build/nvme list
Node                  Generic               SN                   Model                                    Namespace  Usage                      Format           FW Rev
--------------------- --------------------- -------------------- ---------------------------------------- ---------- -------------------------- ---------------- --------
/dev/nvme1n1          /dev/ng1n1            SPDK00000000000001   SPDK_Controller1                         0x1        960.20  GB / 960.20  GB      4 KiB +  0 B   23.09
/dev/nvme0n1          /dev/ng0n1            BTNH922508PS1P0B     INTEL SSDPEKNW010T8                      0x1          1.02  TB /   1.02  TB    512   B +  0 B   002C

nvme-mi

Configuration Get: Health Status Change

  1. MT (Message Type): 4h (NVM Express Management Messages over MCTP)
  2. NMIMT (NVMe-MI Message Type): 1h (NVMe-MI Command)
  3. OPC (Opcode): 04h (Configuration Get)
  4. Configuration Identifier: 02h (Health Status Change)

tokunori@tokunori-desktop:~/nvme-cli$ sudo .build/nvme admin-passthru /dev/nvme0n1 -o 0x1e -4 0x804 -5 0x4 -6 0x2 -r
Admin Command NVMe-MI Receive is Success and result: 0x00000000

VPD Read

  1. MT (Message Type): 4h (NVM Express Management Messages over MCTP)
  2. NMIMT (NVMe-MI Message Type): 1h (NVMe-MI Command)
  3. OPC (Opcode): 05h (VPD Read)

tokunori@tokunori-desktop:~/nvme-cli$ sudo .build/nvme admin-passthru /dev/nvme0n1 -o 0x1e -4 0x804 -5 0x5 -6 0x0 -7 0x10 -r -i temp.dat -l 16
Admin Command NVMe-MI Receive is Success and result: 0x00000000
tokunori@tokunori-desktop:~/nvme-cli$ hexdump temp.dat
0000000 0001 0000 0f01 ef00 0e01 c819 6700 756e
0000010
tokunori@tokunori-desktop:~/nvme-cli$ sudo .build/nvme admin-passthru /dev/nvme0n1 -o 0x1e -4 0x804 -5 0x5 -6 0x8 -7 0x10 -r -i temp-2.dat -l 16
Admin Command NVMe-MI Receive is Success and result: 0x00000000
tokunori@tokunori-desktop:~/nvme-cli$ hexdump temp-2.dat
0000000 0e01 c819 6700 756e 6d73 5361 00d8 0000
0000010

tokunori@tokunori-desktop:~/nvme-cli$ sudo .build/nvme nvme-mi-recv /dev/nvme0n1 -o 5 -m 1 -0 0 -1 0x100 -l 256
NVMe-MI Receive Command is Success and result: 0x00000000 (status: 0x00, response: 0x000000)
       0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f
0000: 01 00 00 00 01 0f 00 ef 01 0e 19 c8 00 67 6e 75 ".............gnu"
0010: 73 6d 61 53 d8 00 00 00 00 00 00 00 00 00 00 00 "smaS............"
0020: 00 00 00 00 00 00 00 00 33 41 39 4d 50 e8 00 00 "........3A9MP..."
0030: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 "................"
0040: 00 00 00 00 43 37 42 30 30 2d 52 4a 43 48 30 36 "....C7B00-RJCH06"
0050: 39 32 4c 51 5a 4d c2 00 00 d4 53 36 33 57 4e 45 "92LQZM....S63WNE"
0060: 30 52 31 30 30 32 30 31 00 00 00 00 00 00 00 00 "0R100201........"
0070: c1 00 00 00 00 00 00 00 0b 02 3b bc fc 00 12 00 "..........;....."
0080: 00 00 00 00 00 00 00 00 00 00 02 00 00 04 14 14 "................"
0090: 00 60 35 90 df 00 00 00 00 00 00 00 00 00 00 00 ".`5............."
00a0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 "................"
00b0: 00 00 00 00 00 00 00 00 0c 82 0b ea 7d 01 00 01 "............}..."
00c0: 0f 04 00 01 00 00 00 00 00 00 00 00 00 00 00 00 "................"
00d0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 "................"
00e0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 "................"
00f0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 "................"

VPD Write


tokunori@tokunori-desktop:~/nvme-cli$ sudo .build/nvme admin-passthru /dev/nvme0n1 -o 0x1d -4 0x804 -5 0x6 -6 0x0 -7 0x100 -w -i vpd-2.bin -l 256
Admin Command NVMe-MI Send is Success and result: 0x00000000
tokunori@tokunori-desktop:~/nvme-cli$ sudo .build/nvme admin-passthru /dev/nvme0n1 -o 0x1e -4 0x804 -5 0x5 -6 0x0 -7 0x100 -r -l 256
Admin Command NVMe-MI Receive is Success and result: 0x00000000
       0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f
0000: 01 00 00 00 01 0f 00 ef 01 0e 19 c8 00 67 6e 75 ".............gnu"
0010: 73 6d 61 53 d8 00 00 00 00 00 00 00 00 00 00 00 "smaS............"
0020: 00 00 00 00 00 00 00 00 33 41 39 4d 50 e8 00 00 "........3A9MP..."
0030: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 "................"
0040: 00 00 00 00 43 37 42 30 30 2d 52 4a 43 48 30 36 "....C7B00-RJCH06"
0050: 39 32 4c 51 5a 4d c2 00 00 d4 53 36 33 57 4e 45 "92LQZM....S63WNE"
0060: 30 52 31 30 30 32 30 31 00 00 00 00 00 00 00 00 "0R100201........"
0070: c1 00 00 00 00 00 00 00 0b 02 3b bc fc 00 12 00 "..........;....."
0080: 00 00 00 00 00 00 00 00 00 00 02 00 00 04 14 14 "................"
0090: 00 60 35 90 df 00 00 00 00 00 00 00 00 00 00 00 ".`5............."
00a0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 "................"
00b0: 00 00 00 00 00 00 00 00 0c 82 0b ea 7d 01 00 01 "............}..."
00c0: 0f 04 00 01 00 00 00 00 00 00 00 00 00 00 00 00 "................"
00d0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 "................"
00e0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 "................"
00f0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 aa "................"

tokunori@tokunori-desktop:~/nvme-cli$ sudo .build/nvme nvme-mi-send /dev/nvme0n1 -o 6 -m 1 -0 0 -1 0x100 -l 256 -i vpd.bin
NVMe-MI Send Command is Success and result: 0x00000000 (status: 0x00, response: 0x000000)
tokunori@tokunori-desktop:~/nvme-cli$ sudo .build/nvme nvme-mi-recv /dev/nvme0n1 -o 5 -m 1 -0 0 -1 0x100 -l 256
NVMe-MI Receive Command is Success and result: 0x00000000 (status: 0x00, response: 0x000000)
       0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f
0000: 01 00 00 00 01 0f 00 ef 01 0e 19 c8 00 67 6e 75 ".............gnu"
0010: 73 6d 61 53 d8 00 00 00 00 00 00 00 00 00 00 00 "smaS............"
0020: 00 00 00 00 00 00 00 00 33 41 39 4d 50 e8 00 00 "........3A9MP..."
0030: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 "................"
0040: 00 00 00 00 43 37 42 30 30 2d 52 4a 43 48 30 36 "....C7B00-RJCH06"
0050: 39 32 4c 51 5a 4d c2 00 00 d4 53 36 33 57 4e 45 "92LQZM....S63WNE"
0060: 30 52 31 30 30 32 30 31 00 00 00 00 00 00 00 00 "0R100201........"
0070: c1 00 00 00 00 00 00 00 0b 02 3b bc fc 00 12 00 "..........;....."
0080: 00 00 00 00 00 00 00 00 00 00 02 00 00 04 14 14 "................"
0090: 00 60 35 90 df 00 00 00 00 00 00 00 00 00 00 00 ".`5............."
00a0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 "................"
00b0: 00 00 00 00 00 00 00 00 0c 82 0b ea 7d 01 00 01 "............}..."
00c0: 0f 04 00 01 00 00 00 00 00 00 00 00 00 00 00 00 "................"
00d0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 "................"
00e0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 "................"
00f0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 "................"

nvme admin-pa

history


tokunori@tokunori-desktop:~$ history | tail -n 40 | grep admin
 1971  sudo ./nvme admin-pa /dev/nvme0 -f 
 1972  sudo ./nvme admin-pa /dev/nvme0 -o 0x2 -r -4 0xc1
 1973  sudo ./nvme admin-pa /dev/nvme0n0 -o 0x2 -r -4 0xc1
 1974  sudo ./nvme admin-pa /dev/nvme0 -o 0x2 -r -4 0xc1 -n 0
 1975  sudo ./nvme admin-pa /dev/nvme0 -o 0xa -r -4 0xc1 -n 0
 1976  sudo ./nvme admin-pa /dev/nvme0 -o 0xa -r -4 0xc1 -n 0 -l 32
 1977  sudo ./nvme admin-pa /dev/nvme0 -o 0xa -r -4 0xc2 -n 0 -l 32
 1978  sudo ./nvme admin-pa /dev/nvme0 -o 0xa -r -4 0xc1 -n 0 -l 64
 1979  sudo ./nvme admin-pa /dev/nvme0 -o 0xa -r -4 0xc2 -n 0 -l 64
 1980  sudo ./nvme admin-pa /dev/nvme0 -o 0xa -r -4 0xc0 -n 0 -l 64
 1981  sudo ./nvme admin-pa /dev/nvme0 -o 0xa -r -4 0xc3 -n 0 -l 64
 1982  sudo ./nvme admin-pa /dev/nvme0 -o 0xa -r -4 0xc4 -n 0 -l 64
 1983  sudo ./nvme admin-pa /dev/nvme0 -o 0x2 -r -4 0xc4 -n 0 -l 64
 1984  sudo ./nvme admin-pa /dev/nvme0 -o 0x2 -r -4 0xc0 -n 0 -l 64
 1985  sudo ./nvme admin-pa /dev/nvme0 -o 0x2 -r -4 0xc1 -n 0 -l 64
 1986  sudo ./nvme admin-pa /dev/nvme0 -o 0x2 -r -4 0xc2 -n 0 -l 64
 1987  sudo ./nvme admin-pa /dev/nvme0 -o 0x2 -r -4 0xc3 -n 0 -l 64
 1988  sudo ./nvme admin-pa /dev/nvme0 -o 0x2 -r -4 0xc3 -n 0 -l 4096
 1989  sudo ./nvme admin-pa /dev/nvme0 -o 0x2 -r -4 0xc2 -n 0 -l 4096
 1990  sudo ./nvme admin-pa /dev/nvme0 -o 0x2 -r -4 0xc2 -n 0 -l 4096 | less
 1991  sudo ./nvme admin-pa /dev/nvme0 -o 0x2 -r -4 0xc2 -n 0 -l 4096 | head
 2001  history | tail -n 40 | grep admin
tokunori@tokunori-desktop:~$ 

sudo ./nvme admin-pa /dev/nvme0 -o 0xa -r -4 0xc1 -n 0 -l 32


tokunori@tokunori-desktop:~/nvme-cli/.build$  sudo ./nvme admin-pa /dev/nvme0 -o 0xa -r -4 0xc1 -n 0 -l 32
Admin Command Get Features is Success and result: 0x00000000
       0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f
0000: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 "................"
0010: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 "................"
