Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * nvme-lightnvm.c - LightNVM NVMe device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * Copyright (C) 2014-2015 IT University of Copenhagen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * Initial release: Matias Bjorling <mb@lightnvm.io>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #include "nvme.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/nvme.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/bitops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/lightnvm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/vmalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/sched/sysctl.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <uapi/linux/lightnvm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) 
/*
 * Vendor-specific admin opcodes used by LightNVM (open-channel) devices:
 * geometry identify plus get/set of the bad-block table.
 */
enum nvme_nvm_admin_opcode {
	nvme_nvm_admin_identity		= 0xe2,
	nvme_nvm_admin_get_bb_tbl	= 0xf2,
	nvme_nvm_admin_set_bb_tbl	= 0xf1,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) 
/* Log page identifier for retrieving per-chunk report entries (OCSSD 2.0) */
enum nvme_nvm_log_page {
	NVME_NVM_LOG_REPORT_CHUNK	= 0xca,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) 
/*
 * 64-byte submission-queue entry for the physical read/write command.
 * The leading fields mirror the common NVMe command layout; spba carries
 * the starting physical sector address (or address-list pointer).
 */
struct nvme_nvm_ph_rw {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2;
	__le64			metadata;	/* DMA address for out-of-band metadata */
	__le64			prp1;
	__le64			prp2;
	__le64			spba;		/* start physical sector address */
	__le16			length;		/* sector count for the transfer */
	__le16			control;
	__le32			dsmgmt;
	__le64			resv;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) 
/*
 * 64-byte submission-queue entry for the block/chunk erase command.
 * Layout matches nvme_nvm_ph_rw apart from the reserved region.
 */
struct nvme_nvm_erase_blk {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;		/* address of the block(s) to erase */
	__le16			length;
	__le16			control;
	__le32			dsmgmt;
	__le64			resv;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 
/*
 * 64-byte submission-queue entry for the geometry identify admin command
 * (opcode nvme_nvm_admin_identity).
 */
struct nvme_nvm_identity {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__u32			rsvd11[6];
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) 
/*
 * 64-byte submission-queue entry for reading the bad-block table
 * (opcode nvme_nvm_admin_get_bb_tbl).
 */
struct nvme_nvm_getbbtbl {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;		/* address the table is requested for */
	__u32			rsvd4[4];
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) 
/*
 * 64-byte submission-queue entry for updating the bad-block table
 * (opcode nvme_nvm_admin_set_bb_tbl).
 */
struct nvme_nvm_setbbtbl {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__le64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;		/* address(es) whose state is updated */
	__le16			nlb;		/* number of blocks addressed by spba */
	__u8			value;		/* new block state to record */
	__u8			rsvd3;
	__u32			rsvd4[3];
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 
/*
 * Union of all LightNVM command encodings; cast to struct nvme_command
 * when submitted so every variant shares the 64-byte SQE footprint.
 */
struct nvme_nvm_command {
	union {
		struct nvme_common_command common;
		struct nvme_nvm_ph_rw ph_rw;
		struct nvme_nvm_erase_blk erase;
		struct nvme_nvm_identity identity;
		struct nvme_nvm_getbbtbl get_bb;
		struct nvme_nvm_setbbtbl set_bb;
	};
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 
/*
 * OCSSD 1.2 identify: configuration-group descriptor. Describes the media
 * geometry (channels, LUNs, planes, chunks, pages, sector sizes), typical
 * operation timings, and multi-plane capabilities. Padded to 960 bytes
 * (checked in _nvme_nvm_check_size()).
 */
struct nvme_nvm_id12_grp {
	__u8			mtype;		/* media type; only 0 is supported */
	__u8			fmtype;
	__le16			res16;
	__u8			num_ch;		/* number of channels */
	__u8			num_lun;	/* LUNs per channel */
	__u8			num_pln;	/* planes per LUN */
	__u8			rsvd1;
	__le16			num_chk;	/* chunks (blocks) per LUN */
	__le16			num_pg;		/* pages per block */
	__le16			fpg_sz;		/* full page size in bytes */
	__le16			csecs;		/* sector size in bytes */
	__le16			sos;		/* out-of-band area size per sector */
	__le16			rsvd2;
	__le32			trdt;		/* typical/max read and program times */
	__le32			trdm;
	__le32			tprt;
	__le32			tprm;
	__le32			tbet;		/* typical/max erase times */
	__le32			tbem;
	__le32			mpos;		/* multi-plane operation support bitmap */
	__le32			mccap;
	__le16			cpar;
	__u8			reserved[906];
} __packed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) 
/*
 * OCSSD 1.2 PPA address format: bit lengths and offsets describing how
 * channel/LUN/plane/block/page/sector are packed into a physical address.
 * Fixed at 16 bytes (checked in _nvme_nvm_check_size()).
 */
struct nvme_nvm_id12_addrf {
	__u8			ch_offset;
	__u8			ch_len;
	__u8			lun_offset;
	__u8			lun_len;
	__u8			pln_offset;
	__u8			pln_len;
	__u8			blk_offset;
	__u8			blk_len;
	__u8			pg_offset;
	__u8			pg_len;
	__u8			sec_offset;
	__u8			sec_len;
	__u8			res[4];
} __packed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 
/*
 * OCSSD 1.2 geometry identify payload; sized to NVME_IDENTIFY_DATA_SIZE
 * (checked in _nvme_nvm_check_size()). ver_id occupies the first byte in
 * both the 1.2 and 2.0 layouts, which is how the revision is detected.
 */
struct nvme_nvm_id12 {
	__u8			ver_id;
	__u8			vmnt;
	__u8			cgrps;		/* number of config groups; must be 1 */
	__u8			res;
	__le32			cap;
	__le32			dom;
	struct nvme_nvm_id12_addrf ppaf;
	__u8			resv[228];
	struct nvme_nvm_id12_grp grp;
	__u8			resv2[2880];
} __packed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 
/*
 * Bad-block table returned by the get_bb_tbl admin command: a 64-byte
 * header (checked in _nvme_nvm_check_size()) followed by one state byte
 * per block in the flexible blk[] array.
 */
struct nvme_nvm_bb_tbl {
	__u8	tblid[4];
	__le16	verid;
	__le16	revid;
	__le32	rvsd1;
	__le32	tblks;		/* total number of block entries */
	__le32	tfact;		/* factory-marked bad blocks */
	__le32	tgrown;		/* grown bad blocks */
	__le32	tdresv;
	__le32	thresv;
	__le32	rsvd2[8];
	__u8	blk[];		/* per-block state bytes */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 
/*
 * OCSSD 2.0 LBA format: bit lengths for group/parallel-unit/chunk/sector
 * components of a logical address. 8 bytes (checked in
 * _nvme_nvm_check_size()); offsets are implied, packed from the LSB up.
 */
struct nvme_nvm_id20_addrf {
	__u8			grp_len;
	__u8			pu_len;
	__u8			chk_len;
	__u8			lba_len;
	__u8			resv[4];
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) 
/*
 * OCSSD 2.0 geometry identify payload; sized to NVME_IDENTIFY_DATA_SIZE
 * (checked in _nvme_nvm_check_size()). Unlike 1.2, the revision is split
 * into explicit major/minor numbers.
 */
struct nvme_nvm_id20 {
	__u8			mjr;		/* major revision */
	__u8			mnr;		/* minor revision */
	__u8			resv[6];

	struct nvme_nvm_id20_addrf lbaf;

	__le32			mccap;
	__u8			resv2[12];

	__u8			wit;
	__u8			resv3[31];

	/* Geometry */
	__le16			num_grp;	/* groups (maps to channels) */
	__le16			num_pu;		/* parallel units (maps to LUNs) */
	__le32			num_chk;	/* chunks per parallel unit */
	__le32			clba;		/* logical blocks per chunk */
	__u8			resv4[52];

	/* Write data requirements */
	__le32			ws_min;
	__le32			ws_opt;
	__le32			mw_cunits;
	__le32			maxoc;		/* max open chunks */
	__le32			maxocpu;	/* max open chunks per parallel unit */
	__u8			resv5[44];

	/* Performance related metrics */
	__le32			trdt;
	__le32			trdm;
	__le32			twrt;
	__le32			twrm;
	__le32			tcrst;
	__le32			tcrsm;
	__u8			resv6[40];

	/* Reserved area */
	__u8			resv7[2816];

	/* Vendor specific */
	__u8			vs[1024];
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 
/*
 * On-wire per-chunk report entry (2.0 REPORT_CHUNK log page). Must stay
 * 32 bytes and binary-compatible with the generic struct nvm_chk_meta
 * (both enforced in _nvme_nvm_check_size()).
 */
struct nvme_nvm_chk_meta {
	__u8	state;		/* chunk state */
	__u8	type;		/* chunk type */
	__u8	wi;		/* wear-level index */
	__u8	rsvd[5];
	__le64	slba;		/* starting LBA of the chunk */
	__le64	cnlb;		/* number of logical blocks in the chunk */
	__le64	wp;		/* write pointer */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240)  * Check we didn't inadvertently grow the command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241)  */
static inline void _nvme_nvm_check_size(void)
{
	/* Every submission-queue entry variant must stay exactly 64 bytes */
	BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
	/* Identify sub-structures and payloads must match the wire format */
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id12_grp) != 960);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id12_addrf) != 16);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id12) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id20_addrf) != 8);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id20) != NVME_IDENTIFY_DATA_SIZE);
	/* Chunk report entries are copied directly into struct nvm_chk_meta */
	BUILD_BUG_ON(sizeof(struct nvme_nvm_chk_meta) != 32);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_chk_meta) !=
						sizeof(struct nvm_chk_meta));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) static void nvme_nvm_set_addr_12(struct nvm_addrf_12 *dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 				 struct nvme_nvm_id12_addrf *src)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 	dst->ch_len = src->ch_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 	dst->lun_len = src->lun_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 	dst->blk_len = src->blk_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) 	dst->pg_len = src->pg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 	dst->pln_len = src->pln_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 	dst->sec_len = src->sec_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 	dst->ch_offset = src->ch_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) 	dst->lun_offset = src->lun_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) 	dst->blk_offset = src->blk_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) 	dst->pg_offset = src->pg_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) 	dst->pln_offset = src->pln_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 	dst->sec_offset = src->sec_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 	dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 	dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) 	dst->blk_mask = ((1ULL << dst->blk_len) - 1) << dst->blk_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 	dst->pg_mask = ((1ULL << dst->pg_len) - 1) << dst->pg_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) 	dst->pln_mask = ((1ULL << dst->pln_len) - 1) << dst->pln_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 	dst->sec_mask = ((1ULL << dst->sec_len) - 1) << dst->sec_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) static int nvme_nvm_setup_12(struct nvme_nvm_id12 *id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) 			     struct nvm_geo *geo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) 	struct nvme_nvm_id12_grp *src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) 	int sec_per_pg, sec_per_pl, pg_per_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) 	if (id->cgrps != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) 	src = &id->grp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 	if (src->mtype != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 		pr_err("nvm: memory type not supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) 	/* 1.2 spec. only reports a single version id - unfold */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) 	geo->major_ver_id = id->ver_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) 	geo->minor_ver_id = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) 	/* Set compacted version for upper layers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) 	geo->version = NVM_OCSSD_SPEC_12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) 	geo->num_ch = src->num_ch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 	geo->num_lun = src->num_lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) 	geo->all_luns = geo->num_ch * geo->num_lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) 	geo->num_chk = le16_to_cpu(src->num_chk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) 	geo->csecs = le16_to_cpu(src->csecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) 	geo->sos = le16_to_cpu(src->sos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 	pg_per_blk = le16_to_cpu(src->num_pg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) 	sec_per_pg = le16_to_cpu(src->fpg_sz) / geo->csecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 	sec_per_pl = sec_per_pg * src->num_pln;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 	geo->clba = sec_per_pl * pg_per_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) 	geo->all_chunks = geo->all_luns * geo->num_chk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) 	geo->total_secs = geo->clba * geo->all_chunks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 	geo->ws_min = sec_per_pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) 	geo->ws_opt = sec_per_pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) 	geo->mw_cunits = geo->ws_opt << 3;	/* default to MLC safe values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) 	/* Do not impose values for maximum number of open blocks as it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) 	 * unspecified in 1.2. Users of 1.2 must be aware of this and eventually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) 	 * specify these values through a quirk if restrictions apply.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) 	geo->maxoc = geo->all_luns * geo->num_chk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) 	geo->maxocpu = geo->num_chk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 	geo->mccap = le32_to_cpu(src->mccap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 	geo->trdt = le32_to_cpu(src->trdt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) 	geo->trdm = le32_to_cpu(src->trdm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 	geo->tprt = le32_to_cpu(src->tprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) 	geo->tprm = le32_to_cpu(src->tprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 	geo->tbet = le32_to_cpu(src->tbet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) 	geo->tbem = le32_to_cpu(src->tbem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 	/* 1.2 compatibility */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 	geo->vmnt = id->vmnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 	geo->cap = le32_to_cpu(id->cap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 	geo->dom = le32_to_cpu(id->dom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 	geo->mtype = src->mtype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 	geo->fmtype = src->fmtype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) 	geo->cpar = le16_to_cpu(src->cpar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 	geo->mpos = le32_to_cpu(src->mpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) 	geo->pln_mode = NVM_PLANE_SINGLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) 	if (geo->mpos & 0x020202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) 		geo->pln_mode = NVM_PLANE_DOUBLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) 		geo->ws_opt <<= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) 	} else if (geo->mpos & 0x040404) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) 		geo->pln_mode = NVM_PLANE_QUAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363) 		geo->ws_opt <<= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) 	geo->num_pln = src->num_pln;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 	geo->num_pg = le16_to_cpu(src->num_pg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 	geo->fpg_sz = le16_to_cpu(src->fpg_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 	nvme_nvm_set_addr_12((struct nvm_addrf_12 *)&geo->addrf, &id->ppaf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) static void nvme_nvm_set_addr_20(struct nvm_addrf *dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 				 struct nvme_nvm_id20_addrf *src)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 	dst->ch_len = src->grp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 	dst->lun_len = src->pu_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 	dst->chk_len = src->chk_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 	dst->sec_len = src->lba_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 	dst->sec_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 	dst->chk_offset = dst->sec_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 	dst->lun_offset = dst->chk_offset + dst->chk_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 	dst->ch_offset = dst->lun_offset + dst->lun_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 	dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 	dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 	dst->chk_mask = ((1ULL << dst->chk_len) - 1) << dst->chk_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 	dst->sec_mask = ((1ULL << dst->sec_len) - 1) << dst->sec_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 
/*
 * Translate an OCSSD 2.0 identify payload into the generic nvm_geo
 * description used by upper layers. Always returns 0; the payload was
 * already validated by version detection in the caller.
 */
static int nvme_nvm_setup_20(struct nvme_nvm_id20 *id,
			     struct nvm_geo *geo)
{
	/* 2.0 reports explicit major/minor revision numbers */
	geo->major_ver_id = id->mjr;
	geo->minor_ver_id = id->mnr;

	/* Set compacted version for upper layers */
	geo->version = NVM_OCSSD_SPEC_20;

	/* 2.0 groups/parallel-units map onto the 1.2 channel/LUN notions */
	geo->num_ch = le16_to_cpu(id->num_grp);
	geo->num_lun = le16_to_cpu(id->num_pu);
	geo->all_luns = geo->num_ch * geo->num_lun;

	geo->num_chk = le32_to_cpu(id->num_chk);
	geo->clba = le32_to_cpu(id->clba);

	geo->all_chunks = geo->all_luns * geo->num_chk;
	geo->total_secs = geo->clba * geo->all_chunks;

	geo->ws_min = le32_to_cpu(id->ws_min);
	geo->ws_opt = le32_to_cpu(id->ws_opt);
	geo->mw_cunits = le32_to_cpu(id->mw_cunits);
	geo->maxoc = le32_to_cpu(id->maxoc);
	geo->maxocpu = le32_to_cpu(id->maxocpu);

	geo->trdt = le32_to_cpu(id->trdt);
	geo->trdm = le32_to_cpu(id->trdm);
	/*
	 * struct nvm_geo keeps the 1.2 timing names: the 2.0 write and
	 * chunk-reset timings are stored in the program (tprt/tprm) and
	 * erase (tbet/tbem) slots — the cross-naming is intentional.
	 */
	geo->tprt = le32_to_cpu(id->twrt);
	geo->tprm = le32_to_cpu(id->twrm);
	geo->tbet = le32_to_cpu(id->tcrst);
	geo->tbem = le32_to_cpu(id->tcrsm);

	nvme_nvm_set_addr_20(&geo->addrf, &id->lbaf);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) static int nvme_nvm_identity(struct nvm_dev *nvmdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 	struct nvme_ns *ns = nvmdev->q->queuedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 	struct nvme_nvm_id12 *id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 	struct nvme_nvm_command c = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 	c.identity.opcode = nvme_nvm_admin_identity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 	c.identity.nsid = cpu_to_le32(ns->head->ns_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 	id = kmalloc(sizeof(struct nvme_nvm_id12), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 	if (!id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 				id, sizeof(struct nvme_nvm_id12));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 		ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 	 * The 1.2 and 2.0 specifications share the first byte in their geometry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 	 * command to make it possible to know what version a device implements.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 	switch (id->ver_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 	case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 		ret = nvme_nvm_setup_12(id, &nvmdev->geo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 	case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 		ret = nvme_nvm_setup_20((struct nvme_nvm_id20 *)id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 							&nvmdev->geo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 		dev_err(ns->ctrl->device, "OCSSD revision not supported (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 							id->ver_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	kfree(id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 								u8 *blks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	struct request_queue *q = nvmdev->q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 	struct nvm_geo *geo = &nvmdev->geo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	struct nvme_ns *ns = q->queuedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	struct nvme_ctrl *ctrl = ns->ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 	struct nvme_nvm_command c = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	struct nvme_nvm_bb_tbl *bb_tbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	int nr_blks = geo->num_chk * geo->num_pln;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	c.get_bb.nsid = cpu_to_le32(ns->head->ns_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	c.get_bb.spba = cpu_to_le64(ppa.ppa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 	bb_tbl = kzalloc(tblsz, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 	if (!bb_tbl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 	ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 								bb_tbl, tblsz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 		dev_err(ctrl->device, "get bad block table failed (%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 		ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 	if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 		bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 		dev_err(ctrl->device, "bbt format mismatch\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	if (le16_to_cpu(bb_tbl->verid) != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 		dev_err(ctrl->device, "bbt version not supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 	if (le32_to_cpu(bb_tbl->tblks) != nr_blks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 		dev_err(ctrl->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 				"bbt unsuspected blocks returned (%u!=%u)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 				le32_to_cpu(bb_tbl->tblks), nr_blks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	memcpy(blks, bb_tbl->blk, geo->num_chk * geo->num_pln);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 	kfree(bb_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 							int nr_ppas, int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 	struct nvme_ns *ns = nvmdev->q->queuedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	struct nvme_nvm_command c = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	c.set_bb.nsid = cpu_to_le32(ns->head->ns_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	c.set_bb.spba = cpu_to_le64(ppas->ppa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 	c.set_bb.nlb = cpu_to_le16(nr_ppas - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 	c.set_bb.value = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 								NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 		dev_err(ns->ctrl->device, "set bad block table failed (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 									ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553)  * Expect the lba in device format
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) static int nvme_nvm_get_chk_meta(struct nvm_dev *ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 				 sector_t slba, int nchks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 				 struct nvm_chk_meta *meta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 	struct nvm_geo *geo = &ndev->geo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 	struct nvme_ns *ns = ndev->q->queuedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 	struct nvme_ctrl *ctrl = ns->ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 	struct nvme_nvm_chk_meta *dev_meta, *dev_meta_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 	struct ppa_addr ppa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	size_t left = nchks * sizeof(struct nvme_nvm_chk_meta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 	size_t log_pos, offset, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	int i, max_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 	 * limit requests to maximum 256K to avoid issuing arbitrary large
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	 * requests when the device does not specific a maximum transfer size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	max_len = min_t(unsigned int, ctrl->max_hw_sectors << 9, 256 * 1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	dev_meta = kmalloc(max_len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	if (!dev_meta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 	/* Normalize lba address space to obtain log offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 	ppa.ppa = slba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 	ppa = dev_to_generic_addr(ndev, ppa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 	log_pos = ppa.m.chk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 	log_pos += ppa.m.pu * geo->num_chk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	log_pos += ppa.m.grp * geo->num_lun * geo->num_chk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	offset = log_pos * sizeof(struct nvme_nvm_chk_meta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	while (left) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 		len = min_t(unsigned int, left, max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 		memset(dev_meta, 0, max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 		dev_meta_off = dev_meta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 		ret = nvme_get_log(ctrl, ns->head->ns_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 				NVME_NVM_LOG_REPORT_CHUNK, 0, NVME_CSI_NVM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 				dev_meta, len, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 			dev_err(ctrl->device, "Get REPORT CHUNK log error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 		for (i = 0; i < len; i += sizeof(struct nvme_nvm_chk_meta)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 			meta->state = dev_meta_off->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 			meta->type = dev_meta_off->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 			meta->wi = dev_meta_off->wi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 			meta->slba = le64_to_cpu(dev_meta_off->slba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 			meta->cnlb = le64_to_cpu(dev_meta_off->cnlb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 			meta->wp = le64_to_cpu(dev_meta_off->wp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 			meta++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 			dev_meta_off++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 		offset += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 		left -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	kfree(dev_meta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 				    struct nvme_nvm_command *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	c->ph_rw.opcode = rqd->opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 	c->ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	c->ph_rw.control = cpu_to_le16(rqd->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 	c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) static void nvme_nvm_end_io(struct request *rq, blk_status_t status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 	struct nvm_rq *rqd = rq->end_io_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 	rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 	rqd->error = nvme_req(rq)->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	nvm_end_io(rqd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 	kfree(nvme_req(rq)->cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 	blk_mq_free_request(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) static struct request *nvme_nvm_alloc_request(struct request_queue *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 					      struct nvm_rq *rqd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 					      struct nvme_nvm_command *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	struct nvme_ns *ns = q->queuedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 	struct request *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	nvme_nvm_rqtocmd(rqd, ns, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 	if (IS_ERR(rq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 		return rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 	rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 	if (rqd->bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 		blk_rq_append_bio(rq, &rqd->bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 		rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	return rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 			      void *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	struct nvm_geo *geo = &dev->geo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 	struct request_queue *q = dev->q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 	struct nvme_nvm_command *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 	struct request *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 	cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 	if (!cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 	rq = nvme_nvm_alloc_request(q, rqd, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 	if (IS_ERR(rq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 		ret = PTR_ERR(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 		goto err_free_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	if (buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 		ret = blk_rq_map_kern(q, rq, buf, geo->csecs * rqd->nr_ppas,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 				GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 			goto err_free_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	rq->end_io_data = rqd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) err_free_cmd:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	kfree(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 					int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	struct nvme_ns *ns = nvmdev->q->queuedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	return dma_pool_create(name, ns->ctrl->dev, size, PAGE_SIZE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) static void nvme_nvm_destroy_dma_pool(void *pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	struct dma_pool *dma_pool = pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	dma_pool_destroy(dma_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 				    gfp_t mem_flags, dma_addr_t *dma_handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	return dma_pool_alloc(pool, mem_flags, dma_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) static void nvme_nvm_dev_dma_free(void *pool, void *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 							dma_addr_t dma_handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	dma_pool_free(pool, addr, dma_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) static struct nvm_dev_ops nvme_nvm_dev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	.identity		= nvme_nvm_identity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	.get_bb_tbl		= nvme_nvm_get_bb_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	.set_bb_tbl		= nvme_nvm_set_bb_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	.get_chk_meta		= nvme_nvm_get_chk_meta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	.submit_io		= nvme_nvm_submit_io,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	.create_dma_pool	= nvme_nvm_create_dma_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	.destroy_dma_pool	= nvme_nvm_destroy_dma_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	.dev_dma_alloc		= nvme_nvm_dev_dma_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	.dev_dma_free		= nvme_nvm_dev_dma_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) static int nvme_nvm_submit_user_cmd(struct request_queue *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 				struct nvme_ns *ns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 				struct nvme_nvm_command *vcmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 				void __user *ubuf, unsigned int bufflen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 				void __user *meta_buf, unsigned int meta_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 				void __user *ppa_buf, unsigned int ppa_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 				u32 *result, u64 *status, unsigned int timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	bool write = nvme_is_write((struct nvme_command *)vcmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	struct nvm_dev *dev = ns->ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	struct gendisk *disk = ns->disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	struct request *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	struct bio *bio = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	__le64 *ppa_list = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	dma_addr_t ppa_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	__le64 *metadata = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	dma_addr_t metadata_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	DECLARE_COMPLETION_ONSTACK(wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 			NVME_QID_ANY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	if (IS_ERR(rq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 		goto err_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	if (ppa_buf && ppa_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 		ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 		if (!ppa_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 			ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 			goto err_rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		if (copy_from_user(ppa_list, (void __user *)ppa_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 						sizeof(u64) * (ppa_len + 1))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 			ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 			goto err_ppa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		vcmd->ph_rw.spba = cpu_to_le64(ppa_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 		vcmd->ph_rw.spba = cpu_to_le64((uintptr_t)ppa_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	if (ubuf && bufflen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		ret = blk_rq_map_user(q, rq, NULL, ubuf, bufflen, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 			goto err_ppa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		bio = rq->bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		if (meta_buf && meta_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 			metadata = dma_pool_alloc(dev->dma_pool, GFP_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 								&metadata_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 			if (!metadata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 				ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 				goto err_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 			if (write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 				if (copy_from_user(metadata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 						(void __user *)meta_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 						meta_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 					ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 					goto err_meta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 			vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 		bio->bi_disk = disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	blk_execute_rq(q, NULL, rq, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 		ret = -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	else if (nvme_req(rq)->status & 0x7ff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	if (result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		*result = nvme_req(rq)->status & 0x7ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 		*status = le64_to_cpu(nvme_req(rq)->result.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	if (metadata && !ret && !write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		if (copy_to_user(meta_buf, (void *)metadata, meta_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 			ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) err_meta:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	if (meta_buf && meta_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 		dma_pool_free(dev->dma_pool, metadata, metadata_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) err_map:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	if (bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		blk_rq_unmap_user(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) err_ppa:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	if (ppa_buf && ppa_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) err_rq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	blk_mq_free_request(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) err_cmd:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) static int nvme_nvm_submit_vio(struct nvme_ns *ns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 					struct nvm_user_vio __user *uvio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	struct nvm_user_vio vio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	struct nvme_nvm_command c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	unsigned int length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	if (copy_from_user(&vio, uvio, sizeof(vio)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	if (vio.flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	memset(&c, 0, sizeof(c));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	c.ph_rw.opcode = vio.opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	c.ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	c.ph_rw.control = cpu_to_le16(vio.control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	c.ph_rw.length = cpu_to_le16(vio.nppas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	length = (vio.nppas + 1) << ns->lba_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	ret = nvme_nvm_submit_user_cmd(ns->queue, ns, &c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 			(void __user *)(uintptr_t)vio.addr, length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 			(void __user *)(uintptr_t)vio.metadata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 							vio.metadata_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 			(void __user *)(uintptr_t)vio.ppa_list, vio.nppas,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 			&vio.result, &vio.status, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	if (ret && copy_to_user(uvio, &vio, sizeof(vio)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) static int nvme_nvm_user_vcmd(struct nvme_ns *ns, int admin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 					struct nvm_passthru_vio __user *uvcmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	struct nvm_passthru_vio vcmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	struct nvme_nvm_command c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	struct request_queue *q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	unsigned int timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	if (copy_from_user(&vcmd, uvcmd, sizeof(vcmd)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	if ((vcmd.opcode != 0xF2) && (!capable(CAP_SYS_ADMIN)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	if (vcmd.flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	memset(&c, 0, sizeof(c));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	c.common.opcode = vcmd.opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	c.common.nsid = cpu_to_le32(ns->head->ns_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	c.common.cdw2[0] = cpu_to_le32(vcmd.cdw2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	c.common.cdw2[1] = cpu_to_le32(vcmd.cdw3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	/* cdw11-12 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	c.ph_rw.length = cpu_to_le16(vcmd.nppas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	c.ph_rw.control  = cpu_to_le16(vcmd.control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	c.common.cdw13 = cpu_to_le32(vcmd.cdw13);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	c.common.cdw14 = cpu_to_le32(vcmd.cdw14);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	c.common.cdw15 = cpu_to_le32(vcmd.cdw15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	if (vcmd.timeout_ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		timeout = msecs_to_jiffies(vcmd.timeout_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	q = admin ? ns->ctrl->admin_q : ns->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	ret = nvme_nvm_submit_user_cmd(q, ns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 			(struct nvme_nvm_command *)&c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 			(void __user *)(uintptr_t)vcmd.addr, vcmd.data_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 			(void __user *)(uintptr_t)vcmd.metadata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 							vcmd.metadata_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 			(void __user *)(uintptr_t)vcmd.ppa_list, vcmd.nppas,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 			&vcmd.result, &vcmd.status, timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	if (ret && copy_to_user(uvcmd, &vcmd, sizeof(vcmd)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	case NVME_NVM_IOCTL_ADMIN_VIO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		return nvme_nvm_user_vcmd(ns, 1, (void __user *)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	case NVME_NVM_IOCTL_IO_VIO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		return nvme_nvm_user_vcmd(ns, 0, (void __user *)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	case NVME_NVM_IOCTL_SUBMIT_VIO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		return nvme_nvm_submit_vio(ns, (void __user *)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		return -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	struct request_queue *q = ns->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	struct nvm_dev *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	struct nvm_geo *geo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	_nvme_nvm_check_size();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	dev = nvm_alloc_dev(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	/* Note that csecs and sos will be overridden if it is a 1.2 drive. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	geo = &dev->geo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	geo->csecs = 1 << ns->lba_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	geo->sos = ns->ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	if (ns->features & NVME_NS_EXT_LBAS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		geo->ext = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 		geo->ext = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	geo->mdts = ns->ctrl->max_hw_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	dev->q = q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	memcpy(dev->name, disk_name, DISK_NAME_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	dev->ops = &nvme_nvm_dev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	dev->private_data = ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	ns->ndev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	return nvm_register(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 
/* Tear down the LightNVM device previously set up by nvme_nvm_register(). */
void nvme_nvm_unregister(struct nvme_ns *ns)
{
	nvm_unregister(ns->ndev);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) static ssize_t nvm_dev_attr_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		struct device_attribute *dattr, char *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	struct nvm_dev *ndev = ns->ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	struct nvm_geo *geo = &ndev->geo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	struct attribute *attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	if (!ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	attr = &dattr->attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	if (strcmp(attr->name, "version") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		if (geo->major_ver_id == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 			return scnprintf(page, PAGE_SIZE, "%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 						geo->major_ver_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 			return scnprintf(page, PAGE_SIZE, "%u.%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 						geo->major_ver_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 						geo->minor_ver_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	} else if (strcmp(attr->name, "capabilities") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->cap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	} else if (strcmp(attr->name, "read_typ") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->trdt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	} else if (strcmp(attr->name, "read_max") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->trdm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		return scnprintf(page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 				 PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 				 "Unhandled attr(%s) in `%s`\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 				 attr->name, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 
/*
 * Format a 1.2-style PPA address layout for the "ppa_format" sysfs file:
 * twelve %02x fields giving offset/length pairs for channel, LUN, plane,
 * block, page and sector, in that order.
 */
static ssize_t nvm_dev_attr_show_ppaf(struct nvm_addrf_12 *ppaf, char *page)
{
	return scnprintf(page, PAGE_SIZE,
		"0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
				ppaf->ch_offset, ppaf->ch_len,
				ppaf->lun_offset, ppaf->lun_len,
				ppaf->pln_offset, ppaf->pln_len,
				ppaf->blk_offset, ppaf->blk_len,
				ppaf->pg_offset, ppaf->pg_len,
				ppaf->sec_offset, ppaf->sec_len);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) static ssize_t nvm_dev_attr_show_12(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		struct device_attribute *dattr, char *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	struct nvm_dev *ndev = ns->ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	struct nvm_geo *geo = &ndev->geo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	struct attribute *attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	if (!ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	attr = &dattr->attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	if (strcmp(attr->name, "vendor_opcode") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->vmnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	} else if (strcmp(attr->name, "device_mode") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->dom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	/* kept for compatibility */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	} else if (strcmp(attr->name, "media_manager") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		return scnprintf(page, PAGE_SIZE, "%s\n", "gennvm");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	} else if (strcmp(attr->name, "ppa_format") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		return nvm_dev_attr_show_ppaf((void *)&geo->addrf, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	} else if (strcmp(attr->name, "media_type") == 0) {	/* u8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->mtype);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	} else if (strcmp(attr->name, "flash_media_type") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->fmtype);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	} else if (strcmp(attr->name, "num_channels") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	} else if (strcmp(attr->name, "num_luns") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	} else if (strcmp(attr->name, "num_planes") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_pln);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	} else if (strcmp(attr->name, "num_blocks") == 0) {	/* u16 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_chk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	} else if (strcmp(attr->name, "num_pages") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_pg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	} else if (strcmp(attr->name, "page_size") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->fpg_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	} else if (strcmp(attr->name, "hw_sector_size") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->csecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	} else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->sos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	} else if (strcmp(attr->name, "prog_typ") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	} else if (strcmp(attr->name, "prog_max") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	} else if (strcmp(attr->name, "erase_typ") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	} else if (strcmp(attr->name, "erase_max") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	} else if (strcmp(attr->name, "multiplane_modes") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		return scnprintf(page, PAGE_SIZE, "0x%08x\n", geo->mpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	} else if (strcmp(attr->name, "media_capabilities") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		return scnprintf(page, PAGE_SIZE, "0x%08x\n", geo->mccap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	} else if (strcmp(attr->name, "max_phys_secs") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		return scnprintf(page, PAGE_SIZE, "%u\n", NVM_MAX_VLBA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 		return scnprintf(page, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 			"Unhandled attr(%s) in `%s`\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 			attr->name, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) static ssize_t nvm_dev_attr_show_20(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		struct device_attribute *dattr, char *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	struct nvm_dev *ndev = ns->ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	struct nvm_geo *geo = &ndev->geo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	struct attribute *attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	if (!ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	attr = &dattr->attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	if (strcmp(attr->name, "groups") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	} else if (strcmp(attr->name, "punits") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	} else if (strcmp(attr->name, "chunks") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_chk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	} else if (strcmp(attr->name, "clba") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->clba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	} else if (strcmp(attr->name, "ws_min") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->ws_min);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	} else if (strcmp(attr->name, "ws_opt") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->ws_opt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	} else if (strcmp(attr->name, "maxoc") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->maxoc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	} else if (strcmp(attr->name, "maxocpu") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->maxocpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	} else if (strcmp(attr->name, "mw_cunits") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->mw_cunits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	} else if (strcmp(attr->name, "write_typ") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	} else if (strcmp(attr->name, "write_max") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	} else if (strcmp(attr->name, "reset_typ") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	} else if (strcmp(attr->name, "reset_max") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 		return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 		return scnprintf(page, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 			"Unhandled attr(%s) in `%s`\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 			attr->name, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 
/*
 * Read-only sysfs attribute helpers; the chosen show() routine records
 * which spec revision the attribute belongs to (see nvm_dev_attrs_visible).
 */
#define NVM_DEV_ATTR_RO(_name)					\
	DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)
#define NVM_DEV_ATTR_12_RO(_name)					\
	DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show_12, NULL)
#define NVM_DEV_ATTR_20_RO(_name)					\
	DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show_20, NULL)

/* general attributes */
static NVM_DEV_ATTR_RO(version);
static NVM_DEV_ATTR_RO(capabilities);

static NVM_DEV_ATTR_RO(read_typ);
static NVM_DEV_ATTR_RO(read_max);

/* 1.2 values */
static NVM_DEV_ATTR_12_RO(vendor_opcode);
static NVM_DEV_ATTR_12_RO(device_mode);
static NVM_DEV_ATTR_12_RO(ppa_format);
static NVM_DEV_ATTR_12_RO(media_manager);
static NVM_DEV_ATTR_12_RO(media_type);
static NVM_DEV_ATTR_12_RO(flash_media_type);
static NVM_DEV_ATTR_12_RO(num_channels);
static NVM_DEV_ATTR_12_RO(num_luns);
static NVM_DEV_ATTR_12_RO(num_planes);
static NVM_DEV_ATTR_12_RO(num_blocks);
static NVM_DEV_ATTR_12_RO(num_pages);
static NVM_DEV_ATTR_12_RO(page_size);
static NVM_DEV_ATTR_12_RO(hw_sector_size);
static NVM_DEV_ATTR_12_RO(oob_sector_size);
static NVM_DEV_ATTR_12_RO(prog_typ);
static NVM_DEV_ATTR_12_RO(prog_max);
static NVM_DEV_ATTR_12_RO(erase_typ);
static NVM_DEV_ATTR_12_RO(erase_max);
static NVM_DEV_ATTR_12_RO(multiplane_modes);
static NVM_DEV_ATTR_12_RO(media_capabilities);
static NVM_DEV_ATTR_12_RO(max_phys_secs);

/* 2.0 values */
static NVM_DEV_ATTR_20_RO(groups);
static NVM_DEV_ATTR_20_RO(punits);
static NVM_DEV_ATTR_20_RO(chunks);
static NVM_DEV_ATTR_20_RO(clba);
static NVM_DEV_ATTR_20_RO(ws_min);
static NVM_DEV_ATTR_20_RO(ws_opt);
static NVM_DEV_ATTR_20_RO(maxoc);
static NVM_DEV_ATTR_20_RO(maxocpu);
static NVM_DEV_ATTR_20_RO(mw_cunits);
static NVM_DEV_ATTR_20_RO(write_typ);
static NVM_DEV_ATTR_20_RO(write_max);
static NVM_DEV_ATTR_20_RO(reset_typ);
static NVM_DEV_ATTR_20_RO(reset_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 
/*
 * Flat list of every LightNVM sysfs attribute.  Which entries actually
 * appear in sysfs is decided per-device by nvm_dev_attrs_visible(),
 * based on the detected spec revision.
 */
static struct attribute *nvm_dev_attrs[] = {
	/* version agnostic attrs */
	&dev_attr_version.attr,
	&dev_attr_capabilities.attr,
	&dev_attr_read_typ.attr,
	&dev_attr_read_max.attr,

	/* 1.2 attrs */
	&dev_attr_vendor_opcode.attr,
	&dev_attr_device_mode.attr,
	&dev_attr_media_manager.attr,
	&dev_attr_ppa_format.attr,
	&dev_attr_media_type.attr,
	&dev_attr_flash_media_type.attr,
	&dev_attr_num_channels.attr,
	&dev_attr_num_luns.attr,
	&dev_attr_num_planes.attr,
	&dev_attr_num_blocks.attr,
	&dev_attr_num_pages.attr,
	&dev_attr_page_size.attr,
	&dev_attr_hw_sector_size.attr,
	&dev_attr_oob_sector_size.attr,
	&dev_attr_prog_typ.attr,
	&dev_attr_prog_max.attr,
	&dev_attr_erase_typ.attr,
	&dev_attr_erase_max.attr,
	&dev_attr_multiplane_modes.attr,
	&dev_attr_media_capabilities.attr,
	&dev_attr_max_phys_secs.attr,

	/* 2.0 attrs */
	&dev_attr_groups.attr,
	&dev_attr_punits.attr,
	&dev_attr_chunks.attr,
	&dev_attr_clba.attr,
	&dev_attr_ws_min.attr,
	&dev_attr_ws_opt.attr,
	&dev_attr_maxoc.attr,
	&dev_attr_maxocpu.attr,
	&dev_attr_mw_cunits.attr,

	&dev_attr_write_typ.attr,
	&dev_attr_write_max.attr,
	&dev_attr_reset_typ.attr,
	&dev_attr_reset_max.attr,

	NULL,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) static umode_t nvm_dev_attrs_visible(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 				     struct attribute *attr, int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	struct device *dev = container_of(kobj, struct device, kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	struct gendisk *disk = dev_to_disk(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	struct nvme_ns *ns = disk->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	struct nvm_dev *ndev = ns->ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	struct device_attribute *dev_attr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 		container_of(attr, typeof(*dev_attr), attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	if (!ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	if (dev_attr->show == nvm_dev_attr_show)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 		return attr->mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	switch (ndev->geo.major_ver_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 		if (dev_attr->show == nvm_dev_attr_show_12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 			return attr->mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 		if (dev_attr->show == nvm_dev_attr_show_20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 			return attr->mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 
/*
 * "lightnvm" sysfs attribute group attached to the namespace's disk;
 * per-attribute visibility is filtered by nvm_dev_attrs_visible().
 */
const struct attribute_group nvme_nvm_attr_group = {
	.name		= "lightnvm",
	.attrs		= nvm_dev_attrs,
	.is_visible	= nvm_dev_attrs_visible,
};