ufs-sec-feature.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Samsung Specific feature
 *
 * Copyright (C) 2023 Samsung Electronics Co., Ltd.
 *
 * Authors:
 *	Storage Driver <[email protected]>
 */

#include "ufs-sec-feature.h"
#include "ufs-sec-sysfs.h"

#include <asm/unaligned.h>
#include <trace/hooks/ufshcd.h>
#include <scsi/scsi_cmnd.h>
#include <linux/panic_notifier.h>
#include <linux/delay.h>
#include <linux/sec_debug.h>
#include <scsi/scsi_proto.h>
#include <linux/reboot.h>

#define NOTI_WORK_DELAY_MS	500
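
/*
 * Module-wide SEC feature state: ufs_sec_features aggregates the vendor
 * device info, WriteBooster bookkeeping, error counters and the command
 * log; ufs_sec_wb_reset_notify is an optional callback registered by
 * blk-sec-wb (see ufs_sec_wb_register_reset_notify() below).
 */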
void (*ufs_sec_wb_reset_notify)(void);

struct ufs_sec_feature_info ufs_sec_features;

static void ufs_sec_print_evt_hist(struct ufs_hba *hba);
static inline int ufs_sec_read_unit_desc_param(struct ufs_hba *hba,
					       int lun,
					       enum unit_desc_param param_offset,
					       u8 *param_read_buf,
					       u32 param_size)
{
	/*
	 * Unit descriptors are only available for general purpose LUs (LUN id
	 * from 0 to 7) and the RPMB well-known LU.
	 */
	if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
		return -EOPNOTSUPP;

	return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
				      param_offset, param_read_buf, param_size);
}
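
/*
 * Compose the 20-digit unique number exposed through sysfs: the low byte
 * of the manufacturer ID, the two manufacture-date bytes and the first
 * seven serial-number bytes, all printed as hex.
 */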
static void ufs_sec_set_unique_number(struct ufs_hba *hba, u8 *desc_buf)
{
	struct ufs_vendor_dev_info *vdi = ufs_sec_features.vdi;
	u8 manid = desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
	u8 serial_num_index = desc_buf[DEVICE_DESC_PARAM_SN];
	u8 snum_buf[SERIAL_NUM_SIZE];
	u8 *str_desc_buf = NULL;
	int err;

	/* read the string descriptor holding the serial number */
	str_desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
	if (!str_desc_buf)
		return;

	/* the spec says unicode, but SEC devices carry hex data here */
	err = ufshcd_read_desc_param(hba,
			QUERY_DESC_IDN_STRING, serial_num_index, 0,
			str_desc_buf, QUERY_DESC_MAX_SIZE);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading string descriptor. err %d",
			__func__, err);
		goto out;
	}

	/* set up unique_number */
	memset(snum_buf, 0, sizeof(snum_buf));
	memcpy(snum_buf, str_desc_buf + QUERY_DESC_HDR_SIZE, SERIAL_NUM_SIZE);

	memset(vdi->unique_number, 0, sizeof(vdi->unique_number));
	sprintf(vdi->unique_number, "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X",
		manid,
		desc_buf[DEVICE_DESC_PARAM_MANF_DATE],
		desc_buf[DEVICE_DESC_PARAM_MANF_DATE + 1],
		snum_buf[0], snum_buf[1], snum_buf[2], snum_buf[3],
		snum_buf[4], snum_buf[5], snum_buf[6]);

	/* null-terminate the unique number string */
	vdi->unique_number[UFS_UN_20_DIGITS] = '\0';

	dev_dbg(hba->dev, "%s: ufs un : %s\n", __func__, vdi->unique_number);
out:
	kfree(str_desc_buf);
}
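
/*
 * Read the device health descriptor and cache life time (LT), vendor
 * (firmware block) life time (FLT) and EOL info (ELI); LT also drives
 * the WriteBooster disable threshold in ufs_sec_is_wb_supported().
 */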
void ufs_sec_get_health_desc(struct ufs_hba *hba)
{
	struct ufs_vendor_dev_info *vdi = ufs_sec_features.vdi;
	u8 *desc_buf = NULL;
	int err;

	desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
	if (!desc_buf)
		return;

	err = ufshcd_read_desc_param(hba,
			QUERY_DESC_IDN_HEALTH, 0, 0,
			desc_buf, QUERY_DESC_MAX_SIZE);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading health descriptor. err %d",
			__func__, err);
		goto out;
	}

	/* life time, firmware block life time and EOL info from the health descriptor */
	vdi->lt = desc_buf[HEALTH_DESC_PARAM_LIFE_TIME_EST_A];
	vdi->flt = desc_buf[HEALTH_DESC_PARAM_VENDOR_LIFE_TIME_EST];
	vdi->eli = desc_buf[HEALTH_DESC_PARAM_EOL_INFO];

	dev_info(hba->dev, "LT: 0x%02x, FLT: 0x%02x, ELI: 0x%01x\n",
		 ((desc_buf[HEALTH_DESC_PARAM_LIFE_TIME_EST_A] << 4) |
		  desc_buf[HEALTH_DESC_PARAM_LIFE_TIME_EST_B]),
		 vdi->flt, vdi->eli);
out:
	kfree(desc_buf);
}
/*
 * Get dExtendedUFSFeaturesSupport from the device descriptor.
 *
 * Only valid when the device spec version is UFS v2.2, or v3.1 or later,
 * or when the device carries the extended-features quirk.
 */
static void ufs_sec_get_ext_feature(struct ufs_hba *hba, u8 *desc_buf)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;

	if (!(dev_info->wspecversion >= 0x310 ||
	      dev_info->wspecversion == 0x220 ||
	      (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES))) {
		ufs_sec_features.ext_ufs_feature_sup = 0x0;
		return;
	}

	ufs_sec_features.ext_ufs_feature_sup = get_unaligned_be32(desc_buf +
			DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
}
/* SEC next WB : begin */
#define UFS_WB_DISABLE_THRESHOLD_LT	9
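
/*
 * WriteBooster accounting: a non-zero write_transfer_len adds to the total
 * amount written while WB is on (in KB); a zero length marks a WB state
 * transition and accumulates the time spent in the previous state.
 */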
static void ufs_sec_wb_update_err(void)
{
	struct ufs_sec_wb_info *wb_info = ufs_sec_features.ufs_wb;

	wb_info->err_cnt++;
}

static void ufs_sec_wb_update_info(struct ufs_hba *hba, int write_transfer_len)
{
	struct ufs_sec_wb_info *wb_info = ufs_sec_features.ufs_wb;
	enum ufs_sec_wb_state wb_state = hba->dev_info.wb_enabled;

	if (write_transfer_len) {
		/*
		 * write_transfer_len : bytes
		 * wb_info->amount_kb : KB
		 */
		wb_info->amount_kb += (unsigned long)(write_transfer_len >> 10);
		return;
	}

	switch (wb_state) {
	case WB_OFF:
		wb_info->enable_ms += jiffies_to_msecs(jiffies - wb_info->state_ts);
		wb_info->state_ts = jiffies;
		wb_info->disable_cnt++;
		break;
	case WB_ON:
		wb_info->disable_ms += jiffies_to_msecs(jiffies - wb_info->state_ts);
		wb_info->state_ts = jiffies;
		wb_info->enable_cnt++;
		break;
	default:
		break;
	}
}
bool ufs_sec_is_wb_supported(void)
{
	struct ufs_sec_wb_info *ufs_wb = ufs_sec_features.ufs_wb;
	struct ufs_vendor_dev_info *vdi = ufs_sec_features.vdi;

	if (!ufs_wb)
		return false;

	/* WB stays disabled once the device life time reaches the threshold */
	if (vdi->lt >= UFS_WB_DISABLE_THRESHOLD_LT)
		ufs_wb->support = false;

	return ufs_wb->support;
}
EXPORT_SYMBOL(ufs_sec_is_wb_supported);

static bool ufs_sec_check_ext_feature(u32 mask)
{
	if (ufs_sec_features.ext_ufs_feature_sup & mask)
		return true;

	return false;
}
static u32 ufs_sec_wb_buf_alloc(struct ufs_hba *hba,
		struct ufs_dev_info *dev_info, u8 *desc_buf)
{
	u32 wb_buf_alloc = 0;
	u8 lun;

	dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE];

	if (dev_info->wb_buffer_type == WB_BUF_MODE_SHARED &&
	    (hba->dev_info.wspecversion >= 0x310 ||
	     hba->dev_info.wspecversion == 0x220)) {
		wb_buf_alloc = get_unaligned_be32(desc_buf +
				DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS);
	} else {
		for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
			wb_buf_alloc = 0;
			ufs_sec_read_unit_desc_param(hba,
					lun,
					UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
					(u8 *)&wb_buf_alloc,
					sizeof(wb_buf_alloc));
			if (wb_buf_alloc) {
				dev_info->wb_dedicated_lu = lun;
				break;
			}
		}
	}

	return wb_buf_alloc;
}
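
/*
 * Issue the WB_EN set/clear flag query. On success the cached wb_enabled
 * state and the WB statistics are updated; on failure only the WB error
 * counter is bumped.
 */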
static int __ufs_sec_wb_ctrl(bool enable)
{
	struct ufs_hba *hba = get_vdi_member(hba);
	enum query_opcode opcode;
	int ret = 0;
	u8 index;

	if (!ufs_sec_is_wb_supported())
		return -EOPNOTSUPP;

	if (enable)
		opcode = UPIU_QUERY_OPCODE_SET_FLAG;
	else
		opcode = UPIU_QUERY_OPCODE_CLEAR_FLAG;

	index = ufshcd_wb_get_query_index(hba);

	ret = ufshcd_query_flag_retry(hba, opcode,
			QUERY_FLAG_IDN_WB_EN, index, NULL);
	if (!ret) {
		hba->dev_info.wb_enabled = enable;
		ufs_sec_wb_update_info(hba, 0);
	} else {
		ufs_sec_wb_update_err();
	}

	pr_info("%s(%s) is %s, ret=%d.\n", __func__,
		enable ? "enable" : "disable",
		ret ? "failed" : "done", ret);

	return ret;
}
static inline int ufs_sec_shost_in_recovery(struct Scsi_Host *shost)
{
	return shost->shost_state == SHOST_RECOVERY ||
	       shost->shost_state == SHOST_CANCEL_RECOVERY ||
	       shost->shost_state == SHOST_DEL_RECOVERY ||
	       shost->tmf_in_progress;
}
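
/*
 * External entry point for WB toggling: refuses while the host is
 * suspended, not operational or in error recovery, and returns early
 * when the requested state is already set.
 */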
int ufs_sec_wb_ctrl(bool enable)
{
	struct ufs_hba *hba = get_vdi_member(hba);
	int ret = 0;
	unsigned long flags;
	struct Scsi_Host *shost = hba->host;

	spin_lock_irqsave(hba->host->host_lock, flags);

	if (hba->pm_op_in_progress || hba->is_sys_suspended) {
		pr_err("%s: ufs is suspended.\n", __func__);
		ret = -EBUSY;
		goto out;
	}

	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
		pr_err("%s: UFS Host state=%d.\n", __func__, hba->ufshcd_state);
		ret = -EBUSY;
		goto out;
	}

	if (ufs_sec_shost_in_recovery(shost)) {
		ret = -EBUSY;
		goto out;
	}

	if (!(enable ^ hba->dev_info.wb_enabled)) {
		pr_info("%s: write booster is already %s\n",
			__func__, enable ? "enabled" : "disabled");
		ret = 0;
		goto out;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	ufshcd_rpm_get_sync(hba);
	ret = __ufs_sec_wb_ctrl(enable);
	ufshcd_rpm_put(hba);

	return ret;
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return ret;
}
EXPORT_SYMBOL(ufs_sec_wb_ctrl);
static void ufs_sec_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
{
	enum query_opcode opcode;
	u8 index;
	int ret = 0;

	if (!ufs_sec_is_wb_supported())
		return;

	if (set)
		opcode = UPIU_QUERY_OPCODE_SET_FLAG;
	else
		opcode = UPIU_QUERY_OPCODE_CLEAR_FLAG;

	index = ufshcd_wb_get_query_index(hba);

	ret = ufshcd_query_flag_retry(hba, opcode,
			QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8,
			index, NULL);

	dev_info(hba->dev, "%s: %s WB flush during H8 is %s.\n", __func__,
		 set ? "set" : "clear",
		 ret ? "failed" : "done");
}
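
/*
 * Probe SEC WriteBooster support from the device descriptor: it requires
 * the WB extended-feature bit, preserved-user-space provisioning and a
 * non-zero buffer allocation (shared or dedicated-LU).
 */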
static void ufs_sec_wb_probe(struct ufs_hba *hba, u8 *desc_buf)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;
	struct ufs_sec_wb_info *ufs_wb = NULL;
	struct ufs_sec_wb_info *ufs_wb_backup = NULL;
	u32 wb_buf_alloc = 0;

	if (!ufs_sec_check_ext_feature(UFS_DEV_WRITE_BOOSTER_SUP)) {
		dev_err(hba->dev, "%s: Failed check_ext_feature", __func__);
		goto wb_disabled;
	}

	dev_info->b_presrv_uspc_en =
		desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
	if (!dev_info->b_presrv_uspc_en) {
		dev_err(hba->dev, "%s: Failed preserved user space en value",
			__func__);
		goto wb_disabled;
	}

	wb_buf_alloc = ufs_sec_wb_buf_alloc(hba, dev_info, desc_buf);
	if (!wb_buf_alloc) {
		dev_err(hba->dev, "%s: Failed wb_buf_alloc", __func__);
		goto wb_disabled;
	}

	ufs_wb = devm_kzalloc(hba->dev, sizeof(struct ufs_sec_wb_info),
			GFP_KERNEL);
	if (!ufs_wb) {
		dev_err(hba->dev, "%s: Failed allocating ufs_wb(%lu)",
			__func__, sizeof(struct ufs_sec_wb_info));
		goto wb_disabled;
	}

	ufs_wb_backup = devm_kzalloc(hba->dev, sizeof(struct ufs_sec_wb_info),
			GFP_KERNEL);
	if (!ufs_wb_backup) {
		dev_err(hba->dev, "%s: Failed allocating ufs_wb_backup(%lu)",
			__func__, sizeof(struct ufs_sec_wb_info));
		goto wb_disabled;
	}

	ufs_wb->support = true;
	ufs_wb->state_ts = jiffies;
	ufs_wb_backup->state_ts = jiffies;

	ufs_sec_features.ufs_wb = ufs_wb;
	ufs_sec_features.ufs_wb_backup = ufs_wb_backup;

	hba->dev_info.wb_enabled = WB_OFF;

	dev_info(hba->dev, "%s: SEC WB is supported. type=%s%d, size=%u.\n",
		 __func__,
		 (dev_info->wb_buffer_type == WB_BUF_MODE_LU_DEDICATED) ?
		 " dedicated LU" : " shared",
		 (dev_info->wb_buffer_type == WB_BUF_MODE_LU_DEDICATED) ?
		 dev_info->wb_dedicated_lu : 0, wb_buf_alloc);

wb_disabled:
	return;
}
/* Called by blk-sec-wb */
void ufs_sec_wb_register_reset_notify(void *func)
{
	ufs_sec_wb_reset_notify = func;
}
EXPORT_SYMBOL(ufs_sec_wb_register_reset_notify);
void ufs_sec_wb_config(struct ufs_hba *hba)
{
	/*
	 * 1. The default WB state is WB_OFF when UFS is initialized.
	 * 2. If UFS error handling occurs while in the WB_ON state, the
	 *    cached state must be reset to WB_OFF, because the UFS reset
	 *    has turned WB off on the device.
	 */
	if (hba->dev_info.wb_enabled == WB_ON) {
		hba->dev_info.wb_enabled = WB_OFF;
		/* call ssg's reset notify function */
		if (ufs_sec_wb_reset_notify != NULL)
			(*ufs_sec_wb_reset_notify)();
	}

	ufs_sec_wb_toggle_flush_during_h8(hba, true);
}
/* SEC next WB : end */
/* SEC test mode : begin */
#if IS_ENABLED(CONFIG_SCSI_UFS_TEST_MODE)
#define UFS_SEC_TESTMODE_CMD_LOG_MAX	10

static void ufs_sec_print_cmdlog(struct ufs_hba *hba)
{
	struct ufs_sec_cmd_log_info *ufs_cmd_log =
		ufs_sec_features.ufs_cmd_log;
	struct ufs_sec_cmd_log_entry *entry = NULL;
	int i;
	int idx = 0;

	if (!ufs_cmd_log)
		return;

	/* start UFS_SEC_TESTMODE_CMD_LOG_MAX entries behind the write pos */
	i = ufs_cmd_log->pos + UFS_SEC_CMD_LOGGING_MAX
		- UFS_SEC_TESTMODE_CMD_LOG_MAX;

	dev_err(hba->dev, "UFS CMD hist\n");
	dev_err(hba->dev, "%2s: %10s: %2s %3s %4s %9s %6s %16s\n",
		"No", "log string", "lu", "tag",
		"c_id", "lba", "length", "time");

	for (idx = 0; idx < UFS_SEC_TESTMODE_CMD_LOG_MAX; idx++, i++) {
		i %= UFS_SEC_CMD_LOGGING_MAX;
		entry = &ufs_cmd_log->entries[i];
		dev_err(hba->dev, "%2d: %10s: %2d %3d 0x%02x %9u %6d %16llu\n",
			idx,
			entry->str, entry->lun, entry->tag,
			entry->cmd_id, entry->lba,
			entry->transfer_len, entry->tstamp);
	}
}

static void ufs_sec_trigger_bug(struct ufs_hba *hba)
{
	ufs_sec_print_evt_hist(hba);
	ufs_sec_print_cmdlog(hba);
	BUG();
}
#endif /* CONFIG_SCSI_UFS_TEST_MODE */
/* SEC test mode : end */
/* SEC error info : begin */
inline bool ufs_sec_is_err_cnt_allowed(void)
{
	return ufs_sec_features.ufs_err && ufs_sec_features.ufs_err_backup;
}
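
/*
 * Count HW resets; under CONFIG_SEC_ABC, every third reset also reports a
 * storage warning event through the SEC ABC interface.
 */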
void ufs_sec_inc_hwrst_cnt(void)
{
	if (!ufs_sec_is_err_cnt_allowed())
		return;

	SEC_UFS_OP_ERR_CNT_INC(HW_RESET_cnt, UINT_MAX);

#if IS_ENABLED(CONFIG_SEC_ABC)
	{
		struct SEC_UFS_op_cnt *op_cnt = &get_err_member(op_cnt);

		if ((op_cnt->HW_RESET_cnt % 3) == 0)
			sec_abc_send_event("MODULE=storage@WARN=ufs_hwreset_err");
	}
#endif
}

static void ufs_sec_inc_link_startup_error_cnt(void)
{
	if (!ufs_sec_is_err_cnt_allowed())
		return;

	SEC_UFS_OP_ERR_CNT_INC(link_startup_cnt, UINT_MAX);
}
static void ufs_sec_inc_uic_cmd_error(u32 cmd)
{
	struct SEC_UFS_UIC_cmd_cnt *uiccmd_cnt = NULL;

	if (!ufs_sec_is_err_cnt_allowed())
		return;

	uiccmd_cnt = &get_err_member(UIC_cmd_cnt);

	switch (cmd & COMMAND_OPCODE_MASK) {
	case UIC_CMD_DME_GET:
		SEC_UFS_ERR_CNT_INC(uiccmd_cnt->DME_GET_err, U8_MAX);
		break;
	case UIC_CMD_DME_SET:
		SEC_UFS_ERR_CNT_INC(uiccmd_cnt->DME_SET_err, U8_MAX);
		break;
	case UIC_CMD_DME_PEER_GET:
		SEC_UFS_ERR_CNT_INC(uiccmd_cnt->DME_PEER_GET_err, U8_MAX);
		break;
	case UIC_CMD_DME_PEER_SET:
		SEC_UFS_ERR_CNT_INC(uiccmd_cnt->DME_PEER_SET_err, U8_MAX);
		break;
	case UIC_CMD_DME_POWERON:
		SEC_UFS_ERR_CNT_INC(uiccmd_cnt->DME_POWERON_err, U8_MAX);
		break;
	case UIC_CMD_DME_POWEROFF:
		SEC_UFS_ERR_CNT_INC(uiccmd_cnt->DME_POWEROFF_err, U8_MAX);
		break;
	case UIC_CMD_DME_ENABLE:
		SEC_UFS_ERR_CNT_INC(uiccmd_cnt->DME_ENABLE_err, U8_MAX);
		break;
	case UIC_CMD_DME_RESET:
		SEC_UFS_ERR_CNT_INC(uiccmd_cnt->DME_RESET_err, U8_MAX);
		break;
	case UIC_CMD_DME_END_PT_RST:
		SEC_UFS_ERR_CNT_INC(uiccmd_cnt->DME_END_PT_RST_err, U8_MAX);
		break;
	case UIC_CMD_DME_LINK_STARTUP:
		SEC_UFS_ERR_CNT_INC(uiccmd_cnt->DME_LINK_STARTUP_err, U8_MAX);
		break;
	case UIC_CMD_DME_HIBER_ENTER:
		SEC_UFS_ERR_CNT_INC(uiccmd_cnt->DME_HIBER_ENTER_err, U8_MAX);
		SEC_UFS_OP_ERR_CNT_INC(Hibern8_enter_cnt, UINT_MAX);
		break;
	case UIC_CMD_DME_HIBER_EXIT:
		SEC_UFS_ERR_CNT_INC(uiccmd_cnt->DME_HIBER_EXIT_err, U8_MAX);
		SEC_UFS_OP_ERR_CNT_INC(Hibern8_exit_cnt, UINT_MAX);
		break;
	case UIC_CMD_DME_TEST_MODE:
		SEC_UFS_ERR_CNT_INC(uiccmd_cnt->DME_TEST_MODE_err, U8_MAX);
		break;
	default:
		break;
	}

	SEC_UFS_ERR_CNT_INC(uiccmd_cnt->UIC_cmd_err, UINT_MAX);
}
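
/*
 * Per-source fatal error counters; each recognized status bit also bumps
 * the aggregate Fatal_err count.
 */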
static void ufs_sec_inc_uic_fatal(u32 errors)
{
	struct SEC_UFS_Fatal_err_cnt *f_ec = &get_err_member(Fatal_err_cnt);

	if (!ufs_sec_is_err_cnt_allowed())
		return;

	if (errors & DEVICE_FATAL_ERROR) {
		SEC_UFS_ERR_CNT_INC(f_ec->DFE, U8_MAX);
		SEC_UFS_ERR_CNT_INC(f_ec->Fatal_err, UINT_MAX);
	}
	if (errors & CONTROLLER_FATAL_ERROR) {
		SEC_UFS_ERR_CNT_INC(f_ec->CFE, U8_MAX);
		SEC_UFS_ERR_CNT_INC(f_ec->Fatal_err, UINT_MAX);
	}
	if (errors & SYSTEM_BUS_FATAL_ERROR) {
		SEC_UFS_ERR_CNT_INC(f_ec->SBFE, U8_MAX);
		SEC_UFS_ERR_CNT_INC(f_ec->Fatal_err, UINT_MAX);
	}
	if (errors & CRYPTO_ENGINE_FATAL_ERROR) {
		SEC_UFS_ERR_CNT_INC(f_ec->CEFE, U8_MAX);
		SEC_UFS_ERR_CNT_INC(f_ec->Fatal_err, UINT_MAX);
	}
	/* UIC_LINK_LOST : cannot be checked in ufshcd.c */
	if (errors & UIC_LINK_LOST) {
		SEC_UFS_ERR_CNT_INC(f_ec->LLE, U8_MAX);
		SEC_UFS_ERR_CNT_INC(f_ec->Fatal_err, UINT_MAX);
	}
}
static void ufs_sec_inc_uic_error(enum ufs_event_type evt, u32 reg)
{
	struct SEC_UFS_UIC_err_cnt *uicerr_cnt = &get_err_member(UIC_err_cnt);
	unsigned int bit_count = 0;
	int val = 0;

	if (!ufs_sec_is_err_cnt_allowed())
		return;

	switch (evt) {
	case UFS_EVT_PA_ERR:
		SEC_UFS_ERR_CNT_INC(uicerr_cnt->PAERR_cnt, U8_MAX);
		SEC_UFS_ERR_CNT_INC(uicerr_cnt->UIC_err, UINT_MAX);

		if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR)
			SEC_UFS_ERR_CNT_INC(uicerr_cnt->PAERR_linereset,
					UINT_MAX);

		val = reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK;
		if (val)
			SEC_UFS_ERR_CNT_INC(uicerr_cnt->PAERR_lane[val - 1],
					UINT_MAX);
		break;
	case UFS_EVT_DL_ERR:
		if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
			SEC_UFS_ERR_CNT_INC(uicerr_cnt->DL_PA_INIT_ERR_cnt, U8_MAX);
		if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
			SEC_UFS_ERR_CNT_INC(uicerr_cnt->DL_NAC_RCVD_ERR_cnt, U8_MAX);
		if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
			SEC_UFS_ERR_CNT_INC(uicerr_cnt->DL_TC_REPLAY_ERR_cnt, U8_MAX);
		if (reg & UIC_DATA_LINK_LAYER_ERROR_FCX_PRO_TIMER_EXP)
			SEC_UFS_ERR_CNT_INC(uicerr_cnt->DL_FC_PROTECT_ERR_cnt, U8_MAX);

		reg &= UIC_DATA_LINK_LAYER_ERROR_CODE_MASK;
		bit_count = __builtin_popcount(reg);
		SEC_UFS_ERR_CNT_ADD(uicerr_cnt->DLERR_cnt, bit_count, UINT_MAX);
		SEC_UFS_ERR_CNT_ADD(uicerr_cnt->UIC_err, bit_count, UINT_MAX);
		break;
	case UFS_EVT_NL_ERR:
		SEC_UFS_ERR_CNT_INC(uicerr_cnt->NLERR_cnt, U8_MAX);
		SEC_UFS_ERR_CNT_INC(uicerr_cnt->UIC_err, UINT_MAX);
		break;
	case UFS_EVT_TL_ERR:
		SEC_UFS_ERR_CNT_INC(uicerr_cnt->TLERR_cnt, U8_MAX);
		SEC_UFS_ERR_CNT_INC(uicerr_cnt->UIC_err, UINT_MAX);
		break;
	case UFS_EVT_DME_ERR:
		SEC_UFS_ERR_CNT_INC(uicerr_cnt->DMEERR_cnt, U8_MAX);
		SEC_UFS_ERR_CNT_INC(uicerr_cnt->UIC_err, UINT_MAX);
		break;
	default:
		break;
	}
}
static void ufs_sec_inc_tm_error(u8 tm_cmd)
{
	struct SEC_UFS_UTP_cnt *utp_err = &get_err_member(UTP_cnt);

#if !IS_ENABLED(CONFIG_SAMSUNG_PRODUCT_SHIP)
	struct ufs_vendor_dev_info *vdi = ufs_sec_features.vdi;

	if (vdi && (tm_cmd == UFS_LOGICAL_RESET))
		vdi->device_stuck = true;
#endif

	if (!ufs_sec_is_err_cnt_allowed())
		return;

	switch (tm_cmd) {
	case UFS_QUERY_TASK:
		SEC_UFS_ERR_CNT_INC(utp_err->UTMR_query_task_cnt, U8_MAX);
		break;
	case UFS_ABORT_TASK:
		SEC_UFS_ERR_CNT_INC(utp_err->UTMR_abort_task_cnt, U8_MAX);
		break;
	case UFS_LOGICAL_RESET:
		SEC_UFS_ERR_CNT_INC(utp_err->UTMR_logical_reset_cnt, U8_MAX);
		break;
	default:
		break;
	}

	SEC_UFS_ERR_CNT_INC(utp_err->UTP_err, UINT_MAX);
}
static void ufs_sec_inc_utp_error(struct ufs_hba *hba, int tag)
{
	struct SEC_UFS_UTP_cnt *utp_err = &get_err_member(UTP_cnt);
	struct ufshcd_lrb *lrbp = NULL;
	int opcode = 0;

	if (!ufs_sec_is_err_cnt_allowed())
		return;

	if (tag >= hba->nutrs)
		return;

	lrbp = &hba->lrb[tag];
	if (!lrbp || !lrbp->cmd || (lrbp->task_tag != tag))
		return;

	opcode = lrbp->cmd->cmnd[0];
	switch (opcode) {
	case WRITE_10:
		SEC_UFS_ERR_CNT_INC(utp_err->UTR_write_err, U8_MAX);
		break;
	case READ_10:
	case READ_16:
		SEC_UFS_ERR_CNT_INC(utp_err->UTR_read_err, U8_MAX);
		break;
	case SYNCHRONIZE_CACHE:
		SEC_UFS_ERR_CNT_INC(utp_err->UTR_sync_cache_err, U8_MAX);
		break;
	case UNMAP:
		SEC_UFS_ERR_CNT_INC(utp_err->UTR_unmap_err, U8_MAX);
		break;
	default:
		SEC_UFS_ERR_CNT_INC(utp_err->UTR_etc_err, U8_MAX);
		break;
	}

	SEC_UFS_ERR_CNT_INC(utp_err->UTP_err, UINT_MAX);
}
static enum utp_ocs ufshcd_sec_get_tr_ocs(struct ufshcd_lrb *lrbp,
		struct cq_entry *cqe)
{
	if (cqe)
		return le32_to_cpu(cqe->status) & MASK_OCS;

	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}
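
/*
 * Count query/NOP command failures. For completions the OCS (or the MCQ
 * CQE status) is checked; on a timeout the opcode of the last query sent
 * is used instead, since no response ever arrived.
 */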
static void ufs_sec_inc_query_error(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, bool timeout)
{
	struct SEC_UFS_QUERY_cnt *query_cnt = NULL;
	struct ufs_query_req *request = &hba->dev_cmd.query.request;
	struct ufs_hw_queue *hwq = NULL;
	struct cq_entry *cqe = NULL;
	enum query_opcode opcode = request->upiu_req.opcode;
	enum dev_cmd_type cmd_type = hba->dev_cmd.type;
	enum utp_ocs ocs;

	if (!ufs_sec_is_err_cnt_allowed())
		return;

	if (is_mcq_enabled(hba)) {
		hwq = hba->dev_cmd_queue;
		cqe = ufshcd_mcq_cur_cqe(hwq);
	}

	ocs = ufshcd_sec_get_tr_ocs(lrbp, cqe);
	if (!timeout && (ocs == OCS_SUCCESS))
		return;

	/* use the last query cmd information when a timeout occurred */
	if (timeout) {
		opcode = ufs_sec_features.last_qcmd;
		cmd_type = ufs_sec_features.qcmd_type;
	}

	query_cnt = &get_err_member(Query_cnt);

	if (cmd_type == DEV_CMD_TYPE_NOP) {
		SEC_UFS_ERR_CNT_INC(query_cnt->NOP_err, U8_MAX);
	} else {
		switch (opcode) {
		case UPIU_QUERY_OPCODE_READ_DESC:
			SEC_UFS_ERR_CNT_INC(query_cnt->R_Desc_err, U8_MAX);
			break;
		case UPIU_QUERY_OPCODE_WRITE_DESC:
			SEC_UFS_ERR_CNT_INC(query_cnt->W_Desc_err, U8_MAX);
			break;
		case UPIU_QUERY_OPCODE_READ_ATTR:
			SEC_UFS_ERR_CNT_INC(query_cnt->R_Attr_err, U8_MAX);
			break;
		case UPIU_QUERY_OPCODE_WRITE_ATTR:
			SEC_UFS_ERR_CNT_INC(query_cnt->W_Attr_err, U8_MAX);
			break;
		case UPIU_QUERY_OPCODE_READ_FLAG:
			SEC_UFS_ERR_CNT_INC(query_cnt->R_Flag_err, U8_MAX);
			break;
		case UPIU_QUERY_OPCODE_SET_FLAG:
			SEC_UFS_ERR_CNT_INC(query_cnt->Set_Flag_err, U8_MAX);
			break;
		case UPIU_QUERY_OPCODE_CLEAR_FLAG:
			SEC_UFS_ERR_CNT_INC(query_cnt->Clear_Flag_err, U8_MAX);
			break;
		case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
			SEC_UFS_ERR_CNT_INC(query_cnt->Toggle_Flag_err,
					U8_MAX);
			break;
		default:
			break;
		}
	}

	SEC_UFS_ERR_CNT_INC(query_cnt->Query_err, UINT_MAX);
}
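
/*
 * Dispatch a logged UFS event to its error counter. In test mode,
 * repeated PA/DL/link-startup errors force a BUG() so the failure state
 * is preserved for analysis.
 */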
void ufs_sec_inc_op_err(struct ufs_hba *hba, enum ufs_event_type evt,
			void *data)
{
	u32 error_val = *(u32 *)data;

	switch (evt) {
	case UFS_EVT_LINK_STARTUP_FAIL:
		ufs_sec_inc_link_startup_error_cnt();
		break;
	case UFS_EVT_DEV_RESET:
		break;
	case UFS_EVT_PA_ERR:
	case UFS_EVT_DL_ERR:
	case UFS_EVT_NL_ERR:
	case UFS_EVT_TL_ERR:
	case UFS_EVT_DME_ERR:
		if (error_val)
			ufs_sec_inc_uic_error(evt, error_val);
		break;
	case UFS_EVT_FATAL_ERR:
		if (error_val)
			ufs_sec_inc_uic_fatal(error_val);
		break;
	case UFS_EVT_ABORT:
		ufs_sec_inc_utp_error(hba, (int)error_val);
		break;
	case UFS_EVT_HOST_RESET:
		break;
	case UFS_EVT_SUSPEND_ERR:
	case UFS_EVT_RESUME_ERR:
		break;
	case UFS_EVT_AUTO_HIBERN8_ERR:
		SEC_UFS_OP_ERR_CNT_INC(AH8_err_cnt, UINT_MAX);
		break;
	default:
		break;
	}

#if IS_ENABLED(CONFIG_SCSI_UFS_TEST_MODE)
#define UFS_TEST_COUNT	3
	if (evt == UFS_EVT_PA_ERR || evt == UFS_EVT_DL_ERR ||
	    evt == UFS_EVT_LINK_STARTUP_FAIL) {
		struct ufs_event_hist *e = &hba->ufs_stats.event[evt];

		if (e->cnt > UFS_TEST_COUNT)
			ufs_sec_trigger_bug(hba);
	}
#endif
}
static void ufs_sec_inc_sense_err(struct ufshcd_lrb *lrbp,
		struct ufs_sec_cmd_info *ufs_cmd)
{
	struct SEC_SCSI_SENSE_cnt *sense_err = NULL;
	u8 sense_key = 0;
	u8 asc = 0;
	u8 ascq = 0;
	bool secdbgMode = false;

	sense_key = lrbp->ucd_rsp_ptr->sr.sense_data[2] & 0x0F;
	if (sense_key != MEDIUM_ERROR && sense_key != HARDWARE_ERROR)
		return;

#if IS_ENABLED(CONFIG_SEC_DEBUG)
	secdbgMode = sec_debug_is_enabled();
#endif

	asc = lrbp->ucd_rsp_ptr->sr.sense_data[12];
	ascq = lrbp->ucd_rsp_ptr->sr.sense_data[13];

	pr_err("UFS: LU%u: sense key 0x%x (asc 0x%x, ascq 0x%x), opcode 0x%x, lba 0x%x, len 0x%x.\n",
	       ufs_cmd->lun, sense_key, asc, ascq,
	       ufs_cmd->opcode, ufs_cmd->lba, ufs_cmd->transfer_len);

	if (!ufs_sec_is_err_cnt_allowed())
		goto out;

	sense_err = &get_err_member(sense_cnt);
	if (sense_key == MEDIUM_ERROR) {
		sense_err->scsi_medium_err++;
#if IS_ENABLED(CONFIG_SEC_ABC)
		sec_abc_send_event("MODULE=storage@WARN=ufs_medium_err");
#endif
	} else {
		sense_err->scsi_hw_err++;
#if IS_ENABLED(CONFIG_SEC_ABC)
		sec_abc_send_event("MODULE=storage@WARN=ufs_hardware_err");
#endif
	}

out:
	if (secdbgMode)
		panic("ufs %s error\n", (sense_key == MEDIUM_ERROR) ?
			"medium" : "hardware");
}
static void ufs_sec_init_error_logging(struct device *dev)
{
	struct ufs_sec_err_info *ufs_err = NULL;
	struct ufs_sec_err_info *ufs_err_backup = NULL;
	struct ufs_sec_err_info *ufs_err_hist = NULL;

	ufs_err = devm_kzalloc(dev, sizeof(struct ufs_sec_err_info),
			GFP_KERNEL);
	ufs_err_backup = devm_kzalloc(dev, sizeof(struct ufs_sec_err_info),
			GFP_KERNEL);
	ufs_err_hist = devm_kzalloc(dev, sizeof(struct ufs_sec_err_info),
			GFP_KERNEL);
	if (!ufs_err || !ufs_err_backup || !ufs_err_hist) {
		dev_err(dev, "%s: Failed allocating ufs_err(backup)(%lu)",
			__func__, sizeof(struct ufs_sec_err_info));
		devm_kfree(dev, ufs_err);
		devm_kfree(dev, ufs_err_backup);
		devm_kfree(dev, ufs_err_hist);
		return;
	}

	ufs_sec_features.ufs_err = ufs_err;
	ufs_sec_features.ufs_err_backup = ufs_err_backup;
	ufs_sec_features.ufs_err_hist = ufs_err_hist;

	ufs_sec_features.ucmd_complete = true;
	ufs_sec_features.qcmd_complete = true;
}
/* SEC error info : end */
void ufs_sec_check_device_stuck(void)
{
#if IS_ENABLED(CONFIG_SCSI_UFS_TEST_MODE)
	struct ufs_vendor_dev_info *vdi = ufs_sec_features.vdi;

	if (!vdi)
		return;

	/*
	 * In test mode, do not let the system recover: if reset recovery is
	 * in progress from ufshcd_err_handler (eh_flags set), trigger a BUG
	 * to preserve the failure state instead.
	 */
	if (vdi->hba && vdi->hba->eh_flags)
		ufs_sec_trigger_bug(vdi->hba);
#endif
}
/* SEC cmd log : begin */
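
/*
 * Ring-buffer command trace: each entry records the command kind, tag,
 * LBA, transfer length and a per-CPU timestamp; pos wraps at
 * UFS_SEC_CMD_LOGGING_MAX.
 */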
static void __ufs_sec_log_cmd(struct ufs_hba *hba, int str_idx,
		unsigned int tag, u8 cmd_id, u8 idn, u8 lun, u32 lba,
		int transfer_len)
{
	struct ufs_sec_cmd_log_info *ufs_cmd_log =
		ufs_sec_features.ufs_cmd_log;
	struct ufs_sec_cmd_log_entry *entry =
		&ufs_cmd_log->entries[ufs_cmd_log->pos];
	int cpu = raw_smp_processor_id();

	entry->lun = lun;
	entry->str = ufs_sec_log_str[str_idx];
	entry->cmd_id = cmd_id;
	entry->lba = lba;
	entry->transfer_len = transfer_len;
	entry->idn = idn;
	entry->tag = tag;
	entry->tstamp = cpu_clock(cpu);
	entry->outstanding_reqs = hba->outstanding_reqs;
	ufs_cmd_log->pos =
		(ufs_cmd_log->pos + 1) % UFS_SEC_CMD_LOGGING_MAX;
}
static void ufs_sec_log_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
		int str_t, struct ufs_sec_cmd_info *ufs_cmd, u8 cmd_id)
{
	u8 opcode = 0;
	u8 idn = 0;

	if (!ufs_sec_features.ufs_cmd_log)
		return;

	switch (str_t) {
	case UFS_SEC_CMD_SEND:
	case UFS_SEC_CMD_COMP:
		__ufs_sec_log_cmd(hba, str_t, lrbp->task_tag, ufs_cmd->opcode,
				0, lrbp->lun, ufs_cmd->lba,
				ufs_cmd->transfer_len);
		break;
	case UFS_SEC_QUERY_SEND:
	case UFS_SEC_QUERY_COMP:
		opcode = hba->dev_cmd.query.request.upiu_req.opcode;
		idn = hba->dev_cmd.query.request.upiu_req.idn;
		__ufs_sec_log_cmd(hba, str_t, lrbp->task_tag, opcode,
				idn, lrbp->lun, 0, 0);
		break;
	case UFS_SEC_NOP_SEND:
	case UFS_SEC_NOP_COMP:
		__ufs_sec_log_cmd(hba, str_t, lrbp->task_tag, 0,
				0, lrbp->lun, 0, 0);
		break;
	case UFS_SEC_TM_SEND:
	case UFS_SEC_TM_COMP:
	case UFS_SEC_TM_ERR:
	case UFS_SEC_UIC_SEND:
	case UFS_SEC_UIC_COMP:
		__ufs_sec_log_cmd(hba, str_t, 0, cmd_id, 0, 0, 0, 0);
		break;
	default:
		break;
	}
}
static void ufs_sec_init_cmd_logging(struct device *dev)
{
	struct ufs_sec_cmd_log_info *ufs_cmd_log = NULL;

	ufs_cmd_log = devm_kzalloc(dev, sizeof(struct ufs_sec_cmd_log_info),
			GFP_KERNEL);
	if (!ufs_cmd_log) {
		dev_err(dev, "%s: Failed allocating ufs_cmd_log(%lu)",
			__func__,
			sizeof(struct ufs_sec_cmd_log_info));
		return;
	}

	ufs_cmd_log->entries = devm_kcalloc(dev, UFS_SEC_CMD_LOGGING_MAX,
			sizeof(struct ufs_sec_cmd_log_entry), GFP_KERNEL);
	if (!ufs_cmd_log->entries) {
		dev_err(dev, "%s: Failed allocating cmd log entry(%lu)",
			__func__,
			sizeof(struct ufs_sec_cmd_log_entry)
			* UFS_SEC_CMD_LOGGING_MAX);
		devm_kfree(dev, ufs_cmd_log);
		return;
	}

	pr_info("SEC UFS cmd logging is initialized.\n");

	ufs_sec_features.ufs_cmd_log = ufs_cmd_log;
}
/* SEC cmd log : end */
/* panic notifier : begin */
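
/*
 * Dump one event-history ring into the kernel log, skipping empty slots
 * and the initial device reset expected during boot (slot 0 within the
 * first two seconds).
 */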
static void ufs_sec_print_evt(struct ufs_hba *hba, u32 id,
		char *err_name)
{
	int i;
	bool found = false;
	struct ufs_event_hist *e;

	if (id >= UFS_EVT_CNT)
		return;

	e = &hba->ufs_stats.event[id];

	for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) {
		int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH;

		/*
		 * Do not print dev_reset[0] if its tstamp is within 2 s,
		 * because that reset happened during the boot sequence.
		 */
		if ((id == UFS_EVT_DEV_RESET) && (p == 0) &&
		    (ktime_to_us(e->tstamp[p]) < 2000000))
			continue;

		if (e->tstamp[p] == 0)
			continue;

		dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
			e->val[p], ktime_to_us(e->tstamp[p]));
		found = true;
	}

	if (!found)
		dev_err(hba->dev, "No record of %s\n", err_name);
	else
		dev_err(hba->dev, "%s: total cnt=%llu\n", err_name, e->cnt);
}
static void ufs_sec_print_evt_hist(struct ufs_hba *hba)
{
	ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");

	ufs_sec_print_evt(hba, UFS_EVT_PA_ERR, "pa_err");
	ufs_sec_print_evt(hba, UFS_EVT_DL_ERR, "dl_err");
	ufs_sec_print_evt(hba, UFS_EVT_NL_ERR, "nl_err");
	ufs_sec_print_evt(hba, UFS_EVT_TL_ERR, "tl_err");
	ufs_sec_print_evt(hba, UFS_EVT_DME_ERR, "dme_err");
	ufs_sec_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR,
			"auto_hibern8_err");
	ufs_sec_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err");
	ufs_sec_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL,
			"link_startup_fail");
	ufs_sec_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail");
	ufs_sec_print_evt(hba, UFS_EVT_SUSPEND_ERR,
			"suspend_fail");
	ufs_sec_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset");
	ufs_sec_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset");
	ufs_sec_print_evt(hba, UFS_EVT_ABORT, "task_abort");
}
/*
 * ufs_sec_check_and_is_err - check the summarized UFS error counts
 * @buf: must be filled by SEC_UFS_ERR_SUM() or SEC_UFS_ERR_HIST_SUM()
 *
 * Returns 0 if the buf reports no errors, and non-zero if the buf is
 * invalid or carries a non-zero error count.
 */
static int ufs_sec_check_and_is_err(char *buf)
{
	const char *no_err = "U0I0H0L0X0Q0R0W0F0SM0SH0";

	if (!buf || strlen(buf) < strlen(no_err))
		return -EINVAL;

	return strncmp(buf, no_err, strlen(no_err));
}
/*
 * Print the UFS error summary (the previous boot's errors and the current
 * boot's errors) whenever any error has been recorded.
 *
 * Format : U0I0H0L0X0Q0R0W0F0SM0SH0(HB0)
 * U  : UTP cmd error count
 * I  : UIC error count
 * H  : HWRESET count
 * L  : Link startup failure count
 * X  : Link lost error count
 * Q  : UTMR QUERY_TASK error count
 * R  : READ error count
 * W  : WRITE error count
 * F  : Device fatal error count
 * SM : Sense medium error count
 * SH : Sense hardware error count
 * HB : Hibern8 enter/exit error count + auto-H8 error count
 */
void ufs_sec_print_err(void)
{
	char err_buf[ERR_SUM_SIZE];
	char hist_buf[ERR_HIST_SUM_SIZE];
	unsigned int HB_value =
		SEC_UFS_ERR_INFO_GET_VALUE(op_cnt, Hibern8_enter_cnt) +
		SEC_UFS_ERR_INFO_GET_VALUE(op_cnt, Hibern8_exit_cnt) +
		SEC_UFS_ERR_INFO_GET_VALUE(op_cnt, AH8_err_cnt);

	SEC_UFS_ERR_SUM(err_buf);
	SEC_UFS_ERR_HIST_SUM(hist_buf);

	if (ufs_sec_check_and_is_err(hist_buf))
		pr_info("ufs: %sHB%u hist: %s",
			err_buf, (HB_value > 9) ? 9 : HB_value, hist_buf);
}
/*
 * ufs_sec_panic_callback - print the UFS error summary on panic
 *
 * On a filesystem (F2FS/EXT4) panic, additionally dump the UFS event
 * history if any error has been counted.
 */
static int ufs_sec_panic_callback(struct notifier_block *nfb,
		unsigned long event, void *panic_msg)
{
	struct ufs_vendor_dev_info *vdi = ufs_sec_features.vdi;
	char *str = (char *)panic_msg;
	bool is_FSpanic = !strncmp(str, "F2FS", 4) || !strncmp(str, "EXT4", 4);
	char err_buf[ERR_SUM_SIZE];

	ufs_sec_print_err();

	SEC_UFS_ERR_SUM(err_buf);
	if (vdi && is_FSpanic && ufs_sec_check_and_is_err(err_buf))
		ufs_sec_print_evt_hist(vdi->hba);

	return NOTIFY_OK;
}

static struct notifier_block ufs_sec_panic_notifier = {
	.notifier_call = ufs_sec_panic_callback,
	.priority = 1,
};
/* panic notifier : end */
/* reboot notifier : begin */
static int ufs_sec_reboot_notify(struct notifier_block *notify_block,
		unsigned long event, void *unused)
{
	ufs_sec_print_err();

	return NOTIFY_OK;
}
/* reboot notifier : end */
/* I/O error uevent : begin */
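
/*
 * On repeated host resets during error handling, send a KOBJ_CHANGE
 * uevent (delayed by NOTI_WORK_DELAY_MS) carrying the error summary so
 * that userspace can collect it.
 */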
static void ufs_sec_err_noti_work(struct work_struct *work)
{
	int ret;

	ufs_sec_print_err();

	ret = kobject_uevent(&sec_ufs_node_dev->kobj, KOBJ_CHANGE);
	if (ret)
		pr_err("%s: Failed to send uevent with err %d\n", __func__, ret);
}

static int ufs_sec_err_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	char buf[ERR_SUM_SIZE];

	add_uevent_var(env, "DEVNAME=%s", dev->kobj.name);
	add_uevent_var(env, "NAME=UFSINFO");

	SEC_UFS_ERR_SUM(buf);
	return add_uevent_var(env, "DATA=%s", buf);
}

static struct device_type ufs_type = {
	.uevent = ufs_sec_err_uevent,
};

static inline bool ufs_sec_is_uevent_condition(u32 hw_rst_cnt)
{
	/* notify on the first reset and then on every tenth one */
	return ((hw_rst_cnt == 1) || !(hw_rst_cnt % 10));
}

static void ufs_sec_trigger_err_noti_uevent(struct ufs_hba *hba)
{
	u32 hw_rst_cnt = SEC_UFS_ERR_INFO_GET_VALUE(op_cnt, HW_RESET_cnt);
	char buf[ERR_SUM_SIZE];

	/* eh_flags is only set during error handling */
	if (!hba->eh_flags)
		return;

	SEC_UFS_ERR_SUM(buf);
	if (!ufs_sec_check_and_is_err(buf))
		return;

	if (!ufs_sec_is_uevent_condition(hw_rst_cnt))
		return;

	if (sec_ufs_node_dev)
		schedule_delayed_work(&ufs_sec_features.noti_work,
				msecs_to_jiffies(NOTI_WORK_DELAY_MS));
}
/* I/O error uevent : end */
void ufs_sec_config_features(struct ufs_hba *hba)
{
	ufs_sec_wb_config(hba);
	ufs_sec_trigger_err_noti_uevent(hba);
}
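
/*
 * SEC appears to drive WriteBooster through its own path (the "SEC next
 * WB" section above), so the core driver's WB capabilities are masked off
 * here.
 */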
void ufs_sec_adjust_caps_quirks(struct ufs_hba *hba)
{
	hba->caps &= ~UFSHCD_CAP_WB_EN;
	hba->caps &= ~UFSHCD_CAP_WB_WITH_CLK_SCALING;
}
void ufs_sec_init_logging(struct device *dev)
{
	ufs_sec_init_error_logging(dev);
	ufs_sec_init_cmd_logging(dev);
}
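
/*
 * One-time vendor setup at probe: allocate the vendor device info, read
 * the device descriptor to derive the unique number, health data and
 * extended features, probe WB, create sysfs nodes and register the
 * panic/reboot notifiers and the error uevent work.
 */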
void ufs_sec_set_features(struct ufs_hba *hba)
{
	struct ufs_vendor_dev_info *vdi = NULL;
	u8 *desc_buf = NULL;
	int err;

	if (ufs_sec_features.vdi)
		return;

	vdi = devm_kzalloc(hba->dev, sizeof(struct ufs_vendor_dev_info),
			GFP_KERNEL);
	if (!vdi) {
		dev_err(hba->dev, "%s: Failed allocating ufs_vdi(%lu)",
			__func__, sizeof(struct ufs_vendor_dev_info));
		return;
	}

	vdi->hba = hba;

	ufs_sec_features.vdi = vdi;

	desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
	if (!desc_buf)
		return;

	err = ufshcd_read_desc_param(hba,
			QUERY_DESC_IDN_DEVICE, 0, 0,
			desc_buf, QUERY_DESC_MAX_SIZE);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading device desc. err %d",
			__func__, err);
		goto out;
	}

	ufs_sec_set_unique_number(hba, desc_buf);
	ufs_sec_get_health_desc(hba);
	ufs_sec_get_ext_feature(hba, desc_buf);

	ufs_sec_wb_probe(hba, desc_buf);

	ufs_sec_add_sysfs_nodes(hba);

	atomic_notifier_chain_register(&panic_notifier_list,
			&ufs_sec_panic_notifier);

	ufs_sec_features.reboot_notify.notifier_call = ufs_sec_reboot_notify;
	register_reboot_notifier(&ufs_sec_features.reboot_notify);

	sec_ufs_node_dev->type = &ufs_type;

	INIT_DELAYED_WORK(&ufs_sec_features.noti_work, ufs_sec_err_noti_work);
out:
#if IS_ENABLED(CONFIG_SCSI_UFS_TEST_MODE)
	dev_info(hba->dev, "UFS test mode enabled\n");
#endif
	kfree(desc_buf);
}
void ufs_sec_remove_features(struct ufs_hba *hba)
{
	ufs_sec_remove_sysfs_nodes(hba);
	unregister_reboot_notifier(&ufs_sec_features.reboot_notify);
}
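
/*
 * Decode opcode/LBA/length/LUN from the CDB for logging. The field layout
 * assumed here matches 10-byte read/write style commands; other CDBs
 * yield best-effort values.
 */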
static bool ufs_sec_get_scsi_cmd_info(struct ufshcd_lrb *lrbp,
		struct ufs_sec_cmd_info *ufs_cmd)
{
	struct scsi_cmnd *cmd;

	if (!lrbp || !lrbp->cmd || !ufs_cmd)
		return false;

	cmd = lrbp->cmd;

	ufs_cmd->opcode = (u8)(*cmd->cmnd);
	ufs_cmd->lba = ((cmd->cmnd[2] << 24) | (cmd->cmnd[3] << 16) |
			(cmd->cmnd[4] << 8) | cmd->cmnd[5]);
	ufs_cmd->transfer_len = (cmd->cmnd[7] << 8) | cmd->cmnd[8];
	ufs_cmd->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);

	return true;
}
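
/*
 * UPIU priority hinting: reads and synchronous writes are marked high
 * priority, flushes head-of-queue and discards ordered, by OR-ing the
 * attribute into the UPIU header.
 */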
static void ufs_sec_customize_upiu_flags(struct ufshcd_lrb *lrbp)
{
	u8 upiu_flags = 0x0;
	struct request *rq;

	if (!lrbp->cmd || !lrbp->ucd_req_ptr)
		return;

	rq = scsi_cmd_to_rq(lrbp->cmd);
	switch (req_op(rq)) {
	case REQ_OP_READ:
		upiu_flags |= UPIU_CMD_PRIO_HIGH;
		break;
	case REQ_OP_WRITE:
		if (rq->cmd_flags & REQ_SYNC)
			upiu_flags |= UPIU_CMD_PRIO_HIGH;
		break;
	case REQ_OP_FLUSH:
		upiu_flags |= UPIU_TASK_ATTR_HEADQ;
		break;
	case REQ_OP_DISCARD:
		upiu_flags |= UPIU_TASK_ATTR_ORDERED;
		break;
	default:
		break;
	}

	lrbp->ucd_req_ptr->header.dword_0 |=
		UPIU_HEADER_DWORD(0, upiu_flags, 0, 0);
}
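
/*
 * android_vh_ufs_send_command hook: log the outgoing SCSI/NOP/query
 * command and, for device commands, snapshot the query opcode/type so a
 * later timeout can still be attributed to the right command.
 */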
static void sec_android_vh_ufs_send_command(void *data,
		struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_sec_cmd_info ufs_cmd = { 0, };
	struct ufs_query_req *request = NULL;
	enum dev_cmd_type cmd_type;
	enum query_opcode opcode;
	bool is_scsi_cmd = false;

	is_scsi_cmd = ufs_sec_get_scsi_cmd_info(lrbp, &ufs_cmd);

	if (is_scsi_cmd) {
		ufs_sec_customize_upiu_flags(lrbp);
		ufs_sec_log_cmd(hba, lrbp, UFS_SEC_CMD_SEND, &ufs_cmd, 0);
	} else {
		if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
			ufs_sec_log_cmd(hba, lrbp, UFS_SEC_NOP_SEND, NULL, 0);
		else if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
			ufs_sec_log_cmd(hba, lrbp, UFS_SEC_QUERY_SEND, NULL, 0);

		/* in timeout error case, last cmd is not completed */
		if (!ufs_sec_features.qcmd_complete)
			ufs_sec_inc_query_error(hba, lrbp, true);

		request = &hba->dev_cmd.query.request;
		opcode = request->upiu_req.opcode;
		cmd_type = hba->dev_cmd.type;

		ufs_sec_features.last_qcmd = opcode;
		ufs_sec_features.qcmd_type = cmd_type;
		ufs_sec_features.qcmd_complete = false;
	}
}
static void sec_android_vh_ufs_compl_command(void *data,
		struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_sec_cmd_info ufs_cmd = { 0, };
	bool is_scsi_cmd = false;
	int transfer_len = 0;

	is_scsi_cmd = ufs_sec_get_scsi_cmd_info(lrbp, &ufs_cmd);

	if (is_scsi_cmd) {
		ufs_sec_log_cmd(hba, lrbp, UFS_SEC_CMD_COMP, &ufs_cmd, 0);
		ufs_sec_inc_sense_err(lrbp, &ufs_cmd);

		/*
		 * Check hba->req_abort_count to see whether the cmd is being
		 * aborted; it is the only way to detect an abort here, and
		 * it is cleared in queuecommand and after error handling.
		 */
		if (hba->req_abort_count > 0)
			ufs_sec_inc_utp_error(hba, lrbp->task_tag);

		if (hba->dev_info.wb_enabled == WB_ON
				&& ufs_cmd.opcode == WRITE_10) {
			transfer_len = be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
			ufs_sec_wb_update_info(hba, transfer_len);
		}
	} else {
		if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
			ufs_sec_log_cmd(hba, lrbp, UFS_SEC_NOP_COMP, NULL, 0);
		else if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
			ufs_sec_log_cmd(hba, lrbp, UFS_SEC_QUERY_COMP, NULL, 0);

		ufs_sec_features.qcmd_complete = true;

		/* check and count errors, except timeouts */
		ufs_sec_inc_query_error(hba, lrbp, false);
	}
}
static void sec_android_vh_ufs_send_uic_command(void *data,
		struct ufs_hba *hba, const struct uic_command *ucmd, int str_t)
{
	u32 cmd;
	u8 cmd_id;

	if (str_t == UFS_CMD_SEND) {
		/* in timeout error case, last cmd is not completed */
		if (!ufs_sec_features.ucmd_complete)
			ufs_sec_inc_uic_cmd_error(ufs_sec_features.last_ucmd);

		cmd = ucmd->command;

		ufs_sec_features.last_ucmd = cmd;
		ufs_sec_features.ucmd_complete = false;

		cmd_id = (u8)(cmd & COMMAND_OPCODE_MASK);
		ufs_sec_log_cmd(hba, NULL, UFS_SEC_UIC_SEND, NULL, cmd_id);
	} else {
		cmd = ufshcd_readl(hba, REG_UIC_COMMAND);

		ufs_sec_features.ucmd_complete = true;

		if (((hba->active_uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT)
		     != UIC_CMD_RESULT_SUCCESS) ||
		    (str_t == UFS_CMD_ERR))
			ufs_sec_inc_uic_cmd_error(cmd);

		cmd_id = (u8)(cmd & COMMAND_OPCODE_MASK);
		ufs_sec_log_cmd(hba, NULL, UFS_SEC_UIC_COMP, NULL, cmd_id);
	}
}
static void sec_android_vh_ufs_send_tm_command(void *data,
		struct ufs_hba *hba, int tag, int str_t)
{
	struct utp_task_req_desc treq = { { 0 }, };
	u8 tm_func = 0;
	int sec_log_str_t = 0;

	memcpy(&treq, hba->utmrdl_base_addr + tag, sizeof(treq));
	tm_func = (be32_to_cpu(treq.upiu_req.req_header.dword_1) >> 16) & 0xFF;

	if (str_t == UFS_TM_SEND)
		sec_log_str_t = UFS_SEC_TM_SEND;
	else if (str_t == UFS_TM_COMP)
		sec_log_str_t = UFS_SEC_TM_COMP;
	else if (str_t == UFS_TM_ERR) {
		sec_log_str_t = UFS_SEC_TM_ERR;
		ufs_sec_inc_tm_error(tm_func);
	} else {
		dev_err(hba->dev, "%s: undefined ufs tm cmd\n", __func__);
		return;
	}

	ufs_sec_log_cmd(hba, NULL, sec_log_str_t, NULL, tm_func);

#if IS_ENABLED(CONFIG_SCSI_UFS_TEST_MODE)
	if (str_t == UFS_TM_COMP && !hba->eh_flags) {
		dev_err(hba->dev,
			"%s: ufs tm cmd succeeded and forced BUG is called\n",
			__func__);
		ssleep(2);
		ufs_sec_trigger_bug(hba);
	}
#endif
}
static void sec_android_vh_ufs_update_sdev(void *data, struct scsi_device *sdev)
{
	blk_queue_rq_timeout(sdev->request_queue, SCSI_UFS_TIMEOUT);
}
void ufs_sec_register_vendor_hooks(void)
{
	register_trace_android_vh_ufs_send_command(sec_android_vh_ufs_send_command, NULL);
	register_trace_android_vh_ufs_compl_command(sec_android_vh_ufs_compl_command, NULL);
	register_trace_android_vh_ufs_send_uic_command(sec_android_vh_ufs_send_uic_command, NULL);
	register_trace_android_vh_ufs_send_tm_command(sec_android_vh_ufs_send_tm_command, NULL);
	register_trace_android_vh_ufs_update_sdev(sec_android_vh_ufs_update_sdev, NULL);
}