// SPDX-License-Identifier: GPL-2.0-only
/*
 * DDR PHY Front End (DPFE) driver for Broadcom set top box SoCs
 *
 * Copyright (c) 2017 Broadcom
 */

/*
 * This driver provides access to the DPFE interface of Broadcom STB SoCs.
 * The firmware running on the DCPU inside the DDR PHY can provide current
 * information about the system's RAM, for instance the DRAM refresh rate.
 * This can be used as an indirect indicator for the DRAM's temperature.
 * Slower refresh rate means cooler RAM, higher refresh rate means hotter
 * RAM.
 *
 * Throughout the driver, we use readl_relaxed() and writel_relaxed(), which
 * already contain the appropriate le32_to_cpu()/cpu_to_le32() calls.
 *
 * Note regarding the loading of the firmware image: we use be32_to_cpu()
 * and le32_to_cpu(), so we can support the following four cases:
 *     - LE kernel + LE firmware image (the most common case)
 *     - LE kernel + BE firmware image
 *     - BE kernel + LE firmware image
 *     - BE kernel + BE firmware image
 *
 * The DCPU always runs in big endian mode. The firmware image, however, can
 * be in either format. Also, communication between host CPU and DCPU is
 * always in little endian.
 */
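
/*
 * The driver exposes its data through sysfs: dpfe_info, dpfe_refresh and
 * dpfe_vendor on API v2 systems, and dpfe_info and dpfe_dram on API v3
 * systems (see the attribute groups declared below).
 */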

#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#define DRVNAME			"brcmstb-dpfe"

/* DCPU register offsets */
#define REG_DCPU_RESET		0x0
#define REG_TO_DCPU_MBOX	0x10
#define REG_TO_HOST_MBOX	0x14

/* Macros to process offsets returned by the DCPU */
#define DRAM_MSG_ADDR_OFFSET	0x0
#define DRAM_MSG_TYPE_OFFSET	0x1c
#define DRAM_MSG_ADDR_MASK	((1UL << DRAM_MSG_TYPE_OFFSET) - 1)
#define DRAM_MSG_TYPE_MASK	((1UL << \
				 (BITS_PER_LONG - DRAM_MSG_TYPE_OFFSET)) - 1)
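
/*
 * A reply word from the DCPU is split into two fields: the bits at and above
 * DRAM_MSG_TYPE_OFFSET carry the message type, the lower bits carry the
 * offset of the reply data. get_msg_ptr() uses these masks to decode it.
 */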

/* Message RAM */
#define DCPU_MSG_RAM_START	0x100
#define DCPU_MSG_RAM(x)		(DCPU_MSG_RAM_START + (x) * sizeof(u32))

/* DRAM Info Offsets & Masks */
#define DRAM_INFO_INTERVAL	0x0
#define DRAM_INFO_MR4		0x4
#define DRAM_INFO_ERROR		0x8
#define DRAM_INFO_MR4_MASK	0xff
#define DRAM_INFO_MR4_SHIFT	24	/* We need to look at byte 3 */

/* DRAM MR4 Offsets & Masks */
#define DRAM_MR4_REFRESH	0x0	/* Refresh rate */
#define DRAM_MR4_SR_ABORT	0x3	/* Self Refresh Abort */
#define DRAM_MR4_PPRE		0x4	/* Post-package repair entry/exit */
#define DRAM_MR4_TH_OFFS	0x5	/* Thermal Offset; vendor specific */
#define DRAM_MR4_TUF		0x7	/* Temperature Update Flag */

#define DRAM_MR4_REFRESH_MASK	0x7
#define DRAM_MR4_SR_ABORT_MASK	0x1
#define DRAM_MR4_PPRE_MASK	0x1
#define DRAM_MR4_TH_OFFS_MASK	0x3
#define DRAM_MR4_TUF_MASK	0x1

/* DRAM Vendor Offsets & Masks (API v2) */
#define DRAM_VENDOR_MR5		0x0
#define DRAM_VENDOR_MR6		0x4
#define DRAM_VENDOR_MR7		0x8
#define DRAM_VENDOR_MR8		0xc
#define DRAM_VENDOR_ERROR	0x10
#define DRAM_VENDOR_MASK	0xff
#define DRAM_VENDOR_SHIFT	24	/* We need to look at byte 3 */

/* DRAM Information Offsets & Masks (API v3) */
#define DRAM_DDR_INFO_MR4	0x0
#define DRAM_DDR_INFO_MR5	0x4
#define DRAM_DDR_INFO_MR6	0x8
#define DRAM_DDR_INFO_MR7	0xc
#define DRAM_DDR_INFO_MR8	0x10
#define DRAM_DDR_INFO_ERROR	0x14
#define DRAM_DDR_INFO_MASK	0xff

/* Reset register bits & masks */
#define DCPU_RESET_SHIFT	0x0
#define DCPU_RESET_MASK		0x1
#define DCPU_CLK_DISABLE_SHIFT	0x2

/* DCPU return codes */
#define DCPU_RET_ERROR_BIT	BIT(31)
#define DCPU_RET_SUCCESS	0x1
#define DCPU_RET_ERR_HEADER	(DCPU_RET_ERROR_BIT | BIT(0))
#define DCPU_RET_ERR_INVAL	(DCPU_RET_ERROR_BIT | BIT(1))
#define DCPU_RET_ERR_CHKSUM	(DCPU_RET_ERROR_BIT | BIT(2))
#define DCPU_RET_ERR_COMMAND	(DCPU_RET_ERROR_BIT | BIT(3))
/* This error code is not firmware defined and only used in the driver. */
#define DCPU_RET_ERR_TIMEDOUT	(DCPU_RET_ERROR_BIT | BIT(4))

/* Firmware magic */
#define DPFE_BE_MAGIC		0xfe1010fe
#define DPFE_LE_MAGIC		0xfe0101fe

/* Error codes */
#define ERR_INVALID_MAGIC	-1
#define ERR_INVALID_SIZE	-2
#define ERR_INVALID_CHKSUM	-3

/* Message types */
#define DPFE_MSG_TYPE_COMMAND	1
#define DPFE_MSG_TYPE_RESPONSE	2

#define DELAY_LOOP_MAX		1000

enum dpfe_msg_fields {
	MSG_HEADER,
	MSG_COMMAND,
	MSG_ARG_COUNT,
	MSG_ARG0,
	MSG_FIELD_MAX	= 16 /* Max number of arguments */
};
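
/*
 * A command is laid out in message RAM as MSG_HEADER, MSG_COMMAND,
 * MSG_ARG_COUNT and the arguments, followed by a checksum word that is the
 * sum of all preceding fields (see __send_command() below).
 */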

enum dpfe_commands {
	DPFE_CMD_GET_INFO,
	DPFE_CMD_GET_REFRESH,
	DPFE_CMD_GET_VENDOR,
	DPFE_CMD_MAX /* Last entry */
};

/*
 * Format of the binary firmware file:
 *
 *   entry
 *      0    header
 *           value:  0xfe0101fe  <== little endian
 *                   0xfe1010fe  <== big endian
 *      1    sequence:
 *           [31:16] total segments on this build
 *           [15:0]  this segment sequence.
 *      2    FW version
 *      3    IMEM byte size
 *      4    DMEM byte size
 *           IMEM
 *           DMEM
 *      last checksum ==> sum of everything
 */
struct dpfe_firmware_header {
	u32 magic;
	u32 sequence;
	u32 version;
	u32 imem_size;
	u32 dmem_size;
};

/* Things we only need during initialization. */
struct init_data {
	unsigned int dmem_len;
	unsigned int imem_len;
	unsigned int chksum;
	bool is_big_endian;
};

/* API version and corresponding commands */
struct dpfe_api {
	int version;
	const char *fw_name;
	const struct attribute_group **sysfs_attrs;
	u32 command[DPFE_CMD_MAX][MSG_FIELD_MAX];
};

/* Things we need for as long as we are active. */
struct brcmstb_dpfe_priv {
	void __iomem *regs;
	void __iomem *dmem;
	void __iomem *imem;
	struct device *dev;
	const struct dpfe_api *dpfe_api;
	struct mutex lock;
};
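
/*
 * priv->lock serializes accesses to the DCPU control register and the
 * mailbox (see is_dcpu_enabled() and __send_command()).
 */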

/*
 * Forward declaration of our sysfs attribute functions, so we can declare the
 * attribute data structures early.
 */
static ssize_t show_info(struct device *, struct device_attribute *, char *);
static ssize_t show_refresh(struct device *, struct device_attribute *, char *);
static ssize_t store_refresh(struct device *, struct device_attribute *,
			     const char *, size_t);
static ssize_t show_vendor(struct device *, struct device_attribute *, char *);
static ssize_t show_dram(struct device *, struct device_attribute *, char *);

/*
 * Declare our attributes early, so they can be referenced in the API data
 * structure. We need to do this, because the attributes depend on the API
 * version.
 */
static DEVICE_ATTR(dpfe_info, 0444, show_info, NULL);
static DEVICE_ATTR(dpfe_refresh, 0644, show_refresh, store_refresh);
static DEVICE_ATTR(dpfe_vendor, 0444, show_vendor, NULL);
static DEVICE_ATTR(dpfe_dram, 0444, show_dram, NULL);

/* API v2 sysfs attributes */
static struct attribute *dpfe_v2_attrs[] = {
	&dev_attr_dpfe_info.attr,
	&dev_attr_dpfe_refresh.attr,
	&dev_attr_dpfe_vendor.attr,
	NULL
};
ATTRIBUTE_GROUPS(dpfe_v2);

/* API v3 sysfs attributes */
static struct attribute *dpfe_v3_attrs[] = {
	&dev_attr_dpfe_info.attr,
	&dev_attr_dpfe_dram.attr,
	NULL
};
ATTRIBUTE_GROUPS(dpfe_v3);

/*
 * Old API v2 firmware commands, as defined in the rev 0.61 specification. We
 * use a version number of 1 to denote that this API is not compatible with
 * the new API v2 and onwards.
 */
static const struct dpfe_api dpfe_api_old_v2 = {
	.version = 1,
	.fw_name = "dpfe.bin",
	.sysfs_attrs = dpfe_v2_groups,
	.command = {
		[DPFE_CMD_GET_INFO] = {
			[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
			[MSG_COMMAND] = 1,
			[MSG_ARG_COUNT] = 1,
			[MSG_ARG0] = 1,
		},
		[DPFE_CMD_GET_REFRESH] = {
			[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
			[MSG_COMMAND] = 2,
			[MSG_ARG_COUNT] = 1,
			[MSG_ARG0] = 1,
		},
		[DPFE_CMD_GET_VENDOR] = {
			[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
			[MSG_COMMAND] = 2,
			[MSG_ARG_COUNT] = 1,
			[MSG_ARG0] = 2,
		},
	}
};

/*
 * API v2 firmware commands, as defined in the rev 0.8 specification, named
 * new v2 here
 */
static const struct dpfe_api dpfe_api_new_v2 = {
	.version = 2,
	.fw_name = NULL, /* We expect the firmware to have been downloaded! */
	.sysfs_attrs = dpfe_v2_groups,
	.command = {
		[DPFE_CMD_GET_INFO] = {
			[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
			[MSG_COMMAND] = 0x101,
		},
		[DPFE_CMD_GET_REFRESH] = {
			[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
			[MSG_COMMAND] = 0x201,
		},
		[DPFE_CMD_GET_VENDOR] = {
			[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
			[MSG_COMMAND] = 0x202,
		},
	}
};

/* API v3 firmware commands */
static const struct dpfe_api dpfe_api_v3 = {
	.version = 3,
	.fw_name = NULL, /* We expect the firmware to have been downloaded! */
	.sysfs_attrs = dpfe_v3_groups,
	.command = {
		[DPFE_CMD_GET_INFO] = {
			[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
			[MSG_COMMAND] = 0x0101,
			[MSG_ARG_COUNT] = 1,
			[MSG_ARG0] = 1,
		},
		[DPFE_CMD_GET_REFRESH] = {
			[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
			[MSG_COMMAND] = 0x0202,
			[MSG_ARG_COUNT] = 0,
		},
		/* There's no GET_VENDOR command in API v3. */
	},
};
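
/*
 * Note on reply data: with API v2, GET_REFRESH and GET_VENDOR return an
 * offset that get_msg_ptr() translates into a pointer into message RAM or
 * DMEM. With API v3, GET_REFRESH returns the MR4-MR8 and error values
 * directly in the response words (see show_dram()).
 */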

static const char *get_error_text(unsigned int i)
{
	static const char * const error_text[] = {
		"Success", "Header code incorrect",
		"Unknown command or argument", "Incorrect checksum",
		"Malformed command", "Timed out", "Unknown error",
	};

	if (unlikely(i >= ARRAY_SIZE(error_text)))
		i = ARRAY_SIZE(error_text) - 1;

	return error_text[i];
}

static bool is_dcpu_enabled(struct brcmstb_dpfe_priv *priv)
{
	u32 val;

	mutex_lock(&priv->lock);
	val = readl_relaxed(priv->regs + REG_DCPU_RESET);
	mutex_unlock(&priv->lock);

	return !(val & DCPU_RESET_MASK);
}

static void __disable_dcpu(struct brcmstb_dpfe_priv *priv)
{
	u32 val;

	if (!is_dcpu_enabled(priv))
		return;

	mutex_lock(&priv->lock);

	/* Put DCPU in reset if it's running. */
	val = readl_relaxed(priv->regs + REG_DCPU_RESET);
	val |= (1 << DCPU_RESET_SHIFT);
	writel_relaxed(val, priv->regs + REG_DCPU_RESET);

	mutex_unlock(&priv->lock);
}

static void __enable_dcpu(struct brcmstb_dpfe_priv *priv)
{
	void __iomem *regs = priv->regs;
	u32 val;

	mutex_lock(&priv->lock);

	/* Clear mailbox registers. */
	writel_relaxed(0, regs + REG_TO_DCPU_MBOX);
	writel_relaxed(0, regs + REG_TO_HOST_MBOX);

	/* Disable DCPU clock gating */
	val = readl_relaxed(regs + REG_DCPU_RESET);
	val &= ~(1 << DCPU_CLK_DISABLE_SHIFT);
	writel_relaxed(val, regs + REG_DCPU_RESET);

	/* Take DCPU out of reset */
	val = readl_relaxed(regs + REG_DCPU_RESET);
	val &= ~(1 << DCPU_RESET_SHIFT);
	writel_relaxed(val, regs + REG_DCPU_RESET);

	mutex_unlock(&priv->lock);
}

static unsigned int get_msg_chksum(const u32 msg[], unsigned int max)
{
	unsigned int sum = 0;
	unsigned int i;

	/* Don't include the last field in the checksum. */
	for (i = 0; i < max; i++)
		sum += msg[i];

	return sum;
}

static void __iomem *get_msg_ptr(struct brcmstb_dpfe_priv *priv, u32 response,
				 char *buf, ssize_t *size)
{
	unsigned int msg_type;
	unsigned int offset;
	void __iomem *ptr = NULL;

	/* There is no need to use this function for API v3 or later. */
	if (unlikely(priv->dpfe_api->version >= 3))
		return NULL;

	msg_type = (response >> DRAM_MSG_TYPE_OFFSET) & DRAM_MSG_TYPE_MASK;
	offset = (response >> DRAM_MSG_ADDR_OFFSET) & DRAM_MSG_ADDR_MASK;

	/*
	 * msg_type == 1: the offset is relative to the message RAM
	 * msg_type == 0: the offset is relative to the data RAM (this is the
	 *                previous way of passing data)
	 * msg_type is anything else: there's a critical hardware problem
	 */
	switch (msg_type) {
	case 1:
		ptr = priv->regs + DCPU_MSG_RAM_START + offset;
		break;
	case 0:
		ptr = priv->dmem + offset;
		break;
	default:
		dev_emerg(priv->dev, "invalid message reply from DCPU: %#x\n",
			  response);
		if (buf && size)
			*size = sprintf(buf,
				"FATAL: communication error with DCPU\n");
	}

	return ptr;
}

static void __finalize_command(struct brcmstb_dpfe_priv *priv)
{
	unsigned int release_mbox;

	/*
	 * Which MBOX register we have to write to signal we are done depends
	 * on the API version.
	 */
	release_mbox = (priv->dpfe_api->version < 2)
			? REG_TO_HOST_MBOX : REG_TO_DCPU_MBOX;
	writel_relaxed(0, priv->regs + release_mbox);
}

static int __send_command(struct brcmstb_dpfe_priv *priv, unsigned int cmd,
			  u32 result[])
{
	void __iomem *regs = priv->regs;
	unsigned int i, chksum, chksum_idx;
	const u32 *msg;
	int ret = 0;
	u32 resp;

	if (cmd >= DPFE_CMD_MAX)
		return -1;

	msg = priv->dpfe_api->command[cmd];

	mutex_lock(&priv->lock);

	/* Wait for DCPU to become ready */
	for (i = 0; i < DELAY_LOOP_MAX; i++) {
		resp = readl_relaxed(regs + REG_TO_HOST_MBOX);
		if (resp == 0)
			break;
		msleep(1);
	}
	if (resp != 0) {
		mutex_unlock(&priv->lock);
		return -ffs(DCPU_RET_ERR_TIMEDOUT);
	}

	/* Compute checksum over the message */
	chksum_idx = msg[MSG_ARG_COUNT] + MSG_ARG_COUNT + 1;
	chksum = get_msg_chksum(msg, chksum_idx);

	/* Write command and arguments to message area */
	for (i = 0; i < MSG_FIELD_MAX; i++) {
		if (i == chksum_idx)
			writel_relaxed(chksum, regs + DCPU_MSG_RAM(i));
		else
			writel_relaxed(msg[i], regs + DCPU_MSG_RAM(i));
	}

	/* Tell DCPU there is a command waiting */
	writel_relaxed(1, regs + REG_TO_DCPU_MBOX);

	/* Wait for DCPU to process the command */
	for (i = 0; i < DELAY_LOOP_MAX; i++) {
		/* Read response code */
		resp = readl_relaxed(regs + REG_TO_HOST_MBOX);
		if (resp > 0)
			break;
		msleep(1);
	}

	if (i == DELAY_LOOP_MAX) {
		resp = (DCPU_RET_ERR_TIMEDOUT & ~DCPU_RET_ERROR_BIT);
		ret = -ffs(resp);
	} else {
		/* Read response data */
		for (i = 0; i < MSG_FIELD_MAX; i++)
			result[i] = readl_relaxed(regs + DCPU_MSG_RAM(i));
		chksum_idx = result[MSG_ARG_COUNT] + MSG_ARG_COUNT + 1;
	}

	/* Tell DCPU we are done */
	__finalize_command(priv);

	mutex_unlock(&priv->lock);

	if (ret)
		return ret;

	/* Verify response */
	chksum = get_msg_chksum(result, chksum_idx);
	if (chksum != result[chksum_idx])
		resp = DCPU_RET_ERR_CHKSUM;

	if (resp != DCPU_RET_SUCCESS) {
		resp &= ~DCPU_RET_ERROR_BIT;
		ret = -ffs(resp);
	}

	return ret;
}

/* Ensure that the firmware file loaded meets all the requirements. */
static int __verify_firmware(struct init_data *init,
			     const struct firmware *fw)
{
	const struct dpfe_firmware_header *header = (void *)fw->data;
	unsigned int dmem_size, imem_size, total_size;
	bool is_big_endian = false;
	const u32 *chksum_ptr;

	if (header->magic == DPFE_BE_MAGIC)
		is_big_endian = true;
	else if (header->magic != DPFE_LE_MAGIC)
		return ERR_INVALID_MAGIC;

	if (is_big_endian) {
		dmem_size = be32_to_cpu(header->dmem_size);
		imem_size = be32_to_cpu(header->imem_size);
	} else {
		dmem_size = le32_to_cpu(header->dmem_size);
		imem_size = le32_to_cpu(header->imem_size);
	}

	/* Data and instruction sections are 32 bit words. */
	if ((dmem_size % sizeof(u32)) != 0 || (imem_size % sizeof(u32)) != 0)
		return ERR_INVALID_SIZE;

	/*
	 * The header + the data section + the instruction section + the
	 * checksum must be equal to the total firmware size.
	 */
	total_size = dmem_size + imem_size + sizeof(*header) +
		sizeof(*chksum_ptr);
	if (total_size != fw->size)
		return ERR_INVALID_SIZE;

	/* The checksum comes at the very end. */
	chksum_ptr = (void *)fw->data + sizeof(*header) + dmem_size + imem_size;

	init->is_big_endian = is_big_endian;
	init->dmem_len = dmem_size;
	init->imem_len = imem_size;
	init->chksum = (is_big_endian)
		? be32_to_cpu(*chksum_ptr) : le32_to_cpu(*chksum_ptr);

	return 0;
}

/* Verify checksum by reading back the firmware from co-processor RAM. */
static int __verify_fw_checksum(struct init_data *init,
				struct brcmstb_dpfe_priv *priv,
				const struct dpfe_firmware_header *header,
				u32 checksum)
{
	u32 magic, sequence, version, sum;
	u32 __iomem *dmem = priv->dmem;
	u32 __iomem *imem = priv->imem;
	unsigned int i;

	if (init->is_big_endian) {
		magic = be32_to_cpu(header->magic);
		sequence = be32_to_cpu(header->sequence);
		version = be32_to_cpu(header->version);
	} else {
		magic = le32_to_cpu(header->magic);
		sequence = le32_to_cpu(header->sequence);
		version = le32_to_cpu(header->version);
	}

	sum = magic + sequence + version + init->dmem_len + init->imem_len;

	for (i = 0; i < init->dmem_len / sizeof(u32); i++)
		sum += readl_relaxed(dmem + i);

	for (i = 0; i < init->imem_len / sizeof(u32); i++)
		sum += readl_relaxed(imem + i);

	return (sum == checksum) ? 0 : -1;
}

static int __write_firmware(u32 __iomem *mem, const u32 *fw,
			    unsigned int size, bool is_big_endian)
{
	unsigned int i;

	/* Convert size to 32-bit words. */
	size /= sizeof(u32);

	/* It is recommended to clear the firmware area first. */
	for (i = 0; i < size; i++)
		writel_relaxed(0, mem + i);

	/* Now copy it. */
	if (is_big_endian) {
		for (i = 0; i < size; i++)
			writel_relaxed(be32_to_cpu(fw[i]), mem + i);
	} else {
		for (i = 0; i < size; i++)
			writel_relaxed(le32_to_cpu(fw[i]), mem + i);
	}

	return 0;
}

static int brcmstb_dpfe_download_firmware(struct brcmstb_dpfe_priv *priv)
{
	const struct dpfe_firmware_header *header;
	unsigned int dmem_size, imem_size;
	struct device *dev = priv->dev;
	bool is_big_endian = false;
	const struct firmware *fw;
	const u32 *dmem, *imem;
	struct init_data init;
	const void *fw_blob;
	int ret;

	/*
	 * Skip downloading the firmware if the DCPU is already running and
	 * responding to commands.
	 */
	if (is_dcpu_enabled(priv)) {
		u32 response[MSG_FIELD_MAX];

		ret = __send_command(priv, DPFE_CMD_GET_INFO, response);
		if (!ret)
			return 0;
	}

	/*
	 * If the firmware filename is NULL it means the boot firmware has to
	 * download the DCPU firmware for us. If that didn't work, we have to
	 * bail, since downloading it ourselves wouldn't work either.
	 */
	if (!priv->dpfe_api->fw_name)
		return -ENODEV;

	ret = firmware_request_nowarn(&fw, priv->dpfe_api->fw_name, dev);
	/*
	 * Defer the firmware download if the firmware file couldn't be found.
	 * The root file system may not be available yet.
	 */
	if (ret)
		return (ret == -ENOENT) ? -EPROBE_DEFER : ret;

	ret = __verify_firmware(&init, fw);
	if (ret) {
		ret = -EFAULT;
		goto release_fw;
	}

	__disable_dcpu(priv);

	is_big_endian = init.is_big_endian;
	dmem_size = init.dmem_len;
	imem_size = init.imem_len;

	/* At the beginning of the firmware blob is a header. */
	header = (struct dpfe_firmware_header *)fw->data;
	/* Void pointer to the beginning of the actual firmware. */
	fw_blob = fw->data + sizeof(*header);
	/* IMEM comes right after the header. */
	imem = fw_blob;
	/* DMEM follows after IMEM. */
	dmem = fw_blob + imem_size;

	ret = __write_firmware(priv->dmem, dmem, dmem_size, is_big_endian);
	if (ret)
		goto release_fw;
	ret = __write_firmware(priv->imem, imem, imem_size, is_big_endian);
	if (ret)
		goto release_fw;

	ret = __verify_fw_checksum(&init, priv, header, init.chksum);
	if (ret)
		goto release_fw;

	__enable_dcpu(priv);

release_fw:
	release_firmware(fw);
	return ret;
}

static ssize_t generic_show(unsigned int command, u32 response[],
			    struct brcmstb_dpfe_priv *priv, char *buf)
{
	int ret;

	if (!priv)
		return sprintf(buf, "ERROR: driver private data not set\n");

	ret = __send_command(priv, command, response);
	if (ret < 0)
		return sprintf(buf, "ERROR: %s\n", get_error_text(-ret));

	return 0;
}

static ssize_t show_info(struct device *dev, struct device_attribute *devattr,
			 char *buf)
{
	u32 response[MSG_FIELD_MAX];
	struct brcmstb_dpfe_priv *priv;
	unsigned int info;
	ssize_t ret;

	priv = dev_get_drvdata(dev);
	ret = generic_show(DPFE_CMD_GET_INFO, response, priv, buf);
	if (ret)
		return ret;

	info = response[MSG_ARG0];

	return sprintf(buf, "%u.%u.%u.%u\n",
		       (info >> 24) & 0xff,
		       (info >> 16) & 0xff,
		       (info >> 8) & 0xff,
		       info & 0xff);
}
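
/*
 * Reading dpfe_refresh returns seven hexadecimal values: the refresh
 * interval, the decoded MR4 fields (refresh rate, SR abort, PPRE, thermal
 * offset, TUF) and the error word.
 */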
static ssize_t show_refresh(struct device *dev,
			    struct device_attribute *devattr, char *buf)
{
	u32 response[MSG_FIELD_MAX];
	void __iomem *info;
	struct brcmstb_dpfe_priv *priv;
	u8 refresh, sr_abort, ppre, thermal_offs, tuf;
	u32 mr4;
	ssize_t ret;

	priv = dev_get_drvdata(dev);
	ret = generic_show(DPFE_CMD_GET_REFRESH, response, priv, buf);
	if (ret)
		return ret;

	info = get_msg_ptr(priv, response[MSG_ARG0], buf, &ret);
	if (!info)
		return ret;

	mr4 = (readl_relaxed(info + DRAM_INFO_MR4) >> DRAM_INFO_MR4_SHIFT) &
	       DRAM_INFO_MR4_MASK;

	refresh = (mr4 >> DRAM_MR4_REFRESH) & DRAM_MR4_REFRESH_MASK;
	sr_abort = (mr4 >> DRAM_MR4_SR_ABORT) & DRAM_MR4_SR_ABORT_MASK;
	ppre = (mr4 >> DRAM_MR4_PPRE) & DRAM_MR4_PPRE_MASK;
	thermal_offs = (mr4 >> DRAM_MR4_TH_OFFS) & DRAM_MR4_TH_OFFS_MASK;
	tuf = (mr4 >> DRAM_MR4_TUF) & DRAM_MR4_TUF_MASK;

	return sprintf(buf, "%#x %#x %#x %#x %#x %#x %#x\n",
		       readl_relaxed(info + DRAM_INFO_INTERVAL),
		       refresh, sr_abort, ppre, thermal_offs, tuf,
		       readl_relaxed(info + DRAM_INFO_ERROR));
}
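
/*
 * Writing a number to dpfe_refresh updates the refresh interval word in the
 * data area returned by the GET_REFRESH command.
 */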
static ssize_t store_refresh(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	u32 response[MSG_FIELD_MAX];
	struct brcmstb_dpfe_priv *priv;
	void __iomem *info;
	unsigned long val;
	int ret;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	priv = dev_get_drvdata(dev);
	ret = __send_command(priv, DPFE_CMD_GET_REFRESH, response);
	if (ret)
		return ret;

	info = get_msg_ptr(priv, response[MSG_ARG0], NULL, NULL);
	if (!info)
		return -EIO;

	writel_relaxed(val, info + DRAM_INFO_INTERVAL);

	return count;
}

static ssize_t show_vendor(struct device *dev, struct device_attribute *devattr,
			   char *buf)
{
	u32 response[MSG_FIELD_MAX];
	struct brcmstb_dpfe_priv *priv;
	void __iomem *info;
	ssize_t ret;
	u32 mr5, mr6, mr7, mr8, err;

	priv = dev_get_drvdata(dev);
	ret = generic_show(DPFE_CMD_GET_VENDOR, response, priv, buf);
	if (ret)
		return ret;

	info = get_msg_ptr(priv, response[MSG_ARG0], buf, &ret);
	if (!info)
		return ret;

	mr5 = (readl_relaxed(info + DRAM_VENDOR_MR5) >> DRAM_VENDOR_SHIFT) &
	       DRAM_VENDOR_MASK;
	mr6 = (readl_relaxed(info + DRAM_VENDOR_MR6) >> DRAM_VENDOR_SHIFT) &
	       DRAM_VENDOR_MASK;
	mr7 = (readl_relaxed(info + DRAM_VENDOR_MR7) >> DRAM_VENDOR_SHIFT) &
	       DRAM_VENDOR_MASK;
	mr8 = (readl_relaxed(info + DRAM_VENDOR_MR8) >> DRAM_VENDOR_SHIFT) &
	       DRAM_VENDOR_MASK;
	err = readl_relaxed(info + DRAM_VENDOR_ERROR) & DRAM_VENDOR_MASK;

	return sprintf(buf, "%#x %#x %#x %#x %#x\n", mr5, mr6, mr7, mr8, err);
}

static ssize_t show_dram(struct device *dev, struct device_attribute *devattr,
			 char *buf)
{
	u32 response[MSG_FIELD_MAX];
	struct brcmstb_dpfe_priv *priv;
	ssize_t ret;
	u32 mr4, mr5, mr6, mr7, mr8, err;

	priv = dev_get_drvdata(dev);
	ret = generic_show(DPFE_CMD_GET_REFRESH, response, priv, buf);
	if (ret)
		return ret;

	mr4 = response[MSG_ARG0 + 0] & DRAM_INFO_MR4_MASK;
	mr5 = response[MSG_ARG0 + 1] & DRAM_DDR_INFO_MASK;
	mr6 = response[MSG_ARG0 + 2] & DRAM_DDR_INFO_MASK;
	mr7 = response[MSG_ARG0 + 3] & DRAM_DDR_INFO_MASK;
	mr8 = response[MSG_ARG0 + 4] & DRAM_DDR_INFO_MASK;
	err = response[MSG_ARG0 + 5] & DRAM_DDR_INFO_MASK;

	return sprintf(buf, "%#x %#x %#x %#x %#x %#x\n", mr4, mr5, mr6, mr7,
		       mr8, err);
}

static int brcmstb_dpfe_resume(struct platform_device *pdev)
{
	struct brcmstb_dpfe_priv *priv = platform_get_drvdata(pdev);

	return brcmstb_dpfe_download_firmware(priv);
}

static int brcmstb_dpfe_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct brcmstb_dpfe_priv *priv;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;

	mutex_init(&priv->lock);
	platform_set_drvdata(pdev, priv);

	priv->regs = devm_platform_ioremap_resource_byname(pdev, "dpfe-cpu");
	if (IS_ERR(priv->regs)) {
		dev_err(dev, "couldn't map DCPU registers\n");
		return -ENODEV;
	}

	priv->dmem = devm_platform_ioremap_resource_byname(pdev, "dpfe-dmem");
	if (IS_ERR(priv->dmem)) {
		dev_err(dev, "Couldn't map DCPU data memory\n");
		return -ENOENT;
	}

	priv->imem = devm_platform_ioremap_resource_byname(pdev, "dpfe-imem");
	if (IS_ERR(priv->imem)) {
		dev_err(dev, "Couldn't map DCPU instruction memory\n");
		return -ENOENT;
	}

	priv->dpfe_api = of_device_get_match_data(dev);
	if (unlikely(!priv->dpfe_api)) {
		/*
		 * It should be impossible to end up here, but to be safe we
		 * check anyway.
		 */
		dev_err(dev, "Couldn't determine API\n");
		return -ENOENT;
	}

	ret = brcmstb_dpfe_download_firmware(priv);
	if (ret)
		return dev_err_probe(dev, ret, "Couldn't download firmware\n");

	ret = sysfs_create_groups(&pdev->dev.kobj, priv->dpfe_api->sysfs_attrs);
	if (!ret)
		dev_info(dev, "registered with API v%d.\n",
			 priv->dpfe_api->version);

	return ret;
}

static int brcmstb_dpfe_remove(struct platform_device *pdev)
{
	struct brcmstb_dpfe_priv *priv = dev_get_drvdata(&pdev->dev);

	sysfs_remove_groups(&pdev->dev.kobj, priv->dpfe_api->sysfs_attrs);

	return 0;
}

static const struct of_device_id brcmstb_dpfe_of_match[] = {
	/* Use legacy API v2 for a select number of chips */
	{ .compatible = "brcm,bcm7268-dpfe-cpu", .data = &dpfe_api_old_v2 },
	{ .compatible = "brcm,bcm7271-dpfe-cpu", .data = &dpfe_api_old_v2 },
	{ .compatible = "brcm,bcm7278-dpfe-cpu", .data = &dpfe_api_old_v2 },
	{ .compatible = "brcm,bcm7211-dpfe-cpu", .data = &dpfe_api_new_v2 },
	/* API v3 is the default going forward */
	{ .compatible = "brcm,dpfe-cpu", .data = &dpfe_api_v3 },
	{}
};
MODULE_DEVICE_TABLE(of, brcmstb_dpfe_of_match);

static struct platform_driver brcmstb_dpfe_driver = {
	.driver	= {
		.name = DRVNAME,
		.of_match_table = brcmstb_dpfe_of_match,
	},
	.probe = brcmstb_dpfe_probe,
	.remove	= brcmstb_dpfe_remove,
	.resume = brcmstb_dpfe_resume,
};

module_platform_driver(brcmstb_dpfe_driver);

MODULE_AUTHOR("Markus Mayer <[email protected]>");
MODULE_DESCRIPTION("BRCMSTB DDR PHY Front End Driver");
MODULE_LICENSE("GPL");