/* peci-aspeed.c - ASPEED PECI controller driver */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. // Copyright (c) 2012-2017 ASPEED Technology Inc.
  3. // Copyright (c) 2018-2021 Intel Corporation
  4. #include <asm/unaligned.h>
  5. #include <linux/bitfield.h>
  6. #include <linux/clk.h>
  7. #include <linux/clkdev.h>
  8. #include <linux/clk-provider.h>
  9. #include <linux/delay.h>
  10. #include <linux/interrupt.h>
  11. #include <linux/io.h>
  12. #include <linux/iopoll.h>
  13. #include <linux/jiffies.h>
  14. #include <linux/math.h>
  15. #include <linux/module.h>
  16. #include <linux/of.h>
  17. #include <linux/peci.h>
  18. #include <linux/platform_device.h>
  19. #include <linux/reset.h>
/* ASPEED PECI Registers */
/* Control Register */
#define ASPEED_PECI_CTRL 0x00
#define ASPEED_PECI_CTRL_SAMPLING_MASK GENMASK(19, 16)
#define ASPEED_PECI_CTRL_RD_MODE_MASK GENMASK(13, 12)
#define ASPEED_PECI_CTRL_RD_MODE_DBG BIT(13)
#define ASPEED_PECI_CTRL_RD_MODE_COUNT BIT(12)
#define ASPEED_PECI_CTRL_CLK_SRC_HCLK BIT(11)
#define ASPEED_PECI_CTRL_CLK_DIV_MASK GENMASK(10, 8)
#define ASPEED_PECI_CTRL_INVERT_OUT BIT(7)
#define ASPEED_PECI_CTRL_INVERT_IN BIT(6)
#define ASPEED_PECI_CTRL_BUS_CONTENTION_EN BIT(5)
#define ASPEED_PECI_CTRL_PECI_EN BIT(4)
#define ASPEED_PECI_CTRL_PECI_CLK_EN BIT(0)
/* Timing Negotiation Register */
#define ASPEED_PECI_TIMING_NEGOTIATION 0x04
#define ASPEED_PECI_T_NEGO_MSG_MASK GENMASK(15, 8)
#define ASPEED_PECI_T_NEGO_ADDR_MASK GENMASK(7, 0)
/* Command Register */
#define ASPEED_PECI_CMD 0x08
#define ASPEED_PECI_CMD_PIN_MONITORING BIT(31)
#define ASPEED_PECI_CMD_STS_MASK GENMASK(27, 24)
#define ASPEED_PECI_CMD_STS_ADDR_T_NEGO 0x3
/* Controller is idle only when both the status field and pin monitoring are clear */
#define ASPEED_PECI_CMD_IDLE_MASK \
	(ASPEED_PECI_CMD_STS_MASK | ASPEED_PECI_CMD_PIN_MONITORING)
#define ASPEED_PECI_CMD_FIRE BIT(0)
/* Read/Write Length Register */
#define ASPEED_PECI_RW_LENGTH 0x0c
#define ASPEED_PECI_AW_FCS_EN BIT(31)
#define ASPEED_PECI_RD_LEN_MASK GENMASK(23, 16)
#define ASPEED_PECI_WR_LEN_MASK GENMASK(15, 8)
#define ASPEED_PECI_TARGET_ADDR_MASK GENMASK(7, 0)
/* Expected FCS Data Register */
#define ASPEED_PECI_EXPECTED_FCS 0x10
#define ASPEED_PECI_EXPECTED_RD_FCS_MASK GENMASK(23, 16)
#define ASPEED_PECI_EXPECTED_AW_FCS_AUTO_MASK GENMASK(15, 8)
#define ASPEED_PECI_EXPECTED_WR_FCS_MASK GENMASK(7, 0)
/* Captured FCS Data Register */
#define ASPEED_PECI_CAPTURED_FCS 0x14
#define ASPEED_PECI_CAPTURED_RD_FCS_MASK GENMASK(23, 16)
#define ASPEED_PECI_CAPTURED_WR_FCS_MASK GENMASK(7, 0)
/* Interrupt Register */
#define ASPEED_PECI_INT_CTRL 0x18
#define ASPEED_PECI_TIMING_NEGO_SEL_MASK GENMASK(31, 30)
#define ASPEED_PECI_1ST_BIT_OF_ADDR_NEGO 0
#define ASPEED_PECI_2ND_BIT_OF_ADDR_NEGO 1
#define ASPEED_PECI_MESSAGE_NEGO 2
#define ASPEED_PECI_INT_MASK GENMASK(4, 0)
#define ASPEED_PECI_INT_BUS_TIMEOUT BIT(4)
#define ASPEED_PECI_INT_BUS_CONTENTION BIT(3)
#define ASPEED_PECI_INT_WR_FCS_BAD BIT(2)
#define ASPEED_PECI_INT_WR_FCS_ABORT BIT(1)
#define ASPEED_PECI_INT_CMD_DONE BIT(0)
/* Interrupt Status Register */
#define ASPEED_PECI_INT_STS 0x1c
#define ASPEED_PECI_INT_TIMING_RESULT_MASK GENMASK(29, 16)
/* bits[4..0]: Same bit fields in the 'Interrupt Register' */
/*
 * Rx/Tx Data Buffer Registers
 * Two 16-byte banks each for write and read data; byte offset i maps to
 * bank 0 (DATA0..3) for i < 16 and bank 1 (DATA4..7) for i >= 16.
 */
#define ASPEED_PECI_WR_DATA0 0x20
#define ASPEED_PECI_WR_DATA1 0x24
#define ASPEED_PECI_WR_DATA2 0x28
#define ASPEED_PECI_WR_DATA3 0x2c
#define ASPEED_PECI_RD_DATA0 0x30
#define ASPEED_PECI_RD_DATA1 0x34
#define ASPEED_PECI_RD_DATA2 0x38
#define ASPEED_PECI_RD_DATA3 0x3c
#define ASPEED_PECI_WR_DATA4 0x40
#define ASPEED_PECI_WR_DATA5 0x44
#define ASPEED_PECI_WR_DATA6 0x48
#define ASPEED_PECI_WR_DATA7 0x4c
#define ASPEED_PECI_RD_DATA4 0x50
#define ASPEED_PECI_RD_DATA5 0x54
#define ASPEED_PECI_RD_DATA6 0x58
#define ASPEED_PECI_RD_DATA7 0x5c
#define ASPEED_PECI_DATA_BUF_SIZE_MAX 32
/* Timing Negotiation */
#define ASPEED_PECI_CLK_FREQUENCY_MIN 2000
#define ASPEED_PECI_CLK_FREQUENCY_DEFAULT 1000000
#define ASPEED_PECI_CLK_FREQUENCY_MAX 2000000
#define ASPEED_PECI_RD_SAMPLING_POINT_DEFAULT 8
/* Timeout */
#define ASPEED_PECI_IDLE_CHECK_TIMEOUT_US (50 * USEC_PER_MSEC)
#define ASPEED_PECI_IDLE_CHECK_INTERVAL_US (10 * USEC_PER_MSEC)
#define ASPEED_PECI_CMD_TIMEOUT_MS_DEFAULT 1000
#define ASPEED_PECI_CMD_TIMEOUT_MS_MAX 1000
/* Clock divider chain: total divide = 4 * (4 * msg_timing + 1) * 2^clk_div_exp */
#define ASPEED_PECI_CLK_DIV1(msg_timing) (4 * (msg_timing) + 1)
#define ASPEED_PECI_CLK_DIV2(clk_div_exp) BIT(clk_div_exp)
#define ASPEED_PECI_CLK_DIV(msg_timing, clk_div_exp) \
	(4 * ASPEED_PECI_CLK_DIV1(msg_timing) * ASPEED_PECI_CLK_DIV2(clk_div_exp))
/* Driver-private state for one ASPEED PECI controller instance. */
struct aspeed_peci {
	struct peci_controller *controller;	/* handle returned by the PECI core */
	struct device *dev;			/* underlying platform device */
	void __iomem *base;			/* mapped register window */
	struct reset_control *rst;		/* controller reset line */
	int irq;				/* controller interrupt number */
	spinlock_t lock; /* to sync completion status handling */
	struct completion xfer_complete;	/* completed by IRQ on CMD_DONE */
	struct clk *clk;			/* PECI bus clock (divider below) */
	u32 clk_frequency;			/* requested bus frequency, Hz */
	u32 status;				/* interrupt status bits seen for current xfer */
	u32 cmd_timeout_ms;			/* per-transfer completion timeout */
};
/* Wraps clk_hw so the divider clock callbacks can reach the controller state. */
struct clk_aspeed_peci {
	struct clk_hw hw;
	struct aspeed_peci *aspeed_peci;
};
  126. static void aspeed_peci_controller_enable(struct aspeed_peci *priv)
  127. {
  128. u32 val = readl(priv->base + ASPEED_PECI_CTRL);
  129. val |= ASPEED_PECI_CTRL_PECI_CLK_EN;
  130. val |= ASPEED_PECI_CTRL_PECI_EN;
  131. writel(val, priv->base + ASPEED_PECI_CTRL);
  132. }
  133. static void aspeed_peci_init_regs(struct aspeed_peci *priv)
  134. {
  135. u32 val;
  136. /* Clear interrupts */
  137. writel(ASPEED_PECI_INT_MASK, priv->base + ASPEED_PECI_INT_STS);
  138. /* Set timing negotiation mode and enable interrupts */
  139. val = FIELD_PREP(ASPEED_PECI_TIMING_NEGO_SEL_MASK, ASPEED_PECI_1ST_BIT_OF_ADDR_NEGO);
  140. val |= ASPEED_PECI_INT_MASK;
  141. writel(val, priv->base + ASPEED_PECI_INT_CTRL);
  142. val = FIELD_PREP(ASPEED_PECI_CTRL_SAMPLING_MASK, ASPEED_PECI_RD_SAMPLING_POINT_DEFAULT);
  143. writel(val, priv->base + ASPEED_PECI_CTRL);
  144. }
/*
 * Ensure the controller is idle before a new command is fired.
 *
 * Returns 0 when idle, -ETIMEDOUT if the controller never became idle, or a
 * negative error code from the reset/clock recovery path.
 */
static int aspeed_peci_check_idle(struct aspeed_peci *priv)
{
	u32 cmd_sts = readl(priv->base + ASPEED_PECI_CMD);
	int ret;

	/*
	 * Under normal circumstances, we expect to be idle here.
	 * In case there were any errors/timeouts that led to the situation
	 * where the hardware is not in idle state - we need to reset and
	 * reinitialize it to avoid potential controller hang.
	 */
	if (FIELD_GET(ASPEED_PECI_CMD_STS_MASK, cmd_sts)) {
		/* Full recovery: reset pulse, reprogram regs, restore clock, re-enable. */
		ret = reset_control_assert(priv->rst);
		if (ret) {
			dev_err(priv->dev, "cannot assert reset control\n");
			return ret;
		}

		ret = reset_control_deassert(priv->rst);
		if (ret) {
			dev_err(priv->dev, "cannot deassert reset control\n");
			return ret;
		}

		aspeed_peci_init_regs(priv);

		ret = clk_set_rate(priv->clk, priv->clk_frequency);
		if (ret < 0) {
			dev_err(priv->dev, "cannot set clock frequency\n");
			return ret;
		}

		aspeed_peci_controller_enable(priv);
	}

	/* Poll until both the status field and pin-monitoring bit clear. */
	return readl_poll_timeout(priv->base + ASPEED_PECI_CMD,
				  cmd_sts,
				  !(cmd_sts & ASPEED_PECI_CMD_IDLE_MASK),
				  ASPEED_PECI_IDLE_CHECK_INTERVAL_US,
				  ASPEED_PECI_IDLE_CHECK_TIMEOUT_US);
}
/*
 * Execute one PECI transaction: write the header and Tx payload, fire the
 * command, wait for the IRQ handler to signal completion, then read back
 * the Rx payload.
 *
 * Returns 0 on success, -EINVAL for oversized buffers, -ETIMEDOUT when no
 * response arrives in time, -EIO when the controller reported an error
 * status, or a negative value if the wait was interrupted.
 */
static int aspeed_peci_xfer(struct peci_controller *controller,
			    u8 addr, struct peci_request *req)
{
	struct aspeed_peci *priv = dev_get_drvdata(controller->dev.parent);
	unsigned long timeout = msecs_to_jiffies(priv->cmd_timeout_ms);
	u32 peci_head;
	int ret, i;

	/* Hardware data buffers are 32 bytes per direction. */
	if (req->tx.len > ASPEED_PECI_DATA_BUF_SIZE_MAX ||
	    req->rx.len > ASPEED_PECI_DATA_BUF_SIZE_MAX)
		return -EINVAL;

	/* Check command sts and bus idle state */
	ret = aspeed_peci_check_idle(priv);
	if (ret)
		return ret; /* -ETIMEDOUT */

	/* Lock out the IRQ handler while the command is being staged. */
	spin_lock_irq(&priv->lock);
	reinit_completion(&priv->xfer_complete);

	peci_head = FIELD_PREP(ASPEED_PECI_TARGET_ADDR_MASK, addr) |
		    FIELD_PREP(ASPEED_PECI_WR_LEN_MASK, req->tx.len) |
		    FIELD_PREP(ASPEED_PECI_RD_LEN_MASK, req->rx.len);

	writel(peci_head, priv->base + ASPEED_PECI_RW_LENGTH);

	/* Tx buffer is split into two 16-byte register banks (0x20 and 0x40). */
	for (i = 0; i < req->tx.len; i += 4) {
		u32 reg = (i < 16 ? ASPEED_PECI_WR_DATA0 : ASPEED_PECI_WR_DATA4) + i % 16;

		writel(get_unaligned_le32(&req->tx.buf[i]), priv->base + reg);
	}

#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
	dev_dbg(priv->dev, "HEAD : %#08x\n", peci_head);
	print_hex_dump_bytes("TX : ", DUMP_PREFIX_NONE, req->tx.buf, req->tx.len);
#endif

	/* Clear the accumulated status before firing; IRQ handler ORs into it. */
	priv->status = 0;
	writel(ASPEED_PECI_CMD_FIRE, priv->base + ASPEED_PECI_CMD);
	spin_unlock_irq(&priv->lock);

	ret = wait_for_completion_interruptible_timeout(&priv->xfer_complete, timeout);
	if (ret < 0)
		return ret;

	if (ret == 0) {
		dev_dbg(priv->dev, "timeout waiting for a response\n");
		return -ETIMEDOUT;
	}

	/* Only a clean CMD_DONE (no error bits) counts as a valid response. */
	spin_lock_irq(&priv->lock);

	if (priv->status != ASPEED_PECI_INT_CMD_DONE) {
		spin_unlock_irq(&priv->lock);
		dev_dbg(priv->dev, "no valid response, status: %#02x\n", priv->status);
		return -EIO;
	}

	spin_unlock_irq(&priv->lock);

	/*
	 * We need to use dword reads for register access, make sure that the
	 * buffer size is multiple of 4-bytes.
	 */
	BUILD_BUG_ON(PECI_REQUEST_MAX_BUF_SIZE % 4);

	/* Rx buffer banks mirror the Tx layout (0x30 and 0x50). */
	for (i = 0; i < req->rx.len; i += 4) {
		u32 reg = (i < 16 ? ASPEED_PECI_RD_DATA0 : ASPEED_PECI_RD_DATA4) + i % 16;
		u32 rx_data = readl(priv->base + reg);

		put_unaligned_le32(rx_data, &req->rx.buf[i]);
	}

#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
	print_hex_dump_bytes("RX : ", DUMP_PREFIX_NONE, req->rx.buf, req->rx.len);
#endif

	return 0;
}
  240. static irqreturn_t aspeed_peci_irq_handler(int irq, void *arg)
  241. {
  242. struct aspeed_peci *priv = arg;
  243. u32 status;
  244. spin_lock(&priv->lock);
  245. status = readl(priv->base + ASPEED_PECI_INT_STS);
  246. writel(status, priv->base + ASPEED_PECI_INT_STS);
  247. priv->status |= (status & ASPEED_PECI_INT_MASK);
  248. /*
  249. * All commands should be ended up with a ASPEED_PECI_INT_CMD_DONE bit
  250. * set even in an error case.
  251. */
  252. if (status & ASPEED_PECI_INT_CMD_DONE)
  253. complete(&priv->xfer_complete);
  254. writel(0, priv->base + ASPEED_PECI_CMD);
  255. spin_unlock(&priv->lock);
  256. return IRQ_HANDLED;
  257. }
  258. static void clk_aspeed_peci_find_div_values(unsigned long rate, int *msg_timing, int *clk_div_exp)
  259. {
  260. unsigned long best_diff = ~0ul, diff;
  261. int msg_timing_temp, clk_div_exp_temp, i, j;
  262. for (i = 1; i <= 255; i++)
  263. for (j = 0; j < 8; j++) {
  264. diff = abs(rate - ASPEED_PECI_CLK_DIV1(i) * ASPEED_PECI_CLK_DIV2(j));
  265. if (diff < best_diff) {
  266. msg_timing_temp = i;
  267. clk_div_exp_temp = j;
  268. best_diff = diff;
  269. }
  270. }
  271. *msg_timing = msg_timing_temp;
  272. *clk_div_exp = clk_div_exp_temp;
  273. }
  274. static int clk_aspeed_peci_get_div(unsigned long rate, const unsigned long *prate)
  275. {
  276. unsigned long this_rate = *prate / (4 * rate);
  277. int msg_timing, clk_div_exp;
  278. clk_aspeed_peci_find_div_values(this_rate, &msg_timing, &clk_div_exp);
  279. return ASPEED_PECI_CLK_DIV(msg_timing, clk_div_exp);
  280. }
  281. static int clk_aspeed_peci_set_rate(struct clk_hw *hw, unsigned long rate,
  282. unsigned long prate)
  283. {
  284. struct clk_aspeed_peci *peci_clk = container_of(hw, struct clk_aspeed_peci, hw);
  285. struct aspeed_peci *aspeed_peci = peci_clk->aspeed_peci;
  286. unsigned long this_rate = prate / (4 * rate);
  287. int clk_div_exp, msg_timing;
  288. u32 val;
  289. clk_aspeed_peci_find_div_values(this_rate, &msg_timing, &clk_div_exp);
  290. val = readl(aspeed_peci->base + ASPEED_PECI_CTRL);
  291. val |= FIELD_PREP(ASPEED_PECI_CTRL_CLK_DIV_MASK, clk_div_exp);
  292. writel(val, aspeed_peci->base + ASPEED_PECI_CTRL);
  293. val = FIELD_PREP(ASPEED_PECI_T_NEGO_MSG_MASK, msg_timing);
  294. val |= FIELD_PREP(ASPEED_PECI_T_NEGO_ADDR_MASK, msg_timing);
  295. writel(val, aspeed_peci->base + ASPEED_PECI_TIMING_NEGOTIATION);
  296. return 0;
  297. }
  298. static long clk_aspeed_peci_round_rate(struct clk_hw *hw, unsigned long rate,
  299. unsigned long *prate)
  300. {
  301. int div = clk_aspeed_peci_get_div(rate, prate);
  302. return DIV_ROUND_UP_ULL(*prate, div);
  303. }
  304. static unsigned long clk_aspeed_peci_recalc_rate(struct clk_hw *hw, unsigned long prate)
  305. {
  306. struct clk_aspeed_peci *peci_clk = container_of(hw, struct clk_aspeed_peci, hw);
  307. struct aspeed_peci *aspeed_peci = peci_clk->aspeed_peci;
  308. int div, msg_timing, addr_timing, clk_div_exp;
  309. u32 reg;
  310. reg = readl(aspeed_peci->base + ASPEED_PECI_TIMING_NEGOTIATION);
  311. msg_timing = FIELD_GET(ASPEED_PECI_T_NEGO_MSG_MASK, reg);
  312. addr_timing = FIELD_GET(ASPEED_PECI_T_NEGO_ADDR_MASK, reg);
  313. if (msg_timing != addr_timing)
  314. return 0;
  315. reg = readl(aspeed_peci->base + ASPEED_PECI_CTRL);
  316. clk_div_exp = FIELD_GET(ASPEED_PECI_CTRL_CLK_DIV_MASK, reg);
  317. div = ASPEED_PECI_CLK_DIV(msg_timing, clk_div_exp);
  318. return DIV_ROUND_UP_ULL(prate, div);
  319. }
/* Divider clock callbacks; the parent is the external reference clock. */
static const struct clk_ops clk_aspeed_peci_ops = {
	.set_rate = clk_aspeed_peci_set_rate,
	.round_rate = clk_aspeed_peci_round_rate,
	.recalc_rate = clk_aspeed_peci_recalc_rate,
};
  325. /*
  326. * PECI HW contains a clock divider which is a combination of:
  327. * div0: 4 (fixed divider)
  328. * div1: x + 1
  329. * div2: 1 << y
  330. * In other words, out_clk = in_clk / (div0 * div1 * div2)
  331. * The resulting frequency is used by PECI Controller to drive the PECI bus to
  332. * negotiate optimal transfer rate.
  333. */
  334. static struct clk *devm_aspeed_peci_register_clk_div(struct device *dev, struct clk *parent,
  335. struct aspeed_peci *priv)
  336. {
  337. struct clk_aspeed_peci *peci_clk;
  338. struct clk_init_data init;
  339. const char *parent_name;
  340. char name[32];
  341. int ret;
  342. snprintf(name, sizeof(name), "%s_div", dev_name(dev));
  343. parent_name = __clk_get_name(parent);
  344. init.ops = &clk_aspeed_peci_ops;
  345. init.name = name;
  346. init.parent_names = (const char* []) { parent_name };
  347. init.num_parents = 1;
  348. init.flags = 0;
  349. peci_clk = devm_kzalloc(dev, sizeof(struct clk_aspeed_peci), GFP_KERNEL);
  350. if (!peci_clk)
  351. return ERR_PTR(-ENOMEM);
  352. peci_clk->hw.init = &init;
  353. peci_clk->aspeed_peci = priv;
  354. ret = devm_clk_hw_register(dev, &peci_clk->hw);
  355. if (ret)
  356. return ERR_PTR(ret);
  357. return peci_clk->hw.clk;
  358. }
  359. static void aspeed_peci_property_sanitize(struct device *dev, const char *propname,
  360. u32 min, u32 max, u32 default_val, u32 *propval)
  361. {
  362. u32 val;
  363. int ret;
  364. ret = device_property_read_u32(dev, propname, &val);
  365. if (ret) {
  366. val = default_val;
  367. } else if (val > max || val < min) {
  368. dev_warn(dev, "invalid %s: %u, falling back to: %u\n",
  369. propname, val, default_val);
  370. val = default_val;
  371. }
  372. *propval = val;
  373. }
  374. static void aspeed_peci_property_setup(struct aspeed_peci *priv)
  375. {
  376. aspeed_peci_property_sanitize(priv->dev, "clock-frequency",
  377. ASPEED_PECI_CLK_FREQUENCY_MIN, ASPEED_PECI_CLK_FREQUENCY_MAX,
  378. ASPEED_PECI_CLK_FREQUENCY_DEFAULT, &priv->clk_frequency);
  379. aspeed_peci_property_sanitize(priv->dev, "cmd-timeout-ms",
  380. 1, ASPEED_PECI_CMD_TIMEOUT_MS_MAX,
  381. ASPEED_PECI_CMD_TIMEOUT_MS_DEFAULT, &priv->cmd_timeout_ms);
  382. }
  383. static struct peci_controller_ops aspeed_ops = {
  384. .xfer = aspeed_peci_xfer,
  385. };
  386. static void aspeed_peci_reset_control_release(void *data)
  387. {
  388. reset_control_assert(data);
  389. }
  390. static int devm_aspeed_peci_reset_control_deassert(struct device *dev, struct reset_control *rst)
  391. {
  392. int ret;
  393. ret = reset_control_deassert(rst);
  394. if (ret)
  395. return ret;
  396. return devm_add_action_or_reset(dev, aspeed_peci_reset_control_release, rst);
  397. }
  398. static void aspeed_peci_clk_release(void *data)
  399. {
  400. clk_disable_unprepare(data);
  401. }
  402. static int devm_aspeed_peci_clk_enable(struct device *dev, struct clk *clk)
  403. {
  404. int ret;
  405. ret = clk_prepare_enable(clk);
  406. if (ret)
  407. return ret;
  408. return devm_add_action_or_reset(dev, aspeed_peci_clk_release, clk);
  409. }
/*
 * Probe: map registers, hook the IRQ, take the controller out of reset,
 * set up the divider clock at the requested frequency, enable the
 * controller and register it with the PECI core.  All resources are
 * device-managed, so there is no remove callback.
 */
static int aspeed_peci_probe(struct platform_device *pdev)
{
	struct peci_controller *controller;
	struct aspeed_peci *priv;
	struct clk *ref_clk;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = &pdev->dev;
	dev_set_drvdata(priv->dev, priv);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq < 0)
		return priv->irq;

	/* Completion and lock must be ready before the IRQ can fire. */
	ret = devm_request_irq(&pdev->dev, priv->irq, aspeed_peci_irq_handler,
			       0, "peci-aspeed", priv);
	if (ret)
		return ret;

	init_completion(&priv->xfer_complete);
	spin_lock_init(&priv->lock);

	priv->rst = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(priv->rst))
		return dev_err_probe(priv->dev, PTR_ERR(priv->rst),
				     "failed to get reset control\n");

	ret = devm_aspeed_peci_reset_control_deassert(priv->dev, priv->rst);
	if (ret)
		return dev_err_probe(priv->dev, ret, "cannot deassert reset control\n");

	/* Properties must be read before the clock rate is programmed below. */
	aspeed_peci_property_setup(priv);

	aspeed_peci_init_regs(priv);

	ref_clk = devm_clk_get(priv->dev, NULL);
	if (IS_ERR(ref_clk))
		return dev_err_probe(priv->dev, PTR_ERR(ref_clk), "failed to get ref clock\n");

	priv->clk = devm_aspeed_peci_register_clk_div(priv->dev, ref_clk, priv);
	if (IS_ERR(priv->clk))
		return dev_err_probe(priv->dev, PTR_ERR(priv->clk), "cannot register clock\n");

	ret = clk_set_rate(priv->clk, priv->clk_frequency);
	if (ret < 0)
		return dev_err_probe(priv->dev, ret, "cannot set clock frequency\n");

	ret = devm_aspeed_peci_clk_enable(priv->dev, priv->clk);
	if (ret)
		return dev_err_probe(priv->dev, ret, "failed to enable clock\n");

	aspeed_peci_controller_enable(priv);

	/* Registering with the core makes the controller usable immediately. */
	controller = devm_peci_controller_add(priv->dev, &aspeed_ops);
	if (IS_ERR(controller))
		return dev_err_probe(priv->dev, PTR_ERR(controller),
				     "failed to add aspeed peci controller\n");

	priv->controller = controller;

	return 0;
}
/* Supported SoC generations; all use the same register layout in this driver. */
static const struct of_device_id aspeed_peci_of_table[] = {
	{ .compatible = "aspeed,ast2400-peci", },
	{ .compatible = "aspeed,ast2500-peci", },
	{ .compatible = "aspeed,ast2600-peci", },
	{ }
};
MODULE_DEVICE_TABLE(of, aspeed_peci_of_table);
/* No remove callback needed: every resource in probe is device-managed. */
static struct platform_driver aspeed_peci_driver = {
	.probe = aspeed_peci_probe,
	.driver = {
		.name = "peci-aspeed",
		.of_match_table = aspeed_peci_of_table,
	},
};
module_platform_driver(aspeed_peci_driver);

MODULE_AUTHOR("Ryan Chen <[email protected]>");
MODULE_AUTHOR("Jae Hyun Yoo <[email protected]>");
MODULE_DESCRIPTION("ASPEED PECI driver");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(PECI);