dp_mst_sim_helper.c

/*
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission. The copyright holders make no representations
 * about the suitability of this software for any purpose. It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */

#include <linux/types.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <drm/drm_fixed.h>
#include <drm/drm_edid.h>
#include <drm/drm_dp_mst_helper.h>

#include "dp_mst_sim_helper.h"
#include "dp_debug.h"

#define DP_MST_DEBUG(fmt, ...) DP_DEBUG(fmt, ##__VA_ARGS__)
#define DP_MST_INFO(fmt, ...) DP_DEBUG(fmt, ##__VA_ARGS__)

#define DDC_SEGMENT_ADDR 0x30
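
/*
 * Simulator state for one emulated MST branch device. host_hpd_irq() and
 * host_req() are callbacks into the host driver, down_req/down_rep hold the
 * sideband request/reply currently being assembled, esi[] mirrors the
 * DP_SINK_COUNT_ESI region, and dpcd[] backs the sideband message DPCD
 * windows exposed through dp_mst_sim_transfer().
 */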
struct dp_mst_sim_context {
	void *host_dev;
	void (*host_hpd_irq)(void *host_dev);
	void (*host_req)(void *host_dev, const u8 *in, int in_size,
			u8 *out, int *out_size);

	struct dp_mst_sim_port *ports;
	u32 port_num;

	struct drm_dp_sideband_msg_rx down_req;
	struct drm_dp_sideband_msg_rx down_rep;

	struct mutex session_lock;
	struct completion session_comp;
	struct workqueue_struct *wq;
	int reset_cnt;

	u8 esi[16];
	u8 guid[16];
	u8 dpcd[1024];
};

struct dp_mst_sim_work {
	struct work_struct base;
	struct dp_mst_sim_context *ctx;
	unsigned int address;
	u8 buffer[256];
	size_t size;
};

struct dp_mst_notify_work {
	struct work_struct base;
	struct dp_mst_sim_context *ctx;
	u32 port_mask;
};

#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
static void dp_sideband_hex_dump(const char *name,
		u32 address, u8 *buffer, size_t size)
{
	char prefix[64];
	int i, linelen, remaining = size;
	const int rowsize = 16;
	u8 linebuf[64];

	snprintf(prefix, sizeof(prefix), "%s(%d) %4xh(%2zu): ",
			name, current->pid, address, size);

	for (i = 0; i < size; i += rowsize) {
		linelen = min(remaining, rowsize);
		remaining -= rowsize;
		hex_dump_to_buffer(buffer + i, linelen, rowsize, 1,
				linebuf, sizeof(linebuf), false);
		DP_MST_DEBUG("%s%s\n", prefix, linebuf);
	}
}
#else
static void dp_sideband_hex_dump(const char *name,
		u32 address, u8 *buffer, size_t size)
{
}
#endif /* CONFIG_DYNAMIC_DEBUG */
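
/*
 * Sideband message checksums as used by DP MST: a 4-bit CRC over the header
 * nibbles and an 8-bit CRC over the message body. These mirror the helpers
 * used by the DRM MST topology manager (hence the Red Hat copyright above),
 * so the simulated replies pass the host's checksum verification.
 */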
static u8 dp_mst_sim_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}

static u8 dp_mst_sim_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = number_of_bytes * 8;
	u16 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}
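
/*
 * Decode/encode of the sideband message header. Byte 0 carries LCT/LCR,
 * followed by LCT/2 RAD bytes, one byte with broadcast/path_msg and the
 * 6-bit body length, and a final byte with SOMT/EOMT/seqno plus the header
 * CRC-4 in its low nibble.
 */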
static bool dp_mst_sim_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
		u8 *buf, int buflen, u8 *hdrlen)
{
	u8 crc4;
	u8 len;
	int i;
	u8 idx;

	if (buf[0] == 0)
		return false;

	len = 3;
	len += ((buf[0] & 0xf0) >> 4) / 2;
	if (len > buflen)
		return false;

	crc4 = dp_mst_sim_msg_header_crc4(buf, (len * 2) - 1);
	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		DP_MST_DEBUG("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	idx = 1;
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}

static bool dp_mst_sim_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
		u8 *replybuf, u8 replybuflen, bool hdr)
{
	int ret;
	u8 crc4;

	if (hdr) {
		u8 hdrlen;
		struct drm_dp_sideband_msg_hdr recv_hdr;

		ret = dp_mst_sim_decode_sideband_msg_hdr(&recv_hdr,
				replybuf, replybuflen, &hdrlen);
		if (ret == false)
			return false;

		/*
		 * ignore out-of-order messages or messages that are part of a
		 * failed transaction
		 */
		if (!recv_hdr.somt && !msg->have_somt)
			return false;

		/* get length contained in this portion */
		msg->curchunk_len = recv_hdr.msg_len;
		msg->curchunk_hdrlen = hdrlen;

		/* we have already gotten an somt - don't bother parsing */
		if (recv_hdr.somt && msg->have_somt)
			return false;

		if (recv_hdr.somt) {
			memcpy(&msg->initial_hdr, &recv_hdr,
					sizeof(struct drm_dp_sideband_msg_hdr));
			msg->have_somt = true;
		}
		if (recv_hdr.eomt)
			msg->have_eomt = true;

		/* copy the bytes for the remainder of this header chunk */
		msg->curchunk_idx = min(msg->curchunk_len,
				(u8)(replybuflen - hdrlen));
		memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
	} else {
		memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
		msg->curchunk_idx += replybuflen;
	}

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* do CRC */
		crc4 = dp_mst_sim_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk,
				msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}

	return true;
}

static void dp_mst_sim_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
		u8 *buf, int *len)
{
	int idx = 0;
	int i;
	u8 crc4;

	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
			(hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = dp_mst_sim_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);
	*len = idx;
}

static bool dp_get_one_sb_msg(struct drm_dp_sideband_msg_rx *msg,
		struct drm_dp_aux_msg *aux_msg)
{
	int ret;

	if (!msg->have_somt) {
		ret = dp_mst_sim_sideband_msg_build(msg,
				aux_msg->buffer, aux_msg->size, true);
		if (!ret) {
			DP_ERR("sideband hdr build failed\n");
			return false;
		}
	} else {
		ret = dp_mst_sim_sideband_msg_build(msg,
				aux_msg->buffer, aux_msg->size, false);
		if (!ret) {
			DP_ERR("sideband msg build failed\n");
			return false;
		}
	}

	return true;
}
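
/*
 * The reply builders below fill ctx->down_rep.msg with the body of a
 * sideband reply and return its length in bytes. dp_sideband_build_nak_rep()
 * is the common failure path: it echoes the request type with the NAK bit
 * (0x80) set, followed by the branch GUID, a NAK reason byte and NAK data.
 */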
static int dp_sideband_build_nak_rep(
		struct dp_mst_sim_context *ctx)
{
	struct drm_dp_sideband_msg_rx *msg = &ctx->down_req;
	u8 *buf = ctx->down_rep.msg;
	int idx = 0;

	buf[idx] = msg->msg[0] | 0x80;
	idx++;
	memcpy(&buf[idx], ctx->guid, 16);
	idx += 16;
	buf[idx] = 0x4;
	idx++;
	buf[idx] = 0;
	idx++;
	return idx;
}
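
/*
 * LINK_ADDRESS reply: branch GUID, the number of simulated ports, then one
 * descriptor per port (peer device type, input/output, MCS/DDPS/LDPS flags,
 * and for output ports the DPCD revision, peer GUID and SDP stream counts).
 */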
static int dp_sideband_build_link_address_rep(
		struct dp_mst_sim_context *ctx)
{
	struct dp_mst_sim_port *port;
	u8 *buf = ctx->down_rep.msg;
	int idx = 0;
	u32 i, tmp;

	buf[idx] = DP_LINK_ADDRESS;
	idx++;
	memcpy(&buf[idx], ctx->guid, 16);
	idx += 16;
	buf[idx] = ctx->port_num;
	idx++;

	for (i = 0; i < ctx->port_num; i++) {
		port = &ctx->ports[i];
		tmp = 0;
		if (port->input)
			tmp |= 0x80;
		tmp |= port->pdt << 4;
		tmp |= i & 0xF;
		buf[idx] = tmp;
		idx++;
		tmp = 0;
		if (port->mcs)
			tmp |= 0x80;
		if (port->ddps)
			tmp |= 0x40;
		if (port->input) {
			buf[idx] = tmp;
			idx++;
			continue;
		}
		if (port->ldps)
			tmp |= 0x20;
		buf[idx] = tmp;
		idx++;
		buf[idx] = port->dpcd_rev;
		idx++;
		memcpy(&buf[idx], port->peer_guid, 16);
		idx += 16;
		buf[idx] = (port->num_sdp_streams << 4) |
				(port->num_sdp_stream_sinks);
		idx++;
	}

	return idx;
}
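
/*
 * REMOTE_I2C_READ is only used here to serve EDID reads from the per-port
 * EDID buffer, so the simulator accepts just the two standard DDC patterns:
 * a single offset write to DDC_ADDR, or a segment-pointer write to
 * DDC_SEGMENT_ADDR followed by the offset write. Anything else is NAKed.
 */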
static int dp_sideband_build_remote_i2c_read_rep(
		struct dp_mst_sim_context *ctx)
{
	struct dp_mst_sim_port *port;
	struct drm_dp_remote_i2c_read i2c_read;
	u8 *buf;
	int idx;
	u32 i, start, len;

	buf = ctx->down_req.msg;
	idx = 1;
	i2c_read.num_transactions = buf[idx] & 0x3;
	i2c_read.port_number = buf[idx] >> 4;
	idx++;
	if (i2c_read.port_number >= ctx->port_num)
		goto err;

	for (i = 0; i < i2c_read.num_transactions; i++) {
		i2c_read.transactions[i].i2c_dev_id = buf[idx] & 0x7f;
		idx++;
		i2c_read.transactions[i].num_bytes = buf[idx];
		idx++;
		i2c_read.transactions[i].bytes = &buf[idx];
		idx += i2c_read.transactions[i].num_bytes;
		i2c_read.transactions[i].no_stop_bit = (buf[idx] >> 4) & 0x1;
		i2c_read.transactions[i].i2c_transaction_delay = buf[idx] & 0xf;
		idx++;
	}

	i2c_read.read_i2c_device_id = buf[idx];
	idx++;
	i2c_read.num_bytes_read = buf[idx];
	idx++;

	port = &ctx->ports[i2c_read.port_number];

	if (i2c_read.num_transactions == 1) {
		if (i2c_read.transactions[0].i2c_dev_id != DDC_ADDR ||
				i2c_read.transactions[0].num_bytes != 1) {
			DP_ERR("unsupported i2c address\n");
			goto err;
		}
		start = i2c_read.transactions[0].bytes[0];
	} else if (i2c_read.num_transactions == 2) {
		if (i2c_read.transactions[0].i2c_dev_id != DDC_SEGMENT_ADDR ||
				i2c_read.transactions[0].num_bytes != 1 ||
				i2c_read.transactions[1].i2c_dev_id != DDC_ADDR ||
				i2c_read.transactions[1].num_bytes != 1) {
			DP_ERR("unsupported i2c address\n");
			goto err;
		}
		start = i2c_read.transactions[0].bytes[0] * EDID_LENGTH * 2 +
				i2c_read.transactions[1].bytes[0];
	} else {
		DP_ERR("unsupported i2c transaction\n");
		goto err;
	}

	len = i2c_read.num_bytes_read;
	if (start + len > port->edid_size) {
		DP_ERR("edid data exceeds maximum\n");
		goto err;
	}

	buf = ctx->down_rep.msg;
	idx = 0;
	buf[idx] = DP_REMOTE_I2C_READ;
	idx++;
	buf[idx] = i2c_read.port_number;
	idx++;
	buf[idx] = len;
	idx++;
	memcpy(&buf[idx], &port->edid[start], len);
	idx += len;
	return idx;

err:
	return dp_sideband_build_nak_rep(ctx);
}

static int dp_sideband_build_enum_path_resources_rep(
		struct dp_mst_sim_context *ctx)
{
	struct dp_mst_sim_port *port;
	u8 port_num;
	u8 *buf;
	int idx;

	buf = ctx->down_req.msg;
	port_num = buf[1] >> 4;
	if (port_num >= ctx->port_num) {
		DP_ERR("invalid port num\n");
		goto err;
	}
	port = &ctx->ports[port_num];

	buf = ctx->down_rep.msg;
	idx = 0;
	buf[idx] = DP_ENUM_PATH_RESOURCES;
	idx++;
	buf[idx] = port_num << 4;
	idx++;
	buf[idx] = port->full_pbn >> 8;
	idx++;
	buf[idx] = port->full_pbn & 0xFF;
	idx++;
	buf[idx] = port->avail_pbn >> 8;
	idx++;
	buf[idx] = port->avail_pbn & 0xFF;
	idx++;
	return idx;

err:
	return dp_sideband_build_nak_rep(ctx);
}

static int dp_sideband_build_allocate_payload_rep(
		struct dp_mst_sim_context *ctx)
{
	struct drm_dp_allocate_payload allocate_payload;
	u8 *buf;
	int idx;
	u32 i;

	buf = ctx->down_req.msg;
	idx = 1;
	allocate_payload.port_number = buf[idx] >> 4;
	allocate_payload.number_sdp_streams = buf[idx] & 0xF;
	idx++;
	allocate_payload.vcpi = buf[idx];
	idx++;
	allocate_payload.pbn = (buf[idx] << 8) | buf[idx+1];
	idx += 2;
	for (i = 0; i < allocate_payload.number_sdp_streams / 2; i++) {
		allocate_payload.sdp_stream_sink[i * 2] = buf[idx] >> 4;
		allocate_payload.sdp_stream_sink[i * 2 + 1] = buf[idx] & 0xf;
		idx++;
	}
	if (allocate_payload.number_sdp_streams & 1) {
		i = allocate_payload.number_sdp_streams - 1;
		allocate_payload.sdp_stream_sink[i] = buf[idx] >> 4;
		idx++;
	}

	if (allocate_payload.port_number >= ctx->port_num) {
		DP_ERR("invalid port num\n");
		goto err;
	}

	buf = ctx->down_rep.msg;
	idx = 0;
	buf[idx] = DP_ALLOCATE_PAYLOAD;
	idx++;
	buf[idx] = allocate_payload.port_number;
	idx++;
	buf[idx] = allocate_payload.vcpi;
	idx++;
	buf[idx] = allocate_payload.pbn >> 8;
	idx++;
	buf[idx] = allocate_payload.pbn & 0xFF;
	idx++;
	return idx;

err:
	return dp_sideband_build_nak_rep(ctx);
}

static int dp_sideband_build_power_updown_phy_rep(
		struct dp_mst_sim_context *ctx)
{
	u8 port_num;
	u8 *buf;
	int idx;

	buf = ctx->down_req.msg;
	port_num = buf[1] >> 4;
	if (port_num >= ctx->port_num) {
		DP_ERR("invalid port num\n");
		goto err;
	}

	buf = ctx->down_rep.msg;
	idx = 0;
	buf[idx] = ctx->down_req.msg[0];
	idx++;
	buf[idx] = port_num;
	idx++;
	return idx;

err:
	return dp_sideband_build_nak_rep(ctx);
}

static int dp_sideband_build_clear_payload_id_table_rep(
		struct dp_mst_sim_context *ctx)
{
	u8 *buf = ctx->down_rep.msg;
	int idx = 0;

	buf[idx] = DP_CLEAR_PAYLOAD_ID_TABLE;
	idx++;
	return idx;
}

static int dp_sideband_build_connection_notify_req(
		struct dp_mst_sim_context *ctx, int port_idx)
{
	struct dp_mst_sim_port *port = &ctx->ports[port_idx];
	u8 *buf = ctx->down_rep.msg;
	int idx = 0;

	buf[idx] = DP_CONNECTION_STATUS_NOTIFY;
	idx++;
	buf[idx] = port_idx << 4;
	idx++;
	memcpy(&buf[idx], &port->peer_guid, 16);
	idx += 16;
	buf[idx] = (port->ldps << 6) |
			(port->ddps << 5) |
			(port->mcs << 4) |
			(port->input << 3) |
			(port->pdt & 0x7);
	idx++;
	return idx;
}

static inline int dp_sideband_update_esi(
		struct dp_mst_sim_context *ctx, u8 val)
{
	ctx->esi[0] = ctx->port_num;
	ctx->esi[1] = val;
	ctx->esi[2] = 0;
	return 0;
}

static inline bool dp_sideband_pending_esi(
		struct dp_mst_sim_context *ctx, u8 val)
{
	return !!(ctx->esi[1] & val);
}

static int dp_mst_sim_clear_esi(struct dp_mst_sim_context *ctx,
		struct drm_dp_aux_msg *msg)
{
	size_t i;
	u8 old_esi = ctx->esi[1];
	u32 addr = msg->address - DP_SINK_COUNT_ESI;

	if (msg->size - addr >= 16) {
		msg->reply = DP_AUX_NATIVE_REPLY_NACK;
		return 0;
	}

	mutex_lock(&ctx->session_lock);
	for (i = 0; i < msg->size; i++)
		ctx->esi[addr + i] &= ~((u8 *)msg->buffer)[i];
	if (old_esi != ctx->esi[1])
		complete(&ctx->session_comp);
	mutex_unlock(&ctx->session_lock);

	msg->reply = DP_AUX_NATIVE_REPLY_ACK;
	return 0;
}

static int dp_mst_sim_read_esi(struct dp_mst_sim_context *ctx,
		struct drm_dp_aux_msg *msg)
{
	u32 addr = msg->address - DP_SINK_COUNT_ESI;

	if (msg->size - addr >= 16) {
		msg->reply = DP_AUX_NATIVE_REPLY_NACK;
		return 0;
	}

	memcpy(msg->buffer, &ctx->esi[addr], msg->size);
	msg->reply = DP_AUX_NATIVE_REPLY_ACK;
	return 0;
}
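
/*
 * Core of the simulator: once a complete down request has been reassembled,
 * build the matching reply (optionally letting the host rewrite it through
 * the host_req() hook), then stream it back 44 bytes at a time through the
 * DOWN_REP DPCD window in ctx->dpcd[] - header at offset 0, payload from
 * offset 3, data CRC appended - raising DP_DOWN_REP_MSG_RDY in the ESI plus
 * the host HPD IRQ for each chunk, and waiting until the host clears the
 * ESI before sending the next one.
 */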
static int dp_mst_sim_down_req_internal(struct dp_mst_sim_context *ctx,
		struct drm_dp_aux_msg *aux_msg)
{
	struct drm_dp_sideband_msg_rx *msg = &ctx->down_req;
	struct drm_dp_sideband_msg_hdr hdr;
	bool seqno;
	int ret, size, len, hdr_len;

	ret = dp_get_one_sb_msg(msg, aux_msg);
	if (!ret)
		return -EINVAL;

	if (!msg->have_eomt)
		return 0;

	seqno = msg->initial_hdr.seqno;

	switch (msg->msg[0]) {
	case DP_LINK_ADDRESS:
		size = dp_sideband_build_link_address_rep(ctx);
		break;
	case DP_REMOTE_I2C_READ:
		size = dp_sideband_build_remote_i2c_read_rep(ctx);
		break;
	case DP_ENUM_PATH_RESOURCES:
		size = dp_sideband_build_enum_path_resources_rep(ctx);
		break;
	case DP_ALLOCATE_PAYLOAD:
		size = dp_sideband_build_allocate_payload_rep(ctx);
		break;
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		size = dp_sideband_build_power_updown_phy_rep(ctx);
		break;
	case DP_CLEAR_PAYLOAD_ID_TABLE:
		size = dp_sideband_build_clear_payload_id_table_rep(ctx);
		break;
	default:
		size = dp_sideband_build_nak_rep(ctx);
		break;
	}

	if (ctx->host_req)
		ctx->host_req(ctx->host_dev,
				ctx->down_req.msg, ctx->down_req.curlen,
				ctx->down_rep.msg, &size);

	memset(msg, 0, sizeof(*msg));

	msg = &ctx->down_rep;
	msg->curlen = 0;

	mutex_lock(&ctx->session_lock);

	while (msg->curlen < size) {
		if (ctx->reset_cnt)
			break;

		/* copy data */
		len = min(size - msg->curlen, 44);
		memcpy(&ctx->dpcd[3], &msg->msg[msg->curlen], len);
		msg->curlen += len;

		/* build header */
		memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
		hdr.broadcast = 0;
		hdr.path_msg = 0;
		hdr.lct = 1;
		hdr.lcr = 0;
		hdr.seqno = seqno;
		hdr.msg_len = len + 1;
		hdr.eomt = (msg->curlen == size);
		hdr.somt = (msg->curlen == len);
		dp_mst_sim_encode_sideband_msg_hdr(&hdr, ctx->dpcd, &hdr_len);

		/* build crc */
		ctx->dpcd[len + 3] = dp_mst_sim_msg_data_crc4(&ctx->dpcd[3], len);

		/* update esi */
		dp_sideband_update_esi(ctx, DP_DOWN_REP_MSG_RDY);

		/* notify host */
		mutex_unlock(&ctx->session_lock);
		ctx->host_hpd_irq(ctx->host_dev);
		mutex_lock(&ctx->session_lock);

		/* wait until esi is cleared */
		while (dp_sideband_pending_esi(ctx, DP_DOWN_REP_MSG_RDY)) {
			if (ctx->reset_cnt)
				break;
			mutex_unlock(&ctx->session_lock);
			wait_for_completion(&ctx->session_comp);
			mutex_lock(&ctx->session_lock);
		}
	}

	mutex_unlock(&ctx->session_lock);

	return 0;
}

static void dp_mst_sim_down_req_work(struct work_struct *work)
{
	struct dp_mst_sim_work *sim_work =
		container_of(work, struct dp_mst_sim_work, base);
	struct drm_dp_aux_msg msg;

	msg.address = sim_work->address;
	msg.buffer = sim_work->buffer;
	msg.size = sim_work->size;

	dp_mst_sim_down_req_internal(sim_work->ctx, &msg);
	kfree(sim_work);
}

static int dp_mst_sim_down_req(struct dp_mst_sim_context *ctx,
		struct drm_dp_aux_msg *aux_msg)
{
	struct dp_mst_sim_work *work;

	if (aux_msg->size >= 256) {
		aux_msg->reply = DP_AUX_NATIVE_REPLY_NACK;
		return 0;
	}

	dp_sideband_hex_dump("request",
			aux_msg->address, aux_msg->buffer, aux_msg->size);

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		aux_msg->reply = DP_AUX_NATIVE_REPLY_NACK;
		return 0;
	}

	work->ctx = ctx;
	work->address = aux_msg->address;
	work->size = aux_msg->size;
	memcpy(work->buffer, aux_msg->buffer, aux_msg->size);
	INIT_WORK(&work->base, dp_mst_sim_down_req_work);
	queue_work(ctx->wq, &work->base);

	aux_msg->reply = DP_AUX_NATIVE_REPLY_ACK;
	return 0;
}

static int dp_mst_sim_down_rep(struct dp_mst_sim_context *ctx,
		struct drm_dp_aux_msg *msg)
{
	u32 addr = msg->address - DP_SIDEBAND_MSG_DOWN_REP_BASE;

	memcpy(msg->buffer, &ctx->dpcd[addr], msg->size);
	msg->reply = DP_AUX_NATIVE_REPLY_ACK;

	dp_sideband_hex_dump("reply",
			addr, msg->buffer, msg->size);
	return 0;
}

static int dp_mst_sim_up_req(struct dp_mst_sim_context *ctx,
		struct drm_dp_aux_msg *msg)
{
	u32 addr = msg->address - DP_SIDEBAND_MSG_UP_REQ_BASE;

	memcpy(msg->buffer, &ctx->dpcd[addr], msg->size);
	msg->reply = DP_AUX_NATIVE_REPLY_ACK;

	dp_sideband_hex_dump("up_req",
			addr, msg->buffer, msg->size);
	return 0;
}
static void dp_mst_sim_reset_work(struct work_struct *work)
{
	struct dp_mst_notify_work *notify_work =
		container_of(work, struct dp_mst_notify_work, base);
	struct dp_mst_sim_context *ctx = notify_work->ctx;

	mutex_lock(&ctx->session_lock);
	--ctx->reset_cnt;
	reinit_completion(&ctx->session_comp);
	mutex_unlock(&ctx->session_lock);

	/* free the notify work allocated in dp_mst_sim_reset() */
	kfree(notify_work);
}
static int dp_mst_sim_reset(struct dp_mst_sim_context *ctx,
		struct drm_dp_aux_msg *msg)
{
	struct dp_mst_notify_work *work;

	if (!msg->size || ((u8 *)msg->buffer)[0])
		return msg->size;

	mutex_lock(&ctx->session_lock);
	++ctx->reset_cnt;
	complete(&ctx->session_comp);
	mutex_unlock(&ctx->session_lock);

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (!work)
		return msg->size;

	work->ctx = ctx;
	INIT_WORK(&work->base, dp_mst_sim_reset_work);
	queue_work(ctx->wq, &work->base);

	return msg->size;
}
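
/*
 * AUX transfer hook. The host driver routes native AUX reads/writes here so
 * that accesses to the sideband DOWN_REQ/DOWN_REP/UP_REQ/UP_REP windows, the
 * ESI region and DP_MSTM_CTRL are served by the simulator instead of real
 * hardware; anything else falls through with -EINVAL.
 */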
int dp_mst_sim_transfer(void *mst_sim_context, struct drm_dp_aux_msg *msg)
{
	struct dp_mst_sim_context *ctx = mst_sim_context;

	if (!ctx || !ctx->port_num || !msg)
		return -ENOENT;

	if (msg->request == DP_AUX_NATIVE_WRITE) {
		if (msg->address >= DP_SIDEBAND_MSG_DOWN_REQ_BASE &&
				msg->address < DP_SIDEBAND_MSG_DOWN_REQ_BASE + 256)
			return dp_mst_sim_down_req(mst_sim_context, msg);

		if (msg->address >= DP_SIDEBAND_MSG_UP_REP_BASE &&
				msg->address < DP_SIDEBAND_MSG_UP_REP_BASE + 256)
			return 0;

		if (msg->address >= DP_SINK_COUNT_ESI &&
				msg->address < DP_SINK_COUNT_ESI + 14)
			return dp_mst_sim_clear_esi(mst_sim_context, msg);

		if (msg->address == DP_MSTM_CTRL)
			return dp_mst_sim_reset(mst_sim_context, msg);
	} else if (msg->request == DP_AUX_NATIVE_READ) {
		if (msg->address >= DP_SIDEBAND_MSG_DOWN_REP_BASE &&
				msg->address < DP_SIDEBAND_MSG_DOWN_REP_BASE + 256)
			return dp_mst_sim_down_rep(mst_sim_context, msg);

		if (msg->address >= DP_SIDEBAND_MSG_UP_REQ_BASE &&
				msg->address < DP_SIDEBAND_MSG_UP_REQ_BASE + 256)
			return dp_mst_sim_up_req(mst_sim_context, msg);

		if (msg->address >= DP_SINK_COUNT_ESI &&
				msg->address < DP_SINK_COUNT_ESI + 14)
			return dp_mst_sim_read_esi(mst_sim_context, msg);
	}

	return -EINVAL;
}

static void dp_mst_sim_up_req_work(struct work_struct *work)
{
	struct dp_mst_notify_work *notify_work =
		container_of(work, struct dp_mst_notify_work, base);
	struct dp_mst_sim_context *ctx = notify_work->ctx;
	struct drm_dp_sideband_msg_rx *msg = &ctx->down_rep;
	struct drm_dp_sideband_msg_hdr hdr;
	int len, hdr_len, i;

	mutex_lock(&ctx->session_lock);

	for (i = 0; i < ctx->port_num; i++) {
		if (ctx->reset_cnt)
			break;

		if (!(notify_work->port_mask & (1 << i)))
			continue;

		len = dp_sideband_build_connection_notify_req(ctx, i);

		/* copy data */
		memcpy(&ctx->dpcd[3], msg->msg, len);

		/* build header */
		memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
		hdr.broadcast = 0;
		hdr.path_msg = 0;
		hdr.lct = 1;
		hdr.lcr = 0;
		hdr.seqno = 0;
		hdr.msg_len = len + 1;
		hdr.eomt = 1;
		hdr.somt = 1;
		dp_mst_sim_encode_sideband_msg_hdr(&hdr, ctx->dpcd, &hdr_len);

		/* build crc */
		ctx->dpcd[len + 3] = dp_mst_sim_msg_data_crc4(&ctx->dpcd[3], len);

		/* update esi */
		dp_sideband_update_esi(ctx, DP_UP_REQ_MSG_RDY);

		/* notify host */
		mutex_unlock(&ctx->session_lock);
		ctx->host_hpd_irq(ctx->host_dev);
		mutex_lock(&ctx->session_lock);

		/* wait until esi is cleared */
		while (dp_sideband_pending_esi(ctx, DP_UP_REQ_MSG_RDY)) {
			if (ctx->reset_cnt)
				break;
			mutex_unlock(&ctx->session_lock);
			wait_for_completion(&ctx->session_comp);
			mutex_lock(&ctx->session_lock);
		}
	}

	mutex_unlock(&ctx->session_lock);
	kfree(notify_work);
}

static void dp_mst_sim_notify(struct dp_mst_sim_context *ctx,
		u32 port_mask)
{
	struct dp_mst_notify_work *work;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (!work)
		return;

	work->ctx = ctx;
	work->port_mask = port_mask;
	INIT_WORK(&work->base, dp_mst_sim_up_req_work);
	queue_work(ctx->wq, &work->base);
}
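
/*
 * Replace the simulated port table. If the port count is unchanged, any port
 * whose pdt/input/ldps/ddps/mcs state differs from the previous table is
 * reported to the host as a CONNECTION_STATUS_NOTIFY up request, which lets
 * callers simulate hot-plug events on individual MST ports.
 */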
int dp_mst_sim_update(void *mst_sim_context, u32 port_num,
		struct dp_mst_sim_port *ports)
{
	struct dp_mst_sim_context *ctx = mst_sim_context;
	u8 *edid;
	int rc = 0;
	u32 update_mask = 0;
	u32 i;

	if (!ctx || port_num >= 15)
		return -EINVAL;

	mutex_lock(&ctx->session_lock);

	/* get update mask */
	if (port_num && ctx->port_num == port_num) {
		for (i = 0; i < port_num; i++) {
			if (ports[i].pdt != ctx->ports[i].pdt ||
					ports[i].input != ctx->ports[i].input ||
					ports[i].ldps != ctx->ports[i].ldps ||
					ports[i].ddps != ctx->ports[i].ddps ||
					ports[i].mcs != ctx->ports[i].mcs)
				update_mask |= (1 << i);
		}
	}

	for (i = 0; i < ctx->port_num; i++)
		kfree(ctx->ports[i].edid);
	kfree(ctx->ports);

	ctx->port_num = 0;
	ctx->ports = kcalloc(port_num, sizeof(*ports), GFP_KERNEL);
	if (!ctx->ports) {
		rc = -ENOMEM;
		goto fail;
	}
	ctx->port_num = port_num;

	for (i = 0; i < port_num; i++) {
		ctx->ports[i] = ports[i];
		if (ports[i].edid_size) {
			if (!ports[i].edid) {
				rc = -EINVAL;
				goto fail;
			}

			edid = kzalloc(ports[i].edid_size,
					GFP_KERNEL);
			if (!edid) {
				rc = -ENOMEM;
				goto fail;
			}

			memcpy(edid, ports[i].edid, ports[i].edid_size);
			ctx->ports[i].edid = edid;
		}
	}

fail:
	if (rc) {
		for (i = 0; i < ctx->port_num; i++)
			kfree(ctx->ports[i].edid);
		kfree(ctx->ports);
	}

	mutex_unlock(&ctx->session_lock);

	if (update_mask)
		dp_mst_sim_notify(ctx, update_mask);

	return rc;
}

int dp_mst_sim_create(const struct dp_mst_sim_cfg *cfg,
		void **mst_sim_context)
{
	struct dp_mst_sim_context *ctx;

	if (!cfg || !mst_sim_context)
		return -EINVAL;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->host_dev = cfg->host_dev;
	ctx->host_hpd_irq = cfg->host_hpd_irq;
	ctx->host_req = cfg->host_req;
	memcpy(ctx->guid, cfg->guid, 16);

	mutex_init(&ctx->session_lock);
	init_completion(&ctx->session_comp);

	ctx->wq = create_singlethread_workqueue("dp_mst_sim");
	if (IS_ERR_OR_NULL(ctx->wq)) {
		DP_ERR("Error creating wq\n");
		kfree(ctx);
		return -EPERM;
	}

	*mst_sim_context = ctx;
	return 0;
}

int dp_mst_sim_destroy(void *mst_sim_context)
{
	struct dp_mst_sim_context *ctx = mst_sim_context;
	u32 i;

	if (!ctx)
		return -EINVAL;

	for (i = 0; i < ctx->port_num; i++)
		kfree(ctx->ports[i].edid);
	kfree(ctx->ports);

	destroy_workqueue(ctx->wq);
	return 0;
}
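
/*
 * Typical usage, sketched from the exported entry points above. The exact
 * layouts of struct dp_mst_sim_cfg and struct dp_mst_sim_port live in
 * dp_mst_sim_helper.h; the host objects below (dp_priv, dp_host_hpd_irq_cb,
 * sim_ports, aux_msg) are hypothetical names used only for illustration:
 *
 *	struct dp_mst_sim_cfg cfg = {
 *		.host_dev = dp_priv,
 *		.host_hpd_irq = dp_host_hpd_irq_cb,
 *	};
 *	void *sim;
 *
 *	if (dp_mst_sim_create(&cfg, &sim))
 *		return;
 *
 *	// publish the simulated port table; later calls with changed
 *	// port state generate CONNECTION_STATUS_NOTIFY up requests
 *	dp_mst_sim_update(sim, ARRAY_SIZE(sim_ports), sim_ports);
 *
 *	// while MST simulation is enabled, route drm_dp_aux transfers
 *	// into the simulator instead of the real AUX channel
 *	dp_mst_sim_transfer(sim, &aux_msg);
 *
 *	dp_mst_sim_destroy(sim);
 */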