
This change updates the include paths for the DP and DSC headers that have
moved in the upstream kernel. Header paths changed upstream:

include/drm/display/drm_dp_aux_bus.h
include/drm/display/drm_dp_dual_mode_helper.h
include/drm/display/drm_dp.h
include/drm/display/drm_dp_helper.h
include/drm/display/drm_dp_mst_helper.h
include/drm/display/drm_dsc.h
include/drm/display/drm_dsc_helper.h
include/drm/display/drm_hdcp.h
include/drm/display/drm_hdcp_helper.h
include/drm/display/drm_hdmi_helper.h
include/drm/display/drm_scdc.h
include/drm/display/drm_scdc_helper.h

Change-Id: Icb9a227c7464061f68fe60cbda6d93858fa768c5
Signed-off-by: GG Hou <quic_renjhou@quicinc.com>
Signed-off-by: Nilaan Gunabalachandran <quic_ngunabal@quicinc.com>
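The new locations are picked up with a kernel-version guard, as this file does below for drm_dp_mst_helper.h. A minimal sketch of the same guard applied to another header from the list above (drm_dp_helper.h is shown purely as an illustration; this particular file does not include it):

#include <linux/version.h>

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0))
#include <drm/display/drm_dp_helper.h>  /* new path, kernel v5.19 and later */
#else
#include <drm/drm_dp_helper.h>          /* old path, pre-v5.19 kernels */
#endif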
/*
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission. The copyright holders make no representations
 * about the suitability of this software for any purpose. It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */

#include <linux/types.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/version.h>
#include <drm/drm_fixed.h>
#include <drm/drm_edid.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0))
#include <drm/display/drm_dp_mst_helper.h>
#else
#include <drm/drm_dp_mst_helper.h>
#endif
#include "dp_mst_sim_helper.h"
#include "dp_debug.h"

#define DP_MST_DEBUG(fmt, ...) DP_DEBUG(fmt, ##__VA_ARGS__)
#define DP_MST_INFO(fmt, ...) DP_INFO(fmt, ##__VA_ARGS__)
#define DP_MST_DEBUG_V(fmt, ...) DP_DEBUG_V(fmt, ##__VA_ARGS__)
#define DP_MST_INFO_V(fmt, ...) DP_INFO_V(fmt, ##__VA_ARGS__)

#define DDC_SEGMENT_ADDR 0x30

struct dp_mst_sim_context {
	void *host_dev;
	void (*host_hpd_irq)(void *host_dev);
	void (*host_req)(void *host_dev, const u8 *in, int in_size,
			u8 *out, int *out_size);

	struct dp_mst_sim_port *ports;
	u32 port_num;

	struct drm_dp_sideband_msg_rx down_req;
	struct drm_dp_sideband_msg_rx down_rep;

	struct mutex session_lock;
	struct completion session_comp;
	struct workqueue_struct *wq;
	int reset_cnt;

	u8 esi[16];
	u8 guid[16];
	u8 dpcd[1024];
};

struct dp_mst_sim_work {
	struct work_struct base;
	struct dp_mst_sim_context *ctx;
	unsigned int address;
	u8 buffer[256];
	size_t size;
};

struct dp_mst_notify_work {
	struct work_struct base;
	struct dp_mst_sim_context *ctx;
	u32 port_mask;
};

#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
static void dp_sideband_hex_dump(const char *name,
		u32 address, u8 *buffer, size_t size)
{
	char prefix[64];
	int i, linelen, remaining = size;
	const int rowsize = 16;
	u8 linebuf[64];

	snprintf(prefix, sizeof(prefix), "%s(%d) %4xh(%2zu): ",
			name, current->pid, address, size);

	for (i = 0; i < size; i += rowsize) {
		linelen = min(remaining, rowsize);
		remaining -= rowsize;

		hex_dump_to_buffer(buffer + i, linelen, rowsize, 1,
				linebuf, sizeof(linebuf), false);

		DP_MST_DEBUG_V("%s%s\n", prefix, linebuf);
	}
}
#else
static void dp_sideband_hex_dump(const char *name,
		u32 address, u8 *buffer, size_t size)
{
}
#endif /* CONFIG_DYNAMIC_DEBUG */

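/*
 * Bit-serial CRC-4 over the sideband message header nibbles
 * (the 0x13 XOR term corresponds to polynomial x^4 + x + 1).
 * Used to generate and validate the low nibble of the last
 * header byte.
 */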
static u8 dp_mst_sim_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}

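/*
 * Bit-serial CRC-8 over the sideband message body (the 0xd5 XOR
 * term corresponds to polynomial x^8 + x^7 + x^6 + x^4 + x^2 + 1).
 * Appended as the last byte of each sideband message chunk.
 */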
static u8 dp_mst_sim_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = number_of_bytes * 8;
	u16 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}

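/*
 * Parse a sideband message header from a raw AUX write: LCT/LCR,
 * the relative address (RAD) bytes, broadcast/path_msg/msg_len and
 * the SOMT/EOMT/seqno byte. Returns false on a bad length or a
 * header CRC mismatch; *hdrlen receives the parsed header size.
 */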
static bool dp_mst_sim_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
		u8 *buf, int buflen, u8 *hdrlen)
{
	u8 crc4;
	u8 len;
	int i;
	u8 idx;

	if (buf[0] == 0)
		return false;
	len = 3;
	len += ((buf[0] & 0xf0) >> 4) / 2;
	if (len > buflen)
		return false;
	crc4 = dp_mst_sim_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		DP_MST_DEBUG("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	idx = 1;
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}

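/*
 * Accumulate one AUX transfer into the current sideband message.
 * The first chunk (hdr == true) carries a header and starts a new
 * message on SOMT; later chunks are appended until EOMT, at which
 * point the per-chunk CRC byte is dropped and the payload is copied
 * into msg->msg.
 */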
static bool dp_mst_sim_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
		u8 *replybuf, u8 replybuflen, bool hdr)
{
	int ret;
	u8 crc4;

	if (hdr) {
		u8 hdrlen;
		struct drm_dp_sideband_msg_hdr recv_hdr;

		ret = dp_mst_sim_decode_sideband_msg_hdr(&recv_hdr,
				replybuf, replybuflen, &hdrlen);
		if (ret == false)
			return false;

		/*
		 * ignore out-of-order messages or messages that are part of a
		 * failed transaction
		 */
		if (!recv_hdr.somt && !msg->have_somt)
			return false;

		/* get length contained in this portion */
		msg->curchunk_len = recv_hdr.msg_len;
		msg->curchunk_hdrlen = hdrlen;

		/* we have already gotten an somt - don't bother parsing */
		if (recv_hdr.somt && msg->have_somt)
			return false;

		if (recv_hdr.somt) {
			memcpy(&msg->initial_hdr, &recv_hdr,
					sizeof(struct drm_dp_sideband_msg_hdr));
			msg->have_somt = true;
		}
		if (recv_hdr.eomt)
			msg->have_eomt = true;

		/* copy the bytes for the remainder of this header chunk */
		msg->curchunk_idx = min(msg->curchunk_len,
				(u8)(replybuflen - hdrlen));
		memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
	} else {
		memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
		msg->curchunk_idx += replybuflen;
	}

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* do CRC */
		crc4 = dp_mst_sim_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk,
				msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}
	return true;
}

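/*
 * Build a sideband message header into buf: LCT/LCR, RAD bytes,
 * broadcast/path_msg/msg_len and the SOMT/EOMT/seqno byte, then
 * place the 4-bit header CRC in the low nibble of the final byte.
 */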
static void dp_mst_sim_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
		u8 *buf, int *len)
{
	int idx = 0;
	int i;
	u8 crc4;

	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
			(hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = dp_mst_sim_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

	*len = idx;
}

static bool dp_get_one_sb_msg(struct drm_dp_sideband_msg_rx *msg,
		struct drm_dp_aux_msg *aux_msg)
{
	int ret;

	if (!msg->have_somt) {
		ret = dp_mst_sim_sideband_msg_build(msg,
				aux_msg->buffer, aux_msg->size, true);
		if (!ret) {
			DP_ERR("sideband hdr build failed\n");
			return false;
		}
	} else {
		ret = dp_mst_sim_sideband_msg_build(msg,
				aux_msg->buffer, aux_msg->size, false);
		if (!ret) {
			DP_ERR("sideband msg build failed\n");
			return false;
		}
	}

	return true;
}

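/*
 * Generic NAK reply: echo the request type with the reply bit (0x80)
 * set, followed by the branch GUID, a NAK reason byte (0x4 here) and
 * one byte of NAK data.
 */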
static int dp_sideband_build_nak_rep(
		struct dp_mst_sim_context *ctx)
{
	struct drm_dp_sideband_msg_rx *msg = &ctx->down_req;
	u8 *buf = ctx->down_rep.msg;
	int idx = 0;

	buf[idx] = msg->msg[0] | 0x80;
	idx++;

	memcpy(&buf[idx], ctx->guid, 16);
	idx += 16;

	buf[idx] = 0x4;
	idx++;

	buf[idx] = 0;
	idx++;

	return idx;
}

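/*
 * LINK_ADDRESS reply: branch GUID, port count, then one descriptor
 * per simulated port (input flag, peer device type and port number,
 * MCS/DDPS flags, and for output ports the legacy plug status, DPCD
 * revision, peer GUID and SDP stream counts).
 */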
static int dp_sideband_build_link_address_rep(
		struct dp_mst_sim_context *ctx)
{
	struct dp_mst_sim_port *port;
	u8 *buf = ctx->down_rep.msg;
	int idx = 0;
	u32 i, tmp;

	buf[idx] = DP_LINK_ADDRESS;
	idx++;

	memcpy(&buf[idx], ctx->guid, 16);
	idx += 16;

	buf[idx] = ctx->port_num;
	idx++;

	for (i = 0; i < ctx->port_num; i++) {
		port = &ctx->ports[i];

		tmp = 0;
		if (port->input)
			tmp |= 0x80;
		tmp |= port->pdt << 4;
		tmp |= i & 0xF;
		buf[idx] = tmp;
		idx++;

		tmp = 0;
		if (port->mcs)
			tmp |= 0x80;
		if (port->ddps)
			tmp |= 0x40;

		if (port->input) {
			buf[idx] = tmp;
			idx++;
			continue;
		}

		if (port->ldps)
			tmp |= 0x20;
		buf[idx] = tmp;
		idx++;

		buf[idx] = port->dpcd_rev;
		idx++;

		memcpy(&buf[idx], port->peer_guid, 16);
		idx += 16;

		buf[idx] = (port->num_sdp_streams << 4) |
				(port->num_sdp_stream_sinks);
		idx++;
	}

	return idx;
}

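/*
 * REMOTE_I2C_READ reply, used to serve EDID data for a port. Only
 * two access patterns are accepted: a single one-byte write to
 * DDC_ADDR (plain EDID offset), or a segment-pointer write to
 * DDC_SEGMENT_ADDR followed by the offset, which together select a
 * byte range inside the cached port EDID.
 */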
static int dp_sideband_build_remote_i2c_read_rep(
		struct dp_mst_sim_context *ctx)
{
	struct dp_mst_sim_port *port;
	struct drm_dp_remote_i2c_read i2c_read;
	u8 *buf;
	int idx;
	u32 i, start, len;

	buf = ctx->down_req.msg;
	idx = 1;

	i2c_read.num_transactions = buf[idx] & 0x3;
	i2c_read.port_number = buf[idx] >> 4;
	idx++;

	if (i2c_read.port_number >= ctx->port_num)
		goto err;

	for (i = 0; i < i2c_read.num_transactions; i++) {
		i2c_read.transactions[i].i2c_dev_id = buf[idx] & 0x7f;
		idx++;

		i2c_read.transactions[i].num_bytes = buf[idx];
		idx++;

		i2c_read.transactions[i].bytes = &buf[idx];
		idx += i2c_read.transactions[i].num_bytes;

		i2c_read.transactions[i].no_stop_bit = (buf[idx] >> 4) & 0x1;
		i2c_read.transactions[i].i2c_transaction_delay = buf[idx] & 0xf;
		idx++;
	}

	i2c_read.read_i2c_device_id = buf[idx];
	idx++;

	i2c_read.num_bytes_read = buf[idx];
	idx++;

	port = &ctx->ports[i2c_read.port_number];

	if (i2c_read.num_transactions == 1) {
		if (i2c_read.transactions[0].i2c_dev_id != DDC_ADDR ||
				i2c_read.transactions[0].num_bytes != 1) {
			DP_ERR("unsupported i2c address\n");
			goto err;
		}

		start = i2c_read.transactions[0].bytes[0];
	} else if (i2c_read.num_transactions == 2) {
		if (i2c_read.transactions[0].i2c_dev_id != DDC_SEGMENT_ADDR ||
				i2c_read.transactions[0].num_bytes != 1 ||
				i2c_read.transactions[1].i2c_dev_id != DDC_ADDR ||
				i2c_read.transactions[1].num_bytes != 1) {
			DP_ERR("unsupported i2c address\n");
			goto err;
		}

		start = i2c_read.transactions[0].bytes[0] * EDID_LENGTH * 2 +
				i2c_read.transactions[1].bytes[0];
	} else {
		DP_ERR("unsupported i2c transaction\n");
		goto err;
	}

	len = i2c_read.num_bytes_read;

	if (start + len > port->edid_size) {
		DP_ERR("edid data exceeds maximum\n");
		goto err;
	}

	buf = ctx->down_rep.msg;
	idx = 0;

	buf[idx] = DP_REMOTE_I2C_READ;
	idx++;

	buf[idx] = i2c_read.port_number;
	idx++;

	buf[idx] = len;
	idx++;

	memcpy(&buf[idx], &port->edid[start], len);
	idx += len;

	return idx;
err:
	return dp_sideband_build_nak_rep(ctx);
}

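/*
 * ENUM_PATH_RESOURCES reply: report the port's full and available
 * PBN as two big-endian 16-bit values.
 */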
static int dp_sideband_build_enum_path_resources_rep(
		struct dp_mst_sim_context *ctx)
{
	struct dp_mst_sim_port *port;
	u8 port_num;
	u8 *buf;
	int idx;

	buf = ctx->down_req.msg;
	port_num = buf[1] >> 4;

	if (port_num >= ctx->port_num) {
		DP_ERR("invalid port num\n");
		goto err;
	}

	port = &ctx->ports[port_num];

	buf = ctx->down_rep.msg;
	idx = 0;

	buf[idx] = DP_ENUM_PATH_RESOURCES;
	idx++;

	buf[idx] = port_num << 4;
	idx++;

	buf[idx] = port->full_pbn >> 8;
	idx++;

	buf[idx] = port->full_pbn & 0xFF;
	idx++;

	buf[idx] = port->avail_pbn >> 8;
	idx++;

	buf[idx] = port->avail_pbn & 0xFF;
	idx++;

	return idx;
err:
	return dp_sideband_build_nak_rep(ctx);
}

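/*
 * ALLOCATE_PAYLOAD reply: parse the port number, VCPI, PBN and the
 * packed SDP stream sink nibbles from the request, then acknowledge
 * with the same port/VCPI/PBN.
 */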
static int dp_sideband_build_allocate_payload_rep(
		struct dp_mst_sim_context *ctx)
{
	struct drm_dp_allocate_payload allocate_payload;
	u8 *buf;
	int idx;
	u32 i;

	buf = ctx->down_req.msg;
	idx = 1;

	allocate_payload.port_number = buf[idx] >> 4;
	allocate_payload.number_sdp_streams = buf[idx] & 0xF;
	idx++;

	allocate_payload.vcpi = buf[idx];
	idx++;

	allocate_payload.pbn = (buf[idx] << 8) | buf[idx+1];
	idx += 2;

	for (i = 0; i < allocate_payload.number_sdp_streams / 2; i++) {
		allocate_payload.sdp_stream_sink[i * 2] = buf[idx] >> 4;
		allocate_payload.sdp_stream_sink[i * 2 + 1] = buf[idx] & 0xf;
		idx++;
	}
	if (allocate_payload.number_sdp_streams & 1) {
		i = allocate_payload.number_sdp_streams - 1;
		allocate_payload.sdp_stream_sink[i] = buf[idx] >> 4;
		idx++;
	}

	if (allocate_payload.port_number >= ctx->port_num) {
		DP_ERR("invalid port num\n");
		goto err;
	}

	buf = ctx->down_rep.msg;
	idx = 0;

	buf[idx] = DP_ALLOCATE_PAYLOAD;
	idx++;

	buf[idx] = allocate_payload.port_number;
	idx++;

	buf[idx] = allocate_payload.vcpi;
	idx++;

	buf[idx] = allocate_payload.pbn >> 8;
	idx++;

	buf[idx] = allocate_payload.pbn & 0xFF;
	idx++;

	return idx;
err:
	return dp_sideband_build_nak_rep(ctx);
}

static int dp_sideband_build_power_updown_phy_rep(
		struct dp_mst_sim_context *ctx)
{
	u8 port_num;
	u8 *buf;
	int idx;

	buf = ctx->down_req.msg;
	port_num = buf[1] >> 4;

	if (port_num >= ctx->port_num) {
		DP_ERR("invalid port num\n");
		goto err;
	}

	buf = ctx->down_rep.msg;
	idx = 0;

	buf[idx] = ctx->down_req.msg[0];
	idx++;

	buf[idx] = port_num;
	idx++;

	return idx;
err:
	return dp_sideband_build_nak_rep(ctx);
}

static int dp_sideband_build_clear_payload_id_table_rep(
		struct dp_mst_sim_context *ctx)
{
	u8 *buf = ctx->down_rep.msg;
	int idx = 0;

	buf[idx] = DP_CLEAR_PAYLOAD_ID_TABLE;
	idx++;

	return idx;
}

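/*
 * Body of a CONNECTION_STATUS_NOTIFY up request for one port: port
 * number, peer GUID, and the packed ldps/ddps/mcs/input/pdt status
 * bits that tell the host what changed.
 */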
static int dp_sideband_build_connection_notify_req(
		struct dp_mst_sim_context *ctx, int port_idx)
{
	struct dp_mst_sim_port *port = &ctx->ports[port_idx];
	u8 *buf = ctx->down_rep.msg;
	int idx = 0;

	buf[idx] = DP_CONNECTION_STATUS_NOTIFY;
	idx++;

	buf[idx] = port_idx << 4;
	idx++;

	memcpy(&buf[idx], &port->peer_guid, 16);
	idx += 16;

	buf[idx] = (port->ldps << 6) |
			(port->ddps << 5) |
			(port->mcs << 4) |
			(port->input << 3) |
			(port->pdt & 0x7);
	idx++;

	return idx;
}

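/*
 * The simulated ESI block mirrors the DPCD event-status area starting
 * at DP_SINK_COUNT_ESI: byte 0 is set to the simulated port count and
 * byte 1 carries the service IRQ bits (DP_DOWN_REP_MSG_RDY /
 * DP_UP_REQ_MSG_RDY).
 */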
static inline int dp_sideband_update_esi(
		struct dp_mst_sim_context *ctx, u8 val)
{
	ctx->esi[0] = ctx->port_num;
	ctx->esi[1] = val;
	ctx->esi[2] = 0;

	return 0;
}

static inline bool dp_sideband_pending_esi(
		struct dp_mst_sim_context *ctx, u8 val)
{
	return !!(ctx->esi[1] & val);
}

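/*
 * AUX writes to the ESI range clear the corresponding status bits
 * (write-one-to-clear). Clearing a message-ready bit wakes the worker
 * blocked in wait_for_completion() so the next chunk can be posted.
 */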
static int dp_mst_sim_clear_esi(struct dp_mst_sim_context *ctx,
		struct drm_dp_aux_msg *msg)
{
	size_t i;
	u8 old_esi = ctx->esi[1];
	u32 addr = msg->address - DP_SINK_COUNT_ESI;

	if (msg->size - addr >= 16) {
		msg->reply = DP_AUX_NATIVE_REPLY_NACK;
		return 0;
	}

	mutex_lock(&ctx->session_lock);

	for (i = 0; i < msg->size; i++)
		ctx->esi[addr + i] &= ~((u8 *)msg->buffer)[i];

	if (old_esi != ctx->esi[1])
		complete(&ctx->session_comp);

	mutex_unlock(&ctx->session_lock);

	msg->reply = DP_AUX_NATIVE_REPLY_ACK;
	return 0;
}

static int dp_mst_sim_read_esi(struct dp_mst_sim_context *ctx,
		struct drm_dp_aux_msg *msg)
{
	u32 addr = msg->address - DP_SINK_COUNT_ESI;

	if (msg->size - addr >= 16) {
		msg->reply = DP_AUX_NATIVE_REPLY_NACK;
		return 0;
	}

	memcpy(msg->buffer, &ctx->esi[addr], msg->size);
	msg->reply = DP_AUX_NATIVE_REPLY_ACK;

	return 0;
}

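/*
 * Handle one complete down request: assemble the sideband message,
 * build the reply for the request type (optionally letting the host
 * override it via host_req), then stream the reply in 44-byte chunks
 * through the simulated down-reply DPCD window. Each chunk sets
 * DP_DOWN_REP_MSG_RDY, raises an HPD IRQ toward the host and waits
 * until the host clears the ESI bit before sending the next chunk.
 */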
static int dp_mst_sim_down_req_internal(struct dp_mst_sim_context *ctx,
		struct drm_dp_aux_msg *aux_msg)
{
	struct drm_dp_sideband_msg_rx *msg = &ctx->down_req;
	struct drm_dp_sideband_msg_hdr hdr;
	bool seqno;
	int ret, size, len, hdr_len;

	ret = dp_get_one_sb_msg(msg, aux_msg);
	if (!ret)
		return -EINVAL;

	if (!msg->have_eomt)
		return 0;

	seqno = msg->initial_hdr.seqno;

	switch (msg->msg[0]) {
	case DP_LINK_ADDRESS:
		size = dp_sideband_build_link_address_rep(ctx);
		break;
	case DP_REMOTE_I2C_READ:
		size = dp_sideband_build_remote_i2c_read_rep(ctx);
		break;
	case DP_ENUM_PATH_RESOURCES:
		size = dp_sideband_build_enum_path_resources_rep(ctx);
		break;
	case DP_ALLOCATE_PAYLOAD:
		size = dp_sideband_build_allocate_payload_rep(ctx);
		break;
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		size = dp_sideband_build_power_updown_phy_rep(ctx);
		break;
	case DP_CLEAR_PAYLOAD_ID_TABLE:
		size = dp_sideband_build_clear_payload_id_table_rep(ctx);
		break;
	default:
		size = dp_sideband_build_nak_rep(ctx);
		break;
	}

	if (ctx->host_req)
		ctx->host_req(ctx->host_dev,
				ctx->down_req.msg, ctx->down_req.curlen,
				ctx->down_rep.msg, &size);

	memset(msg, 0, sizeof(*msg));
	msg = &ctx->down_rep;
	msg->curlen = 0;

	mutex_lock(&ctx->session_lock);

	while (msg->curlen < size) {
		if (ctx->reset_cnt)
			break;

		/* copy data */
		len = min(size - msg->curlen, 44);
		memcpy(&ctx->dpcd[3], &msg->msg[msg->curlen], len);
		msg->curlen += len;

		/* build header */
		memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
		hdr.broadcast = 0;
		hdr.path_msg = 0;
		hdr.lct = 1;
		hdr.lcr = 0;
		hdr.seqno = seqno;
		hdr.msg_len = len + 1;
		hdr.eomt = (msg->curlen == size);
		hdr.somt = (msg->curlen == len);
		dp_mst_sim_encode_sideband_msg_hdr(&hdr, ctx->dpcd, &hdr_len);

		/* build crc */
		ctx->dpcd[len + 3] = dp_mst_sim_msg_data_crc4(&ctx->dpcd[3], len);

		/* update esi */
		dp_sideband_update_esi(ctx, DP_DOWN_REP_MSG_RDY);

		/* notify host */
		mutex_unlock(&ctx->session_lock);
		ctx->host_hpd_irq(ctx->host_dev);
		mutex_lock(&ctx->session_lock);

		/* wait until esi is cleared */
		while (dp_sideband_pending_esi(ctx, DP_DOWN_REP_MSG_RDY)) {
			if (ctx->reset_cnt)
				break;
			mutex_unlock(&ctx->session_lock);
			wait_for_completion(&ctx->session_comp);
			mutex_lock(&ctx->session_lock);
		}
	}

	mutex_unlock(&ctx->session_lock);

	return 0;
}

static void dp_mst_sim_down_req_work(struct work_struct *work)
{
	struct dp_mst_sim_work *sim_work =
		container_of(work, struct dp_mst_sim_work, base);
	struct drm_dp_aux_msg msg;

	msg.address = sim_work->address;
	msg.buffer = sim_work->buffer;
	msg.size = sim_work->size;

	dp_mst_sim_down_req_internal(sim_work->ctx, &msg);

	kfree(sim_work);
}

static int dp_mst_sim_down_req(struct dp_mst_sim_context *ctx,
		struct drm_dp_aux_msg *aux_msg)
{
	struct dp_mst_sim_work *work;

	if (aux_msg->size >= 256) {
		aux_msg->reply = DP_AUX_NATIVE_REPLY_NACK;
		return 0;
	}

	dp_sideband_hex_dump("request",
			aux_msg->address, aux_msg->buffer, aux_msg->size);

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		aux_msg->reply = DP_AUX_NATIVE_REPLY_NACK;
		return 0;
	}

	work->ctx = ctx;
	work->address = aux_msg->address;
	work->size = aux_msg->size;
	memcpy(work->buffer, aux_msg->buffer, aux_msg->size);

	INIT_WORK(&work->base, dp_mst_sim_down_req_work);
	queue_work(ctx->wq, &work->base);

	aux_msg->reply = DP_AUX_NATIVE_REPLY_ACK;
	return 0;
}

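/*
 * AUX reads from the down-reply and up-request message windows are
 * served straight from the staged bytes in ctx->dpcd.
 */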
static int dp_mst_sim_down_rep(struct dp_mst_sim_context *ctx,
		struct drm_dp_aux_msg *msg)
{
	u32 addr = msg->address - DP_SIDEBAND_MSG_DOWN_REP_BASE;

	memcpy(msg->buffer, &ctx->dpcd[addr], msg->size);
	msg->reply = DP_AUX_NATIVE_REPLY_ACK;

	dp_sideband_hex_dump("reply",
			addr, msg->buffer, msg->size);

	return 0;
}

static int dp_mst_sim_up_req(struct dp_mst_sim_context *ctx,
		struct drm_dp_aux_msg *msg)
{
	u32 addr = msg->address - DP_SIDEBAND_MSG_UP_REQ_BASE;

	memcpy(msg->buffer, &ctx->dpcd[addr], msg->size);
	msg->reply = DP_AUX_NATIVE_REPLY_ACK;

	dp_sideband_hex_dump("up_req",
			addr, msg->buffer, msg->size);

	return 0;
}

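/*
 * Writing 0 to DP_MSTM_CTRL simulates an MST reset: reset_cnt is
 * bumped immediately so in-flight workers abort their ESI handshake,
 * and a queued work item later drops the count and re-arms the
 * completion for the next session.
 */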
static void dp_mst_sim_reset_work(struct work_struct *work)
{
	struct dp_mst_notify_work *notify_work =
		container_of(work, struct dp_mst_notify_work, base);
	struct dp_mst_sim_context *ctx = notify_work->ctx;

	mutex_lock(&ctx->session_lock);
	--ctx->reset_cnt;
	reinit_completion(&ctx->session_comp);
	mutex_unlock(&ctx->session_lock);

	/* drop the work item allocated in dp_mst_sim_reset() */
	kfree(notify_work);
}

static int dp_mst_sim_reset(struct dp_mst_sim_context *ctx,
		struct drm_dp_aux_msg *msg)
{
	struct dp_mst_notify_work *work;

	if (!msg->size || ((u8 *)msg->buffer)[0])
		return msg->size;

	mutex_lock(&ctx->session_lock);
	++ctx->reset_cnt;
	complete(&ctx->session_comp);
	mutex_unlock(&ctx->session_lock);

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (!work)
		return msg->size;

	work->ctx = ctx;
	INIT_WORK(&work->base, dp_mst_sim_reset_work);
	queue_work(ctx->wq, &work->base);

	return msg->size;
}

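/*
 * AUX transfer hook: dispatch native reads/writes by DPCD address
 * range (down-request, down-reply, up-request and up-reply windows,
 * the ESI block, and DP_MSTM_CTRL for reset). Unhandled addresses
 * return -EINVAL.
 */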
int dp_mst_sim_transfer(void *mst_sim_context, struct drm_dp_aux_msg *msg)
{
	struct dp_mst_sim_context *ctx = mst_sim_context;

	if (!ctx || !ctx->port_num || !msg)
		return -ENOENT;

	if (msg->request == DP_AUX_NATIVE_WRITE) {
		if (msg->address >= DP_SIDEBAND_MSG_DOWN_REQ_BASE &&
				msg->address < DP_SIDEBAND_MSG_DOWN_REQ_BASE + 256)
			return dp_mst_sim_down_req(mst_sim_context, msg);

		if (msg->address >= DP_SIDEBAND_MSG_UP_REP_BASE &&
				msg->address < DP_SIDEBAND_MSG_UP_REP_BASE + 256)
			return 0;

		if (msg->address >= DP_SINK_COUNT_ESI &&
				msg->address < DP_SINK_COUNT_ESI + 14)
			return dp_mst_sim_clear_esi(mst_sim_context, msg);

		if (msg->address == DP_MSTM_CTRL)
			return dp_mst_sim_reset(mst_sim_context, msg);

	} else if (msg->request == DP_AUX_NATIVE_READ) {
		if (msg->address >= DP_SIDEBAND_MSG_DOWN_REP_BASE &&
				msg->address < DP_SIDEBAND_MSG_DOWN_REP_BASE + 256)
			return dp_mst_sim_down_rep(mst_sim_context, msg);

		if (msg->address >= DP_SIDEBAND_MSG_UP_REQ_BASE &&
				msg->address < DP_SIDEBAND_MSG_UP_REQ_BASE + 256)
			return dp_mst_sim_up_req(mst_sim_context, msg);

		if (msg->address >= DP_SINK_COUNT_ESI &&
				msg->address < DP_SINK_COUNT_ESI + 14)
			return dp_mst_sim_read_esi(mst_sim_context, msg);
	}

	return -EINVAL;
}

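/*
 * Worker that raises CONNECTION_STATUS_NOTIFY up requests for every
 * port set in port_mask, using the same stage-in-DPCD, set
 * DP_UP_REQ_MSG_RDY, HPD-IRQ and wait-for-ESI-clear handshake as the
 * down-reply path.
 */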
static void dp_mst_sim_up_req_work(struct work_struct *work)
{
	struct dp_mst_notify_work *notify_work =
		container_of(work, struct dp_mst_notify_work, base);
	struct dp_mst_sim_context *ctx = notify_work->ctx;
	struct drm_dp_sideband_msg_rx *msg = &ctx->down_rep;
	struct drm_dp_sideband_msg_hdr hdr;
	int len, hdr_len, i;

	mutex_lock(&ctx->session_lock);

	for (i = 0; i < ctx->port_num; i++) {
		if (ctx->reset_cnt)
			break;

		if (!(notify_work->port_mask & (1 << i)))
			continue;

		len = dp_sideband_build_connection_notify_req(ctx, i);

		/* copy data */
		memcpy(&ctx->dpcd[3], msg->msg, len);

		/* build header */
		memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
		hdr.broadcast = 0;
		hdr.path_msg = 0;
		hdr.lct = 1;
		hdr.lcr = 0;
		hdr.seqno = 0;
		hdr.msg_len = len + 1;
		hdr.eomt = 1;
		hdr.somt = 1;
		dp_mst_sim_encode_sideband_msg_hdr(&hdr, ctx->dpcd, &hdr_len);

		/* build crc */
		ctx->dpcd[len + 3] = dp_mst_sim_msg_data_crc4(&ctx->dpcd[3], len);

		/* update esi */
		dp_sideband_update_esi(ctx, DP_UP_REQ_MSG_RDY);

		/* notify host */
		mutex_unlock(&ctx->session_lock);
		ctx->host_hpd_irq(ctx->host_dev);
		mutex_lock(&ctx->session_lock);

		/* wait until esi is cleared */
		while (dp_sideband_pending_esi(ctx, DP_UP_REQ_MSG_RDY)) {
			if (ctx->reset_cnt)
				break;
			mutex_unlock(&ctx->session_lock);
			wait_for_completion(&ctx->session_comp);
			mutex_lock(&ctx->session_lock);
		}
	}

	mutex_unlock(&ctx->session_lock);

	kfree(notify_work);
}

static void dp_mst_sim_notify(struct dp_mst_sim_context *ctx,
		u32 port_mask)
{
	struct dp_mst_notify_work *work;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (!work)
		return;

	work->ctx = ctx;
	work->port_mask = port_mask;

	INIT_WORK(&work->base, dp_mst_sim_up_req_work);
	queue_work(ctx->wq, &work->base);
}

static void dp_mst_sim_free_ports(struct dp_mst_sim_context *ctx)
{
	u32 i;

	for (i = 0; i < ctx->port_num; i++)
		kfree(ctx->ports[i].edid);

	kfree(ctx->ports);
	ctx->ports = NULL;
	ctx->port_num = 0;
}

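/*
 * Replace the simulated port list. When the port count is unchanged,
 * ports whose pdt/input/ldps/ddps/mcs state differs from the current
 * list are collected into a mask and reported to the host as
 * connection-status up requests once the new list (including copied
 * EDID buffers) is installed.
 */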
int dp_mst_sim_update(void *mst_sim_context, u32 port_num,
		struct dp_mst_sim_port *ports)
{
	struct dp_mst_sim_context *ctx = mst_sim_context;
	u8 *edid;
	int rc = 0;
	u32 update_mask = 0;
	u32 i;

	if (!ctx || port_num >= 15 || !ports)
		return -EINVAL;

	mutex_lock(&ctx->session_lock);

	/* get update mask */
	if (port_num && ctx->port_num == port_num) {
		for (i = 0; i < port_num; i++) {
			if (ports[i].pdt != ctx->ports[i].pdt ||
					ports[i].input != ctx->ports[i].input ||
					ports[i].ldps != ctx->ports[i].ldps ||
					ports[i].ddps != ctx->ports[i].ddps ||
					ports[i].mcs != ctx->ports[i].mcs)
				update_mask |= (1 << i);
		}
	}

	dp_mst_sim_free_ports(ctx);

	if (!port_num)
		goto end;

	ctx->ports = kcalloc(port_num, sizeof(*ports), GFP_KERNEL);
	if (!ctx->ports) {
		rc = -ENOMEM;
		goto fail;
	}
	ctx->port_num = port_num;

	for (i = 0; i < port_num; i++) {
		ctx->ports[i] = ports[i];
		if (ports[i].edid_size) {
			if (!ports[i].edid) {
				rc = -EINVAL;
				goto fail;
			}

			edid = kzalloc(ports[i].edid_size,
					GFP_KERNEL);
			if (!edid) {
				rc = -ENOMEM;
				goto fail;
			}

			memcpy(edid, ports[i].edid, ports[i].edid_size);
			ctx->ports[i].edid = edid;
		}
	}

fail:
	if (rc)
		dp_mst_sim_free_ports(ctx);

end:
	mutex_unlock(&ctx->session_lock);

	if (update_mask)
		dp_mst_sim_notify(ctx, update_mask);

	return rc;
}

int dp_mst_sim_create(const struct dp_mst_sim_cfg *cfg,
		void **mst_sim_context)
{
	struct dp_mst_sim_context *ctx;

	if (!cfg || !mst_sim_context)
		return -EINVAL;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->host_dev = cfg->host_dev;
	ctx->host_hpd_irq = cfg->host_hpd_irq;
	ctx->host_req = cfg->host_req;
	memcpy(ctx->guid, cfg->guid, 16);

	mutex_init(&ctx->session_lock);
	init_completion(&ctx->session_comp);

	ctx->wq = create_singlethread_workqueue("dp_mst_sim");
	if (IS_ERR_OR_NULL(ctx->wq)) {
		DP_ERR("Error creating wq\n");
		kfree(ctx);
		return -EPERM;
	}

	*mst_sim_context = ctx;
	return 0;
}

int dp_mst_sim_destroy(void *mst_sim_context)
{
	struct dp_mst_sim_context *ctx = mst_sim_context;
	u32 i;

	if (!ctx)
		return -EINVAL;

	for (i = 0; i < ctx->port_num; i++)
		kfree(ctx->ports[i].edid);
	kfree(ctx->ports);

	destroy_workqueue(ctx->wq);

	return 0;
}