Merge branch 'upstream-master' into android-mainline

Partial 5.8-rc7 merge to make the final merge easier.

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I95f1b0a379e3810333300a70c5a93f449d945c54
Greg Kroah-Hartman committed on 2020-07-25 13:15:34 +02:00
110 changed files with 1874 additions and 2204 deletions


@@ -198,6 +198,9 @@ Maxime Ripard <mripard@kernel.org> <maxime.ripard@free-electrons.com>
Mayuresh Janorkar <mayur@ti.com>
Michael Buesch <m@bues.ch>
Michel Dänzer <michel@tungstengraphics.com>
Mike Rapoport <rppt@kernel.org> <mike@compulab.co.il>
Mike Rapoport <rppt@kernel.org> <mike.rapoport@gmail.com>
Mike Rapoport <rppt@kernel.org> <rppt@linux.ibm.com>
Miodrag Dinic <miodrag.dinic@mips.com> <miodrag.dinic@imgtec.com>
Miquel Raynal <miquel.raynal@bootlin.com> <miquel.raynal@free-electrons.com>
Mitesh shah <mshah@teja.com>


@@ -378,6 +378,8 @@ examples:
- |
sound {
compatible = "simple-audio-card";
#address-cells = <1>;
#size-cells = <0>;
simple-audio-card,name = "rsnd-ak4643";
simple-audio-card,format = "left_j";
@@ -391,10 +393,12 @@ examples:
"ak4642 Playback", "DAI1 Playback";
dpcmcpu: simple-audio-card,cpu@0 {
reg = <0>;
sound-dai = <&rcar_sound 0>;
};
simple-audio-card,cpu@1 {
reg = <1>;
sound-dai = <&rcar_sound 1>;
};
@@ -418,6 +422,8 @@ examples:
- |
sound {
compatible = "simple-audio-card";
#address-cells = <1>;
#size-cells = <0>;
simple-audio-card,routing =
"pcm3168a Playback", "DAI1 Playback",
@@ -426,6 +432,7 @@ examples:
"pcm3168a Playback", "DAI4 Playback";
simple-audio-card,dai-link@0 {
reg = <0>;
format = "left_j";
bitclock-master = <&sndcpu0>;
frame-master = <&sndcpu0>;
@@ -439,22 +446,23 @@ examples:
};
simple-audio-card,dai-link@1 {
reg = <1>;
format = "i2s";
bitclock-master = <&sndcpu1>;
frame-master = <&sndcpu1>;
convert-channels = <8>; /* TDM Split */
sndcpu1: cpu@0 {
sndcpu1: cpu0 {
sound-dai = <&rcar_sound 1>;
};
cpu@1 {
cpu1 {
sound-dai = <&rcar_sound 2>;
};
cpu@2 {
cpu2 {
sound-dai = <&rcar_sound 3>;
};
cpu@3 {
cpu3 {
sound-dai = <&rcar_sound 4>;
};
codec {
@@ -466,6 +474,7 @@ examples:
};
simple-audio-card,dai-link@2 {
reg = <2>;
format = "i2s";
bitclock-master = <&sndcpu2>;
frame-master = <&sndcpu2>;


@@ -6956,6 +6956,7 @@ M: Timur Tabi <timur@kernel.org>
M: Nicolin Chen <nicoleotsuka@gmail.com>
M: Xiubo Li <Xiubo.Lee@gmail.com>
R: Fabio Estevam <festevam@gmail.com>
R: Shengjiu Wang <shengjiu.wang@gmail.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
L: linuxppc-dev@lists.ozlabs.org
S: Maintained
@@ -9305,6 +9306,17 @@ F: Documentation/kbuild/kconfig*
F: scripts/Kconfig.include
F: scripts/kconfig/
KCOV
R: Dmitry Vyukov <dvyukov@google.com>
R: Andrey Konovalov <andreyknvl@google.com>
L: kasan-dev@googlegroups.com
S: Maintained
F: Documentation/dev-tools/kcov.rst
F: include/linux/kcov.h
F: include/uapi/linux/kcov.h
F: kernel/kcov.c
F: scripts/Makefile.kcov
KCSAN
M: Marco Elver <elver@google.com>
R: Dmitry Vyukov <dvyukov@google.com>
@@ -11240,7 +11252,7 @@ S: Maintained
F: drivers/crypto/atmel-ecc.*
MICROCHIP I2C DRIVER
M: Ludovic Desroches <ludovic.desroches@microchip.com>
M: Codrin Ciubotariu <codrin.ciubotariu@microchip.com>
L: linux-i2c@vger.kernel.org
S: Supported
F: drivers/i2c/busses/i2c-at91-*.c
@@ -11333,17 +11345,17 @@ F: drivers/iio/adc/at91-sama5d2_adc.c
F: include/dt-bindings/iio/adc/at91-sama5d2_adc.h
MICROCHIP SAMA5D2-COMPATIBLE SHUTDOWN CONTROLLER
M: Nicolas Ferre <nicolas.ferre@microchip.com>
M: Claudiu Beznea <claudiu.beznea@microchip.com>
S: Supported
F: drivers/power/reset/at91-sama5d2_shdwc.c
MICROCHIP SPI DRIVER
M: Nicolas Ferre <nicolas.ferre@microchip.com>
M: Tudor Ambarus <tudor.ambarus@microchip.com>
S: Supported
F: drivers/spi/spi-atmel.*
MICROCHIP SSC DRIVER
M: Nicolas Ferre <nicolas.ferre@microchip.com>
M: Codrin Ciubotariu <codrin.ciubotariu@microchip.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: drivers/misc/atmel-ssc.c
@@ -14868,6 +14880,7 @@ F: drivers/s390/block/dasd*
F: include/linux/dasd_mod.h
S390 IOMMU (PCI)
M: Matthew Rosato <mjrosato@linux.ibm.com>
M: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
L: linux-s390@vger.kernel.org
S: Supported


@@ -14,7 +14,7 @@ COMPAT_GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE_COMPAT)elfedit))
COMPAT_GCC_TOOLCHAIN := $(realpath $(COMPAT_GCC_TOOLCHAIN_DIR)/..)
CC_COMPAT_CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE_COMPAT:%-=%))
CC_COMPAT_CLANG_FLAGS += --prefix=$(COMPAT_GCC_TOOLCHAIN_DIR)
CC_COMPAT_CLANG_FLAGS += --prefix=$(COMPAT_GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE_COMPAT))
CC_COMPAT_CLANG_FLAGS += -no-integrated-as -Qunused-arguments
ifneq ($(COMPAT_GCC_TOOLCHAIN),)
CC_COMPAT_CLANG_FLAGS += --gcc-toolchain=$(COMPAT_GCC_TOOLCHAIN)


@@ -292,7 +292,7 @@ CPUMF_EVENT_ATTR(cf_z15, TX_C_TABORT_SPECIAL, 0x00f5);
CPUMF_EVENT_ATTR(cf_z15, DFLT_ACCESS, 0x00f7);
CPUMF_EVENT_ATTR(cf_z15, DFLT_CYCLES, 0x00fc);
CPUMF_EVENT_ATTR(cf_z15, DFLT_CC, 0x00108);
CPUMF_EVENT_ATTR(cf_z15, DFLT_CCERROR, 0x00109);
CPUMF_EVENT_ATTR(cf_z15, DFLT_CCFINISH, 0x00109);
CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
@@ -629,7 +629,7 @@ static struct attribute *cpumcf_z15_pmu_event_attr[] __initdata = {
CPUMF_EVENT_PTR(cf_z15, DFLT_ACCESS),
CPUMF_EVENT_PTR(cf_z15, DFLT_CYCLES),
CPUMF_EVENT_PTR(cf_z15, DFLT_CC),
CPUMF_EVENT_PTR(cf_z15, DFLT_CCERROR),
CPUMF_EVENT_PTR(cf_z15, DFLT_CCFINISH),
CPUMF_EVENT_PTR(cf_z15, MT_DIAG_CYCLES_ONE_THR_ACTIVE),
CPUMF_EVENT_PTR(cf_z15, MT_DIAG_CYCLES_TWO_THR_ACTIVE),
NULL,


@@ -39,6 +39,7 @@
#define BT_MBI_UNIT_PMC 0x04
#define BT_MBI_UNIT_GFX 0x06
#define BT_MBI_UNIT_SMI 0x0C
#define BT_MBI_UNIT_CCK 0x14
#define BT_MBI_UNIT_USB 0x43
#define BT_MBI_UNIT_SATA 0xA3
#define BT_MBI_UNIT_PCIE 0xA6


@@ -1074,7 +1074,7 @@ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int of
copy_part(offsetof(struct fxregs_state, st_space), 128,
&xsave->i387.st_space, &kbuf, &offset_start, &count);
if (header.xfeatures & XFEATURE_MASK_SSE)
copy_part(xstate_offsets[XFEATURE_MASK_SSE], 256,
copy_part(xstate_offsets[XFEATURE_SSE], 256,
&xsave->i387.xmm_space, &kbuf, &offset_start, &count);
/*
* Fill xsave->i387.sw_reserved value for ptrace frame:


@@ -57,7 +57,7 @@ static inline
__wsum csum_and_copy_from_user(const void __user *src, void *dst,
int len, __wsum sum, int *err_ptr)
{
if (access_ok(dst, len))
if (access_ok(src, len))
return csum_partial_copy_generic((__force const void *)src, dst,
len, sum, err_ptr, NULL);
if (len)


@@ -778,8 +778,7 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
tmp_str++;
while (isspace(*++tmp_str));
while (tmp_str[0]) {
sub_str = strsep(&tmp_str, delimiter);
while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
if (ret)
return -EINVAL;
@@ -1039,8 +1038,7 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
memcpy(buf_cpy, buf, bytes);
buf_cpy[bytes] = '\0';
tmp = buf_cpy;
while (tmp[0]) {
sub_str = strsep(&tmp, delimiter);
while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
if (strlen(sub_str)) {
ret = kstrtol(sub_str, 0, &level);
if (ret)
@@ -1637,8 +1635,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
i++;
memcpy(buf_cpy, buf, count-i);
tmp_str = buf_cpy;
while (tmp_str[0]) {
sub_str = strsep(&tmp_str, delimiter);
while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
if (ret)
return -EINVAL;


@@ -644,9 +644,6 @@ static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
/* sclk is bigger than max sclk in the dependence table */
*voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
(dep_table->entries[i - 1].vddc -
(uint16_t)VDDC_VDDCI_DELTA));
if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
*voltage |= (data->vbios_boot_state.vddci_bootup_value *
@@ -654,8 +651,13 @@ static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
else if (dep_table->entries[i - 1].vddci)
*voltage |= (dep_table->entries[i - 1].vddci *
VOLTAGE_SCALE) << VDDC_SHIFT;
else
else {
vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
(dep_table->entries[i - 1].vddc -
(uint16_t)VDDC_VDDCI_DELTA));
*voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
}
if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
*mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;


@@ -271,6 +271,8 @@ void lima_pp_fini(struct lima_ip *ip)
int lima_pp_bcast_resume(struct lima_ip *ip)
{
/* PP has been reset by individual PP resume */
ip->data.async_reset = false;
return 0;
}


@@ -260,7 +260,7 @@ sun4i_hdmi_connector_detect(struct drm_connector *connector, bool force)
unsigned long reg;
reg = readl(hdmi->base + SUN4I_HDMI_HPD_REG);
if (reg & SUN4I_HDMI_HPD_HIGH) {
if (!(reg & SUN4I_HDMI_HPD_HIGH)) {
cec_phys_addr_invalidate(hdmi->cec_adap);
return connector_status_disconnected;
}


@@ -421,20 +421,21 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
/* Read data if receive data valid is set */
while (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) &
CDNS_I2C_SR_RXDV) {
/*
* Clear hold bit that was set for FIFO control if
* RX data left is less than FIFO depth, unless
* repeated start is selected.
*/
if ((id->recv_count < CDNS_I2C_FIFO_DEPTH) &&
!id->bus_hold_flag)
cdns_i2c_clear_bus_hold(id);
if (id->recv_count > 0) {
*(id->p_recv_buf)++ =
cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET);
id->recv_count--;
id->curr_recv_count--;
/*
* Clear hold bit that was set for FIFO control
* if RX data left is less than or equal to
* FIFO DEPTH unless repeated start is selected
*/
if (id->recv_count <= CDNS_I2C_FIFO_DEPTH &&
!id->bus_hold_flag)
cdns_i2c_clear_bus_hold(id);
} else {
dev_err(id->adap.dev.parent,
"xfer_size reg rollover. xfer aborted!\n");
@@ -594,10 +595,8 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
* Check for the message size against FIFO depth and set the
* 'hold bus' bit if it is greater than FIFO depth.
*/
if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
if (id->recv_count > CDNS_I2C_FIFO_DEPTH)
ctrl_reg |= CDNS_I2C_CR_HOLD;
else
ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
@@ -654,11 +653,8 @@ static void cdns_i2c_msend(struct cdns_i2c *id)
* Check for the message size against FIFO depth and set the
* 'hold bus' bit if it is greater than FIFO depth.
*/
if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
if (id->send_count > CDNS_I2C_FIFO_DEPTH)
ctrl_reg |= CDNS_I2C_CR_HOLD;
else
ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
/* Clear the interrupts in interrupt status register. */


@@ -367,7 +367,6 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
geni_se_select_mode(se, GENI_SE_FIFO);
writel_relaxed(len, se->base + SE_I2C_RX_TRANS_LEN);
geni_se_setup_m_cmd(se, I2C_READ, m_param);
if (dma_buf && geni_se_rx_dma_prep(se, dma_buf, len, &rx_dma)) {
geni_se_select_mode(se, GENI_SE_FIFO);
@@ -375,6 +374,8 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
dma_buf = NULL;
}
geni_se_setup_m_cmd(se, I2C_READ, m_param);
time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
if (!time_left)
geni_i2c_abort_xfer(gi2c);
@@ -408,7 +409,6 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
geni_se_select_mode(se, GENI_SE_FIFO);
writel_relaxed(len, se->base + SE_I2C_TX_TRANS_LEN);
geni_se_setup_m_cmd(se, I2C_WRITE, m_param);
if (dma_buf && geni_se_tx_dma_prep(se, dma_buf, len, &tx_dma)) {
geni_se_select_mode(se, GENI_SE_FIFO);
@@ -416,6 +416,8 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
dma_buf = NULL;
}
geni_se_setup_m_cmd(se, I2C_WRITE, m_param);
if (!dma_buf) /* Get FIFO IRQ */
writel_relaxed(1, se->base + SE_GENI_TX_WATERMARK_REG);


@@ -868,6 +868,7 @@ static int rcar_unreg_slave(struct i2c_client *slave)
/* disable irqs and ensure none is running before clearing ptr */
rcar_i2c_write(priv, ICSIER, 0);
rcar_i2c_write(priv, ICSCR, 0);
rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
synchronize_irq(priv->irq);
priv->slave = NULL;
@@ -969,6 +970,8 @@ static int rcar_i2c_probe(struct platform_device *pdev)
if (ret < 0)
goto out_pm_put;
rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
if (priv->devtype == I2C_RCAR_GEN3) {
priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (!IS_ERR(priv->rstc)) {


@@ -3676,10 +3676,12 @@ static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
return ret;
}
cm_id_priv->id.state = IB_CM_IDLE;
spin_lock_irq(&cm.lock);
if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
}
spin_unlock_irq(&cm.lock);
return 0;
}


@@ -649,9 +649,6 @@ void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
{
struct ib_uverbs_file *ufile = attrs->ufile;
/* alloc_commit consumes the uobj kref */
uobj->uapi_object->type_class->alloc_commit(uobj);
/* kref is held so long as the uobj is on the uobj list. */
uverbs_uobject_get(uobj);
spin_lock_irq(&ufile->uobjects_lock);
@@ -661,6 +658,9 @@ void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
/* matches atomic_set(-1) in alloc_uobj */
atomic_set(&uobj->usecnt, 0);
/* alloc_commit consumes the uobj kref */
uobj->uapi_object->type_class->alloc_commit(uobj);
/* Matches the down_read in rdma_alloc_begin_uobject */
up_read(&ufile->hw_destroy_rwsem);
}


@@ -3954,6 +3954,15 @@ static int config_qp_sq_buf(struct hns_roce_dev *hr_dev,
return 0;
}
static inline enum ib_mtu get_mtu(struct ib_qp *ibqp,
const struct ib_qp_attr *attr)
{
if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
return IB_MTU_4096;
return attr->path_mtu;
}
static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
const struct ib_qp_attr *attr, int attr_mask,
struct hns_roce_v2_qp_context *context,
@@ -3965,6 +3974,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
struct ib_device *ibdev = &hr_dev->ib_dev;
dma_addr_t trrl_ba;
dma_addr_t irrl_ba;
enum ib_mtu mtu;
u8 port_num;
u64 *mtts;
u8 *dmac;
@@ -4062,22 +4072,22 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
V2_QPC_BYTE_52_DMAC_S, 0);
/* mtu*(2^LP_PKTN_INI) should not bigger than 1 message length 64kb */
roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
V2_QPC_BYTE_56_LP_PKTN_INI_S,
ilog2(hr_dev->caps.max_sq_inline / IB_MTU_4096));
roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
mtu = get_mtu(ibqp, attr);
if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
if (attr_mask & IB_QP_PATH_MTU) {
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
else if (attr_mask & IB_QP_PATH_MTU)
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
V2_QPC_BYTE_24_MTU_S, mtu);
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
V2_QPC_BYTE_24_MTU_S, 0);
}
#define MAX_LP_MSG_LEN 65536
/* MTU*(2^LP_PKTN_INI) shouldn't be bigger than 64kb */
roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
V2_QPC_BYTE_56_LP_PKTN_INI_S,
ilog2(MAX_LP_MSG_LEN / ib_mtu_enum_to_int(mtu)));
roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);


@@ -120,7 +120,7 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
mr->pbl_hop_num = is_fast ? 1 : hr_dev->caps.pbl_hop_num;
buf_attr.page_shift = is_fast ? PAGE_SHIFT :
hr_dev->caps.pbl_buf_pg_sz + HNS_HW_PAGE_SHIFT;
hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT;
buf_attr.region[0].size = length;
buf_attr.region[0].hopnum = mr->pbl_hop_num;
buf_attr.region_count = 1;


@@ -601,6 +601,23 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
*/
synchronize_srcu(&dev->odp_srcu);
/*
* All work on the prefetch list must be completed, xa_erase() prevented
* new work from being created.
*/
wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));
/*
* At this point it is forbidden for any other thread to enter
* pagefault_mr() on this imr. It is already forbidden to call
* pagefault_mr() on an implicit child. Due to this additions to
* implicit_children are prevented.
*/
/*
* Block destroy_unused_implicit_child_mr() from incrementing
* num_deferred_work.
*/
xa_lock(&imr->implicit_children);
xa_for_each (&imr->implicit_children, idx, mtt) {
__xa_erase(&imr->implicit_children, idx);
@@ -609,9 +626,8 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
xa_unlock(&imr->implicit_children);
/*
* num_deferred_work can only be incremented inside the odp_srcu, or
* under xa_lock while the child is in the xarray. Thus at this point
* it is only decreasing, and all work holding it is now on the wq.
* Wait for any concurrent destroy_unused_implicit_child_mr() to
* complete.
*/
wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));


@@ -83,11 +83,11 @@ struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
struct mlx5_srq_table *table = &dev->srq_table;
struct mlx5_core_srq *srq;
xa_lock(&table->array);
xa_lock_irq(&table->array);
srq = xa_load(&table->array, srqn);
if (srq)
refcount_inc(&srq->common.refcount);
xa_unlock(&table->array);
xa_unlock_irq(&table->array);
return srq;
}


@@ -65,6 +65,7 @@ struct qcom_iommu_domain {
struct mutex init_mutex; /* Protects iommu pointer */
struct iommu_domain domain;
struct qcom_iommu_dev *iommu;
struct iommu_fwspec *fwspec;
};
static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
@@ -84,9 +85,9 @@ static struct qcom_iommu_dev * to_iommu(struct device *dev)
return dev_iommu_priv_get(dev);
}
static struct qcom_iommu_ctx * to_ctx(struct device *dev, unsigned asid)
static struct qcom_iommu_ctx * to_ctx(struct qcom_iommu_domain *d, unsigned asid)
{
struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
struct qcom_iommu_dev *qcom_iommu = d->iommu;
if (!qcom_iommu)
return NULL;
return qcom_iommu->ctxs[asid - 1];
@@ -118,14 +119,12 @@ iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg)
static void qcom_iommu_tlb_sync(void *cookie)
{
struct iommu_fwspec *fwspec;
struct device *dev = cookie;
struct qcom_iommu_domain *qcom_domain = cookie;
struct iommu_fwspec *fwspec = qcom_domain->fwspec;
unsigned i;
fwspec = dev_iommu_fwspec_get(dev);
for (i = 0; i < fwspec->num_ids; i++) {
struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
unsigned int val, ret;
iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0);
@@ -139,14 +138,12 @@ static void qcom_iommu_tlb_sync(void *cookie)
static void qcom_iommu_tlb_inv_context(void *cookie)
{
struct device *dev = cookie;
struct iommu_fwspec *fwspec;
struct qcom_iommu_domain *qcom_domain = cookie;
struct iommu_fwspec *fwspec = qcom_domain->fwspec;
unsigned i;
fwspec = dev_iommu_fwspec_get(dev);
for (i = 0; i < fwspec->num_ids; i++) {
struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
}
@@ -156,16 +153,14 @@ static void qcom_iommu_tlb_inv_context(void *cookie)
static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
size_t granule, bool leaf, void *cookie)
{
struct device *dev = cookie;
struct iommu_fwspec *fwspec;
struct qcom_iommu_domain *qcom_domain = cookie;
struct iommu_fwspec *fwspec = qcom_domain->fwspec;
unsigned i, reg;
reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
fwspec = dev_iommu_fwspec_get(dev);
for (i = 0; i < fwspec->num_ids; i++) {
struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
size_t s = size;
iova = (iova >> 12) << 12;
@@ -256,7 +251,9 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
};
qcom_domain->iommu = qcom_iommu;
pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, dev);
qcom_domain->fwspec = fwspec;
pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, qcom_domain);
if (!pgtbl_ops) {
dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n");
ret = -ENOMEM;
@@ -269,7 +266,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
domain->geometry.force_aperture = true;
for (i = 0; i < fwspec->num_ids; i++) {
struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
if (!ctx->secure_init) {
ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid);
@@ -419,7 +416,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
pm_runtime_get_sync(qcom_iommu->dev);
for (i = 0; i < fwspec->num_ids; i++) {
struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
/* Disable the context bank: */
iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);


@@ -2420,7 +2420,7 @@ static void integrity_writer(struct work_struct *w)
unsigned prev_free_sectors;
/* the following test is not needed, but it tests the replay code */
if (unlikely(dm_suspended(ic->ti)) && !ic->meta_dev)
if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev)
return;
spin_lock_irq(&ic->endio_wait.lock);
@@ -2481,7 +2481,7 @@ static void integrity_recalc(struct work_struct *w)
next_chunk:
if (unlikely(dm_suspended(ic->ti)))
if (unlikely(dm_post_suspending(ic->ti)))
goto unlock_ret;
range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);


@@ -144,6 +144,7 @@ EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7
#define DMF_POST_SUSPENDING 8
#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;
@@ -2571,6 +2572,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
if (!dm_suspended_md(md)) {
dm_table_presuspend_targets(map);
set_bit(DMF_SUSPENDED, &md->flags);
set_bit(DMF_POST_SUSPENDING, &md->flags);
dm_table_postsuspend_targets(map);
}
/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
@@ -2929,7 +2931,9 @@ retry:
if (r)
goto out_unlock;
set_bit(DMF_POST_SUSPENDING, &md->flags);
dm_table_postsuspend_targets(map);
clear_bit(DMF_POST_SUSPENDING, &md->flags);
out_unlock:
mutex_unlock(&md->suspend_lock);
@@ -3026,7 +3030,9 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla
(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
DMF_SUSPENDED_INTERNALLY);
set_bit(DMF_POST_SUSPENDING, &md->flags);
dm_table_postsuspend_targets(map);
clear_bit(DMF_POST_SUSPENDING, &md->flags);
}
static void __dm_internal_resume(struct mapped_device *md)
@@ -3187,6 +3193,11 @@ int dm_suspended_md(struct mapped_device *md)
return test_bit(DMF_SUSPENDED, &md->flags);
}
static int dm_post_suspending_md(struct mapped_device *md)
{
return test_bit(DMF_POST_SUSPENDING, &md->flags);
}
int dm_suspended_internally_md(struct mapped_device *md)
{
return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
@@ -3203,6 +3214,12 @@ int dm_suspended(struct dm_target *ti)
}
EXPORT_SYMBOL_GPL(dm_suspended);
int dm_post_suspending(struct dm_target *ti)
{
return dm_post_suspending_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_post_suspending);
int dm_noflush_suspending(struct dm_target *ti)
{
return __noflush_suspending(dm_table_get_md(ti->table));


@@ -68,7 +68,7 @@ static void aspeed_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
if (WARN_ON(clock > host->max_clk))
clock = host->max_clk;
for (div = 1; div < 256; div *= 2) {
for (div = 2; div < 256; div *= 2) {
if ((parent / div) <= clock)
break;
}


@@ -4638,8 +4638,7 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
* pcie_wait_for_link_delay - Wait until link is active or inactive
* @pdev: Bridge device
* @active: waiting for active or inactive?
* @delay: Delay to wait after link has become active (in ms). Specify %0
* for no delay.
* @delay: Delay to wait after link has become active (in ms)
*
* Use this to wait till link becomes active or inactive.
*/
@@ -4680,7 +4679,7 @@ static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
msleep(10);
timeout -= 10;
}
if (active && ret && delay)
if (active && ret)
msleep(delay);
else if (ret != active)
pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
@@ -4801,28 +4800,17 @@ void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
if (!pcie_downstream_port(dev))
return;
/*
* Per PCIe r5.0, sec 6.6.1, for downstream ports that support
* speeds > 5 GT/s, we must wait for link training to complete
* before the mandatory delay.
*
* We can only tell when link training completes via DLL Link
* Active, which is required for downstream ports that support
* speeds > 5 GT/s (sec 7.5.3.6). Unfortunately some common
* devices do not implement Link Active reporting even when it's
* required, so we'll check for that directly instead of checking
* the supported link speed. We assume devices without Link Active
* reporting can train in 100 ms regardless of speed.
*/
if (dev->link_active_reporting) {
pci_dbg(dev, "waiting for link to train\n");
if (!pcie_wait_for_link_delay(dev, true, 0)) {
if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
msleep(delay);
} else {
pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
delay);
if (!pcie_wait_for_link_delay(dev, true, delay)) {
/* Did not train, no need to wait any further */
return;
}
}
pci_dbg(child, "waiting %d ms to become accessible\n", delay);
msleep(delay);
if (!pci_device_is_present(child)) {
pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);


@@ -22,7 +22,7 @@ config VIDEO_ATOMISP
module will be called atomisp
config VIDEO_ATOMISP_ISP2401
bool "VIDEO_ATOMISP_ISP2401"
bool "Use Intel Atom ISP on Cherrytail/Anniedale (ISP2401)"
depends on VIDEO_ATOMISP
help
Enable support for Atom ISP2401-based boards.


@@ -156,6 +156,7 @@ atomisp-objs += \
pci/hive_isp_css_common/host/timed_ctrl.o \
pci/hive_isp_css_common/host/vmem.o \
pci/hive_isp_css_shared/host/tag.o \
pci/system_local.o \
obj-byt = \
pci/css_2400_system/hive/ia_css_isp_configs.o \
@@ -182,7 +183,6 @@ INCLUDES += \
-I$(atomisp)/include/hmm/ \
-I$(atomisp)/include/mmu/ \
-I$(atomisp)/pci/ \
-I$(atomisp)/pci/hrt/ \
-I$(atomisp)/pci/base/circbuf/interface/ \
-I$(atomisp)/pci/base/refcount/interface/ \
-I$(atomisp)/pci/camera/pipe/interface/ \
@@ -192,7 +192,6 @@ INCLUDES += \
-I$(atomisp)/pci/hive_isp_css_include/ \
-I$(atomisp)/pci/hive_isp_css_include/device_access/ \
-I$(atomisp)/pci/hive_isp_css_include/host/ \
-I$(atomisp)/pci/hive_isp_css_include/memory_access/ \
-I$(atomisp)/pci/hive_isp_css_shared/ \
-I$(atomisp)/pci/hive_isp_css_shared/host/ \
-I$(atomisp)/pci/isp/kernels/ \
@@ -311,9 +310,7 @@ INCLUDES += \
-I$(atomisp)/pci/runtime/tagger/interface/
INCLUDES_byt += \
-I$(atomisp)/pci/css_2400_system/ \
-I$(atomisp)/pci/css_2400_system/hive/ \
-I$(atomisp)/pci/css_2400_system/hrt/ \
INCLUDES_cht += \
-I$(atomisp)/pci/css_2401_system/ \
@@ -321,7 +318,6 @@ INCLUDES_cht += \
-I$(atomisp)/pci/css_2401_system/hive/ \
-I$(atomisp)/pci/css_2401_system/hrt/ \
# -I$(atomisp)/pci/css_2401_system/hrt/ \
# -I$(atomisp)/pci/css_2401_system/hive_isp_css_2401_system_generated/ \
DEFINES := -DHRT_HW -DHRT_ISP_CSS_CUSTOM_HOST -DHRT_USE_VIR_ADDRS -D__HOST__


@@ -495,11 +495,11 @@ static int ov2680_h_flip(struct v4l2_subdev *sd, s32 value)
ret = ov2680_read_reg(client, 1, OV2680_MIRROR_REG, &val);
if (ret)
return ret;
if (value) {
if (value)
val |= OV2680_FLIP_MIRROR_BIT_ENABLE;
} else {
else
val &= ~OV2680_FLIP_MIRROR_BIT_ENABLE;
}
ret = ov2680_write_reg(client, 1,
OV2680_MIRROR_REG, val);
if (ret)


@@ -1899,7 +1899,7 @@ static int ov5693_probe(struct i2c_client *client)
{
struct ov5693_device *dev;
int i2c;
int ret = 0;
int ret;
void *pdata;
unsigned int i;
@@ -1929,8 +1929,10 @@ static int ov5693_probe(struct i2c_client *client)
pdata = gmin_camera_platform_data(&dev->sd,
ATOMISP_INPUT_FORMAT_RAW_10,
atomisp_bayer_order_bggr);
if (!pdata)
if (!pdata) {
ret = -EINVAL;
goto out_free;
}
ret = ov5693_s_config(&dev->sd, client->irq, pdata);
if (ret)


@@ -250,6 +250,7 @@ const struct atomisp_camera_caps *atomisp_get_default_camera_caps(void);
#define IS_MFLD __IS_SOC(INTEL_FAM6_ATOM_SALTWELL_MID)
#define IS_BYT __IS_SOC(INTEL_FAM6_ATOM_SILVERMONT)
#define IS_CHT __IS_SOC(INTEL_FAM6_ATOM_AIRMONT)
#define IS_MRFD __IS_SOC(INTEL_FAM6_ATOM_SILVERMONT_MID)
#define IS_MOFD __IS_SOC(INTEL_FAM6_ATOM_AIRMONT_MID)
/* Both CHT and MOFD come with ISP2401 */


@@ -20,9 +20,6 @@
#define ATOMISP_REGS_H
/* common register definitions */
#define PUNIT_PORT 0x04
#define CCK_PORT 0x14
#define PCICMDSTS 0x01
#define INTR 0x0f
#define MSI_CAPID 0x24


@@ -355,11 +355,11 @@ int atomisp_acc_map(struct atomisp_sub_device *asd, struct atomisp_acc_map *map)
pgnr = DIV_ROUND_UP(map->length, PAGE_SIZE);
if (pgnr < ((PAGE_ALIGN(map->length)) >> PAGE_SHIFT)) {
dev_err(atomisp_dev,
dev_err(asd->isp->dev,
"user space memory size is less than the expected size..\n");
return -ENOMEM;
} else if (pgnr > ((PAGE_ALIGN(map->length)) >> PAGE_SHIFT)) {
dev_err(atomisp_dev,
dev_err(asd->isp->dev,
"user space memory size is large than the expected size..\n");
return -ENOMEM;
}


@@ -21,6 +21,7 @@
#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/pm_runtime.h>
@@ -109,7 +110,7 @@ struct atomisp_acc_pipe *atomisp_to_acc_pipe(struct video_device *dev)
static unsigned short atomisp_get_sensor_fps(struct atomisp_sub_device *asd)
{
struct v4l2_subdev_frame_interval fi;
struct v4l2_subdev_frame_interval fi = { 0 };
struct atomisp_device *isp = asd->isp;
unsigned short fps = 0;
@@ -206,6 +207,7 @@ int atomisp_freq_scaling(struct atomisp_device *isp,
enum atomisp_dfs_mode mode,
bool force)
{
struct pci_dev *pdev = to_pci_dev(isp->dev);
/* FIXME! Only use subdev[0] status yet */
struct atomisp_sub_device *asd = &isp->asd[0];
const struct atomisp_dfs_config *dfs;
@@ -219,7 +221,7 @@ int atomisp_freq_scaling(struct atomisp_device *isp,
return -EINVAL;
}
if ((isp->pdev->device & ATOMISP_PCI_DEVICE_SOC_MASK) ==
if ((pdev->device & ATOMISP_PCI_DEVICE_SOC_MASK) ==
ATOMISP_PCI_DEVICE_SOC_CHT && ATOMISP_USE_YUVPP(asd))
isp->dfs = &dfs_config_cht_soc;
@@ -357,39 +359,41 @@ static void clear_isp_irq(enum hrt_isp_css_irq irq)
irq_clear_all(IRQ0_ID);
}
void atomisp_msi_irq_init(struct atomisp_device *isp, struct pci_dev *dev)
void atomisp_msi_irq_init(struct atomisp_device *isp)
{
struct pci_dev *pdev = to_pci_dev(isp->dev);
u32 msg32;
u16 msg16;
pci_read_config_dword(dev, PCI_MSI_CAPID, &msg32);
pci_read_config_dword(pdev, PCI_MSI_CAPID, &msg32);
msg32 |= 1 << MSI_ENABLE_BIT;
pci_write_config_dword(dev, PCI_MSI_CAPID, msg32);
pci_write_config_dword(pdev, PCI_MSI_CAPID, msg32);
msg32 = (1 << INTR_IER) | (1 << INTR_IIR);
pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, msg32);
pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, msg32);
pci_read_config_word(dev, PCI_COMMAND, &msg16);
pci_read_config_word(pdev, PCI_COMMAND, &msg16);
msg16 |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
PCI_COMMAND_INTX_DISABLE);
pci_write_config_word(dev, PCI_COMMAND, msg16);
pci_write_config_word(pdev, PCI_COMMAND, msg16);
}
void atomisp_msi_irq_uninit(struct atomisp_device *isp, struct pci_dev *dev)
void atomisp_msi_irq_uninit(struct atomisp_device *isp)
{
struct pci_dev *pdev = to_pci_dev(isp->dev);
u32 msg32;
u16 msg16;
pci_read_config_dword(dev, PCI_MSI_CAPID, &msg32);
pci_read_config_dword(pdev, PCI_MSI_CAPID, &msg32);
msg32 &= ~(1 << MSI_ENABLE_BIT);
pci_write_config_dword(dev, PCI_MSI_CAPID, msg32);
pci_write_config_dword(pdev, PCI_MSI_CAPID, msg32);
msg32 = 0x0;
pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, msg32);
pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, msg32);
pci_read_config_word(dev, PCI_COMMAND, &msg16);
pci_read_config_word(pdev, PCI_COMMAND, &msg16);
msg16 &= ~(PCI_COMMAND_MASTER);
pci_write_config_word(dev, PCI_COMMAND, msg16);
pci_write_config_word(pdev, PCI_COMMAND, msg16);
}
static void atomisp_sof_event(struct atomisp_sub_device *asd)
@@ -480,11 +484,12 @@ static void print_csi_rx_errors(enum mipi_port_id port,
/* Clear irq reg */
static void clear_irq_reg(struct atomisp_device *isp)
{
struct pci_dev *pdev = to_pci_dev(isp->dev);
u32 msg_ret;
pci_read_config_dword(isp->pdev, PCI_INTERRUPT_CTRL, &msg_ret);
pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &msg_ret);
msg_ret |= 1 << INTR_IIR;
pci_write_config_dword(isp->pdev, PCI_INTERRUPT_CTRL, msg_ret);
pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, msg_ret);
}
static struct atomisp_sub_device *
@@ -665,11 +670,10 @@ bool atomisp_buffers_queued_pipe(struct atomisp_video_pipe *pipe)
void dump_sp_dmem(struct atomisp_device *isp, unsigned int addr,
unsigned int size)
{
u32 __iomem *io_virt_addr;
unsigned int data = 0;
unsigned int size32 = DIV_ROUND_UP(size, sizeof(u32));
dev_dbg(isp->dev, "atomisp_io_base:%p\n", atomisp_io_base);
dev_dbg(isp->dev, "atomisp mmio base: %p\n", isp->base);
dev_dbg(isp->dev, "%s, addr:0x%x, size: %d, size32: %d\n", __func__,
addr, size, size32);
if (size32 * 4 + addr > 0x4000) {
@@ -678,13 +682,12 @@ void dump_sp_dmem(struct atomisp_device *isp, unsigned int addr,
return;
}
addr += SP_DMEM_BASE;
io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF);
addr &= 0x003FFFFF;
do {
data = *io_virt_addr;
data = readl(isp->base + addr);
dev_dbg(isp->dev, "%s, \t [0x%x]:0x%x\n", __func__, addr, data);
io_virt_addr += sizeof(u32);
size32 -= 1;
} while (size32 > 0);
addr += sizeof(u32);
} while (--size32);
}
static struct videobuf_buffer *atomisp_css_frame_to_vbuf(
@@ -1289,6 +1292,7 @@ void atomisp_delayed_init_work(struct work_struct *work)
static void __atomisp_css_recover(struct atomisp_device *isp, bool isp_timeout)
{
struct pci_dev *pdev = to_pci_dev(isp->dev);
enum ia_css_pipe_id css_pipe_id;
bool stream_restart[MAX_STREAM_NUM] = {0};
bool depth_mode = false;
@@ -1372,8 +1376,8 @@ static void __atomisp_css_recover(struct atomisp_device *isp, bool isp_timeout)
clear_isp_irq(hrt_isp_css_irq_sp);
/* Set the SRSE to 3 before resetting */
pci_write_config_dword(isp->pdev, PCI_I_CONTROL, isp->saved_regs.i_control |
MRFLD_PCI_I_CONTROL_SRSE_RESET_MASK);
pci_write_config_dword(pdev, PCI_I_CONTROL,
isp->saved_regs.i_control | MRFLD_PCI_I_CONTROL_SRSE_RESET_MASK);
/* reset ISP and restore its state */
isp->isp_timeout = true;
@@ -6158,6 +6162,7 @@ out:
/*Turn off ISP dphy */
int atomisp_ospm_dphy_down(struct atomisp_device *isp)
{
struct pci_dev *pdev = to_pci_dev(isp->dev);
unsigned long flags;
u32 reg;
@@ -6179,9 +6184,9 @@ done:
* MRFLD HW design need all CSI ports are disabled before
* powering down the IUNIT.
*/
pci_read_config_dword(isp->pdev, MRFLD_PCI_CSI_CONTROL, &reg);
pci_read_config_dword(pdev, MRFLD_PCI_CSI_CONTROL, &reg);
reg |= MRFLD_ALL_CSI_PORTS_OFF_MASK;
pci_write_config_dword(isp->pdev, MRFLD_PCI_CSI_CONTROL, reg);
pci_write_config_dword(pdev, MRFLD_PCI_CSI_CONTROL, reg);
return 0;
}


@@ -68,8 +68,8 @@ bool atomisp_buffers_queued_pipe(struct atomisp_video_pipe *pipe);
/*
* Interrupt functions
*/
void atomisp_msi_irq_init(struct atomisp_device *isp, struct pci_dev *dev);
void atomisp_msi_irq_uninit(struct atomisp_device *isp, struct pci_dev *dev);
void atomisp_msi_irq_init(struct atomisp_device *isp);
void atomisp_msi_irq_uninit(struct atomisp_device *isp);
void atomisp_wdt_work(struct work_struct *work);
void atomisp_wdt(struct timer_list *t);
void atomisp_setup_flash(struct atomisp_sub_device *asd);


@@ -29,8 +29,6 @@ struct atomisp_sub_device;
struct video_device;
enum atomisp_input_stream_id;
extern void __iomem *atomisp_io_base;
struct atomisp_metadata_buf {
struct ia_css_metadata *metadata;
void *md_vptr;


@@ -33,13 +33,12 @@
#include "atomisp_ioctl.h"
#include "atomisp_acc.h"
#include <asm/intel-mid.h>
#include "ia_css_debug.h"
#include "ia_css_isp_param.h"
#include "sh_css_hrt.h"
#include "ia_css_isys.h"
#include <linux/io.h>
#include <linux/pm_runtime.h>
/* Assume max number of ACC stages */
@@ -69,92 +68,94 @@ struct bayer_ds_factor {
static void atomisp_css2_hw_store_8(hrt_address addr, uint8_t data)
{
s8 __iomem *io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF);
struct atomisp_device *isp = dev_get_drvdata(atomisp_dev);
unsigned long flags;
spin_lock_irqsave(&mmio_lock, flags);
*io_virt_addr = data;
writeb(data, isp->base + (addr & 0x003FFFFF));
spin_unlock_irqrestore(&mmio_lock, flags);
}
static void atomisp_css2_hw_store_16(hrt_address addr, uint16_t data)
{
s16 __iomem *io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF);
struct atomisp_device *isp = dev_get_drvdata(atomisp_dev);
unsigned long flags;
spin_lock_irqsave(&mmio_lock, flags);
*io_virt_addr = data;
writew(data, isp->base + (addr & 0x003FFFFF));
spin_unlock_irqrestore(&mmio_lock, flags);
}
void atomisp_css2_hw_store_32(hrt_address addr, uint32_t data)
{
s32 __iomem *io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF);
struct atomisp_device *isp = dev_get_drvdata(atomisp_dev);
unsigned long flags;
spin_lock_irqsave(&mmio_lock, flags);
*io_virt_addr = data;
writel(data, isp->base + (addr & 0x003FFFFF));
spin_unlock_irqrestore(&mmio_lock, flags);
}
static uint8_t atomisp_css2_hw_load_8(hrt_address addr)
{
s8 __iomem *io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF);
struct atomisp_device *isp = dev_get_drvdata(atomisp_dev);
unsigned long flags;
u8 ret;
spin_lock_irqsave(&mmio_lock, flags);
ret = *io_virt_addr;
ret = readb(isp->base + (addr & 0x003FFFFF));
spin_unlock_irqrestore(&mmio_lock, flags);
return ret;
}
static uint16_t atomisp_css2_hw_load_16(hrt_address addr)
{
s16 __iomem *io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF);
struct atomisp_device *isp = dev_get_drvdata(atomisp_dev);
unsigned long flags;
u16 ret;
spin_lock_irqsave(&mmio_lock, flags);
ret = *io_virt_addr;
ret = readw(isp->base + (addr & 0x003FFFFF));
spin_unlock_irqrestore(&mmio_lock, flags);
return ret;
}
static uint32_t atomisp_css2_hw_load_32(hrt_address addr)
{
s32 __iomem *io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF);
struct atomisp_device *isp = dev_get_drvdata(atomisp_dev);
unsigned long flags;
u32 ret;
spin_lock_irqsave(&mmio_lock, flags);
ret = *io_virt_addr;
ret = readl(isp->base + (addr & 0x003FFFFF));
spin_unlock_irqrestore(&mmio_lock, flags);
return ret;
}
static void atomisp_css2_hw_store(hrt_address addr,
const void *from, uint32_t n)
static void atomisp_css2_hw_store(hrt_address addr, const void *from, uint32_t n)
{
s8 __iomem *io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF);
struct atomisp_device *isp = dev_get_drvdata(atomisp_dev);
unsigned long flags;
unsigned int i;
addr &= 0x003FFFFF;
spin_lock_irqsave(&mmio_lock, flags);
for (i = 0; i < n; i++, io_virt_addr++, from++)
*io_virt_addr = *(s8 *)from;
for (i = 0; i < n; i++, from++)
writeb(*(s8 *)from, isp->base + addr + i);
spin_unlock_irqrestore(&mmio_lock, flags);
}
static void atomisp_css2_hw_load(hrt_address addr, void *to, uint32_t n)
{
s8 __iomem *io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF);
struct atomisp_device *isp = dev_get_drvdata(atomisp_dev);
unsigned long flags;
unsigned int i;
addr &= 0x003FFFFF;
spin_lock_irqsave(&mmio_lock, flags);
for (i = 0; i < n; i++, to++, io_virt_addr++)
*(s8 *)to = *io_virt_addr;
for (i = 0; i < n; i++, to++)
*(s8 *)to = readb(isp->base + addr + i);
spin_unlock_irqrestore(&mmio_lock, flags);
}
@@ -181,10 +182,10 @@ void atomisp_load_uint32(hrt_address addr, uint32_t *data)
*data = atomisp_css2_hw_load_32(addr);
}
static int hmm_get_mmu_base_addr(unsigned int *mmu_base_addr)
static int hmm_get_mmu_base_addr(struct device *dev, unsigned int *mmu_base_addr)
{
if (!sh_mmu_mrfld.get_pd_base) {
dev_err(atomisp_dev, "get mmu base address failed.\n");
dev_err(dev, "get mmu base address failed.\n");
return -EINVAL;
}
@@ -839,7 +840,7 @@ int atomisp_css_init(struct atomisp_device *isp)
int ret;
int err;
ret = hmm_get_mmu_base_addr(&mmu_base_addr);
ret = hmm_get_mmu_base_addr(isp->dev, &mmu_base_addr);
if (ret)
return ret;
@@ -941,7 +942,7 @@ int atomisp_css_resume(struct atomisp_device *isp)
unsigned int mmu_base_addr;
int ret;
ret = hmm_get_mmu_base_addr(&mmu_base_addr);
ret = hmm_get_mmu_base_addr(isp->dev, &mmu_base_addr);
if (ret) {
dev_err(isp->dev, "get base address error.\n");
return -EINVAL;
@@ -1966,8 +1967,7 @@ void atomisp_css_input_set_mode(struct atomisp_sub_device *asd,
true,
0x13000,
&size_mem_words) != 0) {
if (intel_mid_identify_cpu() ==
INTEL_MID_CPU_CHIP_TANGIER)
if (IS_MRFD)
size_mem_words = CSS_MIPI_FRAME_BUFFER_SIZE_2;
else
size_mem_words = CSS_MIPI_FRAME_BUFFER_SIZE_1;
@@ -2414,13 +2414,13 @@ static void __configure_preview_pp_input(struct atomisp_sub_device *asd,
struct ia_css_resolution *effective_res =
&stream_config->input_config.effective_res;
const struct bayer_ds_factor bds_fct[] = {{2, 1}, {3, 2}, {5, 4} };
static const struct bayer_ds_factor bds_fct[] = {{2, 1}, {3, 2}, {5, 4} };
/*
* BZ201033: YUV decimation factor of 4 causes couple of rightmost
* columns to be shaded. Remove this factor to work around the CSS bug.
* const unsigned int yuv_dec_fct[] = {4, 2};
*/
const unsigned int yuv_dec_fct[] = { 2 };
static const unsigned int yuv_dec_fct[] = { 2 };
unsigned int i;
if (width == 0 && height == 0)
@@ -2540,7 +2540,7 @@ static void __configure_video_pp_input(struct atomisp_sub_device *asd,
struct ia_css_resolution *effective_res =
&stream_config->input_config.effective_res;
const struct bayer_ds_factor bds_factors[] = {
static const struct bayer_ds_factor bds_factors[] = {
{8, 1}, {6, 1}, {4, 1}, {3, 1}, {2, 1}, {3, 2}
};
unsigned int i;
@@ -4337,7 +4337,7 @@ static const char * const fw_acc_type_name[] = {
[IA_CSS_ACC_STANDALONE] = "Stand-alone acceleration",
};
int atomisp_css_dump_blob_infor(void)
int atomisp_css_dump_blob_infor(struct atomisp_device *isp)
{
struct ia_css_blob_descr *bd = sh_css_blob_info;
unsigned int i, nm = sh_css_num_binaries;
@@ -4354,8 +4354,7 @@ int atomisp_css_dump_blob_infor(void)
for (i = 0; i < sh_css_num_binaries - NUM_OF_SPS; i++) {
switch (bd[i].header.type) {
case ia_css_isp_firmware:
dev_dbg(atomisp_dev,
"Num%2d type %s (%s), binary id is %2d, name is %s\n",
dev_dbg(isp->dev, "Num%2d type %s (%s), binary id is %2d, name is %s\n",
i + NUM_OF_SPS,
fw_type_name[bd[i].header.type],
fw_acc_type_name[bd[i].header.info.isp.type],
@@ -4363,8 +4362,7 @@ int atomisp_css_dump_blob_infor(void)
bd[i].name);
break;
default:
dev_dbg(atomisp_dev,
"Num%2d type %s, name is %s\n",
dev_dbg(isp->dev, "Num%2d type %s, name is %s\n",
i + NUM_OF_SPS, fw_type_name[bd[i].header.type],
bd[i].name);
}


@@ -153,7 +153,7 @@ int atomisp_css_debug_dump_isp_binary(void);
int atomisp_css_dump_sp_raw_copy_linecount(bool reduced);
int atomisp_css_dump_blob_infor(void);
int atomisp_css_dump_blob_infor(struct atomisp_device *isp);
void atomisp_css_set_isp_config_id(struct atomisp_sub_device *asd,
uint32_t isp_config_id);


@@ -62,9 +62,9 @@ static inline int iunit_dump_dbgopt(struct atomisp_device *isp,
if (opt & OPTION_VALID) {
if (opt & OPTION_BIN_LIST) {
ret = atomisp_css_dump_blob_infor();
ret = atomisp_css_dump_blob_infor(isp);
if (ret) {
dev_err(atomisp_dev, "%s dump blob infor err[ret:%d]\n",
dev_err(isp->dev, "%s dump blob infor err[ret:%d]\n",
__func__, ret);
goto opt_err;
}
@@ -76,7 +76,7 @@ static inline int iunit_dump_dbgopt(struct atomisp_device *isp,
atomisp_css_debug_dump_isp_binary();
} else {
ret = -EPERM;
dev_err(atomisp_dev, "%s dump running bin err[ret:%d]\n",
dev_err(isp->dev, "%s dump running bin err[ret:%d]\n",
__func__, ret);
goto opt_err;
}
@@ -86,8 +86,7 @@ static inline int iunit_dump_dbgopt(struct atomisp_device *isp,
hmm_show_mem_stat(__func__, __LINE__);
} else {
ret = -EINVAL;
dev_err(atomisp_dev, "%s dump nothing[ret=%d]\n", __func__,
ret);
dev_err(isp->dev, "%s dump nothing[ret=%d]\n", __func__, ret);
}
opt_err:
@@ -185,8 +184,9 @@ static void iunit_drvfs_remove_files(struct device_driver *drv)
driver_remove_file(drv, &iunit_drvfs_attrs[i]);
}
int atomisp_drvfs_init(struct device_driver *drv, struct atomisp_device *isp)
int atomisp_drvfs_init(struct atomisp_device *isp)
{
struct device_driver *drv = isp->dev->driver;
int ret;
iunit_debug.isp = isp;
@@ -194,7 +194,7 @@ int atomisp_drvfs_init(struct device_driver *drv, struct atomisp_device *isp)
ret = iunit_drvfs_create_files(iunit_debug.drv);
if (ret) {
dev_err(atomisp_dev, "drvfs_create_files error: %d\n", ret);
dev_err(isp->dev, "drvfs_create_files error: %d\n", ret);
iunit_drvfs_remove_files(iunit_debug.drv);
}


@@ -19,7 +19,7 @@
#ifndef __ATOMISP_DRVFS_H__
#define __ATOMISP_DRVFS_H__
int atomisp_drvfs_init(struct device_driver *drv, struct atomisp_device *isp);
int atomisp_drvfs_init(struct atomisp_device *isp);
void atomisp_drvfs_exit(void);
#endif /* __ATOMISP_DRVFS_H__ */


@@ -26,6 +26,9 @@ enum clock_rate {
#define CLK_RATE_19_2MHZ 19200000
#define CLK_RATE_25_0MHZ 25000000
/* Valid clock number range from 0 to 5 */
#define MAX_CLK_COUNT 5
/* X-Powers AXP288 register set */
#define ALDO1_SEL_REG 0x28
#define ALDO1_CTRL3_REG 0x13
@@ -61,9 +64,7 @@ enum clock_rate {
struct gmin_subdev {
struct v4l2_subdev *subdev;
int clock_num;
enum clock_rate clock_src;
bool clock_on;
struct clk *pmc_clk;
struct gpio_desc *gpio0;
struct gpio_desc *gpio1;
@@ -75,11 +76,16 @@ struct gmin_subdev {
unsigned int csi_lanes;
enum atomisp_input_format csi_fmt;
enum atomisp_bayer_order csi_bayer;
bool clock_on;
bool v1p8_on;
bool v2p8_on;
bool v1p2_on;
bool v2p8_vcm_on;
int v1p8_gpio;
int v2p8_gpio;
u8 pwm_i2c_addr;
/* For PMIC AXP */
@@ -90,9 +96,9 @@ struct gmin_subdev {
static struct gmin_subdev gmin_subdevs[MAX_SUBDEVS];
/* ACPI HIDs for the PMICs that could be used by this driver */
#define PMIC_ACPI_AXP "INT33F4:00" /* XPower AXP288 PMIC */
#define PMIC_ACPI_TI "INT33F5:00" /* Dollar Cove TI PMIC */
#define PMIC_ACPI_CRYSTALCOVE "INT33FD:00" /* Crystal Cove PMIC */
#define PMIC_ACPI_AXP "INT33F4" /* XPower AXP288 PMIC */
#define PMIC_ACPI_TI "INT33F5" /* Dollar Cove TI PMIC */
#define PMIC_ACPI_CRYSTALCOVE "INT33FD" /* Crystal Cove PMIC */
#define PMIC_PLATFORM_TI "intel_soc_pmic_chtdc_ti"
@@ -105,7 +111,7 @@ static enum {
} pmic_id;
static const char *pmic_name[] = {
[PMIC_UNSET] = "unset",
[PMIC_UNSET] = "ACPI device PM",
[PMIC_REGULATOR] = "regulator driver",
[PMIC_AXP] = "XPower AXP288 PMIC",
[PMIC_TI] = "Dollar Cove TI PMIC",
@@ -119,24 +125,6 @@ static const struct atomisp_platform_data pdata = {
.subdevs = pdata_subdevs,
};
/*
* Something of a hack. The ECS E7 board drives camera 2.8v from an
* external regulator instead of the PMIC. There's a gmin_CamV2P8
* config variable that specifies the GPIO to handle this particular
* case, but this needs a broader architecture for handling camera
* power.
*/
enum { V2P8_GPIO_UNSET = -2, V2P8_GPIO_NONE = -1 };
static int v2p8_gpio = V2P8_GPIO_UNSET;
/*
* Something of a hack. The CHT RVP board drives camera 1.8v from an
* external regulator instead of the PMIC just like ECS E7 board, see the
* comments above.
*/
enum { V1P8_GPIO_UNSET = -2, V1P8_GPIO_NONE = -1 };
static int v1p8_gpio = V1P8_GPIO_UNSET;
static LIST_HEAD(vcm_devices);
static DEFINE_MUTEX(vcm_lock);
@@ -199,6 +187,8 @@ int atomisp_register_i2c_module(struct v4l2_subdev *subdev,
* gmin_subdev struct is already initialized for us.
*/
gs = find_gmin_subdev(subdev);
if (!gs)
return -ENODEV;
pdata.subdevs[i].type = type;
pdata.subdevs[i].port = gs->csi_port;
@@ -294,6 +284,7 @@ static struct gmin_cfg_var mrd7_vars[] = {
{"INT33F8:00_CsiFmt", "13"},
{"INT33F8:00_CsiBayer", "0"},
{"INT33F8:00_CamClk", "0"},
{"INT33F9:00_CamType", "1"},
{"INT33F9:00_CsiPort", "0"},
{"INT33F9:00_CsiLanes", "1"},
@@ -309,6 +300,7 @@ static struct gmin_cfg_var ecs7_vars[] = {
{"INT33BE:00_CsiFmt", "13"},
{"INT33BE:00_CsiBayer", "2"},
{"INT33BE:00_CamClk", "0"},
{"INT33F0:00_CsiPort", "0"},
{"INT33F0:00_CsiLanes", "1"},
{"INT33F0:00_CsiFmt", "13"},
@@ -322,6 +314,7 @@ static struct gmin_cfg_var i8880_vars[] = {
{"XXOV2680:00_CsiPort", "1"},
{"XXOV2680:00_CsiLanes", "1"},
{"XXOV2680:00_CamClk", "0"},
{"XXGC0310:00_CsiPort", "0"},
{"XXGC0310:00_CsiLanes", "1"},
{"XXGC0310:00_CamClk", "1"},
@@ -381,34 +374,27 @@ static const guid_t atomisp_dsm_guid = GUID_INIT(0xdc2f6c4f, 0x045b, 0x4f1d,
#define GMIN_PMC_CLK_NAME 14 /* "pmc_plt_clk_[0..5]" */
static char gmin_pmc_clk_name[GMIN_PMC_CLK_NAME];
static int gmin_i2c_match_one(struct device *dev, const void *data)
{
const char *name = data;
struct i2c_client *client;
if (dev->type != &i2c_client_type)
return 0;
client = to_i2c_client(dev);
return (!strcmp(name, client->name));
}
static struct i2c_client *gmin_i2c_dev_exists(struct device *dev, char *name,
struct i2c_client **client)
{
struct acpi_device *adev;
struct device *d;
while ((d = bus_find_device(&i2c_bus_type, NULL, name,
gmin_i2c_match_one))) {
*client = to_i2c_client(d);
dev_dbg(dev, "found '%s' at address 0x%02x, adapter %d\n",
(*client)->name, (*client)->addr,
(*client)->adapter->nr);
return *client;
}
adev = acpi_dev_get_first_match_dev(name, NULL, -1);
if (!adev)
return NULL;
d = bus_find_device_by_acpi_dev(&i2c_bus_type, adev);
acpi_dev_put(adev);
if (!d)
return NULL;
*client = i2c_verify_client(d);
put_device(d);
dev_dbg(dev, "found '%s' at address 0x%02x, adapter %d\n",
(*client)->name, (*client)->addr, (*client)->adapter->nr);
return *client;
}
static int gmin_i2c_write(struct device *dev, u16 i2c_addr, u8 reg,
@@ -427,45 +413,80 @@ static int gmin_i2c_write(struct device *dev, u16 i2c_addr, u8 reg,
"I2C write, addr: 0x%02x, reg: 0x%02x, value: 0x%02x, mask: 0x%02x\n",
i2c_addr, reg, value, mask);
ret = intel_soc_pmic_exec_mipi_pmic_seq_element(i2c_addr, reg,
value, mask);
if (ret == -EOPNOTSUPP) {
ret = intel_soc_pmic_exec_mipi_pmic_seq_element(i2c_addr, reg, value, mask);
if (ret == -EOPNOTSUPP)
dev_err(dev,
"ACPI didn't mapped the OpRegion needed to access I2C address 0x%02x.\n"
"Need to compile the Kernel using CONFIG_*_PMIC_OPREGION settings\n",
"Need to compile the kernel using CONFIG_*_PMIC_OPREGION settings\n",
i2c_addr);
return ret;
}
return ret;
}
static struct gmin_subdev *gmin_subdev_add(struct v4l2_subdev *subdev)
static int atomisp_get_acpi_power(struct device *dev)
{
struct i2c_client *power = NULL, *client = v4l2_get_subdevdata(subdev);
struct acpi_device *adev;
acpi_handle handle;
struct device *dev;
int i, ret;
char name[5];
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_buffer b_name = { sizeof(name), name };
union acpi_object *package, *element;
acpi_handle handle = ACPI_HANDLE(dev);
acpi_handle rhandle;
acpi_status status;
int clock_num = -1;
int i;
if (!client)
return NULL;
status = acpi_evaluate_object(handle, "_PR0", NULL, &buffer);
if (!ACPI_SUCCESS(status))
return -1;
dev = &client->dev;
package = buffer.pointer;
handle = ACPI_HANDLE(dev);
if (!buffer.length || !package
|| package->type != ACPI_TYPE_PACKAGE
|| !package->package.count)
goto fail;
// FIXME: may need to release resources allocated by acpi_bus_get_device()
if (!handle || acpi_bus_get_device(handle, &adev)) {
dev_err(dev, "Error could not get ACPI device\n");
return NULL;
for (i = 0; i < package->package.count; i++) {
element = &package->package.elements[i];
if (element->type != ACPI_TYPE_LOCAL_REFERENCE)
continue;
rhandle = element->reference.handle;
if (!rhandle)
goto fail;
acpi_get_name(rhandle, ACPI_SINGLE_NAME, &b_name);
dev_dbg(dev, "Found PM resource '%s'\n", name);
if (strlen(name) == 4 && !strncmp(name, "CLK", 3)) {
if (name[3] >= '0' && name[3] <= '4')
clock_num = name[3] - '0';
#if 0
/*
* We could abort here, but let's parse all resources,
* as this is helpful for debugging purposes
*/
if (clock_num >= 0)
break;
#endif
}
}
dev_info(&client->dev, "%s: ACPI detected it on bus ID=%s, HID=%s\n",
__func__, acpi_device_bid(adev), acpi_device_hid(adev));
fail:
ACPI_FREE(buffer.pointer);
return clock_num;
}
static u8 gmin_get_pmic_id_and_addr(struct device *dev)
{
struct i2c_client *power;
static u8 pmic_i2c_addr;
if (pmic_id)
return pmic_i2c_addr;
if (!pmic_id) {
if (gmin_i2c_dev_exists(dev, PMIC_ACPI_TI, &power))
pmic_id = PMIC_TI;
else if (gmin_i2c_dev_exists(dev, PMIC_ACPI_AXP, &power))
@@ -474,48 +495,141 @@ static struct gmin_subdev *gmin_subdev_add(struct v4l2_subdev *subdev)
pmic_id = PMIC_CRYSTALCOVE;
else
pmic_id = PMIC_REGULATOR;
pmic_i2c_addr = power ? power->addr : 0;
return pmic_i2c_addr;
}
for (i = 0; i < MAX_SUBDEVS && gmin_subdevs[i].subdev; i++)
;
if (i >= MAX_SUBDEVS)
return NULL;
static int gmin_detect_pmic(struct v4l2_subdev *subdev)
{
struct i2c_client *client = v4l2_get_subdevdata(subdev);
struct device *dev = &client->dev;
u8 pmic_i2c_addr;
if (power) {
gmin_subdevs[i].pwm_i2c_addr = power->addr;
dev_info(dev,
"gmin: power management provided via %s (i2c addr 0x%02x)\n",
pmic_name[pmic_id], power->addr);
} else {
dev_info(dev, "gmin: power management provided via %s\n",
pmic_name[pmic_id]);
pmic_i2c_addr = gmin_get_pmic_id_and_addr(dev);
dev_info(dev, "gmin: power management provided via %s (i2c addr 0x%02x)\n",
pmic_name[pmic_id], pmic_i2c_addr);
return pmic_i2c_addr;
}
gmin_subdevs[i].subdev = subdev;
gmin_subdevs[i].clock_num = gmin_get_var_int(dev, false, "CamClk", 0);
static int gmin_subdev_add(struct gmin_subdev *gs)
{
struct i2c_client *client = v4l2_get_subdevdata(gs->subdev);
struct device *dev = &client->dev;
struct acpi_device *adev = ACPI_COMPANION(dev);
int ret, clock_num = -1;
dev_info(dev, "%s: ACPI path is %pfw\n", __func__, dev_fwnode(dev));
/*WA:CHT requires XTAL clock as PLL is not stable.*/
gmin_subdevs[i].clock_src = gmin_get_var_int(dev, false, "ClkSrc",
gs->clock_src = gmin_get_var_int(dev, false, "ClkSrc",
VLV2_CLK_PLL_19P2MHZ);
gmin_subdevs[i].csi_port = gmin_get_var_int(dev, false, "CsiPort", 0);
gmin_subdevs[i].csi_lanes = gmin_get_var_int(dev, false, "CsiLanes", 1);
/* get PMC clock with clock framework */
snprintf(gmin_pmc_clk_name,
sizeof(gmin_pmc_clk_name),
"%s_%d", "pmc_plt_clk", gmin_subdevs[i].clock_num);
gs->csi_port = gmin_get_var_int(dev, false, "CsiPort", 0);
gs->csi_lanes = gmin_get_var_int(dev, false, "CsiLanes", 1);
gmin_subdevs[i].pmc_clk = devm_clk_get(dev, gmin_pmc_clk_name);
if (IS_ERR(gmin_subdevs[i].pmc_clk)) {
ret = PTR_ERR(gmin_subdevs[i].pmc_clk);
gs->gpio0 = gpiod_get_index(dev, NULL, 0, GPIOD_OUT_LOW);
if (IS_ERR(gs->gpio0))
gs->gpio0 = NULL;
else
dev_info(dev, "will handle gpio0 via ACPI\n");
dev_err(dev,
"Failed to get clk from %s : %d\n",
gmin_pmc_clk_name,
ret);
gs->gpio1 = gpiod_get_index(dev, NULL, 1, GPIOD_OUT_LOW);
if (IS_ERR(gs->gpio1))
gs->gpio1 = NULL;
else
dev_info(dev, "will handle gpio1 via ACPI\n");
return NULL;
/*
* Those are used only when there is an external regulator apart
* from the PMIC that would be providing power supply, like on the
* two cases below:
*
* The ECS E7 board drives camera 2.8v from an external regulator
* instead of the PMIC. There's a gmin_CamV2P8 config variable
* that specifies the GPIO to handle this particular case,
* but this needs a broader architecture for handling camera power.
*
* The CHT RVP board drives camera 1.8v from an* external regulator
* instead of the PMIC just like ECS E7 board.
*/
gs->v1p8_gpio = gmin_get_var_int(dev, true, "V1P8GPIO", -1);
gs->v2p8_gpio = gmin_get_var_int(dev, true, "V2P8GPIO", -1);
/*
* FIXME:
*
* The ACPI handling code checks for the _PR? tables in order to
* know what is required to switch the device from power state
* D0 (_PR0) up to D3COLD (_PR3).
*
* The adev->flags.power_manageable is set to true if the device
* has a _PR0 table, which can be checked by calling
* acpi_device_power_manageable(adev).
*
* However, this only says that the device can be set to power off
* mode.
*
* At least on the DSDT tables we've seen so far, there's no _PR3,
* nor _PS3 (which would have a somewhat similar effect).
* So, using ACPI for power management won't work, except if adding
* an ACPI override logic somewhere.
*
* So, at least for the existing devices we know, the check below
* will always be false.
*/
if (acpi_device_can_wakeup(adev) &&
acpi_device_can_poweroff(adev)) {
dev_info(dev,
"gmin: power management provided via device PM\n");
return 0;
}
/*
* The code below is here due to backward compatibility with devices
* whose ACPI BIOS may not contain everything that would be needed
* in order to set clocks and do power management.
*/
/*
* According with :
* https://github.com/projectceladon/hardware-intel-kernelflinger/blob/master/doc/fastboot.md
*
* The "CamClk" EFI var is set via fastboot on some Android devices,
* and seems to contain the number of the clock used to feed the
* sensor.
*
* On systems with a proper ACPI table, this is given via the _PR0
* power resource table. The logic below should first check if there
* is a power resource already, falling back to the EFI vars detection
* otherwise.
*/
/* Try first to use ACPI to get the clock resource */
if (acpi_device_power_manageable(adev))
clock_num = atomisp_get_acpi_power(dev);
/* Fall-back use EFI and/or DMI match */
if (clock_num < 0)
clock_num = gmin_get_var_int(dev, false, "CamClk", 0);
if (clock_num < 0 || clock_num > MAX_CLK_COUNT) {
dev_err(dev, "Invalid clock number\n");
return -EINVAL;
}
snprintf(gmin_pmc_clk_name, sizeof(gmin_pmc_clk_name),
"%s_%d", "pmc_plt_clk", clock_num);
gs->pmc_clk = devm_clk_get(dev, gmin_pmc_clk_name);
if (IS_ERR(gs->pmc_clk)) {
ret = PTR_ERR(gs->pmc_clk);
dev_err(dev, "Failed to get clk from %s: %d\n", gmin_pmc_clk_name, ret);
return ret;
}
dev_info(dev, "Will use CLK%d (%s)\n", clock_num, gmin_pmc_clk_name);
/*
* The firmware might enable the clock at
* boot (this information may or may not
@@ -526,25 +640,17 @@ static struct gmin_subdev *gmin_subdev_add(struct v4l2_subdev *subdev)
* to disable a clock that has not been enabled,
* we need to enable the clock first.
*/
- ret = clk_prepare_enable(gmin_subdevs[i].pmc_clk);
+ ret = clk_prepare_enable(gs->pmc_clk);
if (!ret)
- clk_disable_unprepare(gmin_subdevs[i].pmc_clk);
- gmin_subdevs[i].gpio0 = gpiod_get_index(dev, NULL, 0, GPIOD_OUT_LOW);
- if (IS_ERR(gmin_subdevs[i].gpio0))
- gmin_subdevs[i].gpio0 = NULL;
- gmin_subdevs[i].gpio1 = gpiod_get_index(dev, NULL, 1, GPIOD_OUT_LOW);
- if (IS_ERR(gmin_subdevs[i].gpio1))
- gmin_subdevs[i].gpio1 = NULL;
+ clk_disable_unprepare(gs->pmc_clk);
switch (pmic_id) {
case PMIC_REGULATOR:
- gmin_subdevs[i].v1p8_reg = regulator_get(dev, "V1P8SX");
- gmin_subdevs[i].v2p8_reg = regulator_get(dev, "V2P8SX");
+ gs->v1p8_reg = regulator_get(dev, "V1P8SX");
+ gs->v2p8_reg = regulator_get(dev, "V2P8SX");
- gmin_subdevs[i].v1p2_reg = regulator_get(dev, "V1P2A");
- gmin_subdevs[i].v2p8_vcm_reg = regulator_get(dev, "VPROG4B");
+ gs->v1p2_reg = regulator_get(dev, "V1P2A");
+ gs->v2p8_vcm_reg = regulator_get(dev, "VPROG4B");
/* Note: ideally we would initialize v[12]p8_on to the
* output of regulator_is_enabled(), but sadly that
@@ -556,32 +662,31 @@ static struct gmin_subdev *gmin_subdev_add(struct v4l2_subdev *subdev)
break;
case PMIC_AXP:
- gmin_subdevs[i].eldo1_1p8v = gmin_get_var_int(dev, false,
+ gs->eldo1_1p8v = gmin_get_var_int(dev, false,
"eldo1_1p8v",
ELDO1_1P8V);
- gmin_subdevs[i].eldo1_sel_reg = gmin_get_var_int(dev, false,
+ gs->eldo1_sel_reg = gmin_get_var_int(dev, false,
"eldo1_sel_reg",
ELDO1_SEL_REG);
- gmin_subdevs[i].eldo1_ctrl_shift = gmin_get_var_int(dev, false,
+ gs->eldo1_ctrl_shift = gmin_get_var_int(dev, false,
"eldo1_ctrl_shift",
ELDO1_CTRL_SHIFT);
- gmin_subdevs[i].eldo2_1p8v = gmin_get_var_int(dev, false,
+ gs->eldo2_1p8v = gmin_get_var_int(dev, false,
"eldo2_1p8v",
ELDO2_1P8V);
- gmin_subdevs[i].eldo2_sel_reg = gmin_get_var_int(dev, false,
+ gs->eldo2_sel_reg = gmin_get_var_int(dev, false,
"eldo2_sel_reg",
ELDO2_SEL_REG);
- gmin_subdevs[i].eldo2_ctrl_shift = gmin_get_var_int(dev, false,
+ gs->eldo2_ctrl_shift = gmin_get_var_int(dev, false,
"eldo2_ctrl_shift",
ELDO2_CTRL_SHIFT);
- gmin_subdevs[i].pwm_i2c_addr = power->addr;
break;
default:
break;
}
- return &gmin_subdevs[i];
+ return 0;
}
static struct gmin_subdev *find_gmin_subdev(struct v4l2_subdev *subdev)
@@ -591,7 +696,17 @@ static struct gmin_subdev *find_gmin_subdev(struct v4l2_subdev *subdev)
for (i = 0; i < MAX_SUBDEVS; i++)
if (gmin_subdevs[i].subdev == subdev)
return &gmin_subdevs[i];
- return gmin_subdev_add(subdev);
+ return NULL;
}
+ static struct gmin_subdev *find_free_gmin_subdev_slot(void)
+ {
+ unsigned int i;
+ for (i = 0; i < MAX_SUBDEVS; i++)
+ if (gmin_subdevs[i].subdev == NULL)
+ return &gmin_subdevs[i];
+ return NULL;
+ }
static int axp_regulator_set(struct device *dev, struct gmin_subdev *gs,
@@ -700,32 +815,24 @@ static int gmin_v1p8_ctrl(struct v4l2_subdev *subdev, int on)
{
struct gmin_subdev *gs = find_gmin_subdev(subdev);
int ret;
- struct device *dev;
- struct i2c_client *client = v4l2_get_subdevdata(subdev);
int value;
- dev = &client->dev;
- if (v1p8_gpio == V1P8_GPIO_UNSET) {
- v1p8_gpio = gmin_get_var_int(dev, true,
- "V1P8GPIO", V1P8_GPIO_NONE);
- if (v1p8_gpio != V1P8_GPIO_NONE) {
+ if (gs->v1p8_gpio >= 0) {
pr_info("atomisp_gmin_platform: 1.8v power on GPIO %d\n",
- v1p8_gpio);
- ret = gpio_request(v1p8_gpio, "camera_v1p8_en");
+ gs->v1p8_gpio);
+ ret = gpio_request(gs->v1p8_gpio, "camera_v1p8_en");
if (!ret)
- ret = gpio_direction_output(v1p8_gpio, 0);
+ ret = gpio_direction_output(gs->v1p8_gpio, 0);
if (ret)
pr_err("V1P8 GPIO initialization failed\n");
}
}
if (!gs || gs->v1p8_on == on)
return 0;
gs->v1p8_on = on;
- if (v1p8_gpio >= 0)
- gpio_set_value(v1p8_gpio, on);
+ if (gs->v1p8_gpio >= 0)
+ gpio_set_value(gs->v1p8_gpio, on);
if (gs->v1p8_reg) {
regulator_set_voltage(gs->v1p8_reg, 1800000, 1800000);
@@ -762,32 +869,24 @@ static int gmin_v2p8_ctrl(struct v4l2_subdev *subdev, int on)
{
struct gmin_subdev *gs = find_gmin_subdev(subdev);
int ret;
- struct device *dev;
- struct i2c_client *client = v4l2_get_subdevdata(subdev);
int value;
- dev = &client->dev;
- if (v2p8_gpio == V2P8_GPIO_UNSET) {
- v2p8_gpio = gmin_get_var_int(dev, true,
- "V2P8GPIO", V2P8_GPIO_NONE);
- if (v2p8_gpio != V2P8_GPIO_NONE) {
+ if (gs->v2p8_gpio >= 0) {
pr_info("atomisp_gmin_platform: 2.8v power on GPIO %d\n",
- v2p8_gpio);
- ret = gpio_request(v2p8_gpio, "camera_v2p8");
+ gs->v2p8_gpio);
+ ret = gpio_request(gs->v2p8_gpio, "camera_v2p8");
if (!ret)
- ret = gpio_direction_output(v2p8_gpio, 0);
+ ret = gpio_direction_output(gs->v2p8_gpio, 0);
if (ret)
pr_err("V2P8 GPIO initialization failed\n");
}
}
if (!gs || gs->v2p8_on == on)
return 0;
gs->v2p8_on = on;
- if (v2p8_gpio >= 0)
- gpio_set_value(v2p8_gpio, on);
+ if (gs->v2p8_gpio >= 0)
+ gpio_set_value(gs->v2p8_gpio, on);
if (gs->v2p8_reg) {
regulator_set_voltage(gs->v2p8_reg, 2900000, 2900000);
@@ -819,6 +918,37 @@ static int gmin_v2p8_ctrl(struct v4l2_subdev *subdev, int on)
return -EINVAL;
}
static int gmin_acpi_pm_ctrl(struct v4l2_subdev *subdev, int on)
{
int ret = 0;
struct gmin_subdev *gs = find_gmin_subdev(subdev);
struct i2c_client *client = v4l2_get_subdevdata(subdev);
struct acpi_device *adev = ACPI_COMPANION(&client->dev);
/* Use the ACPI power management to control it */
on = !!on;
if (gs->clock_on == on)
return 0;
dev_dbg(subdev->dev, "Setting power state to %s\n",
on ? "on" : "off");
if (on)
ret = acpi_device_set_power(adev,
ACPI_STATE_D0);
else
ret = acpi_device_set_power(adev,
ACPI_STATE_D3_COLD);
if (!ret)
gs->clock_on = on;
else
dev_err(subdev->dev, "Couldn't set power state to %s\n",
on ? "on" : "off");
return ret;
}
static int gmin_flisclk_ctrl(struct v4l2_subdev *subdev, int on)
{
int ret = 0;
@@ -884,7 +1014,7 @@ static struct camera_vcm_control *gmin_get_vcm_ctrl(struct v4l2_subdev *subdev,
return NULL;
}
- static struct camera_sensor_platform_data gmin_plat = {
+ static struct camera_sensor_platform_data pmic_gmin_plat = {
.gpio0_ctrl = gmin_gpio0_ctrl,
.gpio1_ctrl = gmin_gpio1_ctrl,
.v1p8_ctrl = gmin_v1p8_ctrl,
@@ -895,17 +1025,36 @@ static struct camera_sensor_platform_data gmin_plat = {
.get_vcm_ctrl = gmin_get_vcm_ctrl,
};
static struct camera_sensor_platform_data acpi_gmin_plat = {
.gpio0_ctrl = gmin_gpio0_ctrl,
.gpio1_ctrl = gmin_gpio1_ctrl,
.v1p8_ctrl = gmin_acpi_pm_ctrl,
.v2p8_ctrl = gmin_acpi_pm_ctrl,
.v1p2_ctrl = gmin_acpi_pm_ctrl,
.flisclk_ctrl = gmin_acpi_pm_ctrl,
.csi_cfg = gmin_csi_cfg,
.get_vcm_ctrl = gmin_get_vcm_ctrl,
};
struct camera_sensor_platform_data *gmin_camera_platform_data(
struct v4l2_subdev *subdev,
enum atomisp_input_format csi_format,
enum atomisp_bayer_order csi_bayer)
{
- struct gmin_subdev *gs = find_gmin_subdev(subdev);
+ u8 pmic_i2c_addr = gmin_detect_pmic(subdev);
+ struct gmin_subdev *gs;
+ gs = find_free_gmin_subdev_slot();
+ gs->subdev = subdev;
gs->csi_fmt = csi_format;
gs->csi_bayer = csi_bayer;
+ gs->pwm_i2c_addr = pmic_i2c_addr;
- return &gmin_plat;
+ gmin_subdev_add(gs);
+ if (gs->pmc_clk)
+ return &pmic_gmin_plat;
+ else
+ return &acpi_gmin_plat;
}
EXPORT_SYMBOL_GPL(gmin_camera_platform_data);
@@ -957,12 +1106,28 @@ static int gmin_get_config_dsm_var(struct device *dev,
union acpi_object *obj, *cur = NULL;
int i;
/*
* The data reported by "CamClk" seems to be either 0 or 1 at the
* _DSM table.
*
* At the ACPI tables we looked so far, this is not related to the
* actual clock source for the sensor, which is given by the
* _PR0 ACPI table. So, ignore it, as otherwise this will be
* set to a wrong value.
*/
if (!strcmp(var, "CamClk"))
return -EINVAL;
obj = acpi_evaluate_dsm(handle, &atomisp_dsm_guid, 0, 0, NULL);
if (!obj) {
dev_info_once(dev, "Didn't find ACPI _DSM table.\n");
return -EINVAL;
}
/* Return on unexpected object type */
if (obj->type != ACPI_TYPE_PACKAGE)
return -EINVAL;
#if 0 /* Just for debugging purposes */
for (i = 0; i < obj->package.count; i++) {
union acpi_object *cur = &obj->package.elements[i];
@@ -1155,10 +1320,10 @@ EXPORT_SYMBOL_GPL(camera_sensor_csi);
* trying. The driver itself does direct calls to the PUNIT to manage
* ISP power.
*/
- static void isp_pm_cap_fixup(struct pci_dev *dev)
+ static void isp_pm_cap_fixup(struct pci_dev *pdev)
{
- dev_info(&dev->dev, "Disabling PCI power management on camera ISP\n");
- dev->pm_cap = 0;
+ dev_info(&pdev->dev, "Disabling PCI power management on camera ISP\n");
+ pdev->pm_cap = 0;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0f38, isp_pm_cap_fixup);

View File

@@ -216,12 +216,12 @@ struct atomisp_sw_contex {
* ci device struct
*/
struct atomisp_device {
- struct pci_dev *pdev;
struct device *dev;
struct v4l2_device v4l2_dev;
struct media_device media_dev;
struct atomisp_platform_data *pdata;
void *mmu_l1_base;
+ void __iomem *base;
const struct firmware *firmware;
struct pm_qos_request pm_qos;

View File

@@ -549,8 +549,7 @@ static int atomisp_querycap(struct file *file, void *fh,
strscpy(cap->driver, DRIVER, sizeof(cap->driver));
strscpy(cap->card, CARD, sizeof(cap->card));
- snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s",
- pci_name(isp->pdev));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s", dev_name(isp->dev));
return 0;
}
@@ -1635,6 +1634,7 @@ static int atomisp_streamon(struct file *file, void *fh,
struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
struct atomisp_sub_device *asd = pipe->asd;
struct atomisp_device *isp = video_get_drvdata(vdev);
struct pci_dev *pdev = to_pci_dev(isp->dev);
enum ia_css_pipe_id css_pipe_id;
unsigned int sensor_start_stream;
unsigned int wdt_duration = ATOMISP_ISP_TIMEOUT_DURATION;
@@ -1844,9 +1844,8 @@ start_sensor:
/* Enable the CSI interface on ANN B0/K0 */
if (isp->media_dev.hw_revision >= ((ATOMISP_HW_REVISION_ISP2401 <<
ATOMISP_HW_REVISION_SHIFT) | ATOMISP_HW_STEPPING_B0)) {
- pci_write_config_word(isp->pdev, MRFLD_PCI_CSI_CONTROL,
- isp->saved_regs.csi_control |
- MRFLD_PCI_CSI_CONTROL_CSI_READY);
+ pci_write_config_word(pdev, MRFLD_PCI_CSI_CONTROL,
+ isp->saved_regs.csi_control | MRFLD_PCI_CSI_CONTROL_CSI_READY);
}
/* stream on the sensor */
@@ -1891,6 +1890,7 @@ int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
{
struct video_device *vdev = video_devdata(file);
struct atomisp_device *isp = video_get_drvdata(vdev);
struct pci_dev *pdev = to_pci_dev(isp->dev);
struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
struct atomisp_sub_device *asd = pipe->asd;
struct atomisp_video_pipe *capture_pipe = NULL;
@@ -2076,9 +2076,8 @@ stopsensor:
/* Disable the CSI interface on ANN B0/K0 */
if (isp->media_dev.hw_revision >= ((ATOMISP_HW_REVISION_ISP2401 <<
ATOMISP_HW_REVISION_SHIFT) | ATOMISP_HW_STEPPING_B0)) {
- pci_write_config_word(isp->pdev, MRFLD_PCI_CSI_CONTROL,
- isp->saved_regs.csi_control &
- ~MRFLD_PCI_CSI_CONTROL_CSI_READY);
+ pci_write_config_word(pdev, MRFLD_PCI_CSI_CONTROL,
+ isp->saved_regs.csi_control & ~MRFLD_PCI_CSI_CONTROL_CSI_READY);
}
if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_LOW, false))
@@ -2111,8 +2110,8 @@ stopsensor:
}
/* disable PUNIT/ISP acknowlede/handshake - SRSE=3 */
- pci_write_config_dword(isp->pdev, PCI_I_CONTROL, isp->saved_regs.i_control |
- MRFLD_PCI_I_CONTROL_SRSE_RESET_MASK);
+ pci_write_config_dword(pdev, PCI_I_CONTROL,
+ isp->saved_regs.i_control | MRFLD_PCI_I_CONTROL_SRSE_RESET_MASK);
dev_err(isp->dev, "atomisp_reset");
atomisp_reset(isp);
for (i = 0; i < isp->num_of_streams; i++) {

View File

@@ -127,8 +127,6 @@ MODULE_PARM_DESC(pad_h, "extra data for ISP processing");
struct device *atomisp_dev;
void __iomem *atomisp_io_base;
static const struct atomisp_freq_scaling_rule dfs_rules_merr[] = {
{
.width = ISP_FREQ_RULE_ANY,
@@ -512,30 +510,27 @@ void atomisp_acc_unregister(struct atomisp_acc_pipe *video)
static int atomisp_save_iunit_reg(struct atomisp_device *isp)
{
- struct pci_dev *dev = isp->pdev;
+ struct pci_dev *pdev = to_pci_dev(isp->dev);
dev_dbg(isp->dev, "%s\n", __func__);
- pci_read_config_word(dev, PCI_COMMAND, &isp->saved_regs.pcicmdsts);
+ pci_read_config_word(pdev, PCI_COMMAND, &isp->saved_regs.pcicmdsts);
/* isp->saved_regs.ispmmadr is set from the atomisp_pci_probe() */
- pci_read_config_dword(dev, PCI_MSI_CAPID, &isp->saved_regs.msicap);
- pci_read_config_dword(dev, PCI_MSI_ADDR, &isp->saved_regs.msi_addr);
- pci_read_config_word(dev, PCI_MSI_DATA, &isp->saved_regs.msi_data);
- pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &isp->saved_regs.intr);
- pci_read_config_dword(dev, PCI_INTERRUPT_CTRL,
- &isp->saved_regs.interrupt_control);
+ pci_read_config_dword(pdev, PCI_MSI_CAPID, &isp->saved_regs.msicap);
+ pci_read_config_dword(pdev, PCI_MSI_ADDR, &isp->saved_regs.msi_addr);
+ pci_read_config_word(pdev, PCI_MSI_DATA, &isp->saved_regs.msi_data);
+ pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &isp->saved_regs.intr);
+ pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &isp->saved_regs.interrupt_control);
- pci_read_config_dword(dev, MRFLD_PCI_PMCS,
- &isp->saved_regs.pmcs);
+ pci_read_config_dword(pdev, MRFLD_PCI_PMCS, &isp->saved_regs.pmcs);
/* Ensure read/write combining is enabled. */
- pci_read_config_dword(dev, PCI_I_CONTROL,
- &isp->saved_regs.i_control);
+ pci_read_config_dword(pdev, PCI_I_CONTROL, &isp->saved_regs.i_control);
isp->saved_regs.i_control |=
MRFLD_PCI_I_CONTROL_ENABLE_READ_COMBINING |
MRFLD_PCI_I_CONTROL_ENABLE_WRITE_COMBINING;
- pci_read_config_dword(dev, MRFLD_PCI_CSI_ACCESS_CTRL_VIOL,
+ pci_read_config_dword(pdev, MRFLD_PCI_CSI_ACCESS_CTRL_VIOL,
&isp->saved_regs.csi_access_viol);
- pci_read_config_dword(dev, MRFLD_PCI_CSI_RCOMP_CONTROL,
+ pci_read_config_dword(pdev, MRFLD_PCI_CSI_RCOMP_CONTROL,
&isp->saved_regs.csi_rcomp_config);
/*
* Hardware bugs require setting CSI_HS_OVR_CLK_GATE_ON_UPDATE.
@@ -545,65 +540,58 @@ static int atomisp_save_iunit_reg(struct atomisp_device *isp)
* is missed, and IUNIT can hang.
* For both issues, setting this bit is a workaround.
*/
- isp->saved_regs.csi_rcomp_config |=
- MRFLD_PCI_CSI_HS_OVR_CLK_GATE_ON_UPDATE;
- pci_read_config_dword(dev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL,
+ isp->saved_regs.csi_rcomp_config |= MRFLD_PCI_CSI_HS_OVR_CLK_GATE_ON_UPDATE;
+ pci_read_config_dword(pdev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL,
&isp->saved_regs.csi_afe_dly);
- pci_read_config_dword(dev, MRFLD_PCI_CSI_CONTROL,
+ pci_read_config_dword(pdev, MRFLD_PCI_CSI_CONTROL,
&isp->saved_regs.csi_control);
if (isp->media_dev.hw_revision >=
(ATOMISP_HW_REVISION_ISP2401 << ATOMISP_HW_REVISION_SHIFT))
- isp->saved_regs.csi_control |=
- MRFLD_PCI_CSI_CONTROL_PARPATHEN;
+ isp->saved_regs.csi_control |= MRFLD_PCI_CSI_CONTROL_PARPATHEN;
/*
* On CHT CSI_READY bit should be enabled before stream on
*/
if (IS_CHT && (isp->media_dev.hw_revision >= ((ATOMISP_HW_REVISION_ISP2401 <<
ATOMISP_HW_REVISION_SHIFT) | ATOMISP_HW_STEPPING_B0)))
- isp->saved_regs.csi_control |=
- MRFLD_PCI_CSI_CONTROL_CSI_READY;
- pci_read_config_dword(dev, MRFLD_PCI_CSI_AFE_RCOMP_CONTROL,
+ isp->saved_regs.csi_control |= MRFLD_PCI_CSI_CONTROL_CSI_READY;
+ pci_read_config_dword(pdev, MRFLD_PCI_CSI_AFE_RCOMP_CONTROL,
&isp->saved_regs.csi_afe_rcomp_config);
- pci_read_config_dword(dev, MRFLD_PCI_CSI_AFE_HS_CONTROL,
+ pci_read_config_dword(pdev, MRFLD_PCI_CSI_AFE_HS_CONTROL,
&isp->saved_regs.csi_afe_hs_control);
- pci_read_config_dword(dev, MRFLD_PCI_CSI_DEADLINE_CONTROL,
+ pci_read_config_dword(pdev, MRFLD_PCI_CSI_DEADLINE_CONTROL,
&isp->saved_regs.csi_deadline_control);
return 0;
}
static int __maybe_unused atomisp_restore_iunit_reg(struct atomisp_device *isp)
{
- struct pci_dev *dev = isp->pdev;
+ struct pci_dev *pdev = to_pci_dev(isp->dev);
dev_dbg(isp->dev, "%s\n", __func__);
- pci_write_config_word(dev, PCI_COMMAND, isp->saved_regs.pcicmdsts);
- pci_write_config_dword(dev, PCI_BASE_ADDRESS_0,
- isp->saved_regs.ispmmadr);
- pci_write_config_dword(dev, PCI_MSI_CAPID, isp->saved_regs.msicap);
- pci_write_config_dword(dev, PCI_MSI_ADDR, isp->saved_regs.msi_addr);
- pci_write_config_word(dev, PCI_MSI_DATA, isp->saved_regs.msi_data);
- pci_write_config_byte(dev, PCI_INTERRUPT_LINE, isp->saved_regs.intr);
- pci_write_config_dword(dev, PCI_INTERRUPT_CTRL,
- isp->saved_regs.interrupt_control);
- pci_write_config_dword(dev, PCI_I_CONTROL,
- isp->saved_regs.i_control);
+ pci_write_config_word(pdev, PCI_COMMAND, isp->saved_regs.pcicmdsts);
+ pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, isp->saved_regs.ispmmadr);
+ pci_write_config_dword(pdev, PCI_MSI_CAPID, isp->saved_regs.msicap);
+ pci_write_config_dword(pdev, PCI_MSI_ADDR, isp->saved_regs.msi_addr);
+ pci_write_config_word(pdev, PCI_MSI_DATA, isp->saved_regs.msi_data);
+ pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, isp->saved_regs.intr);
+ pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, isp->saved_regs.interrupt_control);
+ pci_write_config_dword(pdev, PCI_I_CONTROL, isp->saved_regs.i_control);
- pci_write_config_dword(dev, MRFLD_PCI_PMCS,
- isp->saved_regs.pmcs);
- pci_write_config_dword(dev, MRFLD_PCI_CSI_ACCESS_CTRL_VIOL,
+ pci_write_config_dword(pdev, MRFLD_PCI_PMCS, isp->saved_regs.pmcs);
+ pci_write_config_dword(pdev, MRFLD_PCI_CSI_ACCESS_CTRL_VIOL,
isp->saved_regs.csi_access_viol);
- pci_write_config_dword(dev, MRFLD_PCI_CSI_RCOMP_CONTROL,
+ pci_write_config_dword(pdev, MRFLD_PCI_CSI_RCOMP_CONTROL,
isp->saved_regs.csi_rcomp_config);
- pci_write_config_dword(dev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL,
+ pci_write_config_dword(pdev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL,
isp->saved_regs.csi_afe_dly);
- pci_write_config_dword(dev, MRFLD_PCI_CSI_CONTROL,
+ pci_write_config_dword(pdev, MRFLD_PCI_CSI_CONTROL,
isp->saved_regs.csi_control);
- pci_write_config_dword(dev, MRFLD_PCI_CSI_AFE_RCOMP_CONTROL,
+ pci_write_config_dword(pdev, MRFLD_PCI_CSI_AFE_RCOMP_CONTROL,
isp->saved_regs.csi_afe_rcomp_config);
- pci_write_config_dword(dev, MRFLD_PCI_CSI_AFE_HS_CONTROL,
+ pci_write_config_dword(pdev, MRFLD_PCI_CSI_AFE_HS_CONTROL,
isp->saved_regs.csi_afe_hs_control);
- pci_write_config_dword(dev, MRFLD_PCI_CSI_DEADLINE_CONTROL,
+ pci_write_config_dword(pdev, MRFLD_PCI_CSI_DEADLINE_CONTROL,
isp->saved_regs.csi_deadline_control);
/*
@@ -619,7 +607,7 @@ static int __maybe_unused atomisp_restore_iunit_reg(struct atomisp_device *isp)
static int atomisp_mrfld_pre_power_down(struct atomisp_device *isp)
{
- struct pci_dev *dev = isp->pdev;
+ struct pci_dev *pdev = to_pci_dev(isp->dev);
u32 irq;
unsigned long flags;
@@ -635,11 +623,11 @@ static int atomisp_mrfld_pre_power_down(struct atomisp_device *isp)
* So, here we need to check if there is any pending
* IRQ, if so, waiting for it to be served
*/
- pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq);
+ pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
irq = irq & 1 << INTR_IIR;
- pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq);
+ pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq);
- pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq);
+ pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
if (!(irq & (1 << INTR_IIR)))
goto done;
@@ -652,11 +640,11 @@ static int atomisp_mrfld_pre_power_down(struct atomisp_device *isp)
spin_unlock_irqrestore(&isp->lock, flags);
return -EAGAIN;
} else {
- pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq);
+ pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
irq = irq & 1 << INTR_IIR;
- pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq);
+ pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq);
- pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq);
+ pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
if (!(irq & (1 << INTR_IIR))) {
atomisp_css2_hw_store_32(MRFLD_INTR_ENABLE_REG, 0x0);
goto done;
@@ -675,11 +663,11 @@ done:
* to IIR. It could block subsequent interrupt messages.
* HW sighting:4568410.
*/
- pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq);
+ pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
irq &= ~(1 << INTR_IER);
- pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq);
+ pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq);
- atomisp_msi_irq_uninit(isp, dev);
+ atomisp_msi_irq_uninit(isp);
atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_LOW, true);
spin_unlock_irqrestore(&isp->lock, flags);
@@ -755,7 +743,7 @@ static int atomisp_mrfld_power(struct atomisp_device *isp, bool enable)
/* Wait until ISPSSPM0 bit[25:24] shows the right value */
iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSPM0, &tmp);
- tmp = (tmp & MRFLD_ISPSSPM0_ISPSSC_MASK) >> MRFLD_ISPSSPM0_ISPSSS_OFFSET;
+ tmp = (tmp >> MRFLD_ISPSSPM0_ISPSSS_OFFSET) & MRFLD_ISPSSPM0_ISPSSC_MASK;
if (tmp == val) {
trace_ipu_cstate(enable);
return 0;
@@ -778,15 +766,13 @@ static int atomisp_mrfld_power(struct atomisp_device *isp, bool enable)
/* Workaround for pmu_nc_set_power_state not ready in MRFLD */
int atomisp_mrfld_power_down(struct atomisp_device *isp)
{
- // FIXME: at least with ISP2401, enabling this code causes the driver to break
- return 0 && atomisp_mrfld_power(isp, false);
+ return atomisp_mrfld_power(isp, false);
}
/* Workaround for pmu_nc_set_power_state not ready in MRFLD */
int atomisp_mrfld_power_up(struct atomisp_device *isp)
{
- // FIXME: at least with ISP2401, enabling this code causes the driver to break
- return 0 && atomisp_mrfld_power(isp, true);
+ return atomisp_mrfld_power(isp, true);
}
int atomisp_runtime_suspend(struct device *dev)
@@ -902,6 +888,7 @@ static int __maybe_unused atomisp_resume(struct device *dev)
int atomisp_csi_lane_config(struct atomisp_device *isp)
{
struct pci_dev *pdev = to_pci_dev(isp->dev);
static const struct {
u8 code;
u8 lanes[MRFLD_PORT_NUM];
@@ -1003,7 +990,7 @@ int atomisp_csi_lane_config(struct atomisp_device *isp)
return -EINVAL;
}
- pci_read_config_dword(isp->pdev, MRFLD_PCI_CSI_CONTROL, &csi_control);
+ pci_read_config_dword(pdev, MRFLD_PCI_CSI_CONTROL, &csi_control);
csi_control &= ~port_config_mask;
csi_control |= (portconfigs[i].code << MRFLD_PORT_CONFIGCODE_SHIFT)
| (portconfigs[i].lanes[0] ? 0 : (1 << MRFLD_PORT1_ENABLE_SHIFT))
@@ -1013,7 +1000,7 @@ int atomisp_csi_lane_config(struct atomisp_device *isp)
| (((1 << portconfigs[i].lanes[1]) - 1) << MRFLD_PORT2_LANES_SHIFT)
| (((1 << portconfigs[i].lanes[2]) - 1) << port3_lanes_shift);
- pci_write_config_dword(isp->pdev, MRFLD_PCI_CSI_CONTROL, csi_control);
+ pci_write_config_dword(pdev, MRFLD_PCI_CSI_CONTROL, csi_control);
dev_dbg(isp->dev,
"%s: the portconfig is %d-%d-%d, CSI_CONTROL is 0x%08X\n",
@@ -1440,8 +1427,7 @@ atomisp_load_firmware(struct atomisp_device *isp)
* Check for flags the driver was compiled with against the PCI
* device. Always returns true on other than ISP 2400.
*/
- static bool is_valid_device(struct pci_dev *dev,
- const struct pci_device_id *id)
+ static bool is_valid_device(struct pci_dev *pdev, const struct pci_device_id *id)
{
unsigned int a0_max_id = 0;
const char *name;
@@ -1465,14 +1451,14 @@ static bool is_valid_device(struct pci_dev *dev,
name = "Cherrytrail";
break;
default:
dev_err(&dev->dev, "%s: unknown device ID %x04:%x04\n",
dev_err(&pdev->dev, "%s: unknown device ID %x04:%x04\n",
product, id->vendor, id->device);
return false;
}
- if (dev->revision <= ATOMISP_PCI_REV_BYT_A0_MAX) {
- dev_err(&dev->dev, "%s revision %d is not unsupported\n",
- name, dev->revision);
+ if (pdev->revision <= ATOMISP_PCI_REV_BYT_A0_MAX) {
+ dev_err(&pdev->dev, "%s revision %d is not unsupported\n",
+ name, pdev->revision);
return false;
}
@@ -1483,22 +1469,20 @@ static bool is_valid_device(struct pci_dev *dev,
#if defined(ISP2400)
if (IS_ISP2401) {
dev_err(&dev->dev, "Support for %s (ISP2401) was disabled at compile time\n",
dev_err(&pdev->dev, "Support for %s (ISP2401) was disabled at compile time\n",
name);
return false;
}
#else
if (!IS_ISP2401) {
dev_err(&dev->dev, "Support for %s (ISP2400) was disabled at compile time\n",
dev_err(&pdev->dev, "Support for %s (ISP2400) was disabled at compile time\n",
name);
return false;
}
#endif
dev_info(&dev->dev, "Detected %s version %d (ISP240%c) on %s\n",
name, dev->revision,
IS_ISP2401 ? '1' : '0',
product);
dev_info(&pdev->dev, "Detected %s version %d (ISP240%c) on %s\n",
name, pdev->revision, IS_ISP2401 ? '1' : '0', product);
return true;
}
@@ -1538,66 +1522,60 @@ alloc_fail:
#define ATOM_ISP_PCI_BAR 0
- static int atomisp_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
+ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
const struct atomisp_platform_data *pdata;
struct atomisp_device *isp;
unsigned int start;
void __iomem *base;
int err, val;
u32 irq;
if (!is_valid_device(dev, id))
if (!is_valid_device(pdev, id))
return -ENODEV;
/* Pointer to struct device. */
atomisp_dev = &dev->dev;
atomisp_dev = &pdev->dev;
pdata = atomisp_get_platform_data();
if (!pdata)
dev_warn(&dev->dev, "no platform data available\n");
dev_warn(&pdev->dev, "no platform data available\n");
err = pcim_enable_device(dev);
err = pcim_enable_device(pdev);
if (err) {
dev_err(&dev->dev, "Failed to enable CI ISP device (%d)\n",
err);
dev_err(&pdev->dev, "Failed to enable CI ISP device (%d)\n", err);
return err;
}
start = pci_resource_start(dev, ATOM_ISP_PCI_BAR);
dev_dbg(&dev->dev, "start: 0x%x\n", start);
start = pci_resource_start(pdev, ATOM_ISP_PCI_BAR);
dev_dbg(&pdev->dev, "start: 0x%x\n", start);
err = pcim_iomap_regions(dev, 1 << ATOM_ISP_PCI_BAR, pci_name(dev));
err = pcim_iomap_regions(pdev, 1 << ATOM_ISP_PCI_BAR, pci_name(pdev));
if (err) {
dev_err(&dev->dev, "Failed to I/O memory remapping (%d)\n",
err);
dev_err(&pdev->dev, "Failed to I/O memory remapping (%d)\n", err);
goto ioremap_fail;
}
base = pcim_iomap_table(dev)[ATOM_ISP_PCI_BAR];
dev_dbg(&dev->dev, "base: %p\n", base);
atomisp_io_base = base;
dev_dbg(&dev->dev, "atomisp_io_base: %p\n", atomisp_io_base);
- isp = devm_kzalloc(&dev->dev, sizeof(struct atomisp_device), GFP_KERNEL);
+ isp = devm_kzalloc(&pdev->dev, sizeof(*isp), GFP_KERNEL);
if (!isp) {
err = -ENOMEM;
goto atomisp_dev_alloc_fail;
}
- isp->pdev = dev;
- isp->dev = &dev->dev;
+ isp->dev = &pdev->dev;
+ isp->base = pcim_iomap_table(pdev)[ATOM_ISP_PCI_BAR];
isp->sw_contex.power_state = ATOM_ISP_POWER_UP;
isp->saved_regs.ispmmadr = start;
dev_dbg(&pdev->dev, "atomisp mmio base: %p\n", isp->base);
rt_mutex_init(&isp->mutex);
mutex_init(&isp->streamoff_mutex);
spin_lock_init(&isp->lock);
/* This is not a true PCI device on SoC, so the delay is not needed. */
- isp->pdev->d3_delay = 0;
+ pdev->d3_delay = 0;
+ pci_set_drvdata(pdev, isp);
switch (id->device & ATOMISP_PCI_DEVICE_SOC_MASK) {
case ATOMISP_PCI_DEVICE_SOC_MRFLD:
@@ -1648,15 +1626,14 @@ static int atomisp_pci_probe(struct pci_dev *dev,
* have specs yet for exactly how it varies. Default to
* BYT-CR but let provisioning set it via EFI variable
*/
isp->hpll_freq = gmin_get_var_int(&dev->dev, false, "HpllFreq",
HPLL_FREQ_2000MHZ);
isp->hpll_freq = gmin_get_var_int(&pdev->dev, false, "HpllFreq", HPLL_FREQ_2000MHZ);
/*
* for BYT/CHT we are put isp into D3cold to avoid pci registers access
* in power off. Set d3cold_delay to 0 since default 100ms is not
* necessary.
*/
isp->pdev->d3cold_delay = 0;
pdev->d3cold_delay = 0;
break;
case ATOMISP_PCI_DEVICE_SOC_ANN:
isp->media_dev.hw_revision = (
@@ -1666,7 +1643,7 @@ static int atomisp_pci_probe(struct pci_dev *dev,
ATOMISP_HW_REVISION_ISP2401_LEGACY
#endif
<< ATOMISP_HW_REVISION_SHIFT);
isp->media_dev.hw_revision |= isp->pdev->revision < 2 ?
isp->media_dev.hw_revision |= pdev->revision < 2 ?
ATOMISP_HW_STEPPING_A0 : ATOMISP_HW_STEPPING_B0;
isp->dfs = &dfs_config_merr;
isp->hpll_freq = HPLL_FREQ_1600MHZ;
@@ -1679,13 +1656,13 @@ static int atomisp_pci_probe(struct pci_dev *dev,
ATOMISP_HW_REVISION_ISP2401_LEGACY
#endif
<< ATOMISP_HW_REVISION_SHIFT);
isp->media_dev.hw_revision |= isp->pdev->revision < 2 ?
isp->media_dev.hw_revision |= pdev->revision < 2 ?
ATOMISP_HW_STEPPING_A0 : ATOMISP_HW_STEPPING_B0;
isp->dfs = &dfs_config_cht;
isp->pdev->d3cold_delay = 0;
pdev->d3cold_delay = 0;
iosf_mbi_read(CCK_PORT, MBI_REG_READ, CCK_FUSE_REG_0, &val);
iosf_mbi_read(BT_MBI_UNIT_CCK, MBI_REG_READ, CCK_FUSE_REG_0, &val);
switch (val & CCK_FUSE_HPLL_FREQ_MASK) {
case 0x00:
isp->hpll_freq = HPLL_FREQ_800MHZ;
@@ -1698,18 +1675,16 @@ static int atomisp_pci_probe(struct pci_dev *dev,
break;
default:
isp->hpll_freq = HPLL_FREQ_1600MHZ;
dev_warn(isp->dev,
"read HPLL from cck failed. Default to 1600 MHz.\n");
dev_warn(&pdev->dev, "read HPLL from cck failed. Default to 1600 MHz.\n");
}
break;
default:
dev_err(&dev->dev, "un-supported IUNIT device\n");
dev_err(&pdev->dev, "un-supported IUNIT device\n");
err = -ENODEV;
goto atomisp_dev_alloc_fail;
}
dev_info(&dev->dev, "ISP HPLL frequency base = %d MHz\n",
isp->hpll_freq);
dev_info(&pdev->dev, "ISP HPLL frequency base = %d MHz\n", isp->hpll_freq);
isp->max_isr_latency = ATOMISP_MAX_ISR_LATENCY;
@@ -1718,30 +1693,28 @@ static int atomisp_pci_probe(struct pci_dev *dev,
isp->firmware = atomisp_load_firmware(isp);
if (!isp->firmware) {
err = -ENOENT;
dev_dbg(&dev->dev, "Firmware load failed\n");
dev_dbg(&pdev->dev, "Firmware load failed\n");
goto load_fw_fail;
}
err = sh_css_check_firmware_version(isp->dev,
isp->firmware->data);
err = sh_css_check_firmware_version(isp->dev, isp->firmware->data);
if (err) {
dev_dbg(&dev->dev, "Firmware version check failed\n");
dev_dbg(&pdev->dev, "Firmware version check failed\n");
goto fw_validation_fail;
}
} else {
dev_info(&dev->dev, "Firmware load will be deferred\n");
dev_info(&pdev->dev, "Firmware load will be deferred\n");
}
pci_set_master(dev);
pci_set_drvdata(dev, isp);
pci_set_master(pdev);
err = pci_enable_msi(dev);
err = pci_enable_msi(pdev);
if (err) {
dev_err(&dev->dev, "Failed to enable msi (%d)\n", err);
dev_err(&pdev->dev, "Failed to enable msi (%d)\n", err);
goto enable_msi_fail;
}
atomisp_msi_irq_init(isp, dev);
atomisp_msi_irq_init(isp);
cpu_latency_qos_add_request(&isp->pm_qos, PM_QOS_DEFAULT_VALUE);
@@ -1762,8 +1735,7 @@ static int atomisp_pci_probe(struct pci_dev *dev,
* Workaround for imbalance data eye issue which is observed
* on TNG B0.
*/
pci_read_config_dword(dev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL,
&csi_afe_trim);
pci_read_config_dword(pdev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL, &csi_afe_trim);
csi_afe_trim &= ~((MRFLD_PCI_CSI_HSRXCLKTRIM_MASK <<
MRFLD_PCI_CSI1_HSRXCLKTRIM_SHIFT) |
(MRFLD_PCI_CSI_HSRXCLKTRIM_MASK <<
@@ -1776,20 +1748,18 @@ static int atomisp_pci_probe(struct pci_dev *dev,
MRFLD_PCI_CSI2_HSRXCLKTRIM_SHIFT) |
(MRFLD_PCI_CSI3_HSRXCLKTRIM <<
MRFLD_PCI_CSI3_HSRXCLKTRIM_SHIFT);
pci_write_config_dword(dev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL,
csi_afe_trim);
pci_write_config_dword(pdev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL, csi_afe_trim);
}
err = atomisp_initialize_modules(isp);
if (err < 0) {
dev_err(&dev->dev, "atomisp_initialize_modules (%d)\n", err);
dev_err(&pdev->dev, "atomisp_initialize_modules (%d)\n", err);
goto initialize_modules_fail;
}
err = atomisp_register_entities(isp);
if (err < 0) {
dev_err(&dev->dev, "atomisp_register_entities failed (%d)\n",
err);
dev_err(&pdev->dev, "atomisp_register_entities failed (%d)\n", err);
goto register_entities_fail;
}
err = atomisp_create_pads_links(isp);
@@ -1802,24 +1772,24 @@ static int atomisp_pci_probe(struct pci_dev *dev,
/* save the iunit context only once after all the values are init'ed. */
atomisp_save_iunit_reg(isp);
pm_runtime_put_noidle(&dev->dev);
pm_runtime_allow(&dev->dev);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_allow(&pdev->dev);
hmm_init_mem_stat(repool_pgnr, dypool_enable, dypool_pgnr);
err = hmm_pool_register(repool_pgnr, HMM_POOL_TYPE_RESERVED);
if (err) {
dev_err(&dev->dev, "Failed to register reserved memory pool.\n");
dev_err(&pdev->dev, "Failed to register reserved memory pool.\n");
goto hmm_pool_fail;
}
/* Init ISP memory management */
hmm_init();
err = devm_request_threaded_irq(&dev->dev, dev->irq,
err = devm_request_threaded_irq(&pdev->dev, pdev->irq,
atomisp_isr, atomisp_isr_thread,
IRQF_SHARED, "isp_irq", isp);
if (err) {
dev_err(&dev->dev, "Failed to request irq (%d)\n", err);
dev_err(&pdev->dev, "Failed to request irq (%d)\n", err);
goto request_irq_fail;
}
@@ -1827,23 +1797,23 @@ static int atomisp_pci_probe(struct pci_dev *dev,
if (!defer_fw_load) {
err = atomisp_css_load_firmware(isp);
if (err) {
dev_err(&dev->dev, "Failed to init css.\n");
dev_err(&pdev->dev, "Failed to init css.\n");
goto css_init_fail;
}
} else {
dev_dbg(&dev->dev, "Skip css init.\n");
dev_dbg(&pdev->dev, "Skip css init.\n");
}
/* Clear FW image from memory */
release_firmware(isp->firmware);
isp->firmware = NULL;
isp->css_env.isp_css_fw.data = NULL;
atomisp_drvfs_init(&dev->driver->driver, isp);
atomisp_drvfs_init(isp);
return 0;
css_init_fail:
devm_free_irq(&dev->dev, dev->irq, isp);
devm_free_irq(&pdev->dev, pdev->irq, isp);
request_irq_fail:
hmm_cleanup();
hmm_pool_unregister(HMM_POOL_TYPE_RESERVED);
@@ -1856,8 +1826,8 @@ register_entities_fail:
atomisp_uninitialize_modules(isp);
initialize_modules_fail:
cpu_latency_qos_remove_request(&isp->pm_qos);
atomisp_msi_irq_uninit(isp, dev);
pci_disable_msi(dev);
atomisp_msi_irq_uninit(isp);
pci_disable_msi(pdev);
enable_msi_fail:
fw_validation_fail:
release_firmware(isp->firmware);
@@ -1869,35 +1839,34 @@ load_fw_fail:
* The following lines have been copied from atomisp suspend path
*/
pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq);
pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
irq = irq & 1 << INTR_IIR;
pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq);
pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq);
pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq);
pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
irq &= ~(1 << INTR_IER);
pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq);
pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq);
atomisp_msi_irq_uninit(isp, dev);
atomisp_msi_irq_uninit(isp);
atomisp_ospm_dphy_down(isp);
/* Address later when we worry about the ...field chips */
if (IS_ENABLED(CONFIG_PM) && atomisp_mrfld_power_down(isp))
dev_err(&dev->dev, "Failed to switch off ISP\n");
dev_err(&pdev->dev, "Failed to switch off ISP\n");
atomisp_dev_alloc_fail:
pcim_iounmap_regions(dev, 1 << ATOM_ISP_PCI_BAR);
pcim_iounmap_regions(pdev, 1 << ATOM_ISP_PCI_BAR);
ioremap_fail:
return err;
}
- static void atomisp_pci_remove(struct pci_dev *dev)
+ static void atomisp_pci_remove(struct pci_dev *pdev)
{
- struct atomisp_device *isp = (struct atomisp_device *)
- pci_get_drvdata(dev);
+ struct atomisp_device *isp = pci_get_drvdata(pdev);
- dev_info(&dev->dev, "Removing atomisp driver\n");
+ dev_info(&pdev->dev, "Removing atomisp driver\n");
atomisp_drvfs_exit();
@@ -1906,11 +1875,11 @@ static void atomisp_pci_remove(struct pci_dev *dev)
ia_css_unload_firmware();
hmm_cleanup();
pm_runtime_forbid(&dev->dev);
pm_runtime_get_noresume(&dev->dev);
pm_runtime_forbid(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
cpu_latency_qos_remove_request(&isp->pm_qos);
atomisp_msi_irq_uninit(isp, dev);
atomisp_msi_irq_uninit(isp);
atomisp_unregister_entities(isp);
destroy_workqueue(isp->wdt_work_queue);

View File

@@ -48,7 +48,7 @@ static struct ia_css_refcount_entry *refcount_find_entry(ia_css_ptr ptr,
return NULL;
if (!myrefcount.items) {
ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
"refcount_find_entry(): Ref count not initialized!\n");
"%s(): Ref count not initialized!\n", __func__);
return NULL;
}
@@ -73,12 +73,12 @@ int ia_css_refcount_init(uint32_t size)
if (size == 0) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_refcount_init(): Size of 0 for Ref count init!\n");
"%s(): Size of 0 for Ref count init!\n", __func__);
return -EINVAL;
}
if (myrefcount.items) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_refcount_init(): Ref count is already initialized\n");
"%s(): Ref count is already initialized\n", __func__);
return -EINVAL;
}
myrefcount.items =
@@ -99,7 +99,7 @@ void ia_css_refcount_uninit(void)
u32 i;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_refcount_uninit() entry\n");
"%s() entry\n", __func__);
for (i = 0; i < myrefcount.size; i++) {
/* driver verifier tool has issues with &arr[i]
and prefers arr + i; as these are actually equivalent
@@ -120,7 +120,7 @@ void ia_css_refcount_uninit(void)
myrefcount.items = NULL;
myrefcount.size = 0;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_refcount_uninit() leave\n");
"%s() leave\n", __func__);
}
ia_css_ptr ia_css_refcount_increment(s32 id, ia_css_ptr ptr)
@@ -133,7 +133,7 @@ ia_css_ptr ia_css_refcount_increment(s32 id, ia_css_ptr ptr)
entry = refcount_find_entry(ptr, false);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_refcount_increment(%x) 0x%x\n", id, ptr);
"%s(%x) 0x%x\n", __func__, id, ptr);
if (!entry) {
entry = refcount_find_entry(ptr, true);
@@ -145,7 +145,7 @@ ia_css_ptr ia_css_refcount_increment(s32 id, ia_css_ptr ptr)
if (entry->id != id) {
ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
"ia_css_refcount_increment(): Ref count IDS do not match!\n");
"%s(): Ref count IDS do not match!\n", __func__);
return mmgr_NULL;
}
@@ -165,7 +165,7 @@ bool ia_css_refcount_decrement(s32 id, ia_css_ptr ptr)
struct ia_css_refcount_entry *entry;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_refcount_decrement(%x) 0x%x\n", id, ptr);
"%s(%x) 0x%x\n", __func__, id, ptr);
if (ptr == mmgr_NULL)
return false;
@@ -175,7 +175,7 @@ bool ia_css_refcount_decrement(s32 id, ia_css_ptr ptr)
if (entry) {
if (entry->id != id) {
ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
"ia_css_refcount_decrement(): Ref count IDS do not match!\n");
"%s(): Ref count IDS do not match!\n", __func__);
return false;
}
if (entry->count > 0) {
@@ -225,8 +225,8 @@ void ia_css_refcount_clear(s32 id, clear_func clear_func_ptr)
u32 count = 0;
assert(clear_func_ptr);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_refcount_clear(%x)\n",
id);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s(%x)\n",
__func__, id);
for (i = 0; i < myrefcount.size; i++) {
/* driver verifier tool has issues with &arr[i]
@@ -236,14 +236,14 @@ void ia_css_refcount_clear(s32 id, clear_func clear_func_ptr)
entry = myrefcount.items + i;
if ((entry->data != mmgr_NULL) && (entry->id == id)) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_refcount_clear: %x: 0x%x\n",
"%s: %x: 0x%x\n", __func__,
id, entry->data);
if (clear_func_ptr) {
/* clear using provided function */
clear_func_ptr(entry->data);
} else {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_refcount_clear: using hmm_free: no clear_func\n");
"%s: using hmm_free: no clear_func\n", __func__);
hmm_free(entry->data);
}
@@ -260,7 +260,7 @@ void ia_css_refcount_clear(s32 id, clear_func clear_func_ptr)
}
}
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_refcount_clear(%x): cleared %d\n", id,
"%s(%x): cleared %d\n", __func__, id,
count);
}

View File

@@ -52,32 +52,14 @@ typedef unsigned short hive_uint16;
typedef unsigned int hive_uint32;
typedef unsigned long long hive_uint64;
/* by default assume 32 bit master port (both data and address) */
#ifndef HRT_DATA_WIDTH
#define HRT_DATA_WIDTH 32
#endif
#ifndef HRT_ADDRESS_WIDTH
#define HRT_ADDRESS_WIDTH 32
#endif
#define HRT_ADDRESS_WIDTH 64
#define HRT_DATA_BYTES (HRT_DATA_WIDTH / 8)
#define HRT_ADDRESS_BYTES (HRT_ADDRESS_WIDTH / 8)
#define SIZEOF_HRT_REG (HRT_DATA_WIDTH >> 3)
#if HRT_DATA_WIDTH == 64
typedef hive_uint64 hrt_data;
#elif HRT_DATA_WIDTH == 32
typedef hive_uint32 hrt_data;
#else
#error data width not supported
#endif
#if HRT_ADDRESS_WIDTH == 64
typedef hive_uint64 hrt_address;
#elif HRT_ADDRESS_WIDTH == 32
typedef hive_uint32 hrt_address;
#else
#error adddres width not supported
#endif
/* use 64 bit addresses in simulation, where possible */
typedef hive_uint64 hive_sim_address;

View File

@@ -735,7 +735,7 @@ ia_css_ptr hmm_host_vaddr_to_hrt_vaddr(const void *ptr)
void hmm_show_mem_stat(const char *func, const int line)
{
- trace_printk("tol_cnt=%d usr_size=%d res_size=%d res_cnt=%d sys_size=%d dyc_thr=%d dyc_size=%d.\n",
+ pr_info("tol_cnt=%d usr_size=%d res_size=%d res_cnt=%d sys_size=%d dyc_thr=%d dyc_size=%d.\n",
hmm_mem_stat.tol_cnt,
hmm_mem_stat.usr_size, hmm_mem_stat.res_size,
hmm_mem_stat.res_cnt, hmm_mem_stat.sys_size,

View File

@@ -13,306 +13,4 @@
* more details.
*/
#ifndef __SYSTEM_GLOBAL_H_INCLUDED__
#define __SYSTEM_GLOBAL_H_INCLUDED__
#include <hive_isp_css_defs.h>
#include <type_support.h>
/*
* The longest allowed (uninteruptible) bus transfer, does not
* take stalling into account
*/
#define HIVE_ISP_MAX_BURST_LENGTH 1024
/*
* Maximum allowed burst length in words for the ISP DMA
*/
#define ISP_DMA_MAX_BURST_LENGTH 128
/*
* Create a list of HAS and IS properties that defines the system
*
* The configuration assumes the following
* - The system is hetereogeneous; Multiple cells and devices classes
* - The cell and device instances are homogeneous, each device type
* belongs to the same class
* - Device instances supporting a subset of the class capabilities are
* allowed
*
* We could manage different device classes through the enumerated
* lists (C) or the use of classes (C++), but that is presently not
* fully supported
*
* N.B. the 3 input formatters are of 2 different classess
*/
#define USE_INPUT_SYSTEM_VERSION_2
#define HAS_MMU_VERSION_2
#define HAS_DMA_VERSION_2
#define HAS_GDC_VERSION_2
#define HAS_VAMEM_VERSION_2
#define HAS_HMEM_VERSION_1
#define HAS_BAMEM_VERSION_2
#define HAS_IRQ_VERSION_2
#define HAS_IRQ_MAP_VERSION_2
#define HAS_INPUT_FORMATTER_VERSION_2
/* 2401: HAS_INPUT_SYSTEM_VERSION_2401 */
#define HAS_INPUT_SYSTEM_VERSION_2
#define HAS_BUFFERED_SENSOR
#define HAS_FIFO_MONITORS_VERSION_2
/* #define HAS_GP_REGS_VERSION_2 */
#define HAS_GP_DEVICE_VERSION_2
#define HAS_GPIO_VERSION_1
#define HAS_TIMED_CTRL_VERSION_1
#define HAS_RX_VERSION_2
#define DMA_DDR_TO_VAMEM_WORKAROUND
#define DMA_DDR_TO_HMEM_WORKAROUND
/*
* Semi global. "HRT" is accessible from SP, but the HRT types do not fully apply
*/
#define HRT_VADDRESS_WIDTH 32
//#define HRT_ADDRESS_WIDTH 64 /* Surprise, this is a local property*/
#define HRT_DATA_WIDTH 32
#define SIZEOF_HRT_REG (HRT_DATA_WIDTH >> 3)
#define HIVE_ISP_CTRL_DATA_BYTES (HIVE_ISP_CTRL_DATA_WIDTH / 8)
/* The main bus connecting all devices */
#define HRT_BUS_WIDTH HIVE_ISP_CTRL_DATA_WIDTH
#define HRT_BUS_BYTES HIVE_ISP_CTRL_DATA_BYTES
/* per-frame parameter handling support */
#define SH_CSS_ENABLE_PER_FRAME_PARAMS
typedef u32 hrt_bus_align_t;
/*
* Enumerate the devices, device access through the API is by ID, through the DLI by address
* The enumerator terminators are used to size the wiring arrays and as an exception value.
*/
typedef enum {
DDR0_ID = 0,
N_DDR_ID
} ddr_ID_t;
typedef enum {
ISP0_ID = 0,
N_ISP_ID
} isp_ID_t;
typedef enum {
SP0_ID = 0,
N_SP_ID
} sp_ID_t;
typedef enum {
MMU0_ID = 0,
MMU1_ID,
N_MMU_ID
} mmu_ID_t;
typedef enum {
DMA0_ID = 0,
N_DMA_ID
} dma_ID_t;
typedef enum {
GDC0_ID = 0,
GDC1_ID,
N_GDC_ID
} gdc_ID_t;
#define N_GDC_ID_CPP 2 // this extra define is needed because we want to use it also in the preprocessor, and that doesn't work with enums.
typedef enum {
VAMEM0_ID = 0,
VAMEM1_ID,
VAMEM2_ID,
N_VAMEM_ID
} vamem_ID_t;
typedef enum {
BAMEM0_ID = 0,
N_BAMEM_ID
} bamem_ID_t;
typedef enum {
HMEM0_ID = 0,
N_HMEM_ID
} hmem_ID_t;
/*
typedef enum {
IRQ0_ID = 0,
N_IRQ_ID
} irq_ID_t;
*/
typedef enum {
IRQ0_ID = 0, // GP IRQ block
IRQ1_ID, // Input formatter
IRQ2_ID, // input system
IRQ3_ID, // input selector
N_IRQ_ID
} irq_ID_t;
typedef enum {
FIFO_MONITOR0_ID = 0,
N_FIFO_MONITOR_ID
} fifo_monitor_ID_t;
/*
* Deprecated: Since all gp_reg instances are different
* and put in the address maps of other devices we cannot
* enumerate them as that assumes the instrances are the
* same.
*
* We define a single GP_DEVICE containing all gp_regs
* w.r.t. a single base address
*
typedef enum {
GP_REGS0_ID = 0,
N_GP_REGS_ID
} gp_regs_ID_t;
*/
typedef enum {
GP_DEVICE0_ID = 0,
N_GP_DEVICE_ID
} gp_device_ID_t;
typedef enum {
GP_TIMER0_ID = 0,
GP_TIMER1_ID,
GP_TIMER2_ID,
GP_TIMER3_ID,
GP_TIMER4_ID,
GP_TIMER5_ID,
GP_TIMER6_ID,
GP_TIMER7_ID,
N_GP_TIMER_ID
} gp_timer_ID_t;
typedef enum {
GPIO0_ID = 0,
N_GPIO_ID
} gpio_ID_t;
typedef enum {
TIMED_CTRL0_ID = 0,
N_TIMED_CTRL_ID
} timed_ctrl_ID_t;
typedef enum {
INPUT_FORMATTER0_ID = 0,
INPUT_FORMATTER1_ID,
INPUT_FORMATTER2_ID,
INPUT_FORMATTER3_ID,
N_INPUT_FORMATTER_ID
} input_formatter_ID_t;
/* The IF RST is outside the IF */
#define INPUT_FORMATTER0_SRST_OFFSET 0x0824
#define INPUT_FORMATTER1_SRST_OFFSET 0x0624
#define INPUT_FORMATTER2_SRST_OFFSET 0x0424
#define INPUT_FORMATTER3_SRST_OFFSET 0x0224
#define INPUT_FORMATTER0_SRST_MASK 0x0001
#define INPUT_FORMATTER1_SRST_MASK 0x0002
#define INPUT_FORMATTER2_SRST_MASK 0x0004
#define INPUT_FORMATTER3_SRST_MASK 0x0008
typedef enum {
INPUT_SYSTEM0_ID = 0,
N_INPUT_SYSTEM_ID
} input_system_ID_t;
typedef enum {
RX0_ID = 0,
N_RX_ID
} rx_ID_t;
enum mipi_port_id {
MIPI_PORT0_ID = 0,
MIPI_PORT1_ID,
MIPI_PORT2_ID,
N_MIPI_PORT_ID
};
#define N_RX_CHANNEL_ID 4
/* Generic port enumeration with an internal port type ID */
typedef enum {
CSI_PORT0_ID = 0,
CSI_PORT1_ID,
CSI_PORT2_ID,
TPG_PORT0_ID,
PRBS_PORT0_ID,
FIFO_PORT0_ID,
MEMORY_PORT0_ID,
N_INPUT_PORT_ID
} input_port_ID_t;
typedef enum {
CAPTURE_UNIT0_ID = 0,
CAPTURE_UNIT1_ID,
CAPTURE_UNIT2_ID,
ACQUISITION_UNIT0_ID,
DMA_UNIT0_ID,
CTRL_UNIT0_ID,
GPREGS_UNIT0_ID,
FIFO_UNIT0_ID,
IRQ_UNIT0_ID,
N_SUB_SYSTEM_ID
} sub_system_ID_t;
#define N_CAPTURE_UNIT_ID 3
#define N_ACQUISITION_UNIT_ID 1
#define N_CTRL_UNIT_ID 1
enum ia_css_isp_memories {
IA_CSS_ISP_PMEM0 = 0,
IA_CSS_ISP_DMEM0,
IA_CSS_ISP_VMEM0,
IA_CSS_ISP_VAMEM0,
IA_CSS_ISP_VAMEM1,
IA_CSS_ISP_VAMEM2,
IA_CSS_ISP_HMEM0,
IA_CSS_SP_DMEM0,
IA_CSS_DDR,
N_IA_CSS_MEMORIES
};
#define IA_CSS_NUM_MEMORIES 9
/* For driver compatibility */
#define N_IA_CSS_ISP_MEMORIES IA_CSS_NUM_MEMORIES
#define IA_CSS_NUM_ISP_MEMORIES IA_CSS_NUM_MEMORIES
#if 0
typedef enum {
dev_chn, /* device channels, external resource */
ext_mem, /* external memories */
int_mem, /* internal memories */
int_chn /* internal channels, user defined */
} resource_type_t;
/* if this enum is extended with other memory resources, pls also extend the function resource_to_memptr() */
typedef enum {
vied_nci_dev_chn_dma_ext0,
int_mem_vmem0,
int_mem_dmem0
} resource_id_t;
/* enum listing the different memories within a program group.
This enum is used in the mem_ptr_t type */
typedef enum {
buf_mem_invalid = 0,
buf_mem_vmem_prog0,
buf_mem_dmem_prog0
} buf_mem_t;
#endif
#endif /* __SYSTEM_GLOBAL_H_INCLUDED__ */

View File

@@ -1,321 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2010-2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#ifndef __SYSTEM_LOCAL_H_INCLUDED__
#define __SYSTEM_LOCAL_H_INCLUDED__
#ifdef HRT_ISP_CSS_CUSTOM_HOST
#ifndef HRT_USE_VIR_ADDRS
#define HRT_USE_VIR_ADDRS
#endif
#endif
#include "system_global.h"
/* HRT assumes 32 by default (see Linux/include/hive_types.h), overrule it in case it is different */
#undef HRT_ADDRESS_WIDTH
#define HRT_ADDRESS_WIDTH 64 /* Surprise, this is a local property */
/* This interface is deprecated */
#include "hive_types.h"
/*
* Cell specific address maps
*/
#if HRT_ADDRESS_WIDTH == 64
#define GP_FIFO_BASE ((hrt_address)0x0000000000090104) /* This is NOT a base address */
/* DDR */
static const hrt_address DDR_BASE[N_DDR_ID] = {
(hrt_address)0x0000000120000000ULL
};
/* ISP */
static const hrt_address ISP_CTRL_BASE[N_ISP_ID] = {
(hrt_address)0x0000000000020000ULL
};
static const hrt_address ISP_DMEM_BASE[N_ISP_ID] = {
(hrt_address)0x0000000000200000ULL
};
static const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID] = {
(hrt_address)0x0000000000100000ULL
};
static const hrt_address ISP_VAMEM_BASE[N_VAMEM_ID] = {
(hrt_address)0x00000000001C0000ULL,
(hrt_address)0x00000000001D0000ULL,
(hrt_address)0x00000000001E0000ULL
};
static const hrt_address ISP_HMEM_BASE[N_HMEM_ID] = {
(hrt_address)0x00000000001F0000ULL
};
/* SP */
static const hrt_address SP_CTRL_BASE[N_SP_ID] = {
(hrt_address)0x0000000000010000ULL
};
static const hrt_address SP_DMEM_BASE[N_SP_ID] = {
(hrt_address)0x0000000000300000ULL
};
static const hrt_address SP_PMEM_BASE[N_SP_ID] = {
(hrt_address)0x00000000000B0000ULL
};
/* MMU */
/*
* MMU0_ID: The data MMU
* MMU1_ID: The icache MMU
*/
static const hrt_address MMU_BASE[N_MMU_ID] = {
(hrt_address)0x0000000000070000ULL,
(hrt_address)0x00000000000A0000ULL
};
/* DMA */
static const hrt_address DMA_BASE[N_DMA_ID] = {
(hrt_address)0x0000000000040000ULL
};
/* IRQ */
static const hrt_address IRQ_BASE[N_IRQ_ID] = {
(hrt_address)0x0000000000000500ULL,
(hrt_address)0x0000000000030A00ULL,
(hrt_address)0x000000000008C000ULL,
(hrt_address)0x0000000000090200ULL
};
/*
(hrt_address)0x0000000000000500ULL};
*/
/* GDC */
static const hrt_address GDC_BASE[N_GDC_ID] = {
(hrt_address)0x0000000000050000ULL,
(hrt_address)0x0000000000060000ULL
};
/* FIFO_MONITOR (not a subset of GP_DEVICE) */
static const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID] = {
(hrt_address)0x0000000000000000ULL
};
/*
static const hrt_address GP_REGS_BASE[N_GP_REGS_ID] = {
(hrt_address)0x0000000000000000ULL};
static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
(hrt_address)0x0000000000090000ULL};
*/
/* GP_DEVICE (single base for all separate GP_REG instances) */
static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
(hrt_address)0x0000000000000000ULL
};
/*GP TIMER , all timer registers are inter-twined,
* so, having multiple base addresses for
* different timers does not help*/
static const hrt_address GP_TIMER_BASE =
(hrt_address)0x0000000000000600ULL;
/* GPIO */
static const hrt_address GPIO_BASE[N_GPIO_ID] = {
(hrt_address)0x0000000000000400ULL
};
/* TIMED_CTRL */
static const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID] = {
(hrt_address)0x0000000000000100ULL
};
/* INPUT_FORMATTER */
static const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID] = {
(hrt_address)0x0000000000030000ULL,
(hrt_address)0x0000000000030200ULL,
(hrt_address)0x0000000000030400ULL,
(hrt_address)0x0000000000030600ULL
}; /* memcpy() */
/* INPUT_SYSTEM */
static const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID] = {
(hrt_address)0x0000000000080000ULL
};
/* (hrt_address)0x0000000000081000ULL, */ /* capture A */
/* (hrt_address)0x0000000000082000ULL, */ /* capture B */
/* (hrt_address)0x0000000000083000ULL, */ /* capture C */
/* (hrt_address)0x0000000000084000ULL, */ /* Acquisition */
/* (hrt_address)0x0000000000085000ULL, */ /* DMA */
/* (hrt_address)0x0000000000089000ULL, */ /* ctrl */
/* (hrt_address)0x000000000008A000ULL, */ /* GP regs */
/* (hrt_address)0x000000000008B000ULL, */ /* FIFO */
/* (hrt_address)0x000000000008C000ULL, */ /* IRQ */
/* RX, the MIPI lane control regs start at offset 0 */
static const hrt_address RX_BASE[N_RX_ID] = {
(hrt_address)0x0000000000080100ULL
};
#elif HRT_ADDRESS_WIDTH == 32
#define GP_FIFO_BASE ((hrt_address)0x00090104) /* This is NOT a base address */
/* DDR : Attention, this value not defined in 32-bit */
static const hrt_address DDR_BASE[N_DDR_ID] = {
(hrt_address)0x00000000UL
};
/* ISP */
static const hrt_address ISP_CTRL_BASE[N_ISP_ID] = {
(hrt_address)0x00020000UL
};
static const hrt_address ISP_DMEM_BASE[N_ISP_ID] = {
(hrt_address)0x00200000UL
};
static const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID] = {
(hrt_address)0x100000UL
};
static const hrt_address ISP_VAMEM_BASE[N_VAMEM_ID] = {
(hrt_address)0xffffffffUL,
(hrt_address)0xffffffffUL,
(hrt_address)0xffffffffUL
};
static const hrt_address ISP_HMEM_BASE[N_HMEM_ID] = {
(hrt_address)0xffffffffUL
};
/* SP */
static const hrt_address SP_CTRL_BASE[N_SP_ID] = {
(hrt_address)0x00010000UL
};
static const hrt_address SP_DMEM_BASE[N_SP_ID] = {
(hrt_address)0x00300000UL
};
static const hrt_address SP_PMEM_BASE[N_SP_ID] = {
(hrt_address)0x000B0000UL
};
/* MMU */
/*
* MMU0_ID: The data MMU
* MMU1_ID: The icache MMU
*/
static const hrt_address MMU_BASE[N_MMU_ID] = {
(hrt_address)0x00070000UL,
(hrt_address)0x000A0000UL
};
/* DMA */
static const hrt_address DMA_BASE[N_DMA_ID] = {
(hrt_address)0x00040000UL
};
/* IRQ */
static const hrt_address IRQ_BASE[N_IRQ_ID] = {
(hrt_address)0x00000500UL,
(hrt_address)0x00030A00UL,
(hrt_address)0x0008C000UL,
(hrt_address)0x00090200UL
};
/*
(hrt_address)0x00000500UL};
*/
/* GDC */
static const hrt_address GDC_BASE[N_GDC_ID] = {
(hrt_address)0x00050000UL,
(hrt_address)0x00060000UL
};
/* FIFO_MONITOR (not a subset of GP_DEVICE) */
static const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID] = {
(hrt_address)0x00000000UL
};
/*
static const hrt_address GP_REGS_BASE[N_GP_REGS_ID] = {
(hrt_address)0x00000000UL};
static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
(hrt_address)0x00090000UL};
*/
/* GP_DEVICE (single base for all separate GP_REG instances) */
static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
(hrt_address)0x00000000UL
};
/*GP TIMER , all timer registers are inter-twined,
* so, having multiple base addresses for
* different timers does not help*/
static const hrt_address GP_TIMER_BASE =
(hrt_address)0x00000600UL;
/* GPIO */
static const hrt_address GPIO_BASE[N_GPIO_ID] = {
(hrt_address)0x00000400UL
};
/* TIMED_CTRL */
static const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID] = {
(hrt_address)0x00000100UL
};
/* INPUT_FORMATTER */
static const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID] = {
(hrt_address)0x00030000UL,
(hrt_address)0x00030200UL,
(hrt_address)0x00030400UL
};
/* (hrt_address)0x00030600UL, */ /* memcpy() */
/* INPUT_SYSTEM */
static const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID] = {
(hrt_address)0x00080000UL
};
/* (hrt_address)0x00081000UL, */ /* capture A */
/* (hrt_address)0x00082000UL, */ /* capture B */
/* (hrt_address)0x00083000UL, */ /* capture C */
/* (hrt_address)0x00084000UL, */ /* Acquisition */
/* (hrt_address)0x00085000UL, */ /* DMA */
/* (hrt_address)0x00089000UL, */ /* ctrl */
/* (hrt_address)0x0008A000UL, */ /* GP regs */
/* (hrt_address)0x0008B000UL, */ /* FIFO */
/* (hrt_address)0x0008C000UL, */ /* IRQ */
/* RX, the MIPI lane control regs start at offset 0 */
static const hrt_address RX_BASE[N_RX_ID] = {
(hrt_address)0x00080100UL
};
#else
#error "system_local.h: HRT_ADDRESS_WIDTH must be one of {32,64}"
#endif
#endif /* __SYSTEM_LOCAL_H_INCLUDED__ */

View File

@@ -13,415 +13,7 @@
* more details.
*/
#ifndef __SYSTEM_GLOBAL_H_INCLUDED__
#define __SYSTEM_GLOBAL_H_INCLUDED__
#include <hive_isp_css_defs.h>
#include <type_support.h>
/*
* The longest allowed (uninteruptible) bus transfer, does not
* take stalling into account
*/
#define HIVE_ISP_MAX_BURST_LENGTH 1024
/*
* Maximum allowed burst length in words for the ISP DMA
* This value is set to 2 to prevent the ISP DMA from blocking
* the bus for too long; as the input system can only buffer
* 2 lines on Moorefield and Cherrytrail, the input system buffers
* may overflow if blocked for too long (BZ 2726).
*/
#define ISP_DMA_MAX_BURST_LENGTH 2
/*
* Create a list of HAS and IS properties that defines the system
*
* The configuration assumes the following
* - The system is hetereogeneous; Multiple cells and devices classes
* - The cell and device instances are homogeneous, each device type
* belongs to the same class
* - Device instances supporting a subset of the class capabilities are
* allowed
*
* We could manage different device classes through the enumerated
* lists (C) or the use of classes (C++), but that is presently not
* fully supported
*
* N.B. the 3 input formatters are of 2 different classess
*/
#define USE_INPUT_SYSTEM_VERSION_2401
#define HAS_MMU_VERSION_2
#define HAS_DMA_VERSION_2
#define HAS_GDC_VERSION_2
#define HAS_VAMEM_VERSION_2
#define HAS_HMEM_VERSION_1
#define HAS_BAMEM_VERSION_2
#define HAS_IRQ_VERSION_2
#define HAS_IRQ_MAP_VERSION_2
#define HAS_INPUT_FORMATTER_VERSION_2
/* 2401: HAS_INPUT_SYSTEM_VERSION_3 */
/* 2400: HAS_INPUT_SYSTEM_VERSION_2 */
#define HAS_INPUT_SYSTEM_VERSION_2
#define HAS_INPUT_SYSTEM_VERSION_2401
#define HAS_BUFFERED_SENSOR
#define HAS_FIFO_MONITORS_VERSION_2
/* #define HAS_GP_REGS_VERSION_2 */
#define HAS_GP_DEVICE_VERSION_2
#define HAS_GPIO_VERSION_1
#define HAS_TIMED_CTRL_VERSION_1
#define HAS_RX_VERSION_2
#define HAS_NO_INPUT_FORMATTER
/*#define HAS_NO_PACKED_RAW_PIXELS*/
/*#define HAS_NO_DVS_6AXIS_CONFIG_UPDATE*/
#define DMA_DDR_TO_VAMEM_WORKAROUND
#define DMA_DDR_TO_HMEM_WORKAROUND
/*
* Semi global. "HRT" is accessible from SP, but
* the HRT types do not fully apply
*/
#define HRT_VADDRESS_WIDTH 32
/* Surprise, this is a local property*/
/*#define HRT_ADDRESS_WIDTH 64 */
#define HRT_DATA_WIDTH 32
#define SIZEOF_HRT_REG (HRT_DATA_WIDTH >> 3)
#define HIVE_ISP_CTRL_DATA_BYTES (HIVE_ISP_CTRL_DATA_WIDTH / 8)
/* The main bus connecting all devices */
#define HRT_BUS_WIDTH HIVE_ISP_CTRL_DATA_WIDTH
#define HRT_BUS_BYTES HIVE_ISP_CTRL_DATA_BYTES
#define USE_INPUT_SYSTEM_VERSION_2401
#define HAS_INPUT_SYSTEM_VERSION_2401
#define CSI2P_DISABLE_ISYS2401_ONLINE_MODE
/* per-frame parameter handling support */
#define SH_CSS_ENABLE_PER_FRAME_PARAMS
typedef u32 hrt_bus_align_t;
/*
* Enumerate the devices, device access through the API is by ID,
* through the DLI by address. The enumerator terminators are used
* to size the wiring arrays and as an exception value.
*/
typedef enum {
DDR0_ID = 0,
N_DDR_ID
} ddr_ID_t;
typedef enum {
ISP0_ID = 0,
N_ISP_ID
} isp_ID_t;
typedef enum {
SP0_ID = 0,
N_SP_ID
} sp_ID_t;
typedef enum {
MMU0_ID = 0,
MMU1_ID,
N_MMU_ID
} mmu_ID_t;
typedef enum {
DMA0_ID = 0,
N_DMA_ID
} dma_ID_t;
typedef enum {
GDC0_ID = 0,
GDC1_ID,
N_GDC_ID
} gdc_ID_t;
/* this extra define is needed because we want to use it also
in the preprocessor, and that doesn't work with enums.
*/
#define N_GDC_ID_CPP 2
typedef enum {
VAMEM0_ID = 0,
VAMEM1_ID,
VAMEM2_ID,
N_VAMEM_ID
} vamem_ID_t;
typedef enum {
BAMEM0_ID = 0,
N_BAMEM_ID
} bamem_ID_t;
typedef enum {
HMEM0_ID = 0,
N_HMEM_ID
} hmem_ID_t;
typedef enum {
ISYS_IRQ0_ID = 0, /* port a */
ISYS_IRQ1_ID, /* port b */
ISYS_IRQ2_ID, /* port c */
N_ISYS_IRQ_ID
} isys_irq_ID_t;
typedef enum {
IRQ0_ID = 0, /* GP IRQ block */
IRQ1_ID, /* Input formatter */
IRQ2_ID, /* input system */
IRQ3_ID, /* input selector */
N_IRQ_ID
} irq_ID_t;
typedef enum {
FIFO_MONITOR0_ID = 0,
N_FIFO_MONITOR_ID
} fifo_monitor_ID_t;
/*
* Deprecated: Since all gp_reg instances are different
* and put in the address maps of other devices we cannot
* enumerate them as that assumes the instances are the
* same.
*
* We define a single GP_DEVICE containing all gp_regs
* w.r.t. a single base address
*
typedef enum {
GP_REGS0_ID = 0,
N_GP_REGS_ID
} gp_regs_ID_t;
*/
typedef enum {
GP_DEVICE0_ID = 0,
N_GP_DEVICE_ID
} gp_device_ID_t;
typedef enum {
GP_TIMER0_ID = 0,
GP_TIMER1_ID,
GP_TIMER2_ID,
GP_TIMER3_ID,
GP_TIMER4_ID,
GP_TIMER5_ID,
GP_TIMER6_ID,
GP_TIMER7_ID,
N_GP_TIMER_ID
} gp_timer_ID_t;
typedef enum {
GPIO0_ID = 0,
N_GPIO_ID
} gpio_ID_t;
typedef enum {
TIMED_CTRL0_ID = 0,
N_TIMED_CTRL_ID
} timed_ctrl_ID_t;
typedef enum {
INPUT_FORMATTER0_ID = 0,
INPUT_FORMATTER1_ID,
INPUT_FORMATTER2_ID,
INPUT_FORMATTER3_ID,
N_INPUT_FORMATTER_ID
} input_formatter_ID_t;
/* The IF RST is outside the IF */
#define INPUT_FORMATTER0_SRST_OFFSET 0x0824
#define INPUT_FORMATTER1_SRST_OFFSET 0x0624
#define INPUT_FORMATTER2_SRST_OFFSET 0x0424
#define INPUT_FORMATTER3_SRST_OFFSET 0x0224
#define INPUT_FORMATTER0_SRST_MASK 0x0001
#define INPUT_FORMATTER1_SRST_MASK 0x0002
#define INPUT_FORMATTER2_SRST_MASK 0x0004
#define INPUT_FORMATTER3_SRST_MASK 0x0008
typedef enum {
INPUT_SYSTEM0_ID = 0,
N_INPUT_SYSTEM_ID
} input_system_ID_t;
typedef enum {
RX0_ID = 0,
N_RX_ID
} rx_ID_t;
enum mipi_port_id {
MIPI_PORT0_ID = 0,
MIPI_PORT1_ID,
MIPI_PORT2_ID,
N_MIPI_PORT_ID
};
#define N_RX_CHANNEL_ID 4
/* Generic port enumeration with an internal port type ID */
typedef enum {
CSI_PORT0_ID = 0,
CSI_PORT1_ID,
CSI_PORT2_ID,
TPG_PORT0_ID,
PRBS_PORT0_ID,
FIFO_PORT0_ID,
MEMORY_PORT0_ID,
N_INPUT_PORT_ID
} input_port_ID_t;
typedef enum {
CAPTURE_UNIT0_ID = 0,
CAPTURE_UNIT1_ID,
CAPTURE_UNIT2_ID,
ACQUISITION_UNIT0_ID,
DMA_UNIT0_ID,
CTRL_UNIT0_ID,
GPREGS_UNIT0_ID,
FIFO_UNIT0_ID,
IRQ_UNIT0_ID,
N_SUB_SYSTEM_ID
} sub_system_ID_t;
#define N_CAPTURE_UNIT_ID 3
#define N_ACQUISITION_UNIT_ID 1
#define N_CTRL_UNIT_ID 1
/*
* Input-buffer Controller.
*/
typedef enum {
IBUF_CTRL0_ID = 0, /* map to ISYS2401_IBUF_CNTRL_A */
IBUF_CTRL1_ID, /* map to ISYS2401_IBUF_CNTRL_B */
IBUF_CTRL2_ID, /* map to ISYS2401_IBUF_CNTRL_C */
N_IBUF_CTRL_ID
} ibuf_ctrl_ID_t;
/* end of Input-buffer Controller */
/*
* Stream2MMIO.
*/
typedef enum {
STREAM2MMIO0_ID = 0, /* map to ISYS2401_S2M_A */
STREAM2MMIO1_ID, /* map to ISYS2401_S2M_B */
STREAM2MMIO2_ID, /* map to ISYS2401_S2M_C */
N_STREAM2MMIO_ID
} stream2mmio_ID_t;
typedef enum {
/*
* Stream2MMIO 0 has 8 SIDs that are indexed by
* [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID7_ID].
*
* Stream2MMIO 1 has 4 SIDs that are indexed by
* [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID3_ID].
*
* Stream2MMIO 2 has 4 SIDs that are indexed by
* [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID3_ID].
*/
STREAM2MMIO_SID0_ID = 0,
STREAM2MMIO_SID1_ID,
STREAM2MMIO_SID2_ID,
STREAM2MMIO_SID3_ID,
STREAM2MMIO_SID4_ID,
STREAM2MMIO_SID5_ID,
STREAM2MMIO_SID6_ID,
STREAM2MMIO_SID7_ID,
N_STREAM2MMIO_SID_ID
} stream2mmio_sid_ID_t;
/* end of Stream2MMIO */
/**
* Input System 2401: CSI-MIPI receiver.
*/
typedef enum {
CSI_RX_BACKEND0_ID = 0, /* map to ISYS2401_MIPI_BE_A */
CSI_RX_BACKEND1_ID, /* map to ISYS2401_MIPI_BE_B */
CSI_RX_BACKEND2_ID, /* map to ISYS2401_MIPI_BE_C */
N_CSI_RX_BACKEND_ID
} csi_rx_backend_ID_t;
typedef enum {
CSI_RX_FRONTEND0_ID = 0, /* map to ISYS2401_CSI_RX_A */
CSI_RX_FRONTEND1_ID, /* map to ISYS2401_CSI_RX_B */
CSI_RX_FRONTEND2_ID, /* map to ISYS2401_CSI_RX_C */
#define N_CSI_RX_FRONTEND_ID (CSI_RX_FRONTEND2_ID + 1)
} csi_rx_frontend_ID_t;
typedef enum {
CSI_RX_DLANE0_ID = 0, /* map to DLANE0 in CSI RX */
CSI_RX_DLANE1_ID, /* map to DLANE1 in CSI RX */
CSI_RX_DLANE2_ID, /* map to DLANE2 in CSI RX */
CSI_RX_DLANE3_ID, /* map to DLANE3 in CSI RX */
N_CSI_RX_DLANE_ID
} csi_rx_fe_dlane_ID_t;
/* end of CSI-MIPI receiver */
typedef enum {
ISYS2401_DMA0_ID = 0,
N_ISYS2401_DMA_ID
} isys2401_dma_ID_t;
/**
* Pixel-generator. ("system_global.h")
*/
typedef enum {
PIXELGEN0_ID = 0,
PIXELGEN1_ID,
PIXELGEN2_ID,
N_PIXELGEN_ID
} pixelgen_ID_t;
/* end of pixel-generator. ("system_global.h") */
typedef enum {
INPUT_SYSTEM_CSI_PORT0_ID = 0,
INPUT_SYSTEM_CSI_PORT1_ID,
INPUT_SYSTEM_CSI_PORT2_ID,
INPUT_SYSTEM_PIXELGEN_PORT0_ID,
INPUT_SYSTEM_PIXELGEN_PORT1_ID,
INPUT_SYSTEM_PIXELGEN_PORT2_ID,
N_INPUT_SYSTEM_INPUT_PORT_ID
} input_system_input_port_ID_t;
#define N_INPUT_SYSTEM_CSI_PORT 3
typedef enum {
ISYS2401_DMA_CHANNEL_0 = 0,
ISYS2401_DMA_CHANNEL_1,
ISYS2401_DMA_CHANNEL_2,
ISYS2401_DMA_CHANNEL_3,
ISYS2401_DMA_CHANNEL_4,
ISYS2401_DMA_CHANNEL_5,
ISYS2401_DMA_CHANNEL_6,
ISYS2401_DMA_CHANNEL_7,
ISYS2401_DMA_CHANNEL_8,
ISYS2401_DMA_CHANNEL_9,
ISYS2401_DMA_CHANNEL_10,
ISYS2401_DMA_CHANNEL_11,
N_ISYS2401_DMA_CHANNEL
} isys2401_dma_channel;
enum ia_css_isp_memories {
IA_CSS_ISP_PMEM0 = 0,
IA_CSS_ISP_DMEM0,
IA_CSS_ISP_VMEM0,
IA_CSS_ISP_VAMEM0,
IA_CSS_ISP_VAMEM1,
IA_CSS_ISP_VAMEM2,
IA_CSS_ISP_HMEM0,
IA_CSS_SP_DMEM0,
IA_CSS_DDR,
N_IA_CSS_MEMORIES
};
#define IA_CSS_NUM_MEMORIES 9
/* For driver compatibility */
#define N_IA_CSS_ISP_MEMORIES IA_CSS_NUM_MEMORIES
#define IA_CSS_NUM_ISP_MEMORIES IA_CSS_NUM_MEMORIES
#endif /* __SYSTEM_GLOBAL_H_INCLUDED__ */

View File

@@ -1,402 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#ifndef __SYSTEM_LOCAL_H_INCLUDED__
#define __SYSTEM_LOCAL_H_INCLUDED__
#ifdef HRT_ISP_CSS_CUSTOM_HOST
#ifndef HRT_USE_VIR_ADDRS
#define HRT_USE_VIR_ADDRS
#endif
#endif
#include "system_global.h"
#define HRT_ADDRESS_WIDTH 64 /* Surprise, this is a local property */
/* This interface is deprecated */
#include "hive_types.h"
/*
* Cell specific address maps
*/
#if HRT_ADDRESS_WIDTH == 64
#define GP_FIFO_BASE ((hrt_address)0x0000000000090104) /* This is NOT a base address */
/* DDR */
static const hrt_address DDR_BASE[N_DDR_ID] = {
0x0000000120000000ULL
};
/* ISP */
static const hrt_address ISP_CTRL_BASE[N_ISP_ID] = {
0x0000000000020000ULL
};
static const hrt_address ISP_DMEM_BASE[N_ISP_ID] = {
0x0000000000200000ULL
};
static const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID] = {
0x0000000000100000ULL
};
static const hrt_address ISP_VAMEM_BASE[N_VAMEM_ID] = {
0x00000000001C0000ULL,
0x00000000001D0000ULL,
0x00000000001E0000ULL
};
static const hrt_address ISP_HMEM_BASE[N_HMEM_ID] = {
0x00000000001F0000ULL
};
/* SP */
static const hrt_address SP_CTRL_BASE[N_SP_ID] = {
0x0000000000010000ULL
};
static const hrt_address SP_DMEM_BASE[N_SP_ID] = {
0x0000000000300000ULL
};
/* MMU */
/*
* MMU0_ID: The data MMU
* MMU1_ID: The icache MMU
*/
static const hrt_address MMU_BASE[N_MMU_ID] = {
0x0000000000070000ULL,
0x00000000000A0000ULL
};
/* DMA */
static const hrt_address DMA_BASE[N_DMA_ID] = {
0x0000000000040000ULL
};
static const hrt_address ISYS2401_DMA_BASE[N_ISYS2401_DMA_ID] = {
0x00000000000CA000ULL
};
/* IRQ */
static const hrt_address IRQ_BASE[N_IRQ_ID] = {
0x0000000000000500ULL,
0x0000000000030A00ULL,
0x000000000008C000ULL,
0x0000000000090200ULL
};
/*
0x0000000000000500ULL};
*/
/* GDC */
static const hrt_address GDC_BASE[N_GDC_ID] = {
0x0000000000050000ULL,
0x0000000000060000ULL
};
/* FIFO_MONITOR (not a subset of GP_DEVICE) */
static const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID] = {
0x0000000000000000ULL
};
/*
static const hrt_address GP_REGS_BASE[N_GP_REGS_ID] = {
0x0000000000000000ULL};
static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
0x0000000000090000ULL};
*/
/* GP_DEVICE (single base for all separate GP_REG instances) */
static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
0x0000000000000000ULL
};
/* GP TIMER: all timer registers are intertwined,
* so having multiple base addresses for
* different timers does not help */
static const hrt_address GP_TIMER_BASE =
(hrt_address)0x0000000000000600ULL;
/* GPIO */
static const hrt_address GPIO_BASE[N_GPIO_ID] = {
0x0000000000000400ULL
};
/* TIMED_CTRL */
static const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID] = {
0x0000000000000100ULL
};
/* INPUT_FORMATTER */
static const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID] = {
0x0000000000030000ULL,
0x0000000000030200ULL,
0x0000000000030400ULL,
0x0000000000030600ULL
}; /* memcpy() */
/* INPUT_SYSTEM */
static const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID] = {
0x0000000000080000ULL
};
/* 0x0000000000081000ULL, */ /* capture A */
/* 0x0000000000082000ULL, */ /* capture B */
/* 0x0000000000083000ULL, */ /* capture C */
/* 0x0000000000084000ULL, */ /* Acquisition */
/* 0x0000000000085000ULL, */ /* DMA */
/* 0x0000000000089000ULL, */ /* ctrl */
/* 0x000000000008A000ULL, */ /* GP regs */
/* 0x000000000008B000ULL, */ /* FIFO */
/* 0x000000000008C000ULL, */ /* IRQ */
/* RX, the MIPI lane control regs start at offset 0 */
static const hrt_address RX_BASE[N_RX_ID] = {
0x0000000000080100ULL
};
/* IBUF_CTRL, part of the Input System 2401 */
static const hrt_address IBUF_CTRL_BASE[N_IBUF_CTRL_ID] = {
0x00000000000C1800ULL, /* ibuf controller A */
0x00000000000C3800ULL, /* ibuf controller B */
0x00000000000C5800ULL /* ibuf controller C */
};
/* ISYS IRQ Controllers, part of the Input System 2401 */
static const hrt_address ISYS_IRQ_BASE[N_ISYS_IRQ_ID] = {
0x00000000000C1400ULL, /* port a */
0x00000000000C3400ULL, /* port b */
0x00000000000C5400ULL /* port c */
};
/* CSI FE, part of the Input System 2401 */
static const hrt_address CSI_RX_FE_CTRL_BASE[N_CSI_RX_FRONTEND_ID] = {
0x00000000000C0400ULL, /* csi fe controller A */
0x00000000000C2400ULL, /* csi fe controller B */
0x00000000000C4400ULL /* csi fe controller C */
};
/* CSI BE, part of the Input System 2401 */
static const hrt_address CSI_RX_BE_CTRL_BASE[N_CSI_RX_BACKEND_ID] = {
0x00000000000C0800ULL, /* csi be controller A */
0x00000000000C2800ULL, /* csi be controller B */
0x00000000000C4800ULL /* csi be controller C */
};
/* PIXEL Generator, part of the Input System 2401 */
static const hrt_address PIXELGEN_CTRL_BASE[N_PIXELGEN_ID] = {
0x00000000000C1000ULL, /* pixel gen controller A */
0x00000000000C3000ULL, /* pixel gen controller B */
0x00000000000C5000ULL /* pixel gen controller C */
};
/* Stream2MMIO, part of the Input System 2401 */
static const hrt_address STREAM2MMIO_CTRL_BASE[N_STREAM2MMIO_ID] = {
0x00000000000C0C00ULL, /* stream2mmio controller A */
0x00000000000C2C00ULL, /* stream2mmio controller B */
0x00000000000C4C00ULL /* stream2mmio controller C */
};
#elif HRT_ADDRESS_WIDTH == 32
#define GP_FIFO_BASE ((hrt_address)0x00090104) /* This is NOT a base address */
/* DDR : Attention, this value is not defined in 32-bit */
static const hrt_address DDR_BASE[N_DDR_ID] = {
0x00000000UL
};
/* ISP */
static const hrt_address ISP_CTRL_BASE[N_ISP_ID] = {
0x00020000UL
};
static const hrt_address ISP_DMEM_BASE[N_ISP_ID] = {
0xffffffffUL
};
static const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID] = {
0xffffffffUL
};
static const hrt_address ISP_VAMEM_BASE[N_VAMEM_ID] = {
0xffffffffUL,
0xffffffffUL,
0xffffffffUL
};
static const hrt_address ISP_HMEM_BASE[N_HMEM_ID] = {
0xffffffffUL
};
/* SP */
static const hrt_address SP_CTRL_BASE[N_SP_ID] = {
0x00010000UL
};
static const hrt_address SP_DMEM_BASE[N_SP_ID] = {
0x00300000UL
};
/* MMU */
/*
* MMU0_ID: The data MMU
* MMU1_ID: The icache MMU
*/
static const hrt_address MMU_BASE[N_MMU_ID] = {
0x00070000UL,
0x000A0000UL
};
/* DMA */
static const hrt_address DMA_BASE[N_DMA_ID] = {
0x00040000UL
};
static const hrt_address ISYS2401_DMA_BASE[N_ISYS2401_DMA_ID] = {
0x000CA000UL
};
/* IRQ */
static const hrt_address IRQ_BASE[N_IRQ_ID] = {
0x00000500UL,
0x00030A00UL,
0x0008C000UL,
0x00090200UL
};
/*
0x00000500UL};
*/
/* GDC */
static const hrt_address GDC_BASE[N_GDC_ID] = {
0x00050000UL,
0x00060000UL
};
/* FIFO_MONITOR (not a subset of GP_DEVICE) */
static const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID] = {
0x00000000UL
};
/*
static const hrt_address GP_REGS_BASE[N_GP_REGS_ID] = {
0x00000000UL};
static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
0x00090000UL};
*/
/* GP_DEVICE (single base for all separate GP_REG instances) */
static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
0x00000000UL
};
/* GP TIMER: all timer registers are intertwined,
* so having multiple base addresses for
* different timers does not help */
static const hrt_address GP_TIMER_BASE =
(hrt_address)0x00000600UL;
/* GPIO */
static const hrt_address GPIO_BASE[N_GPIO_ID] = {
0x00000400UL
};
/* TIMED_CTRL */
static const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID] = {
0x00000100UL
};
/* INPUT_FORMATTER */
static const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID] = {
0x00030000UL,
0x00030200UL,
0x00030400UL
};
/* 0x00030600UL, */ /* memcpy() */
/* INPUT_SYSTEM */
static const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID] = {
0x00080000UL
};
/* 0x00081000UL, */ /* capture A */
/* 0x00082000UL, */ /* capture B */
/* 0x00083000UL, */ /* capture C */
/* 0x00084000UL, */ /* Acquisition */
/* 0x00085000UL, */ /* DMA */
/* 0x00089000UL, */ /* ctrl */
/* 0x0008A000UL, */ /* GP regs */
/* 0x0008B000UL, */ /* FIFO */
/* 0x0008C000UL, */ /* IRQ */
/* RX, the MIPI lane control regs start at offset 0 */
static const hrt_address RX_BASE[N_RX_ID] = {
0x00080100UL
};
/* IBUF_CTRL, part of the Input System 2401 */
static const hrt_address IBUF_CTRL_BASE[N_IBUF_CTRL_ID] = {
0x000C1800UL, /* ibuf controller A */
0x000C3800UL, /* ibuf controller B */
0x000C5800UL /* ibuf controller C */
};
/* ISYS IRQ Controllers, part of the Input System 2401 */
static const hrt_address ISYS_IRQ_BASE[N_ISYS_IRQ_ID] = {
0x000C1400ULL, /* port a */
0x000C3400ULL, /* port b */
0x000C5400ULL /* port c */
};
/* CSI FE, part of the Input System 2401 */
static const hrt_address CSI_RX_FE_CTRL_BASE[N_CSI_RX_FRONTEND_ID] = {
0x000C0400UL, /* csi fe controller A */
0x000C2400UL, /* csi fe controller B */
0x000C4400UL /* csi fe controller C */
};
/* CSI BE, part of the Input System 2401 */
static const hrt_address CSI_RX_FE_CTRL_BASE[N_CSI_RX_BACKEND_ID] = {
0x000C0800UL, /* csi be controller A */
0x000C2800UL, /* csi be controller B */
0x000C4800UL /* csi be controller C */
};
/* PIXEL Generator, part of the Input System 2401 */
static const hrt_address PIXELGEN_CTRL_BASE[N_PIXELGEN_ID] = {
0x000C1000UL, /* pixel gen controller A */
0x000C3000UL, /* pixel gen controller B */
0x000C5000UL /* pixel gen controller C */
};
/* Stream2MMIO, part of the Input System 2401 */
static const hrt_address STREAM2MMIO_CTRL_BASE[N_STREAM2MMIO_ID] = {
0x000C0C00UL, /* stream2mmio controller A */
0x000C2C00UL, /* stream2mmio controller B */
0x000C4C00UL /* stream2mmio controller C */
};
#else
#error "system_local.h: HRT_ADDRESS_WIDTH must be one of {32,64}"
#endif
#endif /* __SYSTEM_LOCAL_H_INCLUDED__ */

View File

@@ -1841,8 +1841,13 @@ ia_css_init(struct device *dev, const struct ia_css_env *env,
#endif
#if !defined(HAS_NO_INPUT_SYSTEM)
if (!IS_ISP2401)
dma_set_max_burst_size(DMA0_ID, HIVE_DMA_BUS_DDR_CONN,
ISP_DMA_MAX_BURST_LENGTH);
ISP2400_DMA_MAX_BURST_LENGTH);
else
dma_set_max_burst_size(DMA0_ID, HIVE_DMA_BUS_DDR_CONN,
ISP2401_DMA_MAX_BURST_LENGTH);
if (ia_css_isys_init() != INPUT_SYSTEM_ERR_NO_ERROR)
err = -EINVAL;

View File

@@ -4,8 +4,403 @@
* (c) 2020 Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
*/
#ifndef __SYSTEM_GLOBAL_H_INCLUDED__
#define __SYSTEM_GLOBAL_H_INCLUDED__
/*
* Create a list of HAS and IS properties that defines the system
* Those are common for both ISP2400 and ISP2401
*
* The configuration assumes the following
* - The system is heterogeneous; multiple cells and device classes
* - The cell and device instances are homogeneous, each device type
* belongs to the same class
* - Device instances supporting a subset of the class capabilities are
* allowed
*
* We could manage different device classes through the enumerated
* lists (C) or the use of classes (C++), but that is presently not
* fully supported
*
* N.B. the 3 input formatters are of 2 different classes
*/
#define HAS_MMU_VERSION_2
#define HAS_DMA_VERSION_2
#define HAS_GDC_VERSION_2
#define HAS_VAMEM_VERSION_2
#define HAS_HMEM_VERSION_1
#define HAS_BAMEM_VERSION_2
#define HAS_IRQ_VERSION_2
#define HAS_IRQ_MAP_VERSION_2
#define HAS_INPUT_FORMATTER_VERSION_2
#define HAS_INPUT_SYSTEM_VERSION_2
#define HAS_BUFFERED_SENSOR
#define HAS_FIFO_MONITORS_VERSION_2
#define HAS_GP_DEVICE_VERSION_2
#define HAS_GPIO_VERSION_1
#define HAS_TIMED_CTRL_VERSION_1
#define HAS_RX_VERSION_2
/* per-frame parameter handling support */
#define SH_CSS_ENABLE_PER_FRAME_PARAMS
#define DMA_DDR_TO_VAMEM_WORKAROUND
#define DMA_DDR_TO_HMEM_WORKAROUND
/*
* The longest allowed (uninterruptible) bus transfer; it does not
* take stalling into account
*/
#define HIVE_ISP_MAX_BURST_LENGTH 1024
/*
* Maximum allowed burst length in words for the ISP DMA
* This value is set to 2 to prevent the ISP DMA from blocking
* the bus for too long; as the input system can only buffer
* 2 lines on Moorefield and Cherrytrail, the input system buffers
* may overflow if blocked for too long (BZ 2726).
*/
#define ISP2400_DMA_MAX_BURST_LENGTH 128
#define ISP2401_DMA_MAX_BURST_LENGTH 2
#ifdef ISP2401
# include "isp2401_system_global.h"
#else
# include "isp2400_system_global.h"
#endif
#include <hive_isp_css_defs.h>
#include <type_support.h>
/* This interface is deprecated */
#include "hive_types.h"
/*
* Semi global. "HRT" is accessible from SP, but the HRT types do not fully apply
*/
#define HRT_VADDRESS_WIDTH 32
#define SIZEOF_HRT_REG (HRT_DATA_WIDTH >> 3)
#define HIVE_ISP_CTRL_DATA_BYTES (HIVE_ISP_CTRL_DATA_WIDTH / 8)
/* The main bus connecting all devices */
#define HRT_BUS_WIDTH HIVE_ISP_CTRL_DATA_WIDTH
#define HRT_BUS_BYTES HIVE_ISP_CTRL_DATA_BYTES
typedef u32 hrt_bus_align_t;
/*
* Enumerate the devices, device access through the API is by ID,
* through the DLI by address. The enumerator terminators are used
* to size the wiring arrays and as an exception value.
*/
typedef enum {
DDR0_ID = 0,
N_DDR_ID
} ddr_ID_t;
typedef enum {
ISP0_ID = 0,
N_ISP_ID
} isp_ID_t;
typedef enum {
SP0_ID = 0,
N_SP_ID
} sp_ID_t;
typedef enum {
MMU0_ID = 0,
MMU1_ID,
N_MMU_ID
} mmu_ID_t;
typedef enum {
DMA0_ID = 0,
N_DMA_ID
} dma_ID_t;
typedef enum {
GDC0_ID = 0,
GDC1_ID,
N_GDC_ID
} gdc_ID_t;
/* this extra define is needed because we want to use it also
in the preprocessor, and that doesn't work with enums.
*/
#define N_GDC_ID_CPP 2
typedef enum {
VAMEM0_ID = 0,
VAMEM1_ID,
VAMEM2_ID,
N_VAMEM_ID
} vamem_ID_t;
typedef enum {
BAMEM0_ID = 0,
N_BAMEM_ID
} bamem_ID_t;
typedef enum {
HMEM0_ID = 0,
N_HMEM_ID
} hmem_ID_t;
typedef enum {
IRQ0_ID = 0, /* GP IRQ block */
IRQ1_ID, /* Input formatter */
IRQ2_ID, /* input system */
IRQ3_ID, /* input selector */
N_IRQ_ID
} irq_ID_t;
typedef enum {
FIFO_MONITOR0_ID = 0,
N_FIFO_MONITOR_ID
} fifo_monitor_ID_t;
typedef enum {
GP_DEVICE0_ID = 0,
N_GP_DEVICE_ID
} gp_device_ID_t;
typedef enum {
GP_TIMER0_ID = 0,
GP_TIMER1_ID,
GP_TIMER2_ID,
GP_TIMER3_ID,
GP_TIMER4_ID,
GP_TIMER5_ID,
GP_TIMER6_ID,
GP_TIMER7_ID,
N_GP_TIMER_ID
} gp_timer_ID_t;
typedef enum {
GPIO0_ID = 0,
N_GPIO_ID
} gpio_ID_t;
typedef enum {
TIMED_CTRL0_ID = 0,
N_TIMED_CTRL_ID
} timed_ctrl_ID_t;
typedef enum {
INPUT_FORMATTER0_ID = 0,
INPUT_FORMATTER1_ID,
INPUT_FORMATTER2_ID,
INPUT_FORMATTER3_ID,
N_INPUT_FORMATTER_ID
} input_formatter_ID_t;
/* The IF RST is outside the IF */
#define INPUT_FORMATTER0_SRST_OFFSET 0x0824
#define INPUT_FORMATTER1_SRST_OFFSET 0x0624
#define INPUT_FORMATTER2_SRST_OFFSET 0x0424
#define INPUT_FORMATTER3_SRST_OFFSET 0x0224
#define INPUT_FORMATTER0_SRST_MASK 0x0001
#define INPUT_FORMATTER1_SRST_MASK 0x0002
#define INPUT_FORMATTER2_SRST_MASK 0x0004
#define INPUT_FORMATTER3_SRST_MASK 0x0008
typedef enum {
INPUT_SYSTEM0_ID = 0,
N_INPUT_SYSTEM_ID
} input_system_ID_t;
typedef enum {
RX0_ID = 0,
N_RX_ID
} rx_ID_t;
enum mipi_port_id {
MIPI_PORT0_ID = 0,
MIPI_PORT1_ID,
MIPI_PORT2_ID,
N_MIPI_PORT_ID
};
#define N_RX_CHANNEL_ID 4
/* Generic port enumeration with an internal port type ID */
typedef enum {
CSI_PORT0_ID = 0,
CSI_PORT1_ID,
CSI_PORT2_ID,
TPG_PORT0_ID,
PRBS_PORT0_ID,
FIFO_PORT0_ID,
MEMORY_PORT0_ID,
N_INPUT_PORT_ID
} input_port_ID_t;
typedef enum {
CAPTURE_UNIT0_ID = 0,
CAPTURE_UNIT1_ID,
CAPTURE_UNIT2_ID,
ACQUISITION_UNIT0_ID,
DMA_UNIT0_ID,
CTRL_UNIT0_ID,
GPREGS_UNIT0_ID,
FIFO_UNIT0_ID,
IRQ_UNIT0_ID,
N_SUB_SYSTEM_ID
} sub_system_ID_t;
#define N_CAPTURE_UNIT_ID 3
#define N_ACQUISITION_UNIT_ID 1
#define N_CTRL_UNIT_ID 1
enum ia_css_isp_memories {
IA_CSS_ISP_PMEM0 = 0,
IA_CSS_ISP_DMEM0,
IA_CSS_ISP_VMEM0,
IA_CSS_ISP_VAMEM0,
IA_CSS_ISP_VAMEM1,
IA_CSS_ISP_VAMEM2,
IA_CSS_ISP_HMEM0,
IA_CSS_SP_DMEM0,
IA_CSS_DDR,
N_IA_CSS_MEMORIES
};
#define IA_CSS_NUM_MEMORIES 9
/* For driver compatibility */
#define N_IA_CSS_ISP_MEMORIES IA_CSS_NUM_MEMORIES
#define IA_CSS_NUM_ISP_MEMORIES IA_CSS_NUM_MEMORIES
/*
* ISP2401 specific enums
*/
typedef enum {
ISYS_IRQ0_ID = 0, /* port a */
ISYS_IRQ1_ID, /* port b */
ISYS_IRQ2_ID, /* port c */
N_ISYS_IRQ_ID
} isys_irq_ID_t;
/*
* Input-buffer Controller.
*/
typedef enum {
IBUF_CTRL0_ID = 0, /* map to ISYS2401_IBUF_CNTRL_A */
IBUF_CTRL1_ID, /* map to ISYS2401_IBUF_CNTRL_B */
IBUF_CTRL2_ID, /* map to ISYS2401_IBUF_CNTRL_C */
N_IBUF_CTRL_ID
} ibuf_ctrl_ID_t;
/* end of Input-buffer Controller */
/*
* Stream2MMIO.
*/
typedef enum {
STREAM2MMIO0_ID = 0, /* map to ISYS2401_S2M_A */
STREAM2MMIO1_ID, /* map to ISYS2401_S2M_B */
STREAM2MMIO2_ID, /* map to ISYS2401_S2M_C */
N_STREAM2MMIO_ID
} stream2mmio_ID_t;
typedef enum {
/*
* Stream2MMIO 0 has 8 SIDs that are indexed by
* [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID7_ID].
*
* Stream2MMIO 1 has 4 SIDs that are indexed by
* [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID3_ID].
*
* Stream2MMIO 2 has 4 SIDs that are indexed by
* [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID3_ID].
*/
STREAM2MMIO_SID0_ID = 0,
STREAM2MMIO_SID1_ID,
STREAM2MMIO_SID2_ID,
STREAM2MMIO_SID3_ID,
STREAM2MMIO_SID4_ID,
STREAM2MMIO_SID5_ID,
STREAM2MMIO_SID6_ID,
STREAM2MMIO_SID7_ID,
N_STREAM2MMIO_SID_ID
} stream2mmio_sid_ID_t;
/* end of Stream2MMIO */
/**
* Input System 2401: CSI-MIPI receiver.
*/
typedef enum {
CSI_RX_BACKEND0_ID = 0, /* map to ISYS2401_MIPI_BE_A */
CSI_RX_BACKEND1_ID, /* map to ISYS2401_MIPI_BE_B */
CSI_RX_BACKEND2_ID, /* map to ISYS2401_MIPI_BE_C */
N_CSI_RX_BACKEND_ID
} csi_rx_backend_ID_t;
typedef enum {
CSI_RX_FRONTEND0_ID = 0, /* map to ISYS2401_CSI_RX_A */
CSI_RX_FRONTEND1_ID, /* map to ISYS2401_CSI_RX_B */
CSI_RX_FRONTEND2_ID, /* map to ISYS2401_CSI_RX_C */
#define N_CSI_RX_FRONTEND_ID (CSI_RX_FRONTEND2_ID + 1)
} csi_rx_frontend_ID_t;
typedef enum {
CSI_RX_DLANE0_ID = 0, /* map to DLANE0 in CSI RX */
CSI_RX_DLANE1_ID, /* map to DLANE1 in CSI RX */
CSI_RX_DLANE2_ID, /* map to DLANE2 in CSI RX */
CSI_RX_DLANE3_ID, /* map to DLANE3 in CSI RX */
N_CSI_RX_DLANE_ID
} csi_rx_fe_dlane_ID_t;
/* end of CSI-MIPI receiver */
typedef enum {
ISYS2401_DMA0_ID = 0,
N_ISYS2401_DMA_ID
} isys2401_dma_ID_t;
/**
* Pixel-generator. ("system_global.h")
*/
typedef enum {
PIXELGEN0_ID = 0,
PIXELGEN1_ID,
PIXELGEN2_ID,
N_PIXELGEN_ID
} pixelgen_ID_t;
/* end of pixel-generator. ("system_global.h") */
typedef enum {
INPUT_SYSTEM_CSI_PORT0_ID = 0,
INPUT_SYSTEM_CSI_PORT1_ID,
INPUT_SYSTEM_CSI_PORT2_ID,
INPUT_SYSTEM_PIXELGEN_PORT0_ID,
INPUT_SYSTEM_PIXELGEN_PORT1_ID,
INPUT_SYSTEM_PIXELGEN_PORT2_ID,
N_INPUT_SYSTEM_INPUT_PORT_ID
} input_system_input_port_ID_t;
#define N_INPUT_SYSTEM_CSI_PORT 3
typedef enum {
ISYS2401_DMA_CHANNEL_0 = 0,
ISYS2401_DMA_CHANNEL_1,
ISYS2401_DMA_CHANNEL_2,
ISYS2401_DMA_CHANNEL_3,
ISYS2401_DMA_CHANNEL_4,
ISYS2401_DMA_CHANNEL_5,
ISYS2401_DMA_CHANNEL_6,
ISYS2401_DMA_CHANNEL_7,
ISYS2401_DMA_CHANNEL_8,
ISYS2401_DMA_CHANNEL_9,
ISYS2401_DMA_CHANNEL_10,
ISYS2401_DMA_CHANNEL_11,
N_ISYS2401_DMA_CHANNEL
} isys2401_dma_channel;
#endif /* __SYSTEM_GLOBAL_H_INCLUDED__ */

View File

@@ -0,0 +1,179 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "system_local.h"
/* ISP */
const hrt_address ISP_CTRL_BASE[N_ISP_ID] = {
0x0000000000020000ULL
};
const hrt_address ISP_DMEM_BASE[N_ISP_ID] = {
0x0000000000200000ULL
};
const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID] = {
0x0000000000100000ULL
};
/* SP */
const hrt_address SP_CTRL_BASE[N_SP_ID] = {
0x0000000000010000ULL
};
const hrt_address SP_DMEM_BASE[N_SP_ID] = {
0x0000000000300000ULL
};
/* MMU */
/*
* MMU0_ID: The data MMU
* MMU1_ID: The icache MMU
*/
const hrt_address MMU_BASE[N_MMU_ID] = {
0x0000000000070000ULL,
0x00000000000A0000ULL
};
/* DMA */
const hrt_address DMA_BASE[N_DMA_ID] = {
0x0000000000040000ULL
};
const hrt_address ISYS2401_DMA_BASE[N_ISYS2401_DMA_ID] = {
0x00000000000CA000ULL
};
/* IRQ */
const hrt_address IRQ_BASE[N_IRQ_ID] = {
0x0000000000000500ULL,
0x0000000000030A00ULL,
0x000000000008C000ULL,
0x0000000000090200ULL
};
/*
0x0000000000000500ULL};
*/
/* GDC */
const hrt_address GDC_BASE[N_GDC_ID] = {
0x0000000000050000ULL,
0x0000000000060000ULL
};
/* FIFO_MONITOR (not a subset of GP_DEVICE) */
const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID] = {
0x0000000000000000ULL
};
/*
const hrt_address GP_REGS_BASE[N_GP_REGS_ID] = {
0x0000000000000000ULL};
const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
0x0000000000090000ULL};
*/
/* GP_DEVICE (single base for all separate GP_REG instances) */
const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
0x0000000000000000ULL
};
/* GP TIMER: all timer registers are intertwined,
* so having multiple base addresses for
* different timers does not help */
const hrt_address GP_TIMER_BASE =
(hrt_address)0x0000000000000600ULL;
/* GPIO */
const hrt_address GPIO_BASE[N_GPIO_ID] = {
0x0000000000000400ULL
};
/* TIMED_CTRL */
const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID] = {
0x0000000000000100ULL
};
/* INPUT_FORMATTER */
const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID] = {
0x0000000000030000ULL,
0x0000000000030200ULL,
0x0000000000030400ULL,
0x0000000000030600ULL
}; /* memcpy() */
/* INPUT_SYSTEM */
const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID] = {
0x0000000000080000ULL
};
/* 0x0000000000081000ULL, */ /* capture A */
/* 0x0000000000082000ULL, */ /* capture B */
/* 0x0000000000083000ULL, */ /* capture C */
/* 0x0000000000084000ULL, */ /* Acquisition */
/* 0x0000000000085000ULL, */ /* DMA */
/* 0x0000000000089000ULL, */ /* ctrl */
/* 0x000000000008A000ULL, */ /* GP regs */
/* 0x000000000008B000ULL, */ /* FIFO */
/* 0x000000000008C000ULL, */ /* IRQ */
/* RX, the MIPI lane control regs start at offset 0 */
const hrt_address RX_BASE[N_RX_ID] = {
0x0000000000080100ULL
};
/* IBUF_CTRL, part of the Input System 2401 */
const hrt_address IBUF_CTRL_BASE[N_IBUF_CTRL_ID] = {
0x00000000000C1800ULL, /* ibuf controller A */
0x00000000000C3800ULL, /* ibuf controller B */
0x00000000000C5800ULL /* ibuf controller C */
};
/* ISYS IRQ Controllers, part of the Input System 2401 */
const hrt_address ISYS_IRQ_BASE[N_ISYS_IRQ_ID] = {
0x00000000000C1400ULL, /* port a */
0x00000000000C3400ULL, /* port b */
0x00000000000C5400ULL /* port c */
};
/* CSI FE, part of the Input System 2401 */
const hrt_address CSI_RX_FE_CTRL_BASE[N_CSI_RX_FRONTEND_ID] = {
0x00000000000C0400ULL, /* csi fe controller A */
0x00000000000C2400ULL, /* csi fe controller B */
0x00000000000C4400ULL /* csi fe controller C */
};
/* CSI BE, part of the Input System 2401 */
const hrt_address CSI_RX_BE_CTRL_BASE[N_CSI_RX_BACKEND_ID] = {
0x00000000000C0800ULL, /* csi be controller A */
0x00000000000C2800ULL, /* csi be controller B */
0x00000000000C4800ULL /* csi be controller C */
};
/* PIXEL Generator, part of the Input System 2401 */
const hrt_address PIXELGEN_CTRL_BASE[N_PIXELGEN_ID] = {
0x00000000000C1000ULL, /* pixel gen controller A */
0x00000000000C3000ULL, /* pixel gen controller B */
0x00000000000C5000ULL /* pixel gen controller C */
};
/* Stream2MMIO, part of the Input System 2401 */
const hrt_address STREAM2MMIO_CTRL_BASE[N_STREAM2MMIO_ID] = {
0x00000000000C0C00ULL, /* stream2mmio controller A */
0x00000000000C2C00ULL, /* stream2mmio controller B */
0x00000000000C4C00ULL /* stream2mmio controller C */
};

View File

@@ -1,11 +1,103 @@
/* SPDX-License-Identifier: GPL-2.0 */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* (c) 2020 Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#ifdef ISP2401
# include "isp2401_system_local.h"
#else
# include "isp2400_system_local.h"
#ifndef __SYSTEM_LOCAL_H_INCLUDED__
#define __SYSTEM_LOCAL_H_INCLUDED__
#ifdef HRT_ISP_CSS_CUSTOM_HOST
#ifndef HRT_USE_VIR_ADDRS
#define HRT_USE_VIR_ADDRS
#endif
#endif
#include "system_global.h"
/* This interface is deprecated */
#include "hive_types.h"
/*
* Cell specific address maps
*/
#define GP_FIFO_BASE ((hrt_address)0x0000000000090104) /* This is NOT a base address */
/* ISP */
extern const hrt_address ISP_CTRL_BASE[N_ISP_ID];
extern const hrt_address ISP_DMEM_BASE[N_ISP_ID];
extern const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID];
/* SP */
extern const hrt_address SP_CTRL_BASE[N_SP_ID];
extern const hrt_address SP_DMEM_BASE[N_SP_ID];
/* MMU */
extern const hrt_address MMU_BASE[N_MMU_ID];
/* DMA */
extern const hrt_address DMA_BASE[N_DMA_ID];
extern const hrt_address ISYS2401_DMA_BASE[N_ISYS2401_DMA_ID];
/* IRQ */
extern const hrt_address IRQ_BASE[N_IRQ_ID];
/* GDC */
extern const hrt_address GDC_BASE[N_GDC_ID];
/* FIFO_MONITOR (not a subset of GP_DEVICE) */
extern const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID];
/* GP_DEVICE (single base for all separate GP_REG instances) */
extern const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID];
/* GP TIMER: all timer registers are intertwined,
* so having multiple base addresses for
* different timers does not help */
extern const hrt_address GP_TIMER_BASE;
/* GPIO */
extern const hrt_address GPIO_BASE[N_GPIO_ID];
/* TIMED_CTRL */
extern const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID];
/* INPUT_FORMATTER */
extern const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID];
/* INPUT_SYSTEM */
extern const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID];
/* RX, the MIPI lane control regs start at offset 0 */
extern const hrt_address RX_BASE[N_RX_ID];
/* IBUF_CTRL, part of the Input System 2401 */
extern const hrt_address IBUF_CTRL_BASE[N_IBUF_CTRL_ID];
/* ISYS IRQ Controllers, part of the Input System 2401 */
extern const hrt_address ISYS_IRQ_BASE[N_ISYS_IRQ_ID];
/* CSI FE, part of the Input System 2401 */
extern const hrt_address CSI_RX_FE_CTRL_BASE[N_CSI_RX_FRONTEND_ID];
/* CSI BE, part of the Input System 2401 */
extern const hrt_address CSI_RX_BE_CTRL_BASE[N_CSI_RX_BACKEND_ID];
/* PIXEL Generator, part of the Input System 2401 */
extern const hrt_address PIXELGEN_CTRL_BASE[N_PIXELGEN_ID];
/* Stream2MMIO, part of the Input System 2401 */
extern const hrt_address STREAM2MMIO_CTRL_BASE[N_STREAM2MMIO_ID];
#endif /* __SYSTEM_LOCAL_H_INCLUDED__ */

View File

@@ -521,14 +521,19 @@ static void vfio_pci_release(void *device_data)
vfio_pci_vf_token_user_add(vdev, -1);
vfio_spapr_pci_eeh_release(vdev->pdev);
vfio_pci_disable(vdev);
mutex_lock(&vdev->igate);
if (vdev->err_trigger) {
eventfd_ctx_put(vdev->err_trigger);
vdev->err_trigger = NULL;
}
mutex_unlock(&vdev->igate);
mutex_lock(&vdev->igate);
if (vdev->req_trigger) {
eventfd_ctx_put(vdev->req_trigger);
vdev->req_trigger = NULL;
}
mutex_unlock(&vdev->igate);
}
mutex_unlock(&vdev->reflck->lock);

View File

@@ -641,11 +641,11 @@ static int vm_cmdline_set(const char *device,
&vm_cmdline_id, &consumed);
/*
* sscanf() must processes at least 2 chunks; also there
* sscanf() must process at least 2 chunks; also there
* must be no extra characters after the last chunk, so
* str[consumed] must be '\0'
*/
if (processed < 2 || str[consumed])
if (processed < 2 || str[consumed] || irq == 0)
return -EINVAL;
resources[0].flags = IORESOURCE_MEM;
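
The validation above works because sscanf() returns the number of conversions that succeeded while a trailing %n records how many characters were consumed, so trailing garbage can be rejected via str[consumed]; the new irq == 0 test additionally rejects a zero interrupt number. A minimal userspace sketch of the same pattern, using an illustrative "<size>@<addr>:<irq>" format rather than the driver's actual one:

#include <stdio.h>

/* Parse "<size>@<addr>:<irq>"; reject trailing characters and IRQ 0. */
static int parse_dev(const char *str, unsigned long long *size,
		     unsigned long long *addr, unsigned int *irq)
{
	int consumed = 0;
	int processed = sscanf(str, "%llu@%llx:%u%n", size, addr, irq, &consumed);

	/* All three fields must convert and nothing may follow them. */
	if (processed < 3 || str[consumed] != '\0' || *irq == 0)
		return -1;
	return 0;
}

int main(void)
{
	unsigned long long size, addr;
	unsigned int irq;

	printf("%d\n", parse_dev("512@0x1000:5", &size, &addr, &irq));   /* 0 */
	printf("%d\n", parse_dev("512@0x1000:0", &size, &addr, &irq));   /* -1: IRQ 0 */
	printf("%d\n", parse_dev("512@0x1000:5x", &size, &addr, &irq));  /* -1: trailing 'x' */
	return 0;
}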

View File

@@ -1461,6 +1461,7 @@ static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
if (ret < 0 && ret != -ENOENT) {
ulist_free(tmp);
ulist_free(*roots);
*roots = NULL;
return ret;
}
node = ulist_next(tmp, &uiter);

View File

@@ -1999,6 +1999,7 @@ static int __process_pages_contig(struct address_space *mapping,
if (!PageDirty(pages[i]) ||
pages[i]->mapping != mapping) {
unlock_page(pages[i]);
for (; i < ret; i++)
put_page(pages[i]);
err = -EAGAIN;
goto out;

View File

@@ -8123,19 +8123,16 @@ again:
/*
* Qgroup reserved space handler
* Page here will be either
* 1) Already written to disk
* In this case, its reserved space is released from data rsv map
* and will be freed by delayed_ref handler finally.
* So even we call qgroup_free_data(), it won't decrease reserved
* space.
* 2) Not written to disk
* This means the reserved space should be freed here. However,
* if a truncate invalidates the page (by clearing PageDirty)
* and the page is accounted for while allocating extent
* in btrfs_check_data_free_space() we let delayed_ref to
* free the entire extent.
* 1) Already written to disk or ordered extent already submitted
* Then its QGROUP_RESERVED bit in io_tree is already cleaned.
* Qgroup will be handled by its qgroup_record then.
* btrfs_qgroup_free_data() call will do nothing here.
*
* 2) Not written to disk yet
* Then btrfs_qgroup_free_data() call will clear the QGROUP_RESERVED
* bit of its io_tree, and free the qgroup reserved data space.
* Since the IO will never happen for this page.
*/
if (PageDirty(page))
btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE);
if (!inode_evicting) {
clear_extent_bit(tree, page_start, page_end, EXTENT_LOCKED |

View File

@@ -7051,6 +7051,14 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
mutex_lock(&uuid_mutex);
mutex_lock(&fs_info->chunk_mutex);
/*
* It is possible for mount and umount to race in such a way that
* we execute this code path, but open_fs_devices failed to clear
* total_rw_bytes. We certainly want it cleared before reading the
* device items, so clear it here.
*/
fs_info->fs_devices->total_rw_bytes = 0;
/*
* Read all device items, and then all the chunk items. All
* device items are found before any chunk item (their object id

View File

@@ -1112,7 +1112,7 @@ found:
ret = exfat_get_next_cluster(sb, &clu.dir);
}
if (ret || clu.dir != EXFAT_EOF_CLUSTER) {
if (ret || clu.dir == EXFAT_EOF_CLUSTER) {
/* just initialized hint_stat */
hint_stat->clu = p_dir->dir;
hint_stat->eidx = 0;

View File

@@ -371,7 +371,7 @@ static inline bool exfat_is_last_sector_in_cluster(struct exfat_sb_info *sbi,
static inline sector_t exfat_cluster_to_sector(struct exfat_sb_info *sbi,
unsigned int clus)
{
return ((clus - EXFAT_RESERVED_CLUSTERS) << sbi->sect_per_clus_bits) +
return ((sector_t)(clus - EXFAT_RESERVED_CLUSTERS) << sbi->sect_per_clus_bits) +
sbi->data_start_sector;
}
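
The (sector_t) cast matters because the cluster difference and the shift are otherwise evaluated in 32-bit arithmetic, so on large volumes the high bits of the sector number are lost before the result is widened. A small self-contained sketch of the same pitfall, with made-up values rather than real exFAT geometry:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t clus = 0x00400000;	/* illustrative cluster number */
	unsigned int shift = 11;	/* e.g. 2048 sectors per cluster */

	/* Shift done in 32 bits: the overflowed bits are gone before widening. */
	uint64_t truncated = (uint64_t)(clus << shift);
	/* Widen first, then shift: the full 64-bit result survives. */
	uint64_t correct = (uint64_t)clus << shift;

	printf("truncated=0x%llx correct=0x%llx\n",
	       (unsigned long long)truncated, (unsigned long long)correct);
	return 0;
}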

View File

@@ -176,7 +176,7 @@ int __exfat_truncate(struct inode *inode, loff_t new_size)
ep2->dentry.stream.size = 0;
} else {
ep2->dentry.stream.valid_size = cpu_to_le64(new_size);
ep2->dentry.stream.size = ep->dentry.stream.valid_size;
ep2->dentry.stream.size = ep2->dentry.stream.valid_size;
}
if (new_size == 0) {

View File

@@ -495,7 +495,7 @@ static int exfat_utf8_to_utf16(struct super_block *sb,
struct exfat_uni_name *p_uniname, int *p_lossy)
{
int i, unilen, lossy = NLS_NAME_NO_LOSSY;
unsigned short upname[MAX_NAME_LENGTH + 1];
__le16 upname[MAX_NAME_LENGTH + 1];
unsigned short *uniname = p_uniname->name;
WARN_ON(!len);
@@ -519,7 +519,7 @@ static int exfat_utf8_to_utf16(struct super_block *sb,
exfat_wstrchr(bad_uni_chars, *uniname))
lossy |= NLS_NAME_LOSSY;
upname[i] = exfat_toupper(sb, *uniname);
upname[i] = cpu_to_le16(exfat_toupper(sb, *uniname));
uniname++;
}
@@ -597,7 +597,7 @@ static int exfat_nls_to_ucs2(struct super_block *sb,
struct exfat_uni_name *p_uniname, int *p_lossy)
{
int i = 0, unilen = 0, lossy = NLS_NAME_NO_LOSSY;
unsigned short upname[MAX_NAME_LENGTH + 1];
__le16 upname[MAX_NAME_LENGTH + 1];
unsigned short *uniname = p_uniname->name;
struct nls_table *nls = EXFAT_SB(sb)->nls_io;
@@ -611,7 +611,7 @@ static int exfat_nls_to_ucs2(struct super_block *sb,
exfat_wstrchr(bad_uni_chars, *uniname))
lossy |= NLS_NAME_LOSSY;
upname[unilen] = exfat_toupper(sb, *uniname);
upname[unilen] = cpu_to_le16(exfat_toupper(sb, *uniname));
uniname++;
unilen++;
}

View File

@@ -605,6 +605,7 @@ enum {
struct async_poll {
struct io_poll_iocb poll;
struct io_poll_iocb *double_poll;
struct io_wq_work work;
};
@@ -4159,9 +4160,9 @@ static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
return false;
}
static void io_poll_remove_double(struct io_kiocb *req)
static void io_poll_remove_double(struct io_kiocb *req, void *data)
{
struct io_poll_iocb *poll = (struct io_poll_iocb *) req->io;
struct io_poll_iocb *poll = data;
lockdep_assert_held(&req->ctx->completion_lock);
@@ -4181,7 +4182,7 @@ static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
{
struct io_ring_ctx *ctx = req->ctx;
io_poll_remove_double(req);
io_poll_remove_double(req, req->io);
req->poll.done = true;
io_cqring_fill_event(req, error ? error : mangle_poll(mask));
io_commit_cqring(ctx);
@@ -4224,21 +4225,21 @@ static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
int sync, void *key)
{
struct io_kiocb *req = wait->private;
struct io_poll_iocb *poll = (struct io_poll_iocb *) req->io;
struct io_poll_iocb *poll = req->apoll->double_poll;
__poll_t mask = key_to_poll(key);
/* for instances that support it check for an event match first: */
if (mask && !(mask & poll->events))
return 0;
if (req->poll.head) {
if (poll && poll->head) {
bool done;
spin_lock(&req->poll.head->lock);
done = list_empty(&req->poll.wait.entry);
spin_lock(&poll->head->lock);
done = list_empty(&poll->wait.entry);
if (!done)
list_del_init(&req->poll.wait.entry);
spin_unlock(&req->poll.head->lock);
list_del_init(&poll->wait.entry);
spin_unlock(&poll->head->lock);
if (!done)
__io_async_wake(req, poll, mask, io_poll_task_func);
}
@@ -4258,7 +4259,8 @@ static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
}
static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
struct wait_queue_head *head)
struct wait_queue_head *head,
struct io_poll_iocb **poll_ptr)
{
struct io_kiocb *req = pt->req;
@@ -4269,7 +4271,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
*/
if (unlikely(poll->head)) {
/* already have a 2nd entry, fail a third attempt */
if (req->io) {
if (*poll_ptr) {
pt->error = -EINVAL;
return;
}
@@ -4281,7 +4283,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
io_init_poll_iocb(poll, req->poll.events, io_poll_double_wake);
refcount_inc(&req->refs);
poll->wait.private = req;
req->io = (void *) poll;
*poll_ptr = poll;
}
pt->error = 0;
@@ -4293,8 +4295,9 @@ static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
struct poll_table_struct *p)
{
struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
struct async_poll *apoll = pt->req->apoll;
__io_queue_proc(&pt->req->apoll->poll, pt, head);
__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
}
static void io_sq_thread_drop_mm(struct io_ring_ctx *ctx)
@@ -4344,11 +4347,13 @@ static void io_async_task_func(struct callback_head *cb)
}
}
io_poll_remove_double(req, apoll->double_poll);
spin_unlock_irq(&ctx->completion_lock);
/* restore ->work in case we need to retry again */
if (req->flags & REQ_F_WORK_INITIALIZED)
memcpy(&req->work, &apoll->work, sizeof(req->work));
kfree(apoll->double_poll);
kfree(apoll);
if (!canceled) {
@@ -4436,7 +4441,6 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
struct async_poll *apoll;
struct io_poll_table ipt;
__poll_t mask, ret;
bool had_io;
if (!req->file || !file_can_poll(req->file))
return false;
@@ -4448,11 +4452,11 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
if (unlikely(!apoll))
return false;
apoll->double_poll = NULL;
req->flags |= REQ_F_POLLED;
if (req->flags & REQ_F_WORK_INITIALIZED)
memcpy(&apoll->work, &req->work, sizeof(req->work));
had_io = req->io != NULL;
io_get_req_task(req);
req->apoll = apoll;
@@ -4470,13 +4474,11 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
io_async_wake);
if (ret) {
ipt.error = 0;
/* only remove double add if we did it here */
if (!had_io)
io_poll_remove_double(req);
io_poll_remove_double(req, apoll->double_poll);
spin_unlock_irq(&ctx->completion_lock);
if (req->flags & REQ_F_WORK_INITIALIZED)
memcpy(&req->work, &apoll->work, sizeof(req->work));
kfree(apoll->double_poll);
kfree(apoll);
return false;
}
@@ -4507,11 +4509,13 @@ static bool io_poll_remove_one(struct io_kiocb *req)
bool do_complete;
if (req->opcode == IORING_OP_POLL_ADD) {
io_poll_remove_double(req);
io_poll_remove_double(req, req->io);
do_complete = __io_poll_remove_one(req, &req->poll);
} else {
struct async_poll *apoll = req->apoll;
io_poll_remove_double(req, apoll->double_poll);
/* non-poll requests have submit ref still */
do_complete = __io_poll_remove_one(req, &apoll->poll);
if (do_complete) {
@@ -4524,6 +4528,7 @@ static bool io_poll_remove_one(struct io_kiocb *req)
if (req->flags & REQ_F_WORK_INITIALIZED)
memcpy(&req->work, &apoll->work,
sizeof(req->work));
kfree(apoll->double_poll);
kfree(apoll);
}
}
@@ -4624,7 +4629,7 @@ static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
{
struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
__io_queue_proc(&pt->req->poll, pt, head);
__io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->io);
}
static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
@@ -4732,7 +4737,9 @@ static int io_timeout_remove_prep(struct io_kiocb *req,
{
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len)
if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
return -EINVAL;
if (sqe->ioprio || sqe->buf_index || sqe->len)
return -EINVAL;
req->timeout.addr = READ_ONCE(sqe->addr);
@@ -4910,8 +4917,9 @@ static int io_async_cancel_prep(struct io_kiocb *req,
{
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
if (sqe->flags || sqe->ioprio || sqe->off || sqe->len ||
sqe->cancel_flags)
if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
return -EINVAL;
if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
return -EINVAL;
req->cancel.addr = READ_ONCE(sqe->addr);
@@ -4929,7 +4937,9 @@ static int io_async_cancel(struct io_kiocb *req)
static int io_files_update_prep(struct io_kiocb *req,
const struct io_uring_sqe *sqe)
{
if (sqe->flags || sqe->ioprio || sqe->rw_flags)
if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
return -EINVAL;
if (sqe->ioprio || sqe->rw_flags)
return -EINVAL;
req->files_update.offset = READ_ONCE(sqe->off);
@@ -5720,6 +5730,7 @@ fail_req:
* Never try inline submit if IOSQE_ASYNC is set, go straight
* to async execution.
*/
io_req_init_async(req);
req->work.flags |= IO_WQ_WORK_CONCURRENT;
io_queue_async_work(req);
} else {

View File

@@ -507,6 +507,17 @@ find_any_file(struct nfs4_file *f)
return ret;
}
static struct nfsd_file *find_deleg_file(struct nfs4_file *f)
{
struct nfsd_file *ret = NULL;
spin_lock(&f->fi_lock);
if (f->fi_deleg_file)
ret = nfsd_file_get(f->fi_deleg_file);
spin_unlock(&f->fi_lock);
return ret;
}
static atomic_long_t num_delegations;
unsigned long max_delegations;
@@ -2444,6 +2455,8 @@ static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
oo = ols->st_stateowner;
nf = st->sc_file;
file = find_any_file(nf);
if (!file)
return 0;
seq_printf(s, "- ");
nfs4_show_stateid(s, &st->sc_stateid);
@@ -2481,6 +2494,8 @@ static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
oo = ols->st_stateowner;
nf = st->sc_file;
file = find_any_file(nf);
if (!file)
return 0;
seq_printf(s, "- ");
nfs4_show_stateid(s, &st->sc_stateid);
@@ -2513,7 +2528,9 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
ds = delegstateid(st);
nf = st->sc_file;
file = nf->fi_deleg_file;
file = find_deleg_file(nf);
if (!file)
return 0;
seq_printf(s, "- ");
nfs4_show_stateid(s, &st->sc_stateid);
@@ -2529,6 +2546,7 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
seq_printf(s, ", ");
nfs4_show_fname(s, file);
seq_printf(s, " }\n");
nfsd_file_put(file);
return 0;
}

View File

@@ -175,7 +175,7 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
/* Extract the length of the metadata block */
data = page_address(bvec->bv_page) + bvec->bv_offset;
length = data[offset];
if (offset <= bvec->bv_len - 1) {
if (offset < bvec->bv_len - 1) {
length |= data[offset + 1] << 8;
} else {
if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all))) {
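
The stricter comparison guarantees that both bytes of the little-endian length lie inside the current segment before data[offset + 1] is dereferenced; with <=, offset could equal bv_len - 1 and the second byte would be read one past the end. A hedged sketch of that bounds check over a plain byte buffer (the helper is illustrative, not squashfs code):

#include <stddef.h>
#include <stdint.h>

/*
 * Read a 16-bit little-endian value starting at 'offset', touching
 * data[offset + 1] only when it is still inside the 'len'-byte buffer.
 * Returns the number of bytes consumed, or 0 if even the first byte
 * is out of range; a caller would fetch any missing byte elsewhere.
 */
static size_t read_le16_guarded(const uint8_t *data, size_t len,
				size_t offset, uint16_t *out)
{
	if (offset >= len)
		return 0;

	*out = data[offset];
	if (offset < len - 1) {			/* second byte is in range */
		*out |= (uint16_t)data[offset + 1] << 8;
		return 2;
	}
	return 1;
}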

View File

@@ -607,14 +607,14 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
int nr_pages;
ssize_t ret;
nr_pages = iov_iter_npages(from, BIO_MAX_PAGES);
if (!nr_pages)
return 0;
max = queue_max_zone_append_sectors(bdev_get_queue(bdev));
max = ALIGN_DOWN(max << SECTOR_SHIFT, inode->i_sb->s_blocksize);
iov_iter_truncate(from, max);
nr_pages = iov_iter_npages(from, BIO_MAX_PAGES);
if (!nr_pages)
return 0;
bio = bio_alloc_bioset(GFP_NOFS, nr_pages, &fs_bio_set);
if (!bio)
return -ENOMEM;
@@ -1119,7 +1119,7 @@ static int zonefs_create_zgroup(struct zonefs_zone_data *zd,
char *file_name;
struct dentry *dir;
unsigned int n = 0;
int ret = -ENOMEM;
int ret;
/* If the group is empty, there is nothing to do */
if (!zd->nr_zones[type])
@@ -1135,8 +1135,10 @@ static int zonefs_create_zgroup(struct zonefs_zone_data *zd,
zgroup_name = "seq";
dir = zonefs_create_inode(sb->s_root, zgroup_name, NULL, type);
if (!dir)
if (!dir) {
ret = -ENOMEM;
goto free;
}
/*
* The first zone contains the super block: skip it.
@@ -1174,8 +1176,10 @@ static int zonefs_create_zgroup(struct zonefs_zone_data *zd,
* Use the file number within its group as file name.
*/
snprintf(file_name, ZONEFS_NAME_MAX - 1, "%u", n);
if (!zonefs_create_inode(dir, file_name, zone, type))
if (!zonefs_create_inode(dir, file_name, zone, type)) {
ret = -ENOMEM;
goto free;
}
n++;
}

View File

@@ -432,6 +432,7 @@ const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_post_suspending(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
union map_info *dm_get_rq_mapinfo(struct request *rq);

View File

@@ -56,7 +56,7 @@ struct property_entry;
* on a bus (or read from them). Apart from two basic transfer functions to
* transmit one message at a time, a more complex version can be used to
* transmit an arbitrary number of messages without interruption.
* @count must be be less than 64k since msg.len is u16.
* @count must be less than 64k since msg.len is u16.
*/
int i2c_transfer_buffer_flags(const struct i2c_client *client,
char *buf, int count, u16 flags);

View File

@@ -107,9 +107,12 @@ io_mapping_init_wc(struct io_mapping *iomap,
resource_size_t base,
unsigned long size)
{
iomap->iomem = ioremap_wc(base, size);
if (!iomap->iomem)
return NULL;
iomap->base = base;
iomap->size = size;
iomap->iomem = ioremap_wc(base, size);
#if defined(pgprot_noncached_wc) /* archs can't agree on a name ... */
iomap->prot = pgprot_noncached_wc(PAGE_KERNEL);
#elif defined(pgprot_writecombine)

View File

@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <uapi/linux/xattr.h>
struct inode;
@@ -95,7 +96,7 @@ static inline void simple_xattrs_free(struct simple_xattrs *xattrs)
list_for_each_entry_safe(xattr, node, &xattrs->head, list) {
kfree(xattr->name);
kfree(xattr);
kvfree(xattr);
}
}

View File

@@ -12,6 +12,7 @@ struct rt5670_platform_data {
int jd_mode;
bool in2_diff;
bool dev_gpio;
bool gpio1_is_ext_spk_en;
bool dmic_en;
unsigned int dmic1_data_pin;

View File

@@ -161,6 +161,7 @@ void snd_soc_dai_resume(struct snd_soc_dai *dai);
int snd_soc_dai_compress_new(struct snd_soc_dai *dai,
struct snd_soc_pcm_runtime *rtd, int num);
bool snd_soc_dai_stream_valid(struct snd_soc_dai *dai, int stream);
void snd_soc_dai_link_set_capabilities(struct snd_soc_dai_link *dai_link);
void snd_soc_dai_action(struct snd_soc_dai *dai,
int stream, int action);
static inline void snd_soc_dai_activate(struct snd_soc_dai *dai,

View File

@@ -452,6 +452,8 @@ int devm_snd_soc_register_component(struct device *dev,
const struct snd_soc_component_driver *component_driver,
struct snd_soc_dai_driver *dai_drv, int num_dai);
void snd_soc_unregister_component(struct device *dev);
void snd_soc_unregister_component_by_driver(struct device *dev,
const struct snd_soc_component_driver *component_driver);
struct snd_soc_component *snd_soc_lookup_component_nolocked(struct device *dev,
const char *driver_name);
struct snd_soc_component *snd_soc_lookup_component(struct device *dev,

View File

@@ -45,7 +45,10 @@ int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];
#ifdef CONFIG_CMA
static struct cma *hugetlb_cma[MAX_NUMNODES];
#endif
static unsigned long hugetlb_cma_size __initdata;
/*
* Minimum page order among possible hugepage sizes, set to a proper value
@@ -1235,9 +1238,10 @@ static void free_gigantic_page(struct page *page, unsigned int order)
* If the page isn't allocated using the cma allocator,
* cma_release() returns false.
*/
if (IS_ENABLED(CONFIG_CMA) &&
cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
#ifdef CONFIG_CMA
if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
return;
#endif
free_contig_range(page_to_pfn(page), 1 << order);
}
@@ -1248,7 +1252,8 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
{
unsigned long nr_pages = 1UL << huge_page_order(h);
if (IS_ENABLED(CONFIG_CMA)) {
#ifdef CONFIG_CMA
{
struct page *page;
int node;
@@ -1262,6 +1267,7 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
return page;
}
}
#endif
return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
}
@@ -2571,7 +2577,7 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
for (i = 0; i < h->max_huge_pages; ++i) {
if (hstate_is_gigantic(h)) {
if (IS_ENABLED(CONFIG_CMA) && hugetlb_cma[0]) {
if (hugetlb_cma_size) {
pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
break;
}
@@ -5654,7 +5660,6 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
}
#ifdef CONFIG_CMA
static unsigned long hugetlb_cma_size __initdata;
static bool cma_reserve_called __initdata;
static int __init cmdline_parse_hugetlb_cma(char *p)

View File

@@ -958,6 +958,9 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
return SCAN_ADDRESS_RANGE;
if (!hugepage_vma_check(vma, vma->vm_flags))
return SCAN_VMA_CHECK;
/* Anon VMA expected */
if (!vma->anon_vma || vma->vm_ops)
return SCAN_VMA_CHECK;
return 0;
}

View File

@@ -5669,7 +5669,6 @@ static void __mem_cgroup_clear_mc(void)
if (!mem_cgroup_is_root(mc.to))
page_counter_uncharge(&mc.to->memory, mc.moved_swap);
mem_cgroup_id_get_many(mc.to, mc.moved_swap);
css_put_many(&mc.to->css, mc.moved_swap);
mc.moved_swap = 0;
@@ -5860,7 +5859,8 @@ put: /* get_mctgt_type() gets the page */
ent = target.ent;
if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
mc.precharge--;
/* we fixup refcnts and charges later. */
mem_cgroup_id_get_many(mc.to, 1);
/* we fixup other refcnts and charges later. */
mc.moved_swap++;
}
break;
@@ -7186,6 +7186,13 @@ static struct cftype memsw_files[] = {
{ }, /* terminate */
};
/*
* If mem_cgroup_swap_init() is implemented as a subsys_initcall()
* instead of a core_initcall(), this could mean cgroup_memory_noswap still
* remains set to false even when memcg is disabled via "cgroup_disable=memory"
* boot parameter. This may result in premature OOPS inside
* mem_cgroup_get_nr_swap_pages() function in corner cases.
*/
static int __init mem_cgroup_swap_init(void)
{
/* No memory control -> no swap control */
@@ -7200,6 +7207,6 @@ static int __init mem_cgroup_swap_init(void)
return 0;
}
subsys_initcall(mem_cgroup_swap_init);
core_initcall(mem_cgroup_swap_init);
#endif /* CONFIG_MEMCG_SWAP */

View File

@@ -1614,7 +1614,7 @@ int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
#else
unsigned long idx = 0, pgcount = *num;
int err;
int err = -EINVAL;
for (; idx < pgcount; ++idx) {
err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]);
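
Pre-setting err to -EINVAL matters in this fallback because the loop can run zero times (for instance when *num is 0), and the function would otherwise hand back an uninitialised value. A tiny userspace analogue of that hazard; the -22 constant simply mirrors -EINVAL and the names are invented:

#include <stdio.h>

static int insert_all(const int *items, unsigned long count)
{
	int err = -22;		/* pre-initialised, as in the fix */
	unsigned long i;

	for (i = 0; i < count; i++) {
		(void)items[i];	/* pretend each insertion succeeds */
		err = 0;
	}
	/* With count == 0 the loop never runs; without the initialiser
	 * 'err' would be returned as an indeterminate value. */
	return err;
}

int main(void)
{
	printf("%d\n", insert_all(NULL, 0));	/* prints -22, never garbage */
	return 0;
}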

View File

@@ -2629,7 +2629,7 @@ static void unmap_region(struct mm_struct *mm,
* Create a list of vma's touched by the unmap, removing them from the mm's
* vma list as we go..
*/
static void
static bool
detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
struct vm_area_struct *prev, unsigned long end)
{
@@ -2654,6 +2654,17 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
/* Kill the cache */
vmacache_invalidate(mm);
/*
* Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or
* VM_GROWSUP VMA. Such VMAs can change their size under
* down_read(mmap_lock) and collide with the VMA we are about to unmap.
*/
if (vma && (vma->vm_flags & VM_GROWSDOWN))
return false;
if (prev && (prev->vm_flags & VM_GROWSUP))
return false;
return true;
}
/*
@@ -2834,7 +2845,8 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
}
/* Detach vmas from rbtree */
detach_vmas_to_be_unmapped(mm, vma, prev, end);
if (!detach_vmas_to_be_unmapped(mm, vma, prev, end))
downgrade = false;
if (downgrade)
mmap_write_downgrade(mm);

View File

@@ -3178,7 +3178,7 @@ static int shmem_initxattrs(struct inode *inode,
new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
GFP_KERNEL);
if (!new_xattr->name) {
kfree(new_xattr);
kvfree(new_xattr);
return -ENOMEM;
}

View File

@@ -326,6 +326,14 @@ int slab_unmergeable(struct kmem_cache *s)
if (s->refcount < 0)
return 1;
#ifdef CONFIG_MEMCG_KMEM
/*
* Skip the dying kmem_cache.
*/
if (s->memcg_params.dying)
return 1;
#endif
return 0;
}
@@ -886,12 +894,15 @@ static int shutdown_memcg_caches(struct kmem_cache *s)
return 0;
}
static void flush_memcg_workqueue(struct kmem_cache *s)
static void memcg_set_kmem_cache_dying(struct kmem_cache *s)
{
spin_lock_irq(&memcg_kmem_wq_lock);
s->memcg_params.dying = true;
spin_unlock_irq(&memcg_kmem_wq_lock);
}
static void flush_memcg_workqueue(struct kmem_cache *s)
{
/*
* SLAB and SLUB deactivate the kmem_caches through call_rcu. Make
* sure all registered rcu callbacks have been invoked.
@@ -923,10 +934,6 @@ static inline int shutdown_memcg_caches(struct kmem_cache *s)
{
return 0;
}
static inline void flush_memcg_workqueue(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG_KMEM */
void slab_kmem_cache_release(struct kmem_cache *s)
@@ -944,8 +951,6 @@ void kmem_cache_destroy(struct kmem_cache *s)
if (unlikely(!s))
return;
flush_memcg_workqueue(s);
get_online_cpus();
get_online_mems();
@@ -955,6 +960,22 @@ void kmem_cache_destroy(struct kmem_cache *s)
if (s->refcount)
goto out_unlock;
#ifdef CONFIG_MEMCG_KMEM
memcg_set_kmem_cache_dying(s);
mutex_unlock(&slab_mutex);
put_online_mems();
put_online_cpus();
flush_memcg_workqueue(s);
get_online_cpus();
get_online_mems();
mutex_lock(&slab_mutex);
#endif
err = shutdown_memcg_caches(s);
if (!err)
err = shutdown_cache(s);

View File

@@ -87,8 +87,8 @@ parse_symbol() {
return
fi
# Strip out the base of the path
code=${code#$basepath/}
# Strip out the base of the path on each line
code=$(while read -r line; do echo "${line#$basepath/}"; done <<< "$code")
# In the case of inlines, move everything to same line
code=${code//$'\n'/' '}
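Aside (illustrative sketch, not part of the patch): the hunk above switches from a single prefix-strip to a per-line loop because bash's ${var#pattern} removes the prefix only at the very start of the whole, possibly multi-line, string. The basepath and file names below are made up for demonstration.
#!/bin/bash
basepath=/usr/src/linux
code=$'/usr/src/linux/mm/slub.c:123\n/usr/src/linux/mm/slab.c:456'
# Old approach: only the first line loses the prefix.
echo "${code#$basepath/}"
# -> mm/slub.c:123
# -> /usr/src/linux/mm/slab.c:456   (prefix still present)
# Patched approach: strip the prefix from each line separately.
while read -r line; do echo "${line#$basepath/}"; done <<< "$code"
# -> mm/slub.c:123
# -> mm/slab.c:456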

View File

@@ -96,7 +96,7 @@ lx-symbols command."""
return ""
attrs = sect_attrs['attrs']
section_name_to_address = {
attrs[n]['name'].string(): attrs[n]['address']
attrs[n]['battr']['attr']['name'].string(): attrs[n]['address']
for n in range(int(sect_attrs['nsections']))}
args = []
for section_name in [".data", ".data..read_mostly", ".rodata", ".bss",

View File

@@ -606,7 +606,9 @@ int snd_info_get_line(struct snd_info_buffer *buffer, char *line, int len)
{
int c;
if (snd_BUG_ON(!buffer || !buffer->buffer))
if (snd_BUG_ON(!buffer))
return 1;
if (!buffer->buffer)
return 1;
if (len <= 0 || buffer->stop || buffer->error)
return 1;

View File

@@ -7587,6 +7587,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),

View File

@@ -232,9 +232,7 @@ static int snd_acp3x_probe(struct pci_dev *pci,
}
pm_runtime_set_autosuspend_delay(&pci->dev, 2000);
pm_runtime_use_autosuspend(&pci->dev);
pm_runtime_set_active(&pci->dev);
pm_runtime_put_noidle(&pci->dev);
pm_runtime_enable(&pci->dev);
pm_runtime_allow(&pci->dev);
return 0;
@@ -303,7 +301,7 @@ static void snd_acp3x_remove(struct pci_dev *pci)
ret = acp3x_deinit(adata->acp3x_base);
if (ret)
dev_err(&pci->dev, "ACP de-init failed\n");
pm_runtime_disable(&pci->dev);
pm_runtime_forbid(&pci->dev);
pm_runtime_get_noresume(&pci->dev);
pci_disable_msi(pci);
pci_release_regions(pci);

View File

@@ -779,13 +779,6 @@ static int max98373_probe(struct snd_soc_component *component)
regmap_write(max98373->regmap,
MAX98373_R202A_PCM_TO_SPK_MONO_MIX_2,
0x1);
/* Set inital volume (0dB) */
regmap_write(max98373->regmap,
MAX98373_R203D_AMP_DIG_VOL_CTRL,
0x00);
regmap_write(max98373->regmap,
MAX98373_R203E_AMP_PATH_GAIN,
0x00);
/* Enable DC blocker */
regmap_write(max98373->regmap,
MAX98373_R203F_AMP_DSP_CFG,
@@ -869,7 +862,6 @@ static const struct snd_soc_component_driver soc_codec_dev_max98373 = {
.num_dapm_widgets = ARRAY_SIZE(max98373_dapm_widgets),
.dapm_routes = max98373_audio_map,
.num_dapm_routes = ARRAY_SIZE(max98373_audio_map),
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
.non_legacy_dai_naming = 1,

View File

@@ -272,13 +272,13 @@ static int rt286_jack_detect(struct rt286_priv *rt286, bool *hp, bool *mic)
regmap_read(rt286->regmap, RT286_GET_MIC1_SENSE, &buf);
*mic = buf & 0x80000000;
}
if (!*mic) {
if (!*hp) {
snd_soc_dapm_disable_pin(dapm, "HV");
snd_soc_dapm_disable_pin(dapm, "VREF");
}
if (!*hp)
snd_soc_dapm_disable_pin(dapm, "LDO1");
snd_soc_dapm_sync(dapm);
}
return 0;
}

View File

@@ -43,6 +43,7 @@
#define RT5670_JD_MODE1 BIT(9)
#define RT5670_JD_MODE2 BIT(10)
#define RT5670_JD_MODE3 BIT(11)
#define RT5670_GPIO1_IS_EXT_SPK_EN BIT(12)
static unsigned long rt5670_quirk;
static unsigned int quirk_override;
@@ -602,9 +603,9 @@ int rt5670_set_jack_detect(struct snd_soc_component *component,
EXPORT_SYMBOL_GPL(rt5670_set_jack_detect);
static const DECLARE_TLV_DB_SCALE(out_vol_tlv, -4650, 150, 0);
static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0);
static const DECLARE_TLV_DB_MINMAX(dac_vol_tlv, -6562, 0);
static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0);
static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0);
static const DECLARE_TLV_DB_MINMAX(adc_vol_tlv, -1762, 3000);
static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
/* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
@@ -1447,6 +1448,33 @@ static int rt5670_hp_event(struct snd_soc_dapm_widget *w,
return 0;
}
static int rt5670_spk_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
struct rt5670_priv *rt5670 = snd_soc_component_get_drvdata(component);
if (!rt5670->pdata.gpio1_is_ext_spk_en)
return 0;
switch (event) {
case SND_SOC_DAPM_POST_PMU:
regmap_update_bits(rt5670->regmap, RT5670_GPIO_CTRL2,
RT5670_GP1_OUT_MASK, RT5670_GP1_OUT_HI);
break;
case SND_SOC_DAPM_PRE_PMD:
regmap_update_bits(rt5670->regmap, RT5670_GPIO_CTRL2,
RT5670_GP1_OUT_MASK, RT5670_GP1_OUT_LO);
break;
default:
return 0;
}
return 0;
}
static int rt5670_bst1_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
@@ -1860,7 +1888,9 @@ static const struct snd_soc_dapm_widget rt5670_specific_dapm_widgets[] = {
};
static const struct snd_soc_dapm_widget rt5672_specific_dapm_widgets[] = {
SND_SOC_DAPM_PGA("SPO Amp", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_PGA_E("SPO Amp", SND_SOC_NOPM, 0, 0, NULL, 0,
rt5670_spk_event, SND_SOC_DAPM_PRE_PMD |
SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_OUTPUT("SPOLP"),
SND_SOC_DAPM_OUTPUT("SPOLN"),
SND_SOC_DAPM_OUTPUT("SPORP"),
@@ -2857,14 +2887,14 @@ static const struct dmi_system_id dmi_platform_intel_quirks[] = {
},
{
.callback = rt5670_quirk_cb,
.ident = "Lenovo Thinkpad Tablet 10",
.ident = "Lenovo Miix 2 10",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Miix 2 10"),
},
.driver_data = (unsigned long *)(RT5670_DMIC_EN |
RT5670_DMIC1_IN2P |
RT5670_DEV_GPIO |
RT5670_GPIO1_IS_EXT_SPK_EN |
RT5670_JD_MODE2),
},
{
@@ -2924,6 +2954,10 @@ static int rt5670_i2c_probe(struct i2c_client *i2c,
rt5670->pdata.dev_gpio = true;
dev_info(&i2c->dev, "quirk dev_gpio\n");
}
if (rt5670_quirk & RT5670_GPIO1_IS_EXT_SPK_EN) {
rt5670->pdata.gpio1_is_ext_spk_en = true;
dev_info(&i2c->dev, "quirk GPIO1 is external speaker enable\n");
}
if (rt5670_quirk & RT5670_IN2_DIFF) {
rt5670->pdata.in2_diff = true;
dev_info(&i2c->dev, "quirk IN2_DIFF\n");
@@ -3023,6 +3057,13 @@ static int rt5670_i2c_probe(struct i2c_client *i2c,
RT5670_GP1_PF_MASK, RT5670_GP1_PF_OUT);
}
if (rt5670->pdata.gpio1_is_ext_spk_en) {
regmap_update_bits(rt5670->regmap, RT5670_GPIO_CTRL1,
RT5670_GP1_PIN_MASK, RT5670_GP1_PIN_GPIO1);
regmap_update_bits(rt5670->regmap, RT5670_GPIO_CTRL2,
RT5670_GP1_PF_MASK, RT5670_GP1_PF_OUT);
}
if (rt5670->pdata.jd_mode) {
regmap_update_bits(rt5670->regmap, RT5670_GLB_CLK,
RT5670_SCLK_SRC_MASK, RT5670_SCLK_SRC_RCCLK);

View File

@@ -757,7 +757,7 @@
#define RT5670_PWR_VREF2_BIT 4
#define RT5670_PWR_FV2 (0x1 << 3)
#define RT5670_PWR_FV2_BIT 3
#define RT5670_LDO_SEL_MASK (0x3)
#define RT5670_LDO_SEL_MASK (0x7)
#define RT5670_LDO_SEL_SFT 0
/* Power Management for Analog 2 (0x64) */

View File

@@ -967,13 +967,12 @@ int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert)
rt5682_enable_push_button_irq(component, false);
snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_LOW);
if (snd_soc_dapm_get_pin_status(dapm, "MICBIAS"))
if (!snd_soc_dapm_get_pin_status(dapm, "MICBIAS"))
snd_soc_component_update_bits(component,
RT5682_PWR_ANLG_1, RT5682_PWR_MB, 0);
if (!snd_soc_dapm_get_pin_status(dapm, "Vref2"))
snd_soc_component_update_bits(component,
RT5682_PWR_ANLG_1, RT5682_PWR_VREF2, 0);
else
snd_soc_component_update_bits(component,
RT5682_PWR_ANLG_1,
RT5682_PWR_VREF2 | RT5682_PWR_MB, 0);
snd_soc_component_update_bits(component, RT5682_PWR_ANLG_3,
RT5682_PWR_CBJ, 0);
@@ -992,16 +991,17 @@ static int rt5682_set_jack_detect(struct snd_soc_component *component,
rt5682->hs_jack = hs_jack;
if (!rt5682->is_sdw) {
if (!hs_jack) {
regmap_update_bits(rt5682->regmap, RT5682_IRQ_CTRL_2,
RT5682_JD1_EN_MASK, RT5682_JD1_DIS);
regmap_update_bits(rt5682->regmap, RT5682_RC_CLK_CTRL,
RT5682_POW_JDH | RT5682_POW_JDL, 0);
cancel_delayed_work_sync(&rt5682->jack_detect_work);
return 0;
}
if (!rt5682->is_sdw) {
switch (rt5682->pdata.jd_src) {
case RT5682_JD1:
snd_soc_component_update_bits(component,
@@ -1082,7 +1082,8 @@ void rt5682_jack_detect_handler(struct work_struct *work)
/* jack was out, report jack type */
rt5682->jack_type =
rt5682_headset_detect(rt5682->component, 1);
} else {
} else if ((rt5682->jack_type & SND_JACK_HEADSET) ==
SND_JACK_HEADSET) {
/* jack is already in, report button event */
rt5682->jack_type = SND_JACK_HEADSET;
btn_type = rt5682_button_detect(rt5682->component);
@@ -1608,8 +1609,7 @@ static const struct snd_soc_dapm_widget rt5682_dapm_widgets[] = {
0, set_filter_clk, SND_SOC_DAPM_PRE_PMU),
SND_SOC_DAPM_SUPPLY("Vref1", RT5682_PWR_ANLG_1, RT5682_PWR_VREF1_BIT, 0,
rt5682_set_verf, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_SUPPLY("Vref2", RT5682_PWR_ANLG_1, RT5682_PWR_VREF2_BIT, 0,
NULL, 0),
SND_SOC_DAPM_SUPPLY("Vref2", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("MICBIAS", SND_SOC_NOPM, 0, 0, NULL, 0),
/* ASRC */
@@ -2492,6 +2492,15 @@ static int rt5682_wclk_prepare(struct clk_hw *hw)
snd_soc_dapm_force_enable_pin_unlocked(dapm, "MICBIAS");
snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1,
RT5682_PWR_MB, RT5682_PWR_MB);
snd_soc_dapm_force_enable_pin_unlocked(dapm, "Vref2");
snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1,
RT5682_PWR_VREF2 | RT5682_PWR_FV2,
RT5682_PWR_VREF2);
usleep_range(55000, 60000);
snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1,
RT5682_PWR_FV2, RT5682_PWR_FV2);
snd_soc_dapm_force_enable_pin_unlocked(dapm, "I2S1");
snd_soc_dapm_force_enable_pin_unlocked(dapm, "PLL2F");
snd_soc_dapm_force_enable_pin_unlocked(dapm, "PLL2B");
@@ -2517,9 +2526,12 @@ static void rt5682_wclk_unprepare(struct clk_hw *hw)
snd_soc_dapm_mutex_lock(dapm);
snd_soc_dapm_disable_pin_unlocked(dapm, "MICBIAS");
snd_soc_dapm_disable_pin_unlocked(dapm, "Vref2");
if (!rt5682->jack_type)
snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1,
RT5682_PWR_VREF2 | RT5682_PWR_FV2 |
RT5682_PWR_MB, 0);
snd_soc_dapm_disable_pin_unlocked(dapm, "I2S1");
snd_soc_dapm_disable_pin_unlocked(dapm, "PLL2F");
snd_soc_dapm_disable_pin_unlocked(dapm, "PLL2B");

View File

@@ -186,7 +186,7 @@ SOC_DAPM_SINGLE("PCM Playback Switch", WM8974_MONOMIX, 0, 1, 0),
/* Boost mixer */
static const struct snd_kcontrol_new wm8974_boost_mixer[] = {
SOC_DAPM_SINGLE("Aux Switch", WM8974_INPPGA, 6, 1, 0),
SOC_DAPM_SINGLE("Aux Switch", WM8974_INPPGA, 6, 1, 1),
};
/* Input PGA */
@@ -474,6 +474,10 @@ static int wm8974_set_dai_fmt(struct snd_soc_dai *codec_dai,
iface |= 0x0008;
break;
case SND_SOC_DAIFMT_DSP_A:
if ((fmt & SND_SOC_DAIFMT_INV_MASK) == SND_SOC_DAIFMT_IB_IF ||
(fmt & SND_SOC_DAIFMT_INV_MASK) == SND_SOC_DAIFMT_NB_IF) {
return -EINVAL;
}
iface |= 0x00018;
break;
default:

View File

@@ -317,8 +317,8 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
if (ret < 0)
goto out_put_node;
dai_link->dpcm_playback = 1;
dai_link->dpcm_capture = 1;
snd_soc_dai_link_set_capabilities(dai_link);
dai_link->ops = &graph_ops;
dai_link->init = asoc_simple_dai_init;

View File

@@ -231,8 +231,8 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
if (ret < 0)
goto out_put_node;
dai_link->dpcm_playback = 1;
dai_link->dpcm_capture = 1;
snd_soc_dai_link_set_capabilities(dai_link);
dai_link->ops = &simple_ops;
dai_link->init = asoc_simple_dai_init;

View File

@@ -354,6 +354,7 @@ static struct snd_soc_dai_link bdw_rt5677_dais[] = {
{
.name = "Codec DSP",
.stream_name = "Wake on Voice",
.capture_only = 1,
.ops = &bdw_rt5677_dsp_ops,
SND_SOC_DAILINK_REG(dsp),
},

View File

@@ -543,9 +543,11 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
if (cnt) {
ret = device_add_properties(codec_dev, props);
if (ret)
if (ret) {
put_device(codec_dev);
return ret;
}
}
devm_acpi_dev_add_driver_gpios(codec_dev, byt_cht_es8316_gpios);
priv->speaker_en_gpio =

View File

@@ -253,21 +253,20 @@ static int cht_codec_fixup(struct snd_soc_pcm_runtime *rtd,
params_set_format(params, SNDRV_PCM_FORMAT_S24_LE);
/*
* Default mode for SSP configuration is TDM 4 slot
* Default mode for SSP configuration is TDM 4 slot. One board/design,
* the Lenovo Miix 2 10 uses not 1 but 2 codecs connected to SSP2. The
* second piggy-backed, output-only codec is inside the keyboard-dock
* (which has extra speakers). Unlike the main rt5672 codec, we cannot
* configure this codec, it is hard coded to use 2 channel 24 bit I2S.
* Since we only support 2 channels anyways, there is no need for TDM
* on any cht-bsw-rt5672 designs. So we simply use I2S 2ch everywhere.
*/
ret = snd_soc_dai_set_fmt(asoc_rtd_to_codec(rtd, 0),
SND_SOC_DAIFMT_DSP_B |
SND_SOC_DAIFMT_IB_NF |
ret = snd_soc_dai_set_fmt(asoc_rtd_to_cpu(rtd, 0),
SND_SOC_DAIFMT_I2S |
SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBS_CFS);
if (ret < 0) {
dev_err(rtd->dev, "can't set format to TDM %d\n", ret);
return ret;
}
/* TDM 4 slots 24 bit, set Rx & Tx bitmask to 4 active slots */
ret = snd_soc_dai_set_tdm_slot(asoc_rtd_to_codec(rtd, 0), 0xF, 0xF, 4, 24);
if (ret < 0) {
dev_err(rtd->dev, "can't set codec TDM slot %d\n", ret);
dev_err(rtd->dev, "can't set format to I2S, err %d\n", ret);
return ret;
}

View File

@@ -72,7 +72,7 @@ config SND_SOC_QDSP6_ASM_DAI
config SND_SOC_QDSP6
tristate "SoC ALSA audio driver for QDSP6"
depends on QCOM_APR && HAS_DMA
depends on QCOM_APR
select SND_SOC_QDSP6_COMMON
select SND_SOC_QDSP6_CORE
select SND_SOC_QDSP6_AFE

Some files were not shown because too many files have changed in this diff.