
Sync up with android12-5.10 for the following commits:fb39cdb9ea
ANDROID: export reclaim_pages1f8f6d59a2
ANDROID: vendor_hook: Add hook to not be stuck ro rmap lock in kswapd or direct_reclaim91bfc78bc0
ANDROID: Update symbol list for mtk02df0b2661
ANDROID: GKI: rockchip: Add symbols for cryptoefdf581d14
ANDROID: GKI: rockchip: Add symbol pci_disable_link_state504ce2d3a6
ANDROID: GKI: rockchip: Add symbols for sounda6b6bc98b7
ANDROID: GKI: rockchip: Add symbols for videof3a311b456
BACKPORT: f2fs: do not set compression bit if kernel doesn't supportb0988144b0
UPSTREAM: exfat: improve performance of exfat_free_cluster when using dirsync mount00d3b8c0cc
ANDROID: GKI: rockchip: Add symbols for drm dp936f1e35d1
UPSTREAM: arm64: perf: Support new DT compatiblesed931dc8ff
UPSTREAM: arm64: perf: Simplify registration boilerplatebb6c018ab6
UPSTREAM: arm64: perf: Support Denver and Carmel PMUsd306fd9d47
UPSTREAM: arm64: perf: add support for Cortex-A7809f78c3f7e
ANDROID: GKI: rockchip: Update symbol for devfreqe7ed66854e
ANDROID: GKI: rockchip: Update symbols for drma3e70ff5bf
ANDROID: GKI: Update symbols to symbol lista09241c6dd
UPSTREAM: ASoC: hdmi-codec: make hdmi_codec_controls static9eda09e511
UPSTREAM: ASoC: hdmi-codec: Add a prepare hook4ad97b395f
UPSTREAM: ASoC: hdmi-codec: Add iec958 controlsc0c2f6962d
UPSTREAM: ASoC: hdmi-codec: Rework to support more controls4c6eb3db8a
UPSTREAM: ALSA: iec958: Split status creation and fill580d2e7c78
UPSTREAM: ALSA: doc: Clarify IEC958 controls iface8b4bb1bca0
UPSTREAM: ASoC: hdmi-codec: remove unused spk_mask member5a2c4a5d1e
UPSTREAM: ASoC: hdmi-codec: remove useless initialization49e502f0c0
UPSTREAM: ASoC: codec: hdmi-codec: Support IEC958 encoded PCM format9bf69acb92
UPSTREAM: ASoC: hdmi-codec: Fix return value in hdmi_codec_set_jack()056409c7dc
UPSTREAM: ASoC: hdmi-codec: Add RX support5e75deab3a
UPSTREAM: ASoC: hdmi-codec: Get ELD in before reporting plugged eventd6207c39cb
ANDROID: GKI: rockchip: Add symbols for display driver1c3ed9d481
BACKPORT: KVM: x86/mmu: fix NULL pointer dereference on guest INVPCID843d3cb41b
BACKPORT: io_uring: always grab file table for deferred statx784cc16aed
BACKPORT: Bluetooth: L2CAP: Fix use-after-free caused by l2cap_chan_put2b377175a3
ANDROID: add two func in mm/memcontrol.ce56f8712cf
ANDROID: vendor_hooks: protect multi-mapcount pages in kernel3f775b9367
ANDROID: vendor_hooks: account page-mapcount1d2287f56e
FROMGIT: io_uring: Use original task for req identity in io_identity_cow()e0c9da25b2
FROMLIST: binder: fix UAF of ref->proc caused by race condition12f4322442
ANDROID: vendor_hooks: Guard cgroup struct with CONFIG_CGROUPS6532784c78
ANDROID: vendor_hooks: add hooks for remove_vm_area.c9a70dd592
ANDROID: GKI: allow mm vendor hooks header inclusion from header files039080d064
ANDROID: Update symbol list of mediatek9e8dedef1e
ANDROID: sched: add vendor hook to PELT multiplier573c7f061d
ANDROID: Guard hooks with their CONFIG_ options14f646cca5
ANDROID: fix kernelci issue for allnoconfig builds4442801a43
ANDROID: sched: Introducing PELT multiplierb2e5773ea4
FROMGIT: binder: fix redefinition of seq_file attributes9c2a5eef8f
Merge tag 'android12-5.10.117_r00' into 'android12-5.10'5fa1e1affc
ANDROID: GKI: pcie: Fix the broken dw_pcie structure51b3e17071
UPSTREAM: PCI: dwc: Support multiple ATU memory regionsa8d7f6518e
ANDROID: oplus: Update the ABI xml and symbol list4536de1b70
ANDROID: vendor_hooks: add hooks in __alloc_pages_slowpathd63c961c9d
ANDROID: GKI: Update symbols to symbol list41cbbe08f9
FROMGIT: arm64: fix oops in concurrently setting insn_emulation sysctlsc301d142e8
FROMGIT: usb: dwc3: core: Do not perform GCTL_CORE_SOFTRESET during bootup8b19ed264b
ANDROID: vendor_hooks:vendor hook for mmput242b11e574
ANDROID: vendor_hooks:vendor hook for pidfd_open0e1cb27700
ANDROID: vendor_hook: Add hook in shmem_writepage()8ee37d0bcd
BACKPORT: iommu/dma: Fix race condition during iova_domain initialization321bf845e1
FROMGIT: usb: dwc3: core: Deprecate GCTL.CORESOFTRESETc5eb0edfde
FROMGIT: usb: dwc3: gadget: Prevent repeat pullup()8de633b735
FROMGIT: Binder: add TF_UPDATE_TXN to replace outdated txne8fce59434
BACKPORT: FROMGIT: cgroup: Use separate src/dst nodes when preloading css_sets for migrationf26c566455
UPSTREAM: usb: gadget: f_uac2: allow changing interface name via configfs98fa7f7dfd
UPSTREAM: usb: gadget: f_uac1: allow changing interface name via configfs29172165ca
UPSTREAM: usb: gadget: f_uac1: Add suspend callbackff5468c71e
UPSTREAM: usb: gadget: f_uac2: Add suspend callback31e6d620c1
UPSTREAM: usb: gadget: u_audio: Add suspend call17643c1fdd
UPSTREAM: usb: gadget: u_audio: Rate ctl notifies about current srate (0=stopped)308955e3a6
UPSTREAM: usb: gadget: f_uac1: Support multiple sampling ratesae03eadb42
UPSTREAM: usb: gadget: f_uac2: Support multiple sampling ratesbedc53fae4
UPSTREAM: usb: gadget:audio: Replace deprecated macro S_IRUGO37e0d5eddb
UPSTREAM: usb: gadget: u_audio: Add capture/playback srate getter3251bb3250
UPSTREAM: usb: gadget: u_audio: Move dynamic srate from params to rtd530916be97
UPSTREAM: usb: gadget: u_audio: Support multiple sampling rates7f496d5a99
UPSTREAM: docs: ABI: fixed formatting in configfs-usb-gadget-uac22500cb53e6
UPSTREAM: usb: gadget: u_audio: Subdevice 0 for capture ctlsc386f34bd4
UPSTREAM: usb: gadget: u_audio: fix calculations for small bIntervalf74e3e2fe4
UPSTREAM: docs: ABI: fixed req_number desc in UAC102949bae5c
UPSTREAM: docs: ABI: added missing num_requests param to UAC2e1377ac38f
UPSTREAM: usb:gadget: f_uac1: fixed sync playback4b7c8905c5
UPSTREAM: usb: gadget: u_audio.c: Adding Playback Pitch ctl for sync playbacke29d2b5178
UPSTREAM: ABI: configfs-usb-gadget-uac2: fix a broken tableec313ae88d
UPSTREAM: ABI: configfs-usb-gadget-uac1: fix a broken tablebf46bbe087
UPSTREAM: usb: gadget: f_uac1: fixing inconsistent indentingb9c4cbbf7a
UPSTREAM: docs: usb: fix malformed tablea380b466e0
UPSTREAM: usb: gadget: f_uac1: add volume and mute supporte2c0816af2
BACKPORT: usb: gadget: f_uac2: add volume and mute support8430eb0243
UPSTREAM: usb: gadget: u_audio: add bi-directional volume and mute support257d21b184
UPSTREAM: usb: audio-v2: add ability to define feature unit descriptor1002747429
ANDROID: mm: shmem: use reclaim_pages() to recalim pages from a list6719763187
UPSTREAM: usb: gadget: f_uac1: disable IN/OUT ep if unused And add the new symbols being tracked due to abi additions from the android12-5.10 branch: Leaf changes summary: 85 artifacts changed Changed leaf types summary: 0 leaf type changed Removed/Changed/Added functions summary: 0 Removed, 0 Changed, 69 Added functions Removed/Changed/Added variables summary: 0 Removed, 0 Changed, 16 Added variables 69 Added functions: [A] 'function void __dev_kfree_skb_irq(sk_buff*, skb_free_reason)' [A] 'function int __page_mapcount(page*)' [A] 'function int __traceiter_android_vh_add_page_to_lrulist(void*, page*, bool, lru_list)' [A] 'function int __traceiter_android_vh_alloc_pages_slowpath_begin(void*, gfp_t, unsigned int, unsigned long int*)' [A] 'function int __traceiter_android_vh_alloc_pages_slowpath_end(void*, gfp_t, unsigned int, unsigned long int)' [A] 'function int __traceiter_android_vh_del_page_from_lrulist(void*, page*, bool, lru_list)' [A] 'function int __traceiter_android_vh_do_traversal_lruvec(void*, lruvec*)' [A] 'function int __traceiter_android_vh_mark_page_accessed(void*, page*)' [A] 'function int __traceiter_android_vh_mutex_unlock_slowpath_end(void*, mutex*, task_struct*)' [A] 'function int __traceiter_android_vh_page_should_be_protected(void*, page*, bool*)' [A] 'function int __traceiter_android_vh_rwsem_mark_wake_readers(void*, rw_semaphore*, rwsem_waiter*)' [A] 'function int __traceiter_android_vh_rwsem_set_owner(void*, rw_semaphore*)' [A] 'function int __traceiter_android_vh_rwsem_set_reader_owned(void*, rw_semaphore*)' [A] 'function int __traceiter_android_vh_rwsem_up_read_end(void*, rw_semaphore*)' [A] 'function int __traceiter_android_vh_rwsem_up_write_end(void*, rw_semaphore*)' [A] 'function int __traceiter_android_vh_sched_pelt_multiplier(void*, unsigned int, unsigned int, int*)' [A] 'function int __traceiter_android_vh_show_mapcount_pages(void*, void*)' [A] 'function int __traceiter_android_vh_update_page_mapcount(void*, page*, bool, bool, bool*, 
bool*)' [A] 'function int __v4l2_ctrl_handler_setup(v4l2_ctrl_handler*)' [A] 'function int crypto_ahash_final(ahash_request*)' [A] 'function crypto_akcipher* crypto_alloc_akcipher(const char*, u32, u32)' [A] 'function int crypto_register_akcipher(akcipher_alg*)' [A] 'function void crypto_unregister_akcipher(akcipher_alg*)' [A] 'function int des_expand_key(des_ctx*, const u8*, unsigned int)' [A] 'function void dev_pm_opp_unregister_set_opp_helper(opp_table*)' [A] 'function net_device* devm_alloc_etherdev_mqs(device*, int, unsigned int, unsigned int)' [A] 'function mii_bus* devm_mdiobus_alloc_size(device*, int)' [A] 'function int devm_of_mdiobus_register(device*, mii_bus*, device_node*)' [A] 'function int devm_register_netdev(device*, net_device*)' [A] 'function bool disable_hardirq(unsigned int)' [A] 'function void do_traversal_all_lruvec()' [A] 'function drm_connector_status drm_bridge_detect(drm_bridge*)' [A] 'function edid* drm_bridge_get_edid(drm_bridge*, drm_connector*)' [A] 'function int drm_bridge_get_modes(drm_bridge*, drm_connector*)' [A] 'function int drm_dp_get_phy_test_pattern(drm_dp_aux*, drm_dp_phy_test_params*)' [A] 'function int drm_dp_read_desc(drm_dp_aux*, drm_dp_desc*, bool)' [A] 'function int drm_dp_read_dpcd_caps(drm_dp_aux*, u8*)' [A] 'function int drm_dp_read_sink_count(drm_dp_aux*)' [A] 'function int drm_dp_set_phy_test_pattern(drm_dp_aux*, drm_dp_phy_test_params*, u8)' [A] 'function uint64_t drm_format_info_min_pitch(const drm_format_info*, int, unsigned int)' [A] 'function int drm_mm_reserve_node(drm_mm*, drm_mm_node*)' [A] 'function bool drm_probe_ddc(i2c_adapter*)' [A] 'function void drm_self_refresh_helper_cleanup(drm_crtc*)' [A] 'function int drm_self_refresh_helper_init(drm_crtc*)' [A] 'function int get_pelt_halflife()' [A] 'function ssize_t hdmi_avi_infoframe_pack_only(const hdmi_avi_infoframe*, void*, size_t)' [A] 'function ssize_t iio_read_const_attr(device*, device_attribute*, char*)' [A] 'function bool 
mipi_dsi_packet_format_is_short(u8)' [A] 'function platform_device* of_device_alloc(device_node*, const char*, device*)' [A] 'function lruvec* page_to_lruvec(page*, pg_data_t*)' [A] 'function int pci_disable_link_state(pci_dev*, int)' [A] 'function int regmap_test_bits(regmap*, unsigned int, unsigned int)' [A] 'function unsigned int regulator_get_linear_step(regulator*)' [A] 'function int regulator_suspend_enable(regulator_dev*, suspend_state_t)' [A] 'function int rsa_parse_priv_key(rsa_key*, void*, unsigned int)' [A] 'function int rsa_parse_pub_key(rsa_key*, void*, unsigned int)' [A] 'function int sg_nents(scatterlist*)' [A] 'function int snd_pcm_create_iec958_consumer_default(u8*, size_t)' [A] 'function int snd_pcm_fill_iec958_consumer(snd_pcm_runtime*, u8*, size_t)' [A] 'function int snd_pcm_fill_iec958_consumer_hw_params(snd_pcm_hw_params*, u8*, size_t)' [A] 'function int snd_soc_dapm_force_bias_level(snd_soc_dapm_context*, snd_soc_bias_level)' [A] 'function int snd_soc_jack_add_zones(snd_soc_jack*, int, snd_soc_jack_zone*)' [A] 'function int snd_soc_jack_get_type(snd_soc_jack*, int)' [A] 'function void tcpm_tcpc_reset(tcpm_port*)' [A] 'function int v4l2_enum_dv_timings_cap(v4l2_enum_dv_timings*, const v4l2_dv_timings_cap*, v4l2_check_dv_timings_fnc*, void*)' [A] 'function void v4l2_print_dv_timings(const char*, const char*, const v4l2_dv_timings*, bool)' [A] 'function int v4l2_src_change_event_subdev_subscribe(v4l2_subdev*, v4l2_fh*, v4l2_event_subscription*)' [A] 'function void v4l2_subdev_notify_event(v4l2_subdev*, const v4l2_event*)' [A] 'function bool v4l2_valid_dv_timings(const v4l2_dv_timings*, const v4l2_dv_timings_cap*, v4l2_check_dv_timings_fnc*, void*)' 16 Added variables: [A] 'tracepoint __tracepoint_android_vh_add_page_to_lrulist' [A] 'tracepoint __tracepoint_android_vh_alloc_pages_slowpath_begin' [A] 'tracepoint __tracepoint_android_vh_alloc_pages_slowpath_end' [A] 'tracepoint __tracepoint_android_vh_del_page_from_lrulist' [A] 'tracepoint 
__tracepoint_android_vh_do_traversal_lruvec' [A] 'tracepoint __tracepoint_android_vh_mark_page_accessed' [A] 'tracepoint __tracepoint_android_vh_mutex_unlock_slowpath_end' [A] 'tracepoint __tracepoint_android_vh_page_should_be_protected' [A] 'tracepoint __tracepoint_android_vh_rwsem_mark_wake_readers' [A] 'tracepoint __tracepoint_android_vh_rwsem_set_owner' [A] 'tracepoint __tracepoint_android_vh_rwsem_set_reader_owned' [A] 'tracepoint __tracepoint_android_vh_rwsem_up_read_end' [A] 'tracepoint __tracepoint_android_vh_rwsem_up_write_end' [A] 'tracepoint __tracepoint_android_vh_sched_pelt_multiplier' [A] 'tracepoint __tracepoint_android_vh_show_mapcount_pages' [A] 'tracepoint __tracepoint_android_vh_update_page_mapcount' Signed-off-by: Greg Kroah-Hartman <gregkh@google.com> Change-Id: I47eefe85b949d3f358da95a9b6553660b9be0791
630 lines · 16 KiB · C
// SPDX-License-Identifier: GPL-2.0
|
|
/*
|
|
* Synopsys DesignWare PCIe host controller driver
|
|
*
|
|
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
|
|
* https://www.samsung.com
|
|
*
|
|
* Author: Jingoo Han <jg1.han@samsung.com>
|
|
*/
|
|
|
|
#include <linux/irqchip/chained_irq.h>
|
|
#include <linux/irqdomain.h>
|
|
#include <linux/msi.h>
|
|
#include <linux/of_address.h>
|
|
#include <linux/of_pci.h>
|
|
#include <linux/pci_regs.h>
|
|
#include <linux/platform_device.h>
|
|
|
|
#include "../../pci.h"
|
|
#include "pcie-designware.h"
|
|
|
|
static struct pci_ops dw_pcie_ops;
|
|
static struct pci_ops dw_child_pcie_ops;
|
|
|
|
/* Top-level MSI irq_chip .irq_ack: forward the ack to the parent (bottom) chip. */
static void dw_msi_ack_irq(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}
|
|
|
|
/*
 * Top-level MSI irq_chip .irq_mask: mask at the PCI MSI capability level
 * first, then propagate the mask to the parent (controller) chip.
 */
static void dw_msi_mask_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}
|
|
|
|
/*
 * Top-level MSI irq_chip .irq_unmask: unmask at the PCI MSI capability
 * level, then unmask in the parent (controller) chip.
 */
static void dw_msi_unmask_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}
|
|
|
|
/* irq_chip used by the PCI/MSI (top-level) domain; delegates to the parent. */
static struct irq_chip dw_pcie_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_ack = dw_msi_ack_irq,
	.irq_mask = dw_msi_mask_irq,
	.irq_unmask = dw_msi_unmask_irq,
};
|
|
|
|
/*
 * MSI domain info: default domain/chip ops, with MSI-X and multi-vector
 * MSI supported on top of dw_pcie_msi_irq_chip.
 */
static struct msi_domain_info dw_pcie_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip = &dw_pcie_msi_irq_chip,
};
|
|
|
|
/* MSI int handler */
|
|
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
|
|
{
|
|
int i, pos, irq;
|
|
unsigned long val;
|
|
u32 status, num_ctrls;
|
|
irqreturn_t ret = IRQ_NONE;
|
|
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
|
|
|
|
num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
|
|
|
|
for (i = 0; i < num_ctrls; i++) {
|
|
status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
|
|
(i * MSI_REG_CTRL_BLOCK_SIZE));
|
|
if (!status)
|
|
continue;
|
|
|
|
ret = IRQ_HANDLED;
|
|
val = status;
|
|
pos = 0;
|
|
while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
|
|
pos)) != MAX_MSI_IRQS_PER_CTRL) {
|
|
irq = irq_find_mapping(pp->irq_domain,
|
|
(i * MAX_MSI_IRQS_PER_CTRL) +
|
|
pos);
|
|
generic_handle_irq(irq);
|
|
pos++;
|
|
}
|
|
}
|
|
|
|
return ret;
|
|
}
|
|
EXPORT_SYMBOL_GPL(dw_handle_msi_irq);
|
|
|
|
/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct pcie_port *pp;

	/* Mask/ack the parent interrupt for the duration of the handler. */
	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	dw_handle_msi_irq(pp);

	chained_irq_exit(chip, desc);
}
|
|
|
|
static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
|
|
{
|
|
struct pcie_port *pp = irq_data_get_irq_chip_data(d);
|
|
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
|
|
u64 msi_target;
|
|
|
|
msi_target = (u64)pp->msi_data;
|
|
|
|
msg->address_lo = lower_32_bits(msi_target);
|
|
msg->address_hi = upper_32_bits(msi_target);
|
|
|
|
msg->data = d->hwirq;
|
|
|
|
dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
|
|
(int)d->hwirq, msg->address_hi, msg->address_lo);
|
|
}
|
|
|
|
/*
 * MSI affinity cannot be changed per-vector on this controller; the
 * effective affinity follows the chained parent interrupt instead.
 */
static int dw_pci_msi_set_affinity(struct irq_data *d,
				   const struct cpumask *mask, bool force)
{
	return -EINVAL;
}
|
|
|
|
/*
 * Bottom (controller-level) .irq_mask: set the vector's bit in the cached
 * mask and write the whole mask register for its controller block.
 * pp->lock serializes the read-modify-write of irq_mask[].
 */
static void dw_pci_bottom_mask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	/* Locate the controller block and bit for this hwirq. */
	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] |= BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}
|
|
|
|
/*
 * Bottom (controller-level) .irq_unmask: clear the vector's bit in the
 * cached mask and write the mask register back, under pp->lock.
 */
static void dw_pci_bottom_unmask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] &= ~BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}
|
|
|
|
/*
 * Bottom .irq_ack: clear the vector's pending bit by writing it to the
 * status register (write-one-to-clear, so no lock or read-back needed).
 */
static void dw_pci_bottom_ack(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
}
|
|
|
|
/* Default bottom-level irq_chip used when no SoC-specific msi_host_init. */
static struct irq_chip dw_pci_msi_bottom_irq_chip = {
	.name = "DWPCI-MSI",
	.irq_ack = dw_pci_bottom_ack,
	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
	.irq_set_affinity = dw_pci_msi_set_affinity,
	.irq_mask = dw_pci_bottom_mask,
	.irq_unmask = dw_pci_bottom_unmask,
};
|
|
|
|
/*
 * Allocate nr_irqs contiguous MSI vectors from the pp->msi_irq_in_use
 * bitmap (power-of-two aligned region, as multi-MSI requires) and bind
 * each virq to the bottom chip with edge handling.
 *
 * Returns 0 on success, -ENOSPC when no suitable region is free.
 */
static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs,
				    void *args)
{
	struct pcie_port *pp = domain->host_data;
	unsigned long flags;
	u32 i;
	int bit;

	raw_spin_lock_irqsave(&pp->lock, flags);

	/* Multi-MSI needs an aligned, contiguous block of vectors. */
	bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
				      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);

	if (bit < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, bit + i,
				    pp->msi_irq_chip,
				    pp, handle_edge_irq,
				    NULL, NULL);

	return 0;
}
|
|
|
|
/*
 * Release the region of vectors previously taken by
 * dw_pcie_irq_domain_alloc() back to the in-use bitmap.
 */
static void dw_pcie_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct pcie_port *pp = domain->host_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
			      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}
|
|
|
|
/* Ops for the inner (parent) MSI irq domain. */
static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
	.alloc = dw_pcie_irq_domain_alloc,
	.free = dw_pcie_irq_domain_free,
};
|
|
|
|
/*
 * Create the two-level MSI domain hierarchy: a linear inner domain
 * (pp->irq_domain) feeding a PCI/MSI domain (pp->msi_domain).
 *
 * Returns 0 on success, -ENOMEM if either domain cannot be created;
 * on the second failure the inner domain is torn down again.
 */
int dw_pcie_allocate_domains(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);

	pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
						  &dw_pcie_msi_domain_ops, pp);
	if (!pp->irq_domain) {
		dev_err(pci->dev, "Failed to create IRQ domain\n");
		return -ENOMEM;
	}

	irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);

	pp->msi_domain = pci_msi_create_irq_domain(fwnode,
						   &dw_pcie_msi_domain_info,
						   pp->irq_domain);
	if (!pp->msi_domain) {
		dev_err(pci->dev, "Failed to create MSI domain\n");
		irq_domain_remove(pp->irq_domain);
		return -ENOMEM;
	}

	return 0;
}
|
|
|
|
/*
 * Tear down MSI support: detach the chained handler (if one was set up),
 * remove both irq domains, and unmap the MSI doorbell DMA address.
 * DMA_ATTR_SKIP_CPU_SYNC mirrors the attrs used when mapping.
 */
void dw_pcie_free_msi(struct pcie_port *pp)
{
	if (pp->msi_irq) {
		irq_set_chained_handler(pp->msi_irq, NULL);
		irq_set_handler_data(pp->msi_irq, NULL);
	}

	irq_domain_remove(pp->msi_domain);
	irq_domain_remove(pp->irq_domain);

	if (pp->msi_data) {
		struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
		struct device *dev = pci->dev;

		dma_unmap_single_attrs(dev, pp->msi_data, sizeof(pp->msi_msg),
				       DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	}
}
|
|
|
|
/*
 * Program the MSI doorbell (target) address into the controller's
 * MSI address registers. No-op when CONFIG_PCI_MSI is disabled.
 */
void dw_pcie_msi_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target = (u64)pp->msi_data;

	if (!IS_ENABLED(CONFIG_PCI_MSI))
		return;

	/* Program the msi_data */
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}
EXPORT_SYMBOL_GPL(dw_pcie_msi_init);
|
|
|
|
/*
 * Probe-time initialization of a DWC PCIe root complex:
 *   - map the "config" resource and DT windows,
 *   - determine num-viewport and max link speed from DT,
 *   - set up MSI (either the built-in bottom chip + domains + DMA-mapped
 *     doorbell, or the SoC's msi_host_init hook),
 *   - install config-space accessors and probe the host bridge.
 *
 * Returns 0 on success or a negative errno; MSI resources allocated here
 * are released on the error path.
 */
int dw_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource_entry *win;
	struct pci_host_bridge *bridge;
	struct resource *cfg_res;
	int ret;

	/* pp is &pci->pp; the lock guards MSI mask/bitmap state. */
	raw_spin_lock_init(&pci->pp.lock);

	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (cfg_res) {
		pp->cfg0_size = resource_size(cfg_res);
		pp->cfg0_base = cfg_res->start;
	} else if (!pp->va_cfg0_base) {
		/* Not fatal yet: a type-0 DT window below may provide it. */
		dev_err(dev, "Missing *config* reg space\n");
	}

	bridge = devm_pci_alloc_host_bridge(dev, 0);
	if (!bridge)
		return -ENOMEM;

	pp->bridge = bridge;

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry(win, &bridge->windows) {
		switch (resource_type(win->res)) {
		case IORESOURCE_IO:
			pp->io_size = resource_size(win->res);
			pp->io_bus_addr = win->res->start - win->offset;
			pp->io_base = pci_pio_to_address(win->res->start);
			break;
		case 0:
			/* Legacy layout: config space described as a range. */
			dev_err(dev, "Missing *config* reg space\n");
			pp->cfg0_size = resource_size(win->res);
			pp->cfg0_base = win->res->start;
			if (!pci->dbi_base) {
				pci->dbi_base = devm_pci_remap_cfgspace(dev,
						pp->cfg0_base,
						pp->cfg0_size);
				if (!pci->dbi_base) {
					dev_err(dev, "Error with ioremap\n");
					return -ENOMEM;
				}
			}
			break;
		}
	}

	/* Map the config window unless the SoC driver already did. */
	if (!pp->va_cfg0_base) {
		pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
					pp->cfg0_base, pp->cfg0_size);
		if (!pp->va_cfg0_base) {
			dev_err(dev, "Error with ioremap in function\n");
			return -ENOMEM;
		}
	}

	ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
	if (ret)
		pci->num_viewport = 2;

	if (pci->link_gen < 1)
		pci->link_gen = of_pci_get_max_link_speed(np);

	if (pci_msi_enabled()) {
		/*
		 * If a specific SoC driver needs to change the
		 * default number of vectors, it needs to implement
		 * the set_num_vectors callback.
		 */
		if (!pp->ops->set_num_vectors) {
			pp->num_vectors = MSI_DEF_NUM_VECTORS;
		} else {
			pp->ops->set_num_vectors(pp);

			if (pp->num_vectors > MAX_MSI_IRQS ||
			    pp->num_vectors == 0) {
				dev_err(dev,
					"Invalid number of vectors\n");
				return -EINVAL;
			}
		}

		if (!pp->ops->msi_host_init) {
			pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;

			ret = dw_pcie_allocate_domains(pp);
			if (ret)
				return ret;

			if (pp->msi_irq)
				irq_set_chained_handler_and_data(pp->msi_irq,
							    dw_chained_msi_isr,
							    pp);

			/*
			 * Use the address of msi_msg as the MSI doorbell; the
			 * device never actually writes to it, so CPU syncs
			 * are skipped.
			 */
			pp->msi_data = dma_map_single_attrs(pci->dev, &pp->msi_msg,
						      sizeof(pp->msi_msg),
						      DMA_FROM_DEVICE,
						      DMA_ATTR_SKIP_CPU_SYNC);
			ret = dma_mapping_error(pci->dev, pp->msi_data);
			if (ret) {
				dev_err(pci->dev, "Failed to map MSI data\n");
				pp->msi_data = 0;
				goto err_free_msi;
			}
		} else {
			ret = pp->ops->msi_host_init(pp);
			if (ret < 0)
				return ret;
		}
	}

	/* Set default bus ops */
	bridge->ops = &dw_pcie_ops;
	bridge->child_ops = &dw_child_pcie_ops;

	if (pp->ops->host_init) {
		ret = pp->ops->host_init(pp);
		if (ret)
			goto err_free_msi;
	}

	bridge->sysdata = pp;

	ret = pci_host_probe(bridge);
	if (!ret)
		return 0;

err_free_msi:
	/* Only free MSI state this function itself set up. */
	if (pci_msi_enabled() && !pp->ops->msi_host_init)
		dw_pcie_free_msi(pp);
	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_host_init);
|
|
|
|
/*
 * Undo dw_pcie_host_init(): stop and remove the root bus, then free the
 * driver-owned MSI resources (skipped when the SoC provided its own MSI
 * setup via msi_host_init).
 */
void dw_pcie_host_deinit(struct pcie_port *pp)
{
	pci_stop_root_bus(pp->bridge->bus);
	pci_remove_root_bus(pp->bridge->bus);
	if (pci_msi_enabled() && !pp->ops->msi_host_init)
		dw_pcie_free_msi(pp);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
|
|
|
|
/*
 * .map_bus for child buses: point ATU index 0 at the target device's
 * config space (CFG0 for the root's immediate children, CFG1 beyond)
 * and return the virtual address of the requested register.
 * Returns NULL (aborting the access) when the link is down.
 */
static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
						unsigned int devfn, int where)
{
	int type;
	u32 busdev;
	struct pcie_port *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/*
	 * Checking whether the link is up here is a last line of defense
	 * against platforms that forward errors on the system bus as
	 * SError upon PCI configuration transactions issued when the link
	 * is down. This check is racy by definition and does not stop
	 * the system from triggering an SError if the link goes down
	 * after this check is performed.
	 */
	if (!dw_pcie_link_up(pci))
		return NULL;

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (pci_is_root_bus(bus->parent))
		type = PCIE_ATU_TYPE_CFG0;
	else
		type = PCIE_ATU_TYPE_CFG1;

	/* Retarget ATU 0 at this device's config space for the access. */
	dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev, pp->cfg0_size);

	return pp->va_cfg0_base + where;
}
|
|
|
|
/*
 * Config read for child buses. When the I/O window shares an ATU entry
 * with config accesses (too few viewports), restore the I/O translation
 * that map_bus just repurposed for this config transaction.
 */
static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 *val)
{
	int ret;
	struct pcie_port *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	ret = pci_generic_config_read(bus, devfn, where, size, val);

	if (!ret && (pci->iatu_unroll_enabled & DWC_IATU_IOCFG_SHARED))
		dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}
|
|
|
|
/*
 * Config write for child buses; mirrors dw_pcie_rd_other_conf(),
 * re-establishing the shared I/O ATU mapping after the access.
 */
static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	int ret;
	struct pcie_port *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	ret = pci_generic_config_write(bus, devfn, where, size, val);

	if (!ret && (pci->iatu_unroll_enabled & DWC_IATU_IOCFG_SHARED))
		dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}
|
|
|
|
/* Config accessors for everything below the root bus (goes via the ATU). */
static struct pci_ops dw_child_pcie_ops = {
	.map_bus = dw_pcie_other_conf_map_bus,
	.read = dw_pcie_rd_other_conf,
	.write = dw_pcie_wr_other_conf,
};
|
|
|
|
/*
 * .map_bus for the root bus: the RC's own config space is the DBI
 * region. Only device 0 exists on the root bus, so reject other slots.
 */
void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
{
	struct pcie_port *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (PCI_SLOT(devfn) > 0)
		return NULL;

	return pci->dbi_base + where;
}
EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);
|
|
|
|
/* Config accessors for the root bus itself (direct DBI access). */
static struct pci_ops dw_pcie_ops = {
	.map_bus = dw_pcie_own_conf_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};
|
|
|
|
/*
 * Program the root complex: core setup, MSI mask/enable registers,
 * RC BARs, interrupt pin, bus numbers, command register, outbound ATU
 * windows for the DT memory/IO ranges, bridge class, and speed-change
 * trigger. DBI read-only registers are write-enabled only for the
 * duration of this function.
 */
void dw_pcie_setup_rc(struct pcie_port *pp)
{
	u32 val, ctrl, num_ctrls;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/*
	 * Enable DBI read-only registers for writing/updating configuration.
	 * Write permission gets disabled towards the end of this function.
	 */
	dw_pcie_dbi_ro_wr_en(pci);

	dw_pcie_setup(pci);

	if (pci_msi_enabled() && !pp->ops->msi_host_init) {
		num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

		/* Initialize IRQ Status array */
		for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
			/* Start fully masked but with all vectors enabled. */
			pp->irq_mask[ctrl] = ~0;
			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					    pp->irq_mask[ctrl]);
			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					    ~0);
		}
	}

	/* Setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* Setup interrupt pins */
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);

	/* Setup bus numbers */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00ff0100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* Setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides its own child bus config accesses, it means
	 * the platform uses its own address translation component rather than
	 * ATU, so we should not program the ATU here.
	 */
	if (pp->bridge->child_ops == &dw_child_pcie_ops) {
		int atu_idx = 0;
		struct resource_entry *entry;

		/* Get last memory resource entry */
		resource_list_for_each_entry(entry, &pp->bridge->windows) {
			if (resource_type(entry->res) != IORESOURCE_MEM)
				continue;

			/* Index 0 is reserved for config accesses. */
			if (pci->num_viewport <= ++atu_idx)
				break;

			dw_pcie_prog_outbound_atu(pci, atu_idx,
						  PCIE_ATU_TYPE_MEM, entry->res->start,
						  entry->res->start - entry->offset,
						  resource_size(entry->res));
		}

		if (pp->io_size) {
			if (pci->num_viewport > ++atu_idx)
				dw_pcie_prog_outbound_atu(pci, atu_idx,
							  PCIE_ATU_TYPE_IO, pp->io_base,
							  pp->io_bus_addr, pp->io_size);
			else
				/* No spare viewport: share ATU 0 with config. */
				pci->iatu_unroll_enabled |= DWC_IATU_IOCFG_SHARED;
		}

		if (pci->num_viewport <= atu_idx)
			dev_warn(pci->dev, "Resources exceed number of ATU entries (%d)",
				 pci->num_viewport);
	}

	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);

	/* Program correct class for RC */
	dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);

	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

	dw_pcie_dbi_ro_wr_dis(pci);
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);
|