
commit 5ce97f4ec5e0f8726a5dda1710727b1ee9badcac upstream.

The AMD IOMMU logs I/O page faults and such to a ring buffer in
system memory, and this ring buffer can overflow. The AMD IOMMU
spec has the following to say about the interrupt status bit that
signals this overflow condition:

	EventOverflow: Event log overflow. RW1C. Reset 0b. 1 = IOMMU
	event log overflow has occurred. This bit is set when a new
	event is to be written to the event log and there is no usable
	entry in the event log, causing the new event information to
	be discarded. An interrupt is generated when EventOverflow = 1b
	and MMIO Offset 0018h[EventIntEn] = 1b. No new event log
	entries are written while this bit is set.

	Software Note: To resume logging, clear EventOverflow (W1C),
	and write a 1 to MMIO Offset 0018h[EventLogEn].

The AMD IOMMU driver doesn't currently implement this recovery
sequence, meaning that if a ring buffer overflow occurs, logging
of EVT/PPR/GA events will cease entirely.

This patch implements the spec-mandated reset sequence, with the
minor tweak that the hardware seems to want a 0 written to MMIO
Offset 0018h[EventLogEn] first, before a 1 is written into this
field, or the IOMMU won't actually resume logging events.

Signed-off-by: Lennert Buytenhek <buytenh@arista.com>
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/YVrSXEdW2rzEfOvk@wantstofly.org
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
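For reference, below is a minimal sketch of the recovery sequence the
commit message describes. It is illustrative, not the driver's actual
code: the function name, the bare mmio_base parameter, and the raw
writel()/readq()/writeq() accesses are assumptions (the real driver
wraps register access in its own helpers that operate on a
struct amd_iommu). The offsets and bit positions follow the AMD IOMMU
spec: the control register at MMIO offset 0018h with EventLogEn at
bit 2, and the status register at MMIO offset 2020h with EventOverflow
at bit 0.

#include <linux/bits.h>
#include <linux/io.h>

#define MMIO_CONTROL_OFFSET	0x0018
#define MMIO_STATUS_OFFSET	0x2020
#define CONTROL_EVT_LOG_EN	BIT_ULL(2)	/* EventLogEn */
#define STATUS_EVT_OVERFLOW	BIT(0)		/* EventOverflow, RW1C */

static void restart_event_logging(void __iomem *mmio_base)
{
	u64 ctrl;

	/* EventOverflow is write-1-to-clear; ack it in the status register. */
	writel(STATUS_EVT_OVERFLOW, mmio_base + MMIO_STATUS_OFFSET);

	/*
	 * The spec only asks for a 1 to be written to EventLogEn, but
	 * the hardware apparently needs to see a 0 first or it won't
	 * resume logging, hence the disable/enable pair.
	 */
	ctrl = readq(mmio_base + MMIO_CONTROL_OFFSET);
	writeq(ctrl & ~CONTROL_EVT_LOG_EN, mmio_base + MMIO_CONTROL_OFFSET);
	writeq(ctrl | CONTROL_EVT_LOG_EN, mmio_base + MMIO_CONTROL_OFFSET);
}

The interrupt path would call such a helper when it observes the
overflow bit set in the status register; in this patch that entry
point is amd_iommu_restart_event_logging(), declared in the header
below.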
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2009-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#ifndef AMD_IOMMU_H
#define AMD_IOMMU_H

#include <linux/iommu.h>

#include "amd_iommu_types.h"

extern int amd_iommu_get_num_iommus(void);
extern int amd_iommu_init_dma_ops(void);
extern int amd_iommu_init_passthrough(void);
extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
extern void amd_iommu_apply_erratum_63(u16 devid);
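/* Resume event logging after an EventOverflow (added by this patch). */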
extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
extern int amd_iommu_init_devices(void);
extern void amd_iommu_uninit_devices(void);
extern void amd_iommu_init_notifier(void);
extern int amd_iommu_init_api(void);

#ifdef CONFIG_AMD_IOMMU_DEBUGFS
void amd_iommu_debugfs_setup(struct amd_iommu *iommu);
#else
static inline void amd_iommu_debugfs_setup(struct amd_iommu *iommu) {}
#endif

/* Needed for interrupt remapping */
extern int amd_iommu_prepare(void);
extern int amd_iommu_enable(void);
extern void amd_iommu_disable(void);
extern int amd_iommu_reenable(int);
extern int amd_iommu_enable_faulting(void);
extern int amd_iommu_guest_ir;

/* IOMMUv2 specific functions */
struct iommu_domain;

extern bool amd_iommu_v2_supported(void);
extern struct amd_iommu *get_amd_iommu(unsigned int idx);
extern u8 amd_iommu_pc_get_max_banks(unsigned int idx);
extern bool amd_iommu_pc_supported(void);
extern u8 amd_iommu_pc_get_max_counters(unsigned int idx);
extern int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
				u8 fxn, u64 *value);
extern int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
				u8 fxn, u64 *value);

extern int amd_iommu_register_ppr_notifier(struct notifier_block *nb);
extern int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb);
extern void amd_iommu_domain_direct_map(struct iommu_domain *dom);
extern int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids);
extern int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
				u64 address);
extern int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid);
extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
				     unsigned long cr3);
extern int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid);
extern struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev);

#ifdef CONFIG_IRQ_REMAP
extern int amd_iommu_create_irq_domain(struct amd_iommu *iommu);
#else
static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
{
	return 0;
}
#endif

#define PPR_SUCCESS			0x0
#define PPR_INVALID			0x1
#define PPR_FAILURE			0xf

extern int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
				  int status, int tag);

static inline bool is_rd890_iommu(struct pci_dev *pdev)
{
	return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
	       (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
}

static inline bool iommu_feature(struct amd_iommu *iommu, u64 mask)
{
	return !!(iommu->features & mask);
}

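/*
 * With SME active, physical addresses programmed into IOMMU structures
 * must carry the memory encryption bit; these helpers set and strip it
 * when converting between virtual and physical addresses.
 */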
static inline u64 iommu_virt_to_phys(void *vaddr)
{
	return (u64)__sme_set(virt_to_phys(vaddr));
}

static inline void *iommu_phys_to_virt(unsigned long paddr)
{
	return phys_to_virt(__sme_clr(paddr));
}

extern bool translation_pre_enabled(struct amd_iommu *iommu);
extern bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
					 struct device *dev);
extern int __init add_special_device(u8 type, u8 id, u16 *devid,
				     bool cmd_line);

#ifdef CONFIG_DMI
void amd_iommu_apply_ivrs_quirks(void);
#else
static inline void amd_iommu_apply_ivrs_quirks(void) { }
#endif

#endif