Snap for 9715369 from 935aa170ab to android12-5.10-keystone-qcom-release

Change-Id: Ib1fff90f06a297246afc8daab6a8e2141f494338
This commit is contained in:
Android Build Coastguard Worker
2023-03-09 11:00:55 +00:00
1207 changed files with 15026 additions and 8065 deletions

View File

@@ -142,7 +142,7 @@ Description:
Raw capacitance measurement from channel Y. Units after
application of scale and offset are nanofarads.
What: /sys/.../iio:deviceX/in_capacitanceY-in_capacitanceZ_raw
What: /sys/.../iio:deviceX/in_capacitanceY-capacitanceZ_raw
KernelVersion: 3.2
Contact: linux-iio@vger.kernel.org
Description:

View File

@@ -528,3 +528,8 @@ Contact: "Ping Xiong" <xiongping1@xiaomi.com>
Description: When DATA SEPARATION is on, it controls the age threshold to indicate
the data blocks as warm. By default it was initialized as 2621440 blocks
(equals to 10GB).
What: /sys/fs/f2fs/<disk>/last_age_weight
Date: January 2023
Contact: "Ping Xiong" <xiongping1@xiaomi.com>
Description: When DATA SEPARATION is on, it controls the weight of last data block age.

View File

@@ -76,10 +76,14 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A57 | #1319537 | ARM64_ERRATUM_1319367 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A57 | #1742098 | ARM64_ERRATUM_1742098 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A72 | #853709 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A72 | #1319367 | ARM64_ERRATUM_1319367 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A72 | #1655431 | ARM64_ERRATUM_1742098 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A76 | #1188873,1418040| ARM64_ERRATUM_1418040 |

View File

@@ -300,6 +300,11 @@ inlinecrypt When possible, encrypt/decrypt the contents of encrypted
Documentation/block/inline-encryption.rst.
atgc Enable age-threshold garbage collection, it provides high
effectiveness and efficiency on background GC.
memory=%s Control memory mode. This supports "normal" and "low" modes.
"low" mode is introduced to support low memory devices.
Because of the nature of low memory devices, in this mode, f2fs
will try to save memory sometimes by sacrificing performance.
"normal" mode is the default mode and same as before.
age_extent_cache Enable an age extent cache based on rb-tree. It records
data block update frequency of the extent per inode, in
order to provide better temperature hints for data block

View File

@@ -51,7 +51,7 @@ the Technical Advisory Board (TAB) or other maintainers if you're
uncertain how to handle situations that come up. It will not be
considered a violation report unless you want it to be. If you are
uncertain about approaching the TAB or any other maintainers, please
reach out to our conflict mediator, Joanna Lee <joanna.lee@gesmer.com>.
reach out to our conflict mediator, Joanna Lee <jlee@linuxfoundation.org>.
In the end, "be kind to each other" is really what the end goal is for
everybody. We know everyone is human and we all fail at times, but the

View File

@@ -39,7 +39,7 @@ Documentation written by Tom Zanussi
will use the event's kernel stacktrace as the key. The keywords
'keys' or 'key' can be used to specify keys, and the keywords
'values', 'vals', or 'val' can be used to specify values. Compound
keys consisting of up to two fields can be specified by the 'keys'
keys consisting of up to three fields can be specified by the 'keys'
keyword. Hashing a compound key produces a unique entry in the
table for each unique combination of component keys, and can be
useful for providing more fine-grained summaries of event data.

View File

@@ -215,6 +215,7 @@ KVM_S390_VM_TOD_EXT).
:Parameters: address of a buffer in user space to store the data (u8) to
:Returns: -EFAULT if the given address is not accessible from kernel space;
-EINVAL if setting the TOD clock extension to != 0 is not supported
-EOPNOTSUPP for a PV guest (TOD managed by the ultravisor)
3.2. ATTRIBUTE: KVM_S390_VM_TOD_LOW
-----------------------------------
@@ -224,6 +225,7 @@ the POP (u64).
:Parameters: address of a buffer in user space to store the data (u64) to
:Returns: -EFAULT if the given address is not accessible from kernel space
-EOPNOTSUPP for a PV guest (TOD managed by the ultravisor)
3.3. ATTRIBUTE: KVM_S390_VM_TOD_EXT
-----------------------------------
@@ -237,6 +239,7 @@ it, it is stored as 0 and not allowed to be set to a value != 0.
(kvm_s390_vm_tod_clock) to
:Returns: -EFAULT if the given address is not accessible from kernel space;
-EINVAL if setting the TOD clock extension to != 0 is not supported
-EOPNOTSUPP for a PV guest (TOD managed by the ultravisor)
4. GROUP: KVM_S390_VM_CRYPTO
============================

View File

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 149
SUBLEVEL = 160
EXTRAVERSION =
NAME = Dare mighty things
@@ -480,6 +480,8 @@ LZ4 = lz4
XZ = xz
ZSTD = zstd
PAHOLE_FLAGS = $(shell PAHOLE=$(PAHOLE) $(srctree)/scripts/pahole-flags.sh)
CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \
-Wbitwise -Wno-return-void -Wno-unknown-attribute $(CF)
NOSTDINC_FLAGS :=
@@ -534,6 +536,7 @@ export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
export PAHOLE_FLAGS
# Files to ignore in find ... statements
@@ -837,12 +840,12 @@ endif
# Initialize all stack variables with a zero value.
ifdef CONFIG_INIT_STACK_ALL_ZERO
# Future support for zero initialization is still being debated, see
# https://bugs.llvm.org/show_bug.cgi?id=45497. These flags are subject to being
# renamed or dropped.
KBUILD_CFLAGS += -ftrivial-auto-var-init=zero
ifdef CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_ENABLER
# https://github.com/llvm/llvm-project/issues/44842
KBUILD_CFLAGS += -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang
endif
endif
DEBUG_CFLAGS :=
@@ -860,7 +863,9 @@ else
DEBUG_CFLAGS += -g
endif
ifneq ($(LLVM_IAS),1)
ifeq ($(LLVM_IAS),1)
KBUILD_AFLAGS += -g
else
KBUILD_AFLAGS += -Wa,-gdwarf-2
endif

File diff suppressed because it is too large Load Diff

View File

@@ -574,6 +574,7 @@
_snd_ctl_add_follower
_snd_pcm_stream_lock_irqsave
_totalram_pages
_trace_android_vh_record_pcpu_rwsem_starttime
access_process_vm
ack_all_badblocks
activate_task

View File

@@ -0,0 +1,3 @@
[abi_symbol_list]
__traceiter_android_rvh_dma_buf_stats_teardown
__tracepoint_android_rvh_dma_buf_stats_teardown

View File

@@ -0,0 +1,248 @@
[abi_symbol_list]
add_to_page_cache_locked
__alloc_pages_nodemask
__arch_copy_from_user
__arch_copy_to_user
arm64_const_caps_ready
autoremove_wake_function
balance_dirty_pages_ratelimited
bcmp
bdev_read_only
__bforget
bio_add_page
bio_alloc_bioset
bio_associate_blkg
bio_put
__bitmap_weight
bit_waitqueue
blkdev_issue_discard
blkdev_issue_flush
blk_finish_plug
blk_start_plug
__blockdev_direct_IO
block_invalidatepage
block_is_partially_uptodate
__breadahead
__bread_gfp
__brelse
buffer_migrate_page
capable
capable_wrt_inode_uidgid
__cfi_slowpath
__check_object_size
clear_inode
clear_page_dirty_for_io
complete_and_exit
cpu_hwcap_keys
cpu_hwcaps
create_empty_buffers
current_umask
d_add
d_add_ci
d_instantiate
d_make_root
down_read
down_write
dput
drop_nlink
d_splice_alias
dump_stack
end_buffer_read_sync
end_page_writeback
errseq_set
failure_tracking
fiemap_fill_next_extent
fiemap_prep
filemap_fdatawait_range
filemap_fdatawrite
filemap_flush
__filemap_set_wb_err
filemap_write_and_wait_range
file_remove_privs
file_update_time
file_write_and_wait_range
finish_wait
flush_dcache_page
freezing_slow_path
fs_bio_set
generic_error_remove_page
generic_file_direct_write
generic_file_llseek
generic_file_mmap
generic_file_open
generic_file_read_iter
generic_file_splice_read
generic_fillattr
generic_perform_write
generic_read_dir
generic_write_checks
__getblk_gfp
gic_nonsecure_priorities
grab_cache_page_write_begin
iget5_locked
igrab
ihold
ilookup5
in_group_p
__init_rwsem
init_wait_entry
__init_waitqueue_head
inode_dio_wait
inode_init_once
inode_newsize_ok
inode_set_flags
__insert_inode_hash
invalidate_bdev
invalidate_mapping_pages
io_schedule
iov_iter_advance
iov_iter_alignment
iov_iter_get_pages
iput
is_bad_inode
iter_file_splice_write
iunique
jiffies
jiffies_to_msecs
kasan_flag_enabled
kfree
kill_block_super
__kmalloc
kmalloc_caches
kmem_cache_alloc
kmem_cache_alloc_trace
kmem_cache_create
kmem_cache_create_usercopy
kmem_cache_destroy
kmem_cache_free
krealloc
kthread_create_on_node
kthread_should_stop
kthread_stop
ktime_get_coarse_real_ts64
kvfree
__list_add_valid
__list_del_entry_valid
ll_rw_block
load_nls
load_nls_default
__lock_buffer
__lock_page
lru_cache_add
make_bad_inode
mark_buffer_dirty
mark_buffer_write_io_error
__mark_inode_dirty
mark_page_accessed
memcpy
memmove
memset
mktime64
mnt_drop_write_file
mnt_want_write_file
module_layout
mount_bdev
mpage_readahead
mpage_readpage
__msecs_to_jiffies
__mutex_init
mutex_lock
mutex_trylock
mutex_unlock
new_inode
notify_change
pagecache_get_page
page_cache_next_miss
page_cache_prev_miss
__page_pinner_migration_failed
pagevec_lookup_range_tag
__pagevec_release
__percpu_down_read
preempt_schedule
preempt_schedule_notrace
prepare_to_wait
prepare_to_wait_event
printk
__put_page
put_pages_list
___ratelimit
_raw_read_lock
_raw_read_lock_irqsave
_raw_read_unlock
_raw_read_unlock_irqrestore
_raw_spin_lock
_raw_spin_lock_irqsave
_raw_spin_unlock
_raw_spin_unlock_irqrestore
_raw_write_lock
_raw_write_lock_irqsave
_raw_write_unlock
_raw_write_unlock_irqrestore
rcuwait_wake_up
readahead_gfp_mask
read_cache_page
redirty_page_for_writepage
__refrigerator
register_filesystem
__remove_inode_hash
sb_min_blocksize
sb_set_blocksize
schedule
schedule_timeout_interruptible
seq_printf
setattr_prepare
set_freezable
set_nlink
set_page_dirty
__set_page_dirty_buffers
__set_page_dirty_nobuffers
set_user_nice
simple_strtol
simple_strtoul
simple_strtoull
sprintf
__stack_chk_fail
__stack_chk_guard
strchr
strcmp
strlen
strncasecmp
strncmp
strsep
strstr
submit_bh
submit_bio
__sync_dirty_buffer
sync_dirty_buffer
sync_filesystem
sync_inode_metadata
system_freezing_cnt
sys_tz
tag_pages_for_writeback
__test_set_page_writeback
time64_to_tm
_trace_android_vh_record_pcpu_rwsem_starttime
truncate_inode_pages
truncate_inode_pages_final
truncate_setsize
try_to_writeback_inodes_sb
unload_nls
unlock_buffer
unlock_new_inode
unlock_page
unregister_filesystem
up_read
up_write
vfree
vfs_fsync_range
__vmalloc
vsnprintf
vzalloc
__wait_on_buffer
wait_on_page_bit
wake_bit_function
__wake_up
wake_up_process
__warn_printk
write_inode_now
xa_load

View File

@@ -80,7 +80,12 @@ init_rtc_epoch(void)
static int
alpha_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
mc146818_get_time(tm);
int ret = mc146818_get_time(tm);
if (ret < 0) {
dev_err_ratelimited(dev, "unable to read current time\n");
return ret;
}
/* Adjust for non-default epochs. It's easier to depend on the
generic __get_rtc_time and adjust the epoch here than create

View File

@@ -32,7 +32,7 @@ static inline void ioport_unmap(void __iomem *addr)
{
}
extern void iounmap(const void __iomem *addr);
extern void iounmap(const volatile void __iomem *addr);
/*
* io{read,write}{16,32}be() macros

View File

@@ -93,7 +93,7 @@ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
EXPORT_SYMBOL(ioremap_prot);
void iounmap(const void __iomem *addr)
void iounmap(const volatile void __iomem *addr)
{
/* weird double cast to handle phys_addr_t > 32 bits */
if (arc_uncached_addr_space((phys_addr_t)(u32)addr))

View File

@@ -1792,7 +1792,6 @@ config CMDLINE
choice
prompt "Kernel command line type" if CMDLINE != ""
default CMDLINE_FROM_BOOTLOADER
depends on ATAGS
config CMDLINE_FROM_BOOTLOADER
bool "Use bootloader kernel arguments if available"

View File

@@ -12,8 +12,7 @@
compatible = "phytec,am335x-pcm-953", "phytec,am335x-phycore-som", "ti,am33xx";
/* Power */
regulators {
vcc3v3: fixedregulator@1 {
vcc3v3: fixedregulator1 {
compatible = "regulator-fixed";
regulator-name = "vcc3v3";
regulator-min-microvolt = <3300000>;
@@ -21,14 +20,13 @@
regulator-boot-on;
};
vcc1v8: fixedregulator@2 {
vcc1v8: fixedregulator2 {
compatible = "regulator-fixed";
regulator-name = "vcc1v8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-boot-on;
};
};
/* User IO */
user_leds: user_leds {

View File

@@ -307,7 +307,7 @@
marvell,function = "spi0";
};
spi0cs1_pins: spi0cs1-pins {
spi0cs2_pins: spi0cs2-pins {
marvell,pins = "mpp26";
marvell,function = "spi0";
};
@@ -342,7 +342,7 @@
};
};
/* MISO, MOSI, SCLK and CS1 are routed to pin header CN11 */
/* MISO, MOSI, SCLK and CS2 are routed to pin header CN11 */
};
&uart0 {

View File

@@ -660,7 +660,7 @@
compatible = "atmel,at91rm9200-udc";
reg = <0xfffb0000 0x4000>;
interrupts = <11 IRQ_TYPE_LEVEL_HIGH 2>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 11>, <&pmc PMC_TYPE_SYSTEM 2>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 11>, <&pmc PMC_TYPE_SYSTEM 1>;
clock-names = "pclk", "hclk";
status = "disabled";
};

View File

@@ -39,6 +39,13 @@
};
usb1 {
pinctrl_usb1_vbus_gpio: usb1_vbus_gpio {
atmel,pins =
<AT91_PIOC 5 AT91_PERIPH_GPIO AT91_PINCTRL_DEGLITCH>; /* PC5 GPIO */
};
};
mmc0_slot1 {
pinctrl_board_mmc0_slot1: mmc0_slot1-board {
atmel,pins =
@@ -84,6 +91,8 @@
};
usb1: gadget@fffa4000 {
pinctrl-0 = <&pinctrl_usb1_vbus_gpio>;
pinctrl-names = "default";
atmel,vbus-gpio = <&pioC 5 GPIO_ACTIVE_HIGH>;
status = "okay";
};

View File

@@ -588,7 +588,7 @@
clocks = <&camera 1>;
clock-names = "extclk";
samsung,camclk-out = <1>;
gpios = <&gpm1 6 GPIO_ACTIVE_HIGH>;
gpios = <&gpm1 6 GPIO_ACTIVE_LOW>;
port {
is_s5k6a3_ep: endpoint {

View File

@@ -95,7 +95,7 @@
};
&ehci {
samsung,vbus-gpio = <&gpx3 5 1>;
samsung,vbus-gpio = <&gpx3 5 GPIO_ACTIVE_HIGH>;
status = "okay";
phys = <&exynos_usbphy 2>, <&exynos_usbphy 3>;
phy-names = "hsic0", "hsic1";

View File

@@ -84,6 +84,9 @@
ocram: sram@900000 {
compatible = "mmio-sram";
reg = <0x00900000 0x20000>;
ranges = <0 0x00900000 0x20000>;
#address-cells = <1>;
#size-cells = <1>;
clocks = <&clks IMX6QDL_CLK_OCRAM>;
};

View File

@@ -364,8 +364,8 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_wifi>;
interrupts-extended = <&gpio1 30 IRQ_TYPE_LEVEL_HIGH>;
ref-clock-frequency = "38400000";
tcxo-clock-frequency = "19200000";
ref-clock-frequency = <38400000>;
tcxo-clock-frequency = <19200000>;
};
};

View File

@@ -163,6 +163,9 @@
ocram: sram@900000 {
compatible = "mmio-sram";
reg = <0x00900000 0x40000>;
ranges = <0 0x00900000 0x40000>;
#address-cells = <1>;
#size-cells = <1>;
clocks = <&clks IMX6QDL_CLK_OCRAM>;
};

View File

@@ -31,7 +31,7 @@
user-pb {
label = "user_pb";
gpios = <&gsc_gpio 0 GPIO_ACTIVE_LOW>;
gpios = <&gsc_gpio 2 GPIO_ACTIVE_LOW>;
linux,code = <BTN_0>;
};

View File

@@ -28,7 +28,7 @@
user-pb {
label = "user_pb";
gpios = <&gsc_gpio 0 GPIO_ACTIVE_LOW>;
gpios = <&gsc_gpio 2 GPIO_ACTIVE_LOW>;
linux,code = <BTN_0>;
};

View File

@@ -9,12 +9,18 @@
ocram2: sram@940000 {
compatible = "mmio-sram";
reg = <0x00940000 0x20000>;
ranges = <0 0x00940000 0x20000>;
#address-cells = <1>;
#size-cells = <1>;
clocks = <&clks IMX6QDL_CLK_OCRAM>;
};
ocram3: sram@960000 {
compatible = "mmio-sram";
reg = <0x00960000 0x20000>;
ranges = <0 0x00960000 0x20000>;
#address-cells = <1>;
#size-cells = <1>;
clocks = <&clks IMX6QDL_CLK_OCRAM>;
};

View File

@@ -114,6 +114,9 @@
ocram: sram@900000 {
compatible = "mmio-sram";
reg = <0x00900000 0x20000>;
ranges = <0 0x00900000 0x20000>;
#address-cells = <1>;
#size-cells = <1>;
clocks = <&clks IMX6SL_CLK_OCRAM>;
};

View File

@@ -115,6 +115,9 @@
ocram: sram@900000 {
compatible = "mmio-sram";
reg = <0x00900000 0x20000>;
ranges = <0 0x00900000 0x20000>;
#address-cells = <1>;
#size-cells = <1>;
};
intc: interrupt-controller@a01000 {

View File

@@ -161,12 +161,18 @@
ocram_s: sram@8f8000 {
compatible = "mmio-sram";
reg = <0x008f8000 0x4000>;
ranges = <0 0x008f8000 0x4000>;
#address-cells = <1>;
#size-cells = <1>;
clocks = <&clks IMX6SX_CLK_OCRAM_S>;
};
ocram: sram@900000 {
compatible = "mmio-sram";
reg = <0x00900000 0x20000>;
ranges = <0 0x00900000 0x20000>;
#address-cells = <1>;
#size-cells = <1>;
clocks = <&clks IMX6SX_CLK_OCRAM>;
};

View File

@@ -199,12 +199,7 @@
interrupt-parent = <&gpio2>;
interrupts = <29 0>;
pendown-gpio = <&gpio2 29 GPIO_ACTIVE_HIGH>;
ti,x-min = /bits/ 16 <0>;
ti,x-max = /bits/ 16 <0>;
ti,y-min = /bits/ 16 <0>;
ti,y-max = /bits/ 16 <0>;
ti,pressure-max = /bits/ 16 <0>;
ti,x-plate-ohms = /bits/ 16 <400>;
touchscreen-max-pressure = <255>;
wakeup-source;
};
};

View File

@@ -10,6 +10,11 @@
ocp@f1000000 {
pinctrl: pin-controller@10000 {
/* Non-default UART pins */
pmx_uart0: pmx-uart0 {
marvell,pins = "mpp4", "mpp5";
};
pmx_power_hdd: pmx-power-hdd {
marvell,pins = "mpp10";
marvell,function = "gpo";
@@ -213,22 +218,11 @@
&mdio {
status = "okay";
ethphy0: ethernet-phy@0 {
reg = <0>;
};
ethphy1: ethernet-phy@8 {
reg = <8>;
};
};
&eth0 {
status = "okay";
ethernet0-port@0 {
phy-handle = <&ethphy0>;
};
};
&eth1 {
status = "okay";
ethernet1-port@0 {

View File

@@ -31,7 +31,7 @@
&i2c1 {
status = "okay";
hym8563: hym8563@51 {
hym8563: rtc@51 {
compatible = "haoyu,hym8563";
reg = <0x51>;
#clock-cells = <0>;

View File

@@ -67,7 +67,7 @@
#sound-dai-cells = <0>;
};
ir_recv: gpio-ir-receiver {
ir_recv: ir-receiver {
compatible = "gpio-ir-receiver";
gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>;
pinctrl-names = "default";

View File

@@ -402,7 +402,7 @@
rockchip,pins = <2 RK_PD3 1 &pcfg_pull_none>;
};
lcdc1_rgb24: ldcd1-rgb24 {
lcdc1_rgb24: lcdc1-rgb24 {
rockchip,pins = <2 RK_PA0 1 &pcfg_pull_none>,
<2 RK_PA1 1 &pcfg_pull_none>,
<2 RK_PA2 1 &pcfg_pull_none>,
@@ -630,7 +630,6 @@
&global_timer {
interrupts = <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
status = "disabled";
};
&local_timer {

View File

@@ -54,7 +54,7 @@
vin-supply = <&vcc_sys>;
};
hym8563@51 {
rtc@51 {
compatible = "haoyu,hym8563";
reg = <0x51>;

View File

@@ -233,7 +233,7 @@
vin-supply = <&vcc_sys>;
};
hym8563: hym8563@51 {
hym8563: rtc@51 {
compatible = "haoyu,hym8563";
reg = <0x51>;
#clock-cells = <0>;

View File

@@ -157,7 +157,7 @@
vin-supply = <&vcc_sys>;
};
hym8563: hym8563@51 {
hym8563: rtc@51 {
compatible = "haoyu,hym8563";
reg = <0x51>;
#clock-cells = <0>;

View File

@@ -165,7 +165,7 @@
};
&i2c0 {
hym8563: hym8563@51 {
hym8563: rtc@51 {
compatible = "haoyu,hym8563";
reg = <0x51>;
#clock-cells = <0>;

View File

@@ -111,6 +111,13 @@
reg = <0x1013c200 0x20>;
interrupts = <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_EDGE_RISING)>;
clocks = <&cru CORE_PERI>;
status = "disabled";
/* The clock source and the sched_clock provided by the arm_global_timer
* on Rockchip rk3066a/rk3188 are quite unstable because their rates
* depend on the CPU frequency.
* Keep the arm_global_timer disabled in order to have the
* DW_APB_TIMER (rk3066a) or ROCKCHIP_TIMER (rk3188) selected by default.
*/
};
local_timer: local-timer@1013c600 {

View File

@@ -17,7 +17,7 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_arch_fetch_caller_regs(regs, __ip) { \
(regs)->ARM_pc = (__ip); \
(regs)->ARM_fp = (unsigned long) __builtin_frame_address(0); \
frame_pointer((regs)) = (unsigned long) __builtin_frame_address(0); \
(regs)->ARM_sp = current_stack_pointer; \
(regs)->ARM_cpsr = SVC_MODE; \
}

View File

@@ -44,12 +44,6 @@
typedef pte_t *pte_addr_t;
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
#define ZERO_PAGE(vaddr) (virt_to_page(0))
/*
* Mark the prot value as uncacheable and unbufferable.
*/

View File

@@ -10,6 +10,15 @@
#include <linux/const.h>
#include <asm/proc-fns.h>
#ifndef __ASSEMBLY__
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr) (empty_zero_page)
#endif
#ifndef CONFIG_MMU
#include <asm-generic/pgtable-nopud.h>
@@ -156,13 +165,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
#define __S111 __PAGE_SHARED_EXEC
#ifndef __ASSEMBLY__
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr) (empty_zero_page)
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

View File

@@ -387,8 +387,10 @@ static void __init mxs_machine_init(void)
root = of_find_node_by_path("/");
ret = of_property_read_string(root, "model", &soc_dev_attr->machine);
if (ret)
if (ret) {
kfree(soc_dev_attr);
return;
}
soc_dev_attr->family = "Freescale MXS Family";
soc_dev_attr->soc_id = mxs_get_soc_id();

View File

@@ -342,7 +342,7 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
addr = start + i * PMD_SIZE;
domain = get_domain_name(pmd);
if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd))
note_page(st, addr, 3, pmd_val(*pmd), domain);
note_page(st, addr, 4, pmd_val(*pmd), domain);
else
walk_pte(st, pmd, addr, domain);

View File

@@ -300,7 +300,11 @@ static struct mem_type mem_types[] __ro_after_init = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_XN | L_PTE_RDONLY,
.prot_l1 = PMD_TYPE_TABLE,
#ifdef CONFIG_ARM_LPAE
.prot_sect = PMD_TYPE_SECT | L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
.prot_sect = PMD_TYPE_SECT,
#endif
.domain = DOMAIN_KERNEL,
},
[MT_ROM] = {

View File

@@ -26,6 +26,13 @@
unsigned long vectors_base;
/*
* empty_zero_page is a special page that is used for
* zero-initialized data and COW.
*/
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);
#ifdef CONFIG_ARM_MPU
struct mpu_rgn_info mpu_rgn_info;
#endif
@@ -148,9 +155,21 @@ void __init adjust_lowmem_bounds(void)
*/
void __init paging_init(const struct machine_desc *mdesc)
{
void *zero_page;
early_trap_init((void *)vectors_base);
mpu_setup();
/* allocate the zero page. */
zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!zero_page)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
__func__, PAGE_SIZE, PAGE_SIZE);
bootmem_init();
empty_zero_page = virt_to_page(zero_page);
flush_dcache_page(empty_zero_page);
}
/*

View File

@@ -493,6 +493,22 @@ config ARM64_ERRATUM_834220
If unsure, say Y.
config ARM64_ERRATUM_1742098
bool "Cortex-A57/A72: 1742098: ELR recorded incorrectly on interrupt taken between cryptographic instructions in a sequence"
depends on COMPAT
default y
help
This option removes the AES hwcap for aarch32 user-space to
workaround erratum 1742098 on Cortex-A57 and Cortex-A72.
Affected parts may corrupt the AES state if an interrupt is
taken between a pair of AES instructions. These instructions
are only present if the cryptography extensions are present.
All software should have a fallback implementation for CPUs
that don't implement the cryptography extensions.
If unsure, say Y.
config ARM64_ERRATUM_845719
bool "Cortex-A53: 845719: a load might read incorrect data"
depends on COMPAT
@@ -713,6 +729,29 @@ config ARM64_ERRATUM_2067961
If unsure, say Y.
config ARM64_ERRATUM_2454944
bool "Cortex-A510: 2454944: Unmodified cache line might be written back to memory"
select ARCH_HAS_TEARDOWN_DMA_OPS
select RODATA_FULL_DEFAULT_ENABLED
help
This option adds the workaround for ARM Cortex-A510 erratum 2454944.
Affected Cortex-A510 core might write unmodified cache lines back to
memory, which breaks the assumptions upon which software coherency
management for non-coherent DMA relies. If a cache line is
speculatively fetched while a non-coherent device is writing directly
to DRAM, and subsequently written back by natural eviction, data
written by the device in the intervening period can be lost.
The workaround is to enforce as far as reasonably possible that all
non-coherent DMA transfers are bounced and/or remapped to minimise
the chance that any Cacheable alias exists through which speculative
cache fills could occur. To further improve effectiveness of
the workaround, lazy TLB flushing should be disabled.
This is quite involved and has unavoidable performance impact on
affected systems.
config ARM64_ERRATUM_2457168
bool "Cortex-A510: 2457168: workaround for AMEVCNTR01 incrementing incorrectly"
depends on ARM64_AMU_EXTN

View File

@@ -595,12 +595,26 @@
polling-delay = <1000>;
polling-delay-passive = <100>;
thermal-sensors = <&scpi_sensors0 0>;
trips {
pmic_crit0: trip0 {
temperature = <90000>;
hysteresis = <2000>;
type = "critical";
};
};
};
soc {
polling-delay = <1000>;
polling-delay-passive = <100>;
thermal-sensors = <&scpi_sensors0 3>;
trips {
soc_crit0: trip0 {
temperature = <80000>;
hysteresis = <2000>;
type = "critical";
};
};
};
big_cluster_thermal_zone: big-cluster {

View File

@@ -942,7 +942,7 @@
gpmi: nand-controller@33002000 {
compatible = "fsl,imx8mm-gpmi-nand", "fsl,imx7d-gpmi-nand";
#address-cells = <1>;
#size-cells = <1>;
#size-cells = <0>;
reg = <0x33002000 0x2000>, <0x33004000 0x4000>;
reg-names = "gpmi-nand", "bch";
interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;

View File

@@ -809,7 +809,7 @@
gpmi: nand-controller@33002000 {
compatible = "fsl,imx8mn-gpmi-nand", "fsl,imx7d-gpmi-nand";
#address-cells = <1>;
#size-cells = <1>;
#size-cells = <0>;
reg = <0x33002000 0x2000>, <0x33004000 0x4000>;
reg-names = "gpmi-nand", "bch";
interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;

View File

@@ -899,6 +899,7 @@
interrupts = <20 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_gauge>;
power-supplies = <&bq25895>;
maxim,over-heat-temp = <700>;
maxim,over-volt = <4500>;
maxim,rsns-microohm = <5000>;

View File

@@ -9,6 +9,10 @@
label = "proximity-wifi-lte";
};
&mpss_mem {
reg = <0x0 0x86000000 0x0 0x8c00000>;
};
&remoteproc_mpss {
firmware-name = "qcom/sc7180-trogdor/modem/mba.mbn",
"qcom/sc7180-trogdor/modem/qdsp6sw.mbn";

View File

@@ -39,7 +39,7 @@
};
mpss_mem: memory@86000000 {
reg = <0x0 0x86000000 0x0 0x8c00000>;
reg = <0x0 0x86000000 0x0 0x2000000>;
no-map;
};

View File

@@ -13,7 +13,7 @@
stdout-path = "serial2:1500000n8";
};
ir_rx {
ir-receiver {
compatible = "gpio-ir-receiver";
gpios = <&gpio0 RK_PC0 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";

View File

@@ -203,7 +203,7 @@
cap-sd-highspeed;
cd-gpios = <&gpio0 RK_PA7 GPIO_ACTIVE_LOW>;
disable-wp;
max-frequency = <150000000>;
max-frequency = <40000000>;
pinctrl-names = "default";
pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>;
vmmc-supply = <&vcc3v3_baseboard>;

View File

@@ -448,7 +448,6 @@
&i2s1 {
rockchip,playback-channels = <2>;
rockchip,capture-channels = <2>;
status = "okay";
};
&i2s2 {

View File

@@ -51,6 +51,7 @@ CONFIG_ARCH_SUNXI=y
CONFIG_ARCH_HISI=y
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_SPRD=y
CONFIG_ARM64_ERRATUM_2454944=y
CONFIG_SCHED_MC=y
CONFIG_NR_CPUS=32
CONFIG_PARAVIRT=y
@@ -648,7 +649,6 @@ CONFIG_HARDENED_USERCOPY=y
CONFIG_STATIC_USERMODEHELPER=y
CONFIG_STATIC_USERMODEHELPER_PATH=""
CONFIG_SECURITY_SELINUX=y
CONFIG_INIT_STACK_ALL_ZERO=y
CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
CONFIG_CRYPTO_CHACHA20POLY1305=y
CONFIG_CRYPTO_ADIANTUM=y
@@ -697,4 +697,5 @@ CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_TRACE_MMIO_ACCESS=y
CONFIG_TRACEFS_DISABLE_AUTOMOUNT=y
CONFIG_HIST_TRIGGERS=y
CONFIG_FUNCTION_ERROR_INJECTION=y
# CONFIG_RUNTIME_TESTING_MENU is not set

View File

@@ -6,6 +6,7 @@
#define __ASM_CACHE_H
#include <asm/cputype.h>
#include <asm/mte-def.h>
#define CTR_L1IP_SHIFT 14
#define CTR_L1IP_MASK 3
@@ -49,15 +50,21 @@
*/
#define ARCH_DMA_MINALIGN (128)
#ifdef CONFIG_KASAN_SW_TAGS
#define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT)
#elif defined(CONFIG_KASAN_HW_TAGS)
#define ARCH_SLAB_MINALIGN MTE_GRANULE_SIZE
#endif
#ifndef __ASSEMBLY__
#include <linux/bitops.h>
#include <linux/kasan-enabled.h>
#ifdef CONFIG_KASAN_SW_TAGS
#define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT)
#elif defined(CONFIG_KASAN_HW_TAGS)
static inline unsigned int arch_slab_minalign(void)
{
return kasan_hw_tags_enabled() ? MTE_GRANULE_SIZE :
__alignof__(unsigned long long);
}
#define arch_slab_minalign() arch_slab_minalign()
#endif
#define ICACHEF_ALIASING 0
#define ICACHEF_VPIPT 1

View File

@@ -72,8 +72,9 @@
#define ARM64_WORKAROUND_TSB_FLUSH_FAILURE 61
#define ARM64_SPECTRE_BHB 62
#define ARM64_WORKAROUND_2457168 63
#define ARM64_WORKAROUND_1742098 64
/* kabi: reserve 64 - 76 for future cpu capabilities */
/* kabi: reserve 65 - 76 for future cpu capabilities */
#define ARM64_NCAPS 76
#endif /* __ASM_CPUCAPS_H */

View File

@@ -662,7 +662,8 @@ static inline bool system_supports_4kb_granule(void)
val = cpuid_feature_extract_unsigned_field(mmfr0,
ID_AA64MMFR0_TGRAN4_SHIFT);
return val == ID_AA64MMFR0_TGRAN4_SUPPORTED;
return (val >= ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN) &&
(val <= ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX);
}
static inline bool system_supports_64kb_granule(void)
@@ -674,7 +675,8 @@ static inline bool system_supports_64kb_granule(void)
val = cpuid_feature_extract_unsigned_field(mmfr0,
ID_AA64MMFR0_TGRAN64_SHIFT);
return val == ID_AA64MMFR0_TGRAN64_SUPPORTED;
return (val >= ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN) &&
(val <= ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX);
}
static inline bool system_supports_16kb_granule(void)
@@ -686,7 +688,8 @@ static inline bool system_supports_16kb_granule(void)
val = cpuid_feature_extract_unsigned_field(mmfr0,
ID_AA64MMFR0_TGRAN16_SHIFT);
return val == ID_AA64MMFR0_TGRAN16_SUPPORTED;
return (val >= ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN) &&
(val <= ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX);
}
static inline bool system_supports_mixed_endian_el0(void)

View File

@@ -41,7 +41,7 @@
(((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)
#define MIDR_CPU_MODEL(imp, partnum) \
(((imp) << MIDR_IMPLEMENTOR_SHIFT) | \
((_AT(u32, imp) << MIDR_IMPLEMENTOR_SHIFT) | \
(0xf << MIDR_ARCHITECTURE_SHIFT) | \
((partnum) << MIDR_PARTNUM_SHIFT))
@@ -60,6 +60,7 @@
#define ARM_CPU_IMP_FUJITSU 0x46
#define ARM_CPU_IMP_HISI 0x48
#define ARM_CPU_IMP_APPLE 0x61
#define ARM_CPU_IMP_AMPERE 0xC0
#define ARM_CPU_PART_AEM_V8 0xD0F
#define ARM_CPU_PART_FOUNDATION 0xD00
@@ -112,6 +113,8 @@
#define APPLE_CPU_PART_M1_ICESTORM 0x022
#define APPLE_CPU_PART_M1_FIRESTORM 0x023
#define AMPERE_CPU_PART_AMPERE1 0xAC3
#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
@@ -151,6 +154,7 @@
#define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110)
#define MIDR_APPLE_M1_ICESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM)
#define MIDR_APPLE_M1_FIRESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM)
#define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1)
/* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
#define MIDR_FUJITSU_ERRATUM_010001 MIDR_FUJITSU_A64FX

View File

@@ -0,0 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2022-2023 ARM Ltd.
*/
#ifndef __ASM_DMA_MAPPING_NOALIAS_H
#define __ASM_DMA_MAPPING_NOALIAS_H
#ifdef CONFIG_ARM64_ERRATUM_2454944
void arm64_noalias_setup_dma_ops(struct device *dev);
#else
static inline void arm64_noalias_setup_dma_ops(struct device *dev)
{
}
#endif
#endif /* __ASM_DMA_MAPPING_NOALIAS_H */

View File

@@ -25,6 +25,7 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
({ \
efi_virtmap_load(); \
__efi_fpsimd_begin(); \
spin_lock(&efi_rt_lock); \
})
#define arch_efi_call_virt(p, f, args...) \
@@ -36,10 +37,12 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
#define arch_efi_call_virt_teardown() \
({ \
spin_unlock(&efi_rt_lock); \
__efi_fpsimd_end(); \
efi_virtmap_unload(); \
})
extern spinlock_t efi_rt_lock;
efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...);
#define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)

View File

@@ -22,15 +22,6 @@ struct exception_table_entry
#define ARCH_HAS_RELATIVE_EXTABLE
/*
 * Return true if the faulting PC lies within the dedicated BPF JIT
 * region of the kernel VA space (BPF_JIT_REGION_START/END). Always
 * false when the BPF JIT is compiled out: IS_ENABLED() folds to a
 * constant and the range check becomes dead code.
 */
static inline bool in_bpf_jit(struct pt_regs *regs)
{
	if (!IS_ENABLED(CONFIG_BPF_JIT))
		return false;

	return regs->pc >= BPF_JIT_REGION_START &&
	       regs->pc < BPF_JIT_REGION_END;
}
#ifdef CONFIG_BPF_JIT
int arm64_bpf_fixup_exception(const struct exception_table_entry *ex,
struct pt_regs *regs);

View File

@@ -44,11 +44,8 @@
#define _PAGE_OFFSET(va) (-(UL(1) << (va)))
#define PAGE_OFFSET (_PAGE_OFFSET(VA_BITS))
#define KIMAGE_VADDR (MODULES_END)
#define BPF_JIT_REGION_START (KASAN_SHADOW_END)
#define BPF_JIT_REGION_SIZE (SZ_128M)
#define BPF_JIT_REGION_END (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE)
#define MODULES_END (MODULES_VADDR + MODULES_VSIZE)
#define MODULES_VADDR (BPF_JIT_REGION_END)
#define MODULES_VADDR (KASAN_SHADOW_END)
#define MODULES_VSIZE (SZ_128M)
#define VMEMMAP_START (-VMEMMAP_SIZE - SZ_2M)
#define VMEMMAP_END (VMEMMAP_START + VMEMMAP_SIZE)

View File

@@ -6,6 +6,7 @@
#define __ASM_MTE_KASAN_H
#include <asm/compiler.h>
#include <asm/cputype.h>
#include <asm/mte-def.h>
#ifndef __ASSEMBLY__

View File

@@ -10,6 +10,7 @@
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/stack_pointer.h>
#include <asm/sysreg.h>
static inline void set_my_cpu_offset(unsigned long off)
{

View File

@@ -8,7 +8,7 @@
#ifndef __ASM_SYSCALL_WRAPPER_H
#define __ASM_SYSCALL_WRAPPER_H
struct pt_regs;
#include <asm/ptrace.h>
#define SC_ARM64_REGS_TO_ARGS(x, ...) \
__MAP(x,__SC_ARGS \

View File

@@ -853,14 +853,23 @@
#define ID_AA64MMFR0_PARANGE_SHIFT 0
#define ID_AA64MMFR0_TGRAN4_NI 0xf
#define ID_AA64MMFR0_TGRAN4_SUPPORTED 0x0
#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN 0x0
#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX 0x7
#define ID_AA64MMFR0_TGRAN64_NI 0xf
#define ID_AA64MMFR0_TGRAN64_SUPPORTED 0x0
#define ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN 0x0
#define ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX 0x7
#define ID_AA64MMFR0_TGRAN16_NI 0x0
#define ID_AA64MMFR0_TGRAN16_SUPPORTED 0x1
#define ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN 0x1
#define ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX 0xf
#define ID_AA64MMFR0_PARANGE_48 0x5
#define ID_AA64MMFR0_PARANGE_52 0x6
#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT 0x0
#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE 0x1
#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN 0x2
#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX 0x7
#ifdef CONFIG_ARM64_PA_BITS_52
#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_52
#else
@@ -1027,13 +1036,16 @@
#if defined(CONFIG_ARM64_4K_PAGES)
#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN4_SHIFT
#define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN4_SUPPORTED
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX
#elif defined(CONFIG_ARM64_16K_PAGES)
#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN16_SHIFT
#define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN16_SUPPORTED
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX
#elif defined(CONFIG_ARM64_64K_PAGES)
#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN64_SHIFT
#define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN64_SUPPORTED
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX
#endif
#define MVFR2_FPMISC_SHIFT 4

View File

@@ -358,6 +358,14 @@ static const struct midr_range tsb_flush_fail_cpus[] = {
};
#endif /* CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE */
#ifdef CONFIG_ARM64_ERRATUM_1742098
/*
 * CPUs whose AArch32 AES instructions are affected by erratum 1742098
 * (Cortex-A57) / 1655431 (Cortex-A72). Declared const for consistency
 * with the other midr_range lists in this file (e.g. tsb_flush_fail_cpus);
 * the capability's midr_range_list field takes a const pointer.
 */
static const struct midr_range broken_aarch32_aes[] = {
	MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};
#endif
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
{
@@ -566,6 +574,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
/* Cortex-A510 r0p0-r1p1 */
CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1)
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1742098
{
.desc = "ARM erratum 1742098",
.capability = ARM64_WORKAROUND_1742098,
CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
},
#endif
{
}

View File

@@ -79,6 +79,7 @@
#include <asm/cpu_ops.h>
#include <asm/fpsimd.h>
#include <asm/kvm_host.h>
#include <asm/hwcap.h>
#include <asm/mmu_context.h>
#include <asm/mte.h>
#include <asm/processor.h>
@@ -1897,6 +1898,14 @@ static bool is_kvm_protected_mode(const struct arm64_cpu_capabilities *entry, in
}
#endif /* CONFIG_KVM */
/*
 * Adjust the advertised AArch32 (compat) ELF hwcaps once the system
 * capabilities are finalized: if any CPU is affected by erratum 1742098,
 * clear the AES bit from compat_elf_hwcap2 so 32-bit userspace is not
 * steered towards the affected AES instructions.
 */
static void elf_hwcap_fixup(void)
{
#ifdef CONFIG_ARM64_ERRATUM_1742098
	if (cpus_have_const_cap(ARM64_WORKAROUND_1742098))
		compat_elf_hwcap2 &= ~COMPAT_HWCAP2_AES;
#endif /* ARM64_ERRATUM_1742098 */
}
/* Internal helper functions to match cpu capability type */
static bool
cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
@@ -2921,8 +2930,10 @@ void __init setup_cpu_features(void)
setup_system_capabilities();
setup_elf_hwcaps(arm64_elf_hwcaps);
if (system_supports_32bit_el0())
if (system_supports_32bit_el0()) {
setup_elf_hwcaps(compat_elf_hwcaps);
elf_hwcap_fixup();
}
if (system_uses_ttbr0_pan())
pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");

View File

@@ -4,6 +4,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
SYM_FUNC_START(__efi_rt_asm_wrapper)
stp x29, x30, [sp, #-32]!
@@ -16,6 +17,12 @@ SYM_FUNC_START(__efi_rt_asm_wrapper)
*/
stp x1, x18, [sp, #16]
ldr_l x16, efi_rt_stack_top
mov sp, x16
#ifdef CONFIG_SHADOW_CALL_STACK
str x18, [sp, #-16]!
#endif
/*
* We are lucky enough that no EFI runtime services take more than
* 5 arguments, so all are passed in registers rather than via the
@@ -29,6 +36,7 @@ SYM_FUNC_START(__efi_rt_asm_wrapper)
mov x4, x6
blr x8
mov sp, x29
ldp x1, x2, [sp, #16]
cmp x2, x18
ldp x29, x30, [sp], #32
@@ -42,6 +50,10 @@ SYM_FUNC_START(__efi_rt_asm_wrapper)
* called with preemption disabled and a separate shadow stack is used
* for interrupts.
*/
mov x18, x2
#ifdef CONFIG_SHADOW_CALL_STACK
ldr_l x18, efi_rt_stack_top
ldr x18, [x18, #-16]
#endif
b efi_handle_corrupted_x18 // tail call
SYM_FUNC_END(__efi_rt_asm_wrapper)

View File

@@ -12,6 +12,14 @@
#include <asm/efi.h>
static bool region_is_misaligned(const efi_memory_desc_t *md)
{
if (PAGE_SIZE == EFI_PAGE_SIZE)
return false;
return !PAGE_ALIGNED(md->phys_addr) ||
!PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT);
}
/*
* Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
* executable, everything else can be mapped with the XN bits
@@ -25,14 +33,22 @@ static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
if (type == EFI_MEMORY_MAPPED_IO)
return PROT_DEVICE_nGnRE;
if (WARN_ONCE(!PAGE_ALIGNED(md->phys_addr),
"UEFI Runtime regions are not aligned to 64 KB -- buggy firmware?"))
if (region_is_misaligned(md)) {
static bool __initdata code_is_misaligned;
/*
* If the region is not aligned to the page size of the OS, we
* can not use strict permissions, since that would also affect
* the mapping attributes of the adjacent regions.
* Regions that are not aligned to the OS page size cannot be
* mapped with strict permissions, as those might interfere
* with the permissions that are needed by the adjacent
* region's mapping. However, if we haven't encountered any
* misaligned runtime code regions so far, we can safely use
* non-executable permissions for non-code regions.
*/
return pgprot_val(PAGE_KERNEL_EXEC);
code_is_misaligned |= (type == EFI_RUNTIME_SERVICES_CODE);
return code_is_misaligned ? pgprot_val(PAGE_KERNEL_EXEC)
: pgprot_val(PAGE_KERNEL);
}
/* R-- */
if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) ==
@@ -62,19 +78,16 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
bool page_mappings_only = (md->type == EFI_RUNTIME_SERVICES_CODE ||
md->type == EFI_RUNTIME_SERVICES_DATA);
if (!PAGE_ALIGNED(md->phys_addr) ||
!PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT)) {
/*
* If the end address of this region is not aligned to page
* size, the mapping is rounded up, and may end up sharing a
* page frame with the next UEFI memory region. If we create
* a block entry now, we may need to split it again when mapping
* the next region, and support for that is going to be removed
* from the MMU routines. So avoid block mappings altogether in
* that case.
* If this region is not aligned to the page size used by the OS, the
* mapping will be rounded outwards, and may end up sharing a page
* frame with an adjacent runtime memory region. Given that the page
* table descriptor covering the shared page will be rewritten when the
* adjacent region gets mapped, we must avoid block mappings here so we
* don't have to worry about splitting them when that happens.
*/
if (region_is_misaligned(md))
page_mappings_only = true;
}
create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
md->num_pages << EFI_PAGE_SHIFT,
@@ -101,6 +114,9 @@ int __init efi_set_mapping_permissions(struct mm_struct *mm,
BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE &&
md->type != EFI_RUNTIME_SERVICES_DATA);
if (region_is_misaligned(md))
return 0;
/*
* Calling apply_to_page_range() is only safe on regions that are
* guaranteed to be mapped down to pages. Since we are only called
@@ -127,3 +143,30 @@ asmlinkage efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f)
pr_err_ratelimited(FW_BUG "register x18 corrupted by EFI %s\n", f);
return s;
}
DEFINE_SPINLOCK(efi_rt_lock);
asmlinkage u64 *efi_rt_stack_top __ro_after_init;
/* EFI requires 8 KiB of stack space for runtime services */
static_assert(THREAD_SIZE >= SZ_8K);
/*
 * Allocate a dedicated stack for EFI runtime service calls (see the
 * static_assert above: EFI needs at least 8 KiB). On allocation failure,
 * EFI runtime services are disabled entirely rather than left to run on
 * an unsuitable stack.
 */
static int __init arm64_efi_rt_init(void)
{
	void *p;

	if (!efi_enabled(EFI_RUNTIME_SERVICES))
		return 0;

	/*
	 * '&&l' is a GCC label-address expression, passed as the final
	 * __vmalloc_node() argument (presumably the 'caller' cookie used
	 * for allocation accounting -- TODO confirm against the prototype).
	 */
	p = __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, GFP_KERNEL,
			   NUMA_NO_NODE, &&l);
l:	if (!p) {
		pr_warn("Failed to allocate EFI runtime stack\n");
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return -ENOMEM;
	}
	efi_rt_stack_top = p + THREAD_SIZE;
	return 0;
}
core_initcall(arm64_efi_rt_init);

View File

@@ -216,11 +216,26 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long pc = rec->ip;
u32 old = 0, new;
new = aarch64_insn_gen_nop();
/*
* When using mcount, callsites in modules may have been initalized to
* call an arbitrary module PLT (which redirects to the _mcount stub)
* rather than the ftrace PLT we'll use at runtime (which redirects to
* the ftrace trampoline). We can ignore the old PLT when initializing
* the callsite.
*
* Note: 'mod' is only set at module load time.
*/
if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) &&
IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && mod) {
return aarch64_insn_patch_text_nosync((void *)pc, new);
}
if (!ftrace_find_callable_addr(rec, mod, &addr))
return -EINVAL;
old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
new = aarch64_insn_gen_nop();
return ftrace_modify_code(pc, old, new, true);
}

View File

@@ -671,8 +671,10 @@ SYM_FUNC_END(__secondary_too_slow)
SYM_FUNC_START(__enable_mmu)
mrs x2, ID_AA64MMFR0_EL1
ubfx x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4
cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
b.ne __no_granule_support
cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED_MIN
b.lt __no_granule_support
cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED_MAX
b.gt __no_granule_support
update_early_cpu_boot_status 0, x2, x3
adrp x2, idmap_pg_dir
phys_to_ttbr x1, x1

View File

@@ -868,6 +868,10 @@ u8 spectre_bhb_loop_affected(int scope)
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
{},
};
static const struct midr_range spectre_bhb_k11_list[] = {
MIDR_ALL_VERSIONS(MIDR_AMPERE1),
{},
};
static const struct midr_range spectre_bhb_k8_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
@@ -878,6 +882,8 @@ u8 spectre_bhb_loop_affected(int scope)
k = 32;
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
k = 24;
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
k = 11;
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
k = 8;

View File

@@ -22,46 +22,6 @@
#include <asm/cputype.h>
#include <asm/topology.h>
/*
 * Record the topology (thread/core/package IDs) for a CPU and refresh
 * its sibling masks. MPIDR is deliberately NOT used for the IDs -- see
 * the comment below; instead the core ID is the logical CPU number and
 * the package ID is the CPU's NUMA node.
 */
void store_cpu_topology(unsigned int cpuid)
{
	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
	u64 mpidr;

	/* Already populated (e.g. from firmware tables): just update masks */
	if (cpuid_topo->package_id != -1)
		goto topology_populated;

	mpidr = read_cpuid_mpidr();

	/* Uniprocessor systems can rely on default topology values */
	if (mpidr & MPIDR_UP_BITMASK)
		return;

	/*
	 * This would be the place to create cpu topology based on MPIDR.
	 *
	 * However, it cannot be trusted to depict the actual topology; some
	 * pieces of the architecture enforce an artificial cap on Aff0 values
	 * (e.g. GICv3's ICC_SGI1R_EL1 limits it to 15), leading to an
	 * artificial cycling of Aff1, Aff2 and Aff3 values. IOW, these end up
	 * having absolutely no relationship to the actual underlying system
	 * topology, and cannot be reasonably used as core / package ID.
	 *
	 * If the MT bit is set, Aff0 *could* be used to define a thread ID, but
	 * we still wouldn't be able to obtain a sane core ID. This means we
	 * need to entirely ignore MPIDR for any topology deduction.
	 */
	cpuid_topo->thread_id = -1;
	cpuid_topo->core_id = cpuid;
	cpuid_topo->package_id = cpu_to_node(cpuid);

	pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
		 cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
		 cpuid_topo->thread_id, mpidr);

topology_populated:
	update_siblings_masks(cpuid);
}
#ifdef CONFIG_ACPI
static bool __init acpi_cpu_is_threaded(int cpu)
{
@@ -158,7 +118,7 @@ static int validate_cpu_freq_invariance_counters(int cpu)
}
/* Convert maximum frequency from KHz to Hz and validate */
max_freq_hz = cpufreq_get_hw_max_freq(cpu) * 1000;
max_freq_hz = cpufreq_get_hw_max_freq(cpu) * 1000ULL;
if (unlikely(!max_freq_hz)) {
pr_debug("CPU%d: invalid maximum frequency.\n", cpu);
return -EINVAL;

View File

@@ -931,7 +931,7 @@ static struct break_hook bug_break_hook = {
static int reserved_fault_handler(struct pt_regs *regs, unsigned int esr)
{
pr_err("%s generated an invalid instruction at %pS!\n",
in_bpf_jit(regs) ? "BPF JIT" : "Kernel text patching",
"Kernel text patching",
(void *)instruction_pointer(regs));
/* We cannot handle this */

View File

@@ -344,16 +344,18 @@ int kvm_set_ipa_limit(void)
}
switch (cpuid_feature_extract_unsigned_field(mmfr0, tgran_2)) {
default:
case 1:
case ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE:
kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
return -EINVAL;
case 0:
case ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT:
kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
break;
case 2:
case ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX:
kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
break;
default:
kvm_err("Unsupported value for TGRAN_2, giving up\n");
return -EINVAL;
}
kvm_ipa_limit = id_aa64mmfr0_parange_to_phys_shift(parange);

View File

@@ -2096,7 +2096,7 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
memset(entry, 0, esz);
while (len > 0) {
while (true) {
int next_offset;
size_t byte_offset;
@@ -2109,6 +2109,9 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
return next_offset;
byte_offset = next_offset * esz;
if (byte_offset >= len)
break;
id += next_offset;
gpa += byte_offset;
len -= byte_offset;

View File

@@ -13,3 +13,5 @@ KASAN_SANITIZE_physaddr.o += n
obj-$(CONFIG_KASAN) += kasan_init.o
KASAN_SANITIZE_kasan_init.o := n
obj-$(CONFIG_ARM64_ERRATUM_2454944) += dma-mapping-noalias.o

View File

@@ -0,0 +1,576 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Support for uncached DMA mappings.
* Part of Cortex-A510 erratum 2454944 workaround.
*
* Copyright (C) 2022-2023 ARM Ltd.
* Author: Robin Murphy <robin.murphy@arm.com>
* Activating swiotlb + disabling lazy vunmap: Beata Michalska
*/
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/iommu.h>
#include <linux/slab.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
/*
* Bits [58:55] of the translation table descriptor are being reserved
* by the architecture for software use purposes. With the assumption that
* those should not be used on linear map addresses (which is not without
* any guarantee though), those bits are being leveraged to trace potential
* cacheable aliases. This is still far from being perfect, to say at least:
* ... categorically the worst, but oh well, needs must...
*/
#define REFCOUNT_INC BIT(55)
#define PTE_REFCOUNT(pte) (((pte) >> 55) & 0xf)
/*
 * Bump the alias refcount held in SW bits [58:55] of a linear-map PTE
 * and, on the 0 -> 1 transition, switch the page's memory type to
 * Normal-NC.
 *
 * The update uses a cmpxchg loop that insists on seeing PTE_VALID set:
 * while another CPU is mid way through the break-before-make sequence
 * below, the PTE is transiently invalid and the cmpxchg simply retries.
 */
static int pte_set_nc(pte_t *ptep, unsigned long addr, void *data)
{
	pteval_t old_pte, new_pte, pte;
	unsigned int refcount;

	pte = pte_val(READ_ONCE(*ptep));
	do {
		/* Avoid racing against the transient invalid state */
		old_pte = pte | PTE_VALID;
		new_pte = old_pte + REFCOUNT_INC;
		refcount = PTE_REFCOUNT(pte);
		/* Only 4 SW bits are available: at most 15 concurrent users */
		if (WARN_ON(refcount == 15))
			return -EINVAL;
		if (refcount == 0) {
			/* First user: retype to Normal-NC, VALID cleared */
			new_pte &= ~(PTE_ATTRINDX_MASK | PTE_VALID);
			new_pte |= PTE_ATTRINDX(MT_NORMAL_NC);
		}
		pte = cmpxchg_relaxed(&pte_val(*ptep), old_pte, new_pte);
	} while (pte != old_pte);

	/* Hand the pre-increment refcount back to set_nc() */
	*(unsigned int *)data = refcount;
	if (refcount)
		return 0;

	/*
	 * Break-before-make: the retyped PTE was installed with PTE_VALID
	 * clear; invalidate the stale TLB entry before making the PTE
	 * valid again with the new attributes.
	 */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	WRITE_ONCE(*ptep, __pte(new_pte | PTE_VALID));
	return 0;
}

/*
 * Drop the alias refcount in a linear-map PTE and, on the 1 -> 0
 * transition, restore the default memory type (Normal Tagged).
 * Mirrors pte_set_nc(), including the break-before-make sequence.
 */
static int pte_clear_nc(pte_t *ptep, unsigned long addr, void *data)
{
	pteval_t old_pte, new_pte, pte;
	unsigned int refcount;

	pte = pte_val(READ_ONCE(*ptep));
	do {
		/* Avoid racing against the transient invalid state */
		old_pte = pte | PTE_VALID;
		new_pte = old_pte - REFCOUNT_INC;
		refcount = PTE_REFCOUNT(pte);
		/* Underflow means an unbalanced set_nc()/clear_nc() pair */
		if (WARN_ON(refcount == 0))
			return -EINVAL;
		if (refcount == 1) {
			/* Last user: restore default attributes, VALID cleared */
			new_pte &= ~(PTE_ATTRINDX_MASK | PTE_VALID);
			new_pte |= PTE_ATTRINDX(MT_NORMAL_TAGGED);
		}
		pte = cmpxchg_relaxed(&pte_val(*ptep), old_pte, new_pte);
	} while (pte != old_pte);

	if (refcount > 1)
		return 0;

	/* Break-before-make, as in pte_set_nc() */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	WRITE_ONCE(*ptep, __pte(new_pte | PTE_VALID));
	return 0;
}
/*
 * Retype the linear-map range [addr, addr + size) to Normal-NC.
 * 'count' receives the pre-increment refcount written by the last
 * pte_set_nc() invocation; if it was zero while the page is still
 * mapped into userspace, a cacheable alias may remain -- all we can
 * do is warn.
 */
static int set_nc(void *addr, size_t size)
{
	unsigned int count;
	int ret = apply_to_existing_page_range(&init_mm, (unsigned long)addr,
					       size, pte_set_nc, &count);

	WARN_RATELIMIT(count == 0 && page_mapped(virt_to_page(addr)),
		       "changing linear mapping but cacheable aliases may still exist\n");
	dsb(ishst);
	isb();
	/* Push out any dirty cacheable data left from before the switch */
	__flush_dcache_area(addr, size);
	return ret;
}

/*
 * Restore the default (cacheable) linear-map attributes for
 * [addr, addr + size) once the last non-cacheable user is gone.
 */
static int clear_nc(void *addr, size_t size)
{
	int ret = apply_to_existing_page_range(&init_mm, (unsigned long)addr,
					       size, pte_clear_nc, NULL);

	dsb(ishst);
	isb();
	/* Discard cache lines speculatively allocated while remapping */
	__inval_dcache_area(addr, size);
	return ret;
}
/*
 * Make the physical range [phys, phys + size) safe for non-cacheable
 * DMA: optionally bounce it through SWIOTLB (forced whenever the range
 * is not page-aligned, since attributes can only change per page), then
 * retype its linear-map alias to Normal-NC.
 *
 * Returns the (possibly bounced) physical address, or DMA_MAPPING_ERROR.
 */
static phys_addr_t __arm64_noalias_map(struct device *dev, phys_addr_t phys,
				       size_t size, enum dma_data_direction dir,
				       unsigned long attrs, bool bounce)
{
	/* Sub-page buffers must be bounced so whole pages are owned */
	bounce = bounce || (phys | size) & ~PAGE_MASK;
	if (bounce) {
		phys = swiotlb_tbl_map_single(dev, phys, size, PAGE_ALIGN(size),
					      dir, attrs);
		if (phys == DMA_MAPPING_ERROR)
			return DMA_MAPPING_ERROR;
	}
	if (set_nc(phys_to_virt(phys & PAGE_MASK), PAGE_ALIGN(size)))
		goto out_unmap;
	return phys;

out_unmap:
	if (bounce)
		swiotlb_tbl_unmap_single(dev, phys, size, PAGE_ALIGN(size), dir,
					 attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return DMA_MAPPING_ERROR;
}

/*
 * Undo __arm64_noalias_map(): restore the cacheable linear-map alias
 * and, if the buffer was bounced, release the SWIOTLB slot (detected
 * via is_swiotlb_buffer() rather than a caller-supplied flag).
 */
static void __arm64_noalias_unmap(struct device *dev, phys_addr_t phys, size_t size,
				  enum dma_data_direction dir, unsigned long attrs)
{
	clear_nc(phys_to_virt(phys & PAGE_MASK), PAGE_ALIGN(size));
	if (is_swiotlb_buffer(phys))
		swiotlb_tbl_unmap_single(dev, phys, size, PAGE_ALIGN(size), dir, attrs);
}
/* Device-direction maintenance for a possibly-bounced buffer. */
static void __arm64_noalias_sync_for_device(struct device *dev, phys_addr_t phys,
					    size_t size, enum dma_data_direction dir)
{
	if (!is_swiotlb_buffer(phys)) {
		arch_sync_dma_for_device(phys, size, dir);
		return;
	}
	swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_DEVICE);
}

/* CPU-direction maintenance for a possibly-bounced buffer. */
static void __arm64_noalias_sync_for_cpu(struct device *dev, phys_addr_t phys,
					 size_t size, enum dma_data_direction dir)
{
	if (!is_swiotlb_buffer(phys)) {
		arch_sync_dma_for_cpu(phys, size, dir);
		return;
	}
	swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_CPU);
}
/*
 * Coherent allocation: grab pages from the direct allocator, then
 * retype their linear-map alias to Normal-NC. The returned CPU address
 * is the (now non-cacheable) linear-map address itself, so no separate
 * vmap alias is created.
 */
static void *arm64_noalias_alloc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp, unsigned long attrs)
{
	struct page *page;
	void *ret;

	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	size = PAGE_ALIGN(size);
	/*
	 * NOTE(review): __GFP_ZERO is stripped here -- presumably
	 * dma_direct_alloc_pages() zeroes internally; confirm.
	 */
	page = dma_direct_alloc_pages(dev, size, dma_addr, 0, gfp & ~__GFP_ZERO);
	if (!page)
		return NULL;

	ret = page_address(page);
	if (set_nc(ret, size)) {
		/* Retyping failed: give the pages back and report failure */
		dma_direct_free_pages(dev, size, page, *dma_addr, 0);
		return NULL;
	}
	return ret;
}
/* Inverse of arm64_noalias_alloc(): restore attributes, then free the pages. */
static void arm64_noalias_free(struct device *dev, size_t size, void *cpu_addr,
			       dma_addr_t dma_addr, unsigned long attrs)
{
	size_t full_size = PAGE_ALIGN(size);

	clear_nc(cpu_addr, full_size);
	dma_direct_free_pages(dev, full_size, virt_to_page(cpu_addr), dma_addr, 0);
}
/*
 * Streaming map of a single page range.
 *
 * A DMA_TO_DEVICE transfer that the device can address directly skips
 * the workaround entirely: only the CPU-side sync is performed and the
 * native DMA address is returned. Everything else goes through
 * __arm64_noalias_map(); buffers mapped into userspace are bounced
 * rather than retyped, since their userspace alias cannot be changed.
 */
static dma_addr_t arm64_noalias_map_page(struct device *dev, struct page *page,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool bounce = !dma_capable(dev, phys_to_dma(dev, phys), size, true);

	if (!bounce && dir == DMA_TO_DEVICE) {
		arch_sync_dma_for_device(phys, size, dir);
		return phys_to_dma(dev, phys);
	}

	bounce = bounce || page_mapped(page);
	phys = __arm64_noalias_map(dev, phys, size, dir, attrs, bounce);
	if (phys == DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	return phys_to_dma(dev, phys);
}
/*
 * Streaming unmap of a single mapping. DMA_TO_DEVICE mappings are
 * assumed to have taken the fast path in arm64_noalias_map_page(), so
 * there is nothing to undo.
 *
 * NOTE(review): a DMA_TO_DEVICE buffer that was bounced for addressing
 * reasons in map_page() also returns early here -- confirm its SWIOTLB
 * slot is not leaked.
 */
static void arm64_noalias_unmap_page(struct device *dev, dma_addr_t dma_addr,
				     size_t size, enum dma_data_direction dir,
				     unsigned long attrs)
{
	if (dir == DMA_TO_DEVICE)
		return;
	__arm64_noalias_unmap(dev, dma_to_phys(dev, dma_addr), size, dir, attrs);
}

/* Scatterlist unmap: per-entry __arm64_noalias_unmap(), same TO_DEVICE rule. */
static void arm64_noalias_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents,
				   enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	if (dir == DMA_TO_DEVICE)
		return;
	for_each_sg(sgl, sg, nents, i)
		__arm64_noalias_unmap(dev, dma_to_phys(dev, sg->dma_address),
				      sg->length, dir, attrs);
}
/*
 * Scatterlist map: each entry is mapped via arm64_noalias_map_page().
 * On failure, the entries mapped so far are unwound and 0 is returned,
 * as the DMA API requires.
 */
static int arm64_noalias_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
				enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = arm64_noalias_map_page(dev, sg_page(sg), sg->offset,
							 sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg->dma_length = sg->length;
	}
	return nents;

out_unmap:
	/* Unwind only the i entries that were successfully mapped */
	arm64_noalias_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return 0;
}
/* dma_map_ops sync callbacks: translate DMA address to phys and defer. */
static void arm64_noalias_sync_single_for_device(struct device *dev, dma_addr_t addr,
						 size_t size, enum dma_data_direction dir)
{
	__arm64_noalias_sync_for_device(dev, dma_to_phys(dev, addr), size, dir);
}

static void arm64_noalias_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					      size_t size, enum dma_data_direction dir)
{
	__arm64_noalias_sync_for_cpu(dev, dma_to_phys(dev, addr), size, dir);
}

/* Scatterlist variants: sync each entry independently. */
static void arm64_noalias_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
					     int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		arm64_noalias_sync_single_for_device(dev, sg->dma_address, sg->length, dir);
}

static void arm64_noalias_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
					  int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		arm64_noalias_sync_single_for_cpu(dev, sg->dma_address, sg->length, dir);
}
/*
 * dma_map_ops installed on non-coherent devices that sit behind no
 * IOMMU when the 2454944 workaround is active. Direct-mapping helpers
 * are reused for everything that does not involve alias retyping.
 */
static const struct dma_map_ops arm64_noalias_ops = {
	.alloc = arm64_noalias_alloc,
	.free = arm64_noalias_free,
	.alloc_pages = dma_common_alloc_pages,
	.free_pages = dma_common_free_pages,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
	.map_page = arm64_noalias_map_page,
	.unmap_page = arm64_noalias_unmap_page,
	.map_sg = arm64_noalias_map_sg,
	.unmap_sg = arm64_noalias_unmap_sg,
	.sync_single_for_cpu = arm64_noalias_sync_single_for_cpu,
	.sync_single_for_device = arm64_noalias_sync_single_for_device,
	.sync_sg_for_cpu = arm64_noalias_sync_sg_for_cpu,
	.sync_sg_for_device = arm64_noalias_sync_sg_for_device,
	.dma_supported = dma_direct_supported,
	.get_required_mask = dma_direct_get_required_mask,
	.max_mapping_size = swiotlb_max_mapping_size,
};
#ifdef CONFIG_IOMMU_DMA
static const struct dma_map_ops *iommu_dma_ops;
/*
 * IOMMU-backed coherent allocation: let the stock iommu-dma allocator
 * build the buffer, then retype the linear-map alias of every backing
 * page. Non-blocking and forced-contiguous requests fall back to
 * dma_common_alloc_pages().
 *
 * NOTE(review): the fallback path returns a linear-map address without
 * calling set_nc() -- presumably dma_common_alloc_pages() handles the
 * attributes itself; confirm.
 */
static void *arm64_iommu_alloc(struct device *dev, size_t size,
			       dma_addr_t *dma_addr, gfp_t gfp, unsigned long attrs)
{
	struct page **pages;
	void *ret;
	int i;

	size = PAGE_ALIGN(size);
	if (!gfpflags_allow_blocking(gfp) || (attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
		ret = dma_common_alloc_pages(dev, size, dma_addr, 0, gfp);
		return ret ? page_address(ret) : NULL;
	}

	ret = iommu_dma_ops->alloc(dev, size, dma_addr, gfp, attrs);
	if (ret) {
		pages = dma_common_find_pages(ret);
		for (i = 0; i < size / PAGE_SIZE; i++)
			if (set_nc(page_address(pages[i]), PAGE_SIZE))
				goto err;
	}
	return ret;

err:
	/* Roll back the pages retyped so far, then free the whole buffer */
	while (i--)
		clear_nc(page_address(pages[i]), PAGE_SIZE);
	iommu_dma_ops->free(dev, size, ret, *dma_addr, attrs);
	return NULL;
}
/*
 * Inverse of arm64_iommu_alloc(). Allocations that came from the
 * dma_common_alloc_pages() fallback have no vmap page array, which is
 * how the two cases are distinguished.
 */
static void arm64_iommu_free(struct device *dev, size_t size, void *cpu_addr,
			     dma_addr_t dma_addr, unsigned long attrs)
{
	struct page **pages = dma_common_find_pages(cpu_addr);
	int i;

	size = PAGE_ALIGN(size);
	if (!pages)
		return dma_common_free_pages(dev, size, virt_to_page(cpu_addr), dma_addr, 0);

	for (i = 0; i < size / PAGE_SIZE; i++)
		clear_nc(page_address(pages[i]), PAGE_SIZE);
	iommu_dma_ops->free(dev, size, cpu_addr, dma_addr, attrs);
}
/*
 * IOMMU streaming map of one page range: retype/bounce the physical
 * buffer first, then hand the (possibly new) physical page to the stock
 * iommu-dma map_page with CPU syncs suppressed, since the retyping has
 * already performed the necessary cache maintenance.
 */
static dma_addr_t arm64_iommu_map_page(struct device *dev, struct page *page,
				       unsigned long offset, size_t size,
				       enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t ret;

	/* TO_DEVICE needs no retyping; use the stock path unchanged */
	if (dir == DMA_TO_DEVICE)
		return iommu_dma_ops->map_page(dev, page, offset, size, dir, attrs);

	phys = __arm64_noalias_map(dev, phys, size, dir, attrs, page_mapped(page));
	if (phys == DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
	ret = iommu_dma_ops->map_page(dev, phys_to_page(phys), offset_in_page(phys),
				      size, dir, attrs);
	if (ret == DMA_MAPPING_ERROR)
		__arm64_noalias_unmap(dev, phys, size, dir, attrs);
	return ret;
}

/*
 * IOMMU streaming unmap: the IOVA must be translated back to phys
 * *before* the IOMMU mapping is torn down so the linear-map alias can
 * still be located and restored.
 */
static void arm64_iommu_unmap_page(struct device *dev, dma_addr_t addr, size_t size,
				   enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys;

	if (dir == DMA_TO_DEVICE)
		return iommu_dma_ops->unmap_page(dev, addr, size, dir, attrs);

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), addr);
	iommu_dma_ops->unmap_page(dev, addr, size, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	__arm64_noalias_unmap(dev, phys, size, dir, attrs);
}
/*
 * IOMMU scatterlist map: each entry is retyped up front, the sg
 * page/offset fields are temporarily rewritten to the (possibly
 * bounced) pages so the stock iommu-dma map_sg operates on those, and
 * the original page/offset values are restored afterwards so the sg
 * list still describes the caller's buffer.
 */
static int arm64_iommu_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
			      enum dma_data_direction dir, unsigned long attrs)
{
	int i, ret;
	struct scatterlist *sg;
	phys_addr_t *orig_phys;

	if (dir == DMA_TO_DEVICE)
		return iommu_dma_ops->map_sg(dev, sgl, nents, dir, attrs);

	orig_phys = kmalloc_array(nents, sizeof(*orig_phys), GFP_ATOMIC);
	if (!orig_phys)
		return 0;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t phys = sg_phys(sg);
		/*
		 * Note we do not have the page_mapped() check here, since
		 * bouncing plays complete havoc with dma-buf imports. Those
		 * may well be mapped in userspace, but we hope and pray that
		 * it's via dma_mmap_attrs() so any such mappings are safely
		 * non-cacheable. DO NOT allow a block device or other similar
		 * scatterlist user to get here (disable IOMMUs if necessary),
		 * since we can't mitigate for both conflicting use-cases.
		 */
		phys = __arm64_noalias_map(dev, phys, sg->length, dir, attrs, false);
		if (phys == DMA_MAPPING_ERROR)
			goto out_unmap;

		orig_phys[i] = sg_phys(sg);
		sg_assign_page(sg, phys_to_page(phys));
		sg->offset = offset_in_page(phys);
	}
	ret = iommu_dma_ops->map_sg(dev, sgl, nents, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	if (ret <= 0)
		goto out_unmap;

	/* Restore the caller-visible page/offset values */
	for_each_sg(sgl, sg, nents, i) {
		sg_assign_page(sg, phys_to_page(orig_phys[i]));
		sg->offset = offset_in_page(orig_phys[i]);
	}
	kfree(orig_phys);
	return ret;

out_unmap:
	/*
	 * NOTE(review): when the first loop fails at entry i, this unwind
	 * walks all nents entries, calling __arm64_noalias_unmap() on
	 * entries that were never mapped and reading orig_phys[] slots
	 * that were never written -- confirm this is intentional.
	 */
	for_each_sg(sgl, sg, nents, i) {
		__arm64_noalias_unmap(dev, sg_phys(sg), sg->length, dir, attrs);
		sg_assign_page(sg, phys_to_page(orig_phys[i]));
		sg->offset = offset_in_page(orig_phys[i]);
	}
	kfree(orig_phys);
	return 0;
}
/*
 * IOMMU scatterlist unmap. After mapping, dma_address/dma_length
 * describe merged IOVA segments while sg->length still holds each
 * entry's size, so the loop walks the IOVA space manually ('iova' /
 * 'tmp') to recover each entry's physical address before the IOMMU
 * mapping is destroyed.
 */
static void arm64_iommu_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents,
				 enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain;
	struct scatterlist *sg, *tmp;
	dma_addr_t iova;
	int i;

	if (dir == DMA_TO_DEVICE)
		return iommu_dma_ops->unmap_sg(dev, sgl, nents, dir, attrs);

	domain = iommu_get_dma_domain(dev);
	iova = sgl->dma_address;
	tmp = sgl;
	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t phys = iommu_iova_to_phys(domain, iova);

		__arm64_noalias_unmap(dev, phys, sg->length, dir, attrs);
		iova += sg->length;
		/* End of this merged IOVA segment: move on to the next one */
		if (iova == tmp->dma_address + tmp->dma_length && !sg_is_last(tmp)) {
			tmp = sg_next(tmp);
			iova = tmp->dma_address;
		}
	}
	iommu_dma_ops->unmap_sg(dev, sgl, nents, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
}
/* Sync one IOVA range for the device: resolve to phys, then defer. */
static void arm64_iommu_sync_single_for_device(struct device *dev, dma_addr_t addr,
					       size_t size, enum dma_data_direction dir)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);

	__arm64_noalias_sync_for_device(dev, iommu_iova_to_phys(domain, addr),
					size, dir);
}

/* Sync one IOVA range for the CPU: resolve to phys, then defer. */
static void arm64_iommu_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					    size_t size, enum dma_data_direction dir)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);

	__arm64_noalias_sync_for_cpu(dev, iommu_iova_to_phys(domain, addr),
				     size, dir);
}
/*
 * Scatterlist sync callbacks. Same manual IOVA walk as
 * arm64_iommu_unmap_sg(): dma_address/dma_length describe merged IOVA
 * segments, sg->length the per-entry size.
 */
static void arm64_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
					   int nents, enum dma_data_direction dir)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct scatterlist *sg, *tmp = sgl;
	dma_addr_t iova = sgl->dma_address;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t phys = iommu_iova_to_phys(domain, iova);

		__arm64_noalias_sync_for_device(dev, phys, sg->length, dir);
		iova += sg->length;
		/* End of this merged IOVA segment: move on to the next one */
		if (iova == tmp->dma_address + tmp->dma_length && !sg_is_last(tmp)) {
			tmp = sg_next(tmp);
			iova = tmp->dma_address;
		}
	}
}

static void arm64_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
					int nents, enum dma_data_direction dir)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct scatterlist *sg, *tmp = sgl;
	dma_addr_t iova = sgl->dma_address;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t phys = iommu_iova_to_phys(domain, iova);

		__arm64_noalias_sync_for_cpu(dev, phys, sg->length, dir);
		iova += sg->length;
		/* End of this merged IOVA segment: move on to the next one */
		if (iova == tmp->dma_address + tmp->dma_length && !sg_is_last(tmp)) {
			tmp = sg_next(tmp);
			iova = tmp->dma_address;
		}
	}
}
/*
 * dma_map_ops for devices behind an IOMMU. Deliberately not const: the
 * mmap/get_sgtable/resource/merge-boundary callbacks are copied over
 * from the stock iommu-dma ops at setup time.
 */
static struct dma_map_ops arm64_iommu_ops = {
	.alloc = arm64_iommu_alloc,
	.free = arm64_iommu_free,
	.alloc_pages = dma_common_alloc_pages,
	.free_pages = dma_common_free_pages,
	.map_page = arm64_iommu_map_page,
	.unmap_page = arm64_iommu_unmap_page,
	.map_sg = arm64_iommu_map_sg,
	.unmap_sg = arm64_iommu_unmap_sg,
	.sync_single_for_cpu = arm64_iommu_sync_single_for_cpu,
	.sync_single_for_device = arm64_iommu_sync_single_for_device,
	.sync_sg_for_cpu = arm64_iommu_sync_sg_for_cpu,
	.sync_sg_for_device = arm64_iommu_sync_sg_for_device,
};
#endif /* CONFIG_IOMMU_DMA */
/*
 * System-level preparation for the workaround: make sure SWIOTLB is
 * available for bouncing, and disable lazy vunmap so stale cacheable
 * vmalloc aliases are torn down immediately rather than batched.
 */
static inline void arm64_noalias_prepare(void)
{
	if (!is_swiotlb_active())
		swiotlb_late_init_with_default_size(swiotlb_size_or_default());
	if (lazy_vunmap_enable) {
		lazy_vunmap_enable = false;
		vm_unmap_aliases();
	}
}
/*
 * Install the no-alias DMA ops on a non-coherent device.
 *
 * Devices with no pre-existing dma_ops get the direct-map flavour;
 * devices that already have ops installed (the iommu-dma ops) get the
 * IOMMU flavour. The first IOMMU-backed device also captures the stock
 * ops pointer and inherits the callbacks this file does not interpose on.
 *
 * Fix: the previous code overwrote dev->dma_ops with &arm64_iommu_ops
 * *before* capturing it into iommu_dma_ops, so iommu_dma_ops ended up
 * pointing at arm64_iommu_ops itself -- turning the callback copies into
 * self-assignments and making every iommu_dma_ops-> call in this file
 * recurse into itself. Capture the original ops first.
 */
void arm64_noalias_setup_dma_ops(struct device *dev)
{
	if (dev_is_dma_coherent(dev))
		return;

	dev_info(dev, "applying no-alias DMA workaround\n");
	if (!dev->dma_ops) {
		dev->dma_ops = &arm64_noalias_ops;
		goto done;
	}

	if (IS_ENABLED(CONFIG_IOMMU_DMA)) {
		if (!iommu_dma_ops) {
			/* First device: remember the stock iommu-dma ops */
			iommu_dma_ops = dev->dma_ops;
			arm64_iommu_ops.mmap = iommu_dma_ops->mmap;
			arm64_iommu_ops.get_sgtable = iommu_dma_ops->get_sgtable;
			arm64_iommu_ops.map_resource = iommu_dma_ops->map_resource;
			arm64_iommu_ops.unmap_resource = iommu_dma_ops->unmap_resource;
			arm64_iommu_ops.get_merge_boundary = iommu_dma_ops->get_merge_boundary;
		}
		dev->dma_ops = &arm64_iommu_ops;
	}

done:
	arm64_noalias_prepare();
}
EXPORT_SYMBOL_GPL(arm64_noalias_setup_dma_ops);

View File

@@ -11,6 +11,7 @@
#include <xen/xen.h>
#include <xen/swiotlb-xen.h>
#include <trace/hooks/iommu.h>
#include <trace/hooks/dma_noalias.h>
#include <asm/cacheflush.h>
@@ -56,6 +57,9 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
trace_android_rvh_iommu_setup_dma_ops(dev, dma_base, size);
}
/* Allow vendor modules to opt-in for the 2454944 erratum workaround */
trace_android_rvh_setup_dma_ops(dev);
#ifdef CONFIG_XEN
if (xen_initial_domain())
dev->dma_ops = &xen_swiotlb_dma_ops;

View File

@@ -9,14 +9,19 @@
int fixup_exception(struct pt_regs *regs)
{
const struct exception_table_entry *fixup;
unsigned long addr;
fixup = search_exception_tables(instruction_pointer(regs));
addr = instruction_pointer(regs);
/* Search the BPF tables first, these are formatted differently */
fixup = search_bpf_extables(addr);
if (fixup)
return arm64_bpf_fixup_exception(fixup, regs);
fixup = search_exception_tables(addr);
if (!fixup)
return 0;
if (in_bpf_jit(regs))
return arm64_bpf_fixup_exception(fixup, regs);
regs->pc = (unsigned long)&fixup->fixup + fixup->fixup;
return 1;
}

View File

@@ -41,8 +41,6 @@ static struct addr_marker address_markers[] = {
{ 0 /* KASAN_SHADOW_START */, "Kasan shadow start" },
{ KASAN_SHADOW_END, "Kasan shadow end" },
#endif
{ BPF_JIT_REGION_START, "BPF start" },
{ BPF_JIT_REGION_END, "BPF end" },
{ MODULES_VADDR, "Modules start" },
{ MODULES_END, "Modules end" },
{ VMALLOC_START, "vmalloc() area" },

View File

@@ -1148,15 +1148,12 @@ out:
u64 bpf_jit_alloc_exec_limit(void)
{
return BPF_JIT_REGION_SIZE;
return VMALLOC_END - VMALLOC_START;
}
void *bpf_jit_alloc_exec(unsigned long size)
{
return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
BPF_JIT_REGION_END, GFP_KERNEL,
PAGE_KERNEL, 0, NUMA_NO_NODE,
__builtin_return_address(0));
return vmalloc(size);
}
void bpf_jit_free_exec(void *addr)

View File

@@ -6,9 +6,9 @@
#include <linux/bitops.h>
#include <asm/segment.h>
#include <linux/cache.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/cache.h>
#include <abi/reg_ops.h>
#include <abi/regdef.h>
#include <abi/switch_context.h>

View File

@@ -106,5 +106,6 @@ int memory_add_physaddr_to_nid(u64 addr)
return 0;
return nid;
}
EXPORT_SYMBOL(memory_add_physaddr_to_nid);
#endif
#endif

View File

@@ -86,7 +86,7 @@ static __init void prom_init_mem(void)
pr_debug("Assume 128MB RAM\n");
break;
}
if (!memcmp(prom_init, prom_init + mem, 32))
if (!memcmp((void *)prom_init, (void *)prom_init + mem, 32))
break;
}
lowmem = mem;
@@ -163,7 +163,7 @@ void __init bcm47xx_prom_highmem_init(void)
off = EXTVBASE + __pa(off);
for (extmem = 128 << 20; extmem < 512 << 20; extmem <<= 1) {
if (!memcmp(prom_init, (void *)(off + extmem), 16))
if (!memcmp((void *)prom_init, (void *)(off + extmem), 16))
break;
}
extmem -= lowmem;

View File

@@ -26,6 +26,6 @@ extern char *fw_getcmdline(void);
extern void fw_meminit(void);
extern char *fw_getenv(char *name);
extern unsigned long fw_getenvl(char *name);
extern void fw_init_early_console(char port);
extern void fw_init_early_console(void);
#endif /* __ASM_FW_H_ */

View File

@@ -56,7 +56,7 @@ void arch_jump_label_transform(struct jump_entry *e,
* The branch offset must fit in the instruction's 26
* bit field.
*/
WARN_ON((offset >= BIT(25)) ||
WARN_ON((offset >= (long)BIT(25)) ||
(offset < -(long)BIT(25)));
insn.j_format.opcode = bc6_op;

View File

@@ -27,7 +27,7 @@
#define U_BRG(x) (UART_BASE(x) + 0x40)
static void __iomem *uart_base;
static char console_port = -1;
static int console_port = -1;
static int __init configure_uart_pins(int port)
{
@@ -47,7 +47,7 @@ static int __init configure_uart_pins(int port)
return 0;
}
static void __init configure_uart(char port, int baud)
static void __init configure_uart(int port, int baud)
{
u32 pbclk;
@@ -60,7 +60,7 @@ static void __init configure_uart(char port, int baud)
uart_base + PIC32_SET(U_STA(port)));
}
static void __init setup_early_console(char port, int baud)
static void __init setup_early_console(int port, int baud)
{
if (configure_uart_pins(port))
return;
@@ -130,15 +130,14 @@ _out:
return baud;
}
void __init fw_init_early_console(char port)
void __init fw_init_early_console(void)
{
char *arch_cmdline = pic32_getcmdline();
int baud = -1;
int baud, port;
uart_base = ioremap(PIC32_BASE_UART, 0xc00);
baud = get_baud_from_cmdline(arch_cmdline);
if (port == -1)
port = get_port_from_cmdline(arch_cmdline);
if (port == -1)

View File

@@ -60,7 +60,7 @@ void __init plat_mem_setup(void)
strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
#ifdef CONFIG_EARLY_PRINTK
fw_init_early_console(-1);
fw_init_early_console();
#endif
pic32_config_init();
}

View File

@@ -27,15 +27,18 @@ static void bridge_platform_create(nasid_t nasid, int widget, int masterwid)
{
struct xtalk_bridge_platform_data *bd;
struct sgi_w1_platform_data *wd;
struct platform_device *pdev;
struct platform_device *pdev_wd;
struct platform_device *pdev_bd;
struct resource w1_res;
unsigned long offset;
offset = NODE_OFFSET(nasid);
wd = kzalloc(sizeof(*wd), GFP_KERNEL);
if (!wd)
goto no_mem;
if (!wd) {
pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
return;
}
snprintf(wd->dev_id, sizeof(wd->dev_id), "bridge-%012lx",
offset + (widget << SWIN_SIZE_BITS));
@@ -46,22 +49,35 @@ static void bridge_platform_create(nasid_t nasid, int widget, int masterwid)
w1_res.end = w1_res.start + 3;
w1_res.flags = IORESOURCE_MEM;
pdev = platform_device_alloc("sgi_w1", PLATFORM_DEVID_AUTO);
if (!pdev) {
kfree(wd);
goto no_mem;
pdev_wd = platform_device_alloc("sgi_w1", PLATFORM_DEVID_AUTO);
if (!pdev_wd) {
pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
goto err_kfree_wd;
}
platform_device_add_resources(pdev, &w1_res, 1);
platform_device_add_data(pdev, wd, sizeof(*wd));
platform_device_add(pdev);
if (platform_device_add_resources(pdev_wd, &w1_res, 1)) {
pr_warn("xtalk:n%d/%x bridge failed to add platform resources.\n", nasid, widget);
goto err_put_pdev_wd;
}
if (platform_device_add_data(pdev_wd, wd, sizeof(*wd))) {
pr_warn("xtalk:n%d/%x bridge failed to add platform data.\n", nasid, widget);
goto err_put_pdev_wd;
}
if (platform_device_add(pdev_wd)) {
pr_warn("xtalk:n%d/%x bridge failed to add platform device.\n", nasid, widget);
goto err_put_pdev_wd;
}
/* platform_device_add_data() duplicates the data */
kfree(wd);
bd = kzalloc(sizeof(*bd), GFP_KERNEL);
if (!bd)
goto no_mem;
pdev = platform_device_alloc("xtalk-bridge", PLATFORM_DEVID_AUTO);
if (!pdev) {
kfree(bd);
goto no_mem;
if (!bd) {
pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
goto err_unregister_pdev_wd;
}
pdev_bd = platform_device_alloc("xtalk-bridge", PLATFORM_DEVID_AUTO);
if (!pdev_bd) {
pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
goto err_kfree_bd;
}
@@ -82,13 +98,31 @@ static void bridge_platform_create(nasid_t nasid, int widget, int masterwid)
bd->io.flags = IORESOURCE_IO;
bd->io_offset = offset;
platform_device_add_data(pdev, bd, sizeof(*bd));
platform_device_add(pdev);
if (platform_device_add_data(pdev_bd, bd, sizeof(*bd))) {
pr_warn("xtalk:n%d/%x bridge failed to add platform data.\n", nasid, widget);
goto err_put_pdev_bd;
}
if (platform_device_add(pdev_bd)) {
pr_warn("xtalk:n%d/%x bridge failed to add platform device.\n", nasid, widget);
goto err_put_pdev_bd;
}
/* platform_device_add_data() duplicates the data */
kfree(bd);
pr_info("xtalk:n%d/%x bridge widget\n", nasid, widget);
return;
no_mem:
pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
err_put_pdev_bd:
platform_device_put(pdev_bd);
err_kfree_bd:
kfree(bd);
err_unregister_pdev_wd:
platform_device_unregister(pdev_wd);
return;
err_put_pdev_wd:
platform_device_put(pdev_wd);
err_kfree_wd:
kfree(wd);
return;
}
static int probe_one_port(nasid_t nasid, int widget, int masterwid)

View File

@@ -20,7 +20,7 @@ $(obj)/vmlinux.bin: vmlinux FORCE
$(obj)/vmlinux.gz: $(obj)/vmlinux.bin FORCE
$(call if_changed,gzip)
$(obj)/vmImage: $(obj)/vmlinux.gz
$(obj)/vmImage: $(obj)/vmlinux.gz FORCE
$(call if_changed,uimage)
@$(kecho) 'Kernel: $@ is ready'

View File

@@ -10,12 +10,12 @@
#define SVERSION_ANY_ID PA_SVERSION_ANY_ID
struct hp_hardware {
unsigned short hw_type:5; /* HPHW_xxx */
unsigned short hversion;
unsigned long sversion:28;
unsigned short opt;
const char name[80]; /* The hardware description */
};
unsigned int hw_type:8; /* HPHW_xxx */
unsigned int hversion:12;
unsigned int sversion:12;
unsigned char opt;
unsigned char name[59]; /* The hardware description */
} __packed;
struct parisc_device;

View File

@@ -883,15 +883,13 @@ void __init walk_central_bus(void)
&root);
}
static void print_parisc_device(struct parisc_device *dev)
static __init void print_parisc_device(struct parisc_device *dev)
{
char hw_path[64];
static int count;
static int count __initdata;
print_pa_hwpath(dev, hw_path);
pr_info("%d. %s at %pap [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
++count, dev->name, &(dev->hpa.start), hw_path, dev->id.hw_type,
dev->id.hversion_rev, dev->id.hversion, dev->id.sversion);
pr_info("%d. %s at %pap { type:%d, hv:%#x, sv:%#x, rev:%#x }",
++count, dev->name, &(dev->hpa.start), dev->id.hw_type,
dev->id.hversion, dev->id.sversion, dev->id.hversion_rev);
if (dev->num_addrs) {
int k;
@@ -1080,7 +1078,7 @@ static __init int qemu_print_iodc_data(struct device *lin_dev, void *data)
static int print_one_device(struct device * dev, void * data)
static __init int print_one_device(struct device * dev, void * data)
{
struct parisc_device * pdev = to_parisc_device(dev);

View File

@@ -153,7 +153,7 @@ CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=power8
CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power9,-mtune=power8)
else
CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power7,$(call cc-option,-mtune=power5))
CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mcpu=power5,-mcpu=power4)
CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=power4
endif
else ifdef CONFIG_PPC_BOOK3E_64
CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=powerpc64

Some files were not shown because too many files have changed in this diff Show More