Merge tag 'android12-5.10.209_r00' into android12-5.10

This merges up to the 5.10.209 LTS release into the android12-5.10
branch.  Included in here are the following commits:

* f5f23fd864 ANDROID: GKI: db845c: Update symbols list and ABI
*   717f23122d Merge "Merge branch 'android12-5.10' into branch 'android12-5.10-lts'" into android12-5.10-lts
|\
| * 10896fff34 Merge branch 'android12-5.10' into branch 'android12-5.10-lts'
* | 6f5d6d6de5 UPSTREAM: drm/msm/dsi: Enable runtime PM
* | 5a2e61dcbd UPSTREAM: PM: runtime: Have devm_pm_runtime_enable() handle pm_runtime_dont_use_autosuspend()
* | cb06375abb UPSTREAM: PM: runtime: add devm_pm_runtime_enable helper
|/
* 7edcfba384 Revert "clk: fixed-rate: add devm_clk_hw_register_fixed_rate"
* a8a9ceb11a Revert "clk: fixed-rate: fix clk_hw_register_fixed_rate_with_accuracy_parent_hw"
*   7e6944b050 Merge 5.10.209 into android12-5.10-lts
|\
| * 16ad71c250 Linux 5.10.209
| * 66e4f4a847 arm64: dts: armada-3720-turris-mox: set irq type for RTC
| * 2e23761beb i2c: s3c24xx: fix transferring more than one message in polling mode
| * da60686bab i2c: s3c24xx: fix read transfers in polling mode
| * 8ba74e9016 selftests: mlxsw: qos_pfc: Adjust the test to support 8 lanes
| * 3e1ca80659 selftests: mlxsw: qos_pfc: Convert to iproute2 dcb
| * 56750ea5d1 mlxsw: spectrum_acl_tcam: Fix stack corruption
| * a9b233b287 mlxsw: spectrum_acl_tcam: Reorder functions to avoid forward declarations
| * d3669ebc4c mlxsw: spectrum_acl_tcam: Make fini symmetric to init
| * a557dbda64 mlxsw: spectrum_acl_tcam: Add missing mutex_destroy()
| * 936d06d699 mlxsw: spectrum: Use 'bitmap_zalloc()' when applicable
| * e8bfdf6301 mlxsw: spectrum_acl_erp: Fix error flow of pool allocation failure
| * f90b1cebc4 ethtool: netlink: Add missing ethnl_ops_begin/complete
| * b44e1aec80 kdb: Fix a potential buffer overflow in kdb_local()
| * 977c2cf563 ipvs: avoid stat macros calls from preemptible context
| * 00a86f81c8 netfilter: nf_tables: reject NFT_SET_CONCAT with not field length description
| * 9a4d25267d netfilter: nf_tables: skip dead set elements in netlink dump
| * 2d4c0798a1 netfilter: nf_tables: do not allow mismatch field size and set key length
| * b099b495e1 net: dsa: vsc73xx: Add null pointer check to vsc73xx_gpio_probe
| * 6c8a5bbca6 net: ravb: Fix dma_addr_t truncation in error case
| * ba77c8b4e1 net: phy: micrel: populate .soft_reset for KSZ9131
| * 1658d7a7a2 net: ethernet: ti: am65-cpsw: Fix max mtu to fit ethernet frames
| * 2295c22348 net: qualcomm: rmnet: fix global oob in rmnet_policy
| * b55808b96e s390/pci: fix max size calculation in zpci_memcpy_toio()
| * 178b437344 PCI: keystone: Fix race condition when initializing PHYs
| * 0de2e62067 nvmet-tcp: Fix the H2C expected PDU len calculation
| * 172276460a serial: imx: Correct clock error message in function probe()
| * 5ff00408e5 apparmor: avoid crash when parsed profile name is empty
| * 866d32bff0 perf env: Avoid recursively taking env->bpf_progs.lock
| * 39669fae69 nvmet-tcp: fix a crash in nvmet_req_complete()
| * f775f2621c nvmet-tcp: Fix a kernel panic when host sends an invalid H2C PDU length
| * 1550b870ae usb: cdc-acm: return correct error code on unsupported break
| * d7c74b010a tty: use 'if' in send_break() instead of 'goto'
| * 3483ca0390 tty: don't check for signal_pending() in send_break()
| * 2cf81d3440 tty: early return from send_break() on TTY_DRIVER_HARDWARE_BREAK
| * 4e76dbd7c3 tty: change tty_write_lock()'s ndelay parameter to bool
| * abcc25a237 perf genelf: Set ELF program header addresses properly
| * 1e17feb4ad iio: adc: ad9467: fix scale setting
| * a3167e5ab5 iio: adc: ad9467: don't ignore error codes
| * 00e916d998 iio: adc: ad9467: fix reset gpio handling
| * 8083d68401 iio: adc: ad9467: Benefit from devm_clk_get_enabled() to simplify
| * 6e04a9d305 serial: imx: fix tx statemachine deadlock
| * 6746f3e893 software node: Let args be NULL in software_node_get_reference_args
| * 381bea33a8 acpi: property: Let args be NULL in __acpi_node_get_property_reference
| * 7166e8e913 libapi: Add missing linux/types.h header to get the __u64 type on io.h
| * bc57f3ef8a serial: 8250: omap: Don't skip resource freeing if pm_runtime_resume_and_get() failed
| * 89b97e6b19 power: supply: cw2015: correct time_to_empty units in sysfs
| * 5e3995ec39 MIPS: Alchemy: Fix an out-of-bound access in db1550_dev_setup()
| * c3ed63f6a1 MIPS: Alchemy: Fix an out-of-bound access in db1200_dev_setup()
| * 9995dee8b9 mips: Fix incorrect max_low_pfn adjustment
| * 5cdda6239c mips: dmi: Fix early remap on MIPS32
| * b152868773 leds: aw2013: Select missing dependency REGMAP_I2C
| * 927626a207 mfd: syscon: Fix null pointer dereference in of_syscon_register()
| * c964a0597b HID: wacom: Correct behavior when processing some confidence == false touches
| * c7883c9f83 iio: adc: ad7091r: Pass iio_dev to event handler
| * ba7be66674 KVM: arm64: vgic-its: Avoid potential UAF in LPI translation cache
| * 9b5a278594 KVM: arm64: vgic-v4: Restore pending state on host userspace write
| * 7521ea8f62 x86/kvm: Do not try to disable kvmclock if it was not enabled
| * 322c5fe40f wifi: mwifiex: configure BSSID consistently when starting AP
| * ba8a4fba62 wifi: rtlwifi: Convert LNKCTL change to PCIe cap RMW accessors
| * 3dc3122b0b wifi: rtlwifi: Remove bogus and dangerous ASPM disable/enable code
| * 5e9142b6a2 iommu/arm-smmu-qcom: Add missing GMU entry to match table
| * 394c6c0b6d Bluetooth: Fix atomicity violation in {min,max}_key_size_set
| * 3b8d7a1b85 rootfs: Fix support for rootfstype= when root= is given
| * 8c0b563e9b io_uring/rw: ensure io->bytes_done is always initialized
| * daa61bacd3 pwm: jz4740: Don't use dev_err_probe() in .request()
| * a7d4ec5672 fbdev: flush deferred work in fb_deferred_io_fsync()
| * ff2d54d9cc ALSA: hda/relatek: Enable Mute LED on HP Laptop 15s-fq2xxx
| * b5c2a32621 ALSA: oxygen: Fix right channel of capture volume mixer
| * 57f34589e9 serial: imx: Ensure that imx_uart_rs485_config() is called with enabled clock
| * b7747ca58e usb: mon: Fix atomicity violation in mon_bin_vma_fault
| * 5e5ab50eff usb: typec: class: fix typec_altmode_put_partner to put plugs
| * baaa6d8e3f Revert "usb: typec: class: fix typec_altmode_put_partner to put plugs"
| * f5c09e1d6e usb: chipidea: wait controller resume finished for wakeup irq
| * 084de4c659 Revert "usb: dwc3: don't reset device side if dwc3 was configured as host-only"
| * 1af0423587 Revert "usb: dwc3: Soft reset phy on probe for host"
| * 3253888660 usb: dwc: ep0: Update request status in dwc3_ep0_stall_restart
| * e70b17282a usb: phy: mxs: remove CONFIG_USB_OTG condition for mxs_phy_is_otg_host()
| * d65cade544 tick-sched: Fix idle and iowait sleeptime accounting vs CPU hotplug
| * 7e7a0d8654 binder: fix race between mmput() and do_exit()
| * cce8ba6fa4 xen-netback: don't produce zero-size SKB frags
| * d1e68ad279 net: ethernet: mtk_eth_soc: remove duplicate if statements
| * 8a29463915 kprobes: Fix to handle forcibly unoptimized kprobes on freeing_list
| * c02cdc2c5e Revert "ASoC: atmel: Remove system clock tree configuration for at91sam9g20ek"
| * 79026a2d0a virtio-crypto: fix memory leak in virtio_crypto_alg_skcipher_close_session()
| * dfdb686d1b virtio-crypto: fix memory-leak
| * c3b3389ad0 dma-mapping: Fix build error unused-value
| * 0dfe57f1f2 Input: atkbd - use ab83 as id when skipping the getid command
| * c8c1158ffb binder: fix use-after-free in shinker's callback
| * abbb909287 binder: fix unused alloc->free_async_space
| * fa62c9050e binder: fix async space check for 0-sized buffers
| * 4533e7623e keys, dns: Fix size check of V1 server-list header
| * 9e8a31c1aa of: unittest: Fix of_count_phandle_with_args() expected value message
| * d5f490343c of: Fix double free in of_parse_phandle_with_args_map
| * c9ee325bae IB/iser: Prevent invalidating wrong MR
| * bedecbb5a5 mmc: sdhci_omap: Fix TI SoC dependencies
| * 613af7d576 mmc: sdhci_am654: Fix TI SoC dependencies
| * 8da3a51f74 pwm: stm32: Fix enable count for clk in .probe()
| * e33e1bdbe6 pwm: stm32: Use hweight32 in stm32_pwm_detect_channels
| * 5af4ce2a54 pwm: stm32: Use regmap_clear_bits and regmap_set_bits where applicable
| * bed5ec2a59 clk: fixed-rate: fix clk_hw_register_fixed_rate_with_accuracy_parent_hw
| * 764b78b927 clk: fixed-rate: add devm_clk_hw_register_fixed_rate
| * e5236e58cc clk: si5341: fix an error code problem in si5341_output_clk_set_rate
| * 428381fbcb watchdog: rti_wdt: Drop runtime pm reference count when watchdog is unused
| * d83662bb90 watchdog: bcm2835_wdt: Fix WDIOC_SETTIMEOUT handling
| * 6c93290da9 watchdog/hpwdt: Only claim UNKNOWN NMI if from iLO
| * cbc15095d1 watchdog: set cdev owner before adding
| * e4f4a2cb74 drivers: clk: zynqmp: update divider round rate logic
| * af0b86199b clk: zynqmp: Add a check for NULL pointer
| * 45b4ea38c9 clk: zynqmp: make bestdiv unsigned
| * 7fdcd873c0 drivers: clk: zynqmp: calculate closest mux rate
| * 6cccbfafc0 clk: qcom: videocc-sm8150: Add missing PLL config property
| * 84ee04572f clk: qcom: videocc-sm8150: Update the videocc resets
| * 74bbdacf1b dt-bindings: clock: Update the videocc resets for sm8150
| * 0c8ba6937a gpu/drm/radeon: fix two memleaks in radeon_vm_init
| * 520e213a0b drivers/amd/pm: fix a use-after-free in kv_parse_power_table
| * aeed2b4e4a drm/amd/pm: fix a double-free in si_dpm_init
| * d53fee9e34 drm/amdgpu/debugfs: fix error code when smc register accessors are NULL
| * 56a79c68b5 media: dvb-frontends: m88ds3103: Fix a memory leak in an error handling path of m88ds3103_probe()
| * b5ebb9b4c1 media: dvbdev: drop refcount on error path in dvb_device_open()
| * 9284f409e4 f2fs: fix to update iostat correctly in f2fs_filemap_fault()
| * 40d36882c7 f2fs: fix to check compress file in f2fs_move_file_range()
| * b864287581 media: rkisp1: Disable runtime PM in probe error path
| * 962b35733b clk: qcom: gpucc-sm8150: Update the gpu_cc_pll1 config
| * 21a30b5969 media: cx231xx: fix a memleak in cx231xx_init_isoc
| * abd50cebf9 drm/bridge: tc358767: Fix return value on error case
| * 3f6932b5ba drm/radeon/trinity_dpm: fix a memleak in trinity_parse_power_table
| * e646308eda drm/radeon/dpm: fix a memleak in sumo_parse_power_table
| * 57ca798480 drm/radeon: check the alloc_workqueue return value in radeon_crtc_init()
| * c20a6aa0a3 drm/drv: propagate errors from drm_modeset_register_all()
| * da5e0feb12 drm/msm/dsi: Use pm_runtime_resume_and_get to prevent refcnt leaks
| * b704eabe87 drm/msm/mdp4: flush vblank event on disable
| * 96f5856867 ASoC: cs35l34: Fix GPIO name and drop legacy include
| * defe0327f5 ASoC: cs35l33: Fix GPIO name and drop legacy include
| * 1421b06618 drm/radeon: check return value of radeon_ring_lock()
| * 9924469212 drm/radeon/r100: Fix integer overflow issues in r100_cs_track_check()
| * ca267f79a5 drm/radeon/r600_cs: Fix possible int overflows in r600_cs_check_reg()
| * 6f866885e1 f2fs: fix to avoid dirent corruption
| * 13ea8af957 drm/bridge: Fix typo in post_disable() description
| * 3233d8bf78 media: pvrusb2: fix use after free on context disconnection
| * 53926e2a39 drm/bridge: tpd12s015: Drop buggy __exit annotation for remove function
| * 1eb7ceae48 drm/nouveau/fence:: fix warning directly dereferencing a rcu pointer
| * b1a07165be rcu: Create an unrcu_pointer() to remove __rcu from a pointer
| * 1d1d5b90ea drm/panel-elida-kd35t133: hold panel in reset for unprepare
| * 7794c14812 RDMA/usnic: Silence uninitialized symbol smatch warnings
| * d807f4ef22 ARM: davinci: always select CONFIG_CPU_ARM926T
| * da23bd709b ip6_tunnel: fix NEXTHDR_FRAGMENT handling in ip6_tnl_parse_tlv_enc_lim()
| * 4e09df9b24 Bluetooth: btmtkuart: fix recv_buf() return value
| * fd54d16613 Bluetooth: Fix bogus check for re-auth no supported with non-ssp
| * 15be96e1a6 netfilter: nf_tables: mark newset as dead on transaction abort
| * 6f39bea662 wifi: rtlwifi: rtl8192se: using calculate_bit_shift()
| * c713826653 wifi: rtlwifi: rtl8192ee: using calculate_bit_shift()
| * 99f56c3f7c wifi: rtlwifi: rtl8192de: using calculate_bit_shift()
| * b75b68dde5 rtlwifi: rtl8192de: make arrays static const, makes object smaller
| * df14e43219 wifi: rtlwifi: rtl8192ce: using calculate_bit_shift()
| * 016781c16e wifi: rtlwifi: rtl8192cu: using calculate_bit_shift()
| * bf277a76d3 wifi: rtlwifi: rtl8192c: using calculate_bit_shift()
| * 483c975e40 wifi: rtlwifi: rtl8188ee: phy: using calculate_bit_shift()
| * 31b651a7a1 wifi: rtlwifi: add calculate_bit_shift()
| * 64299791d0 dma-mapping: clear dev->dma_mem to NULL after freeing it
| * a6dd109564 dma-mapping: Add dma_release_coherent_memory to DMA API
| * ad43344ab4 virtio/vsock: fix logic which reduces credit update messages
| * 30ae0c6631 selftests/net: fix grep checking for fib_nexthop_multiprefix
| * 7a0f8295e7 scsi: hisi_sas: Replace with standard error code return value
| * afea95d319 bpf: Fix verification of indirect var-off stack access
| * 419ab8f74a arm64: dts: qcom: sdm845-db845c: correct LED panic indicator
| * 812cebdc5f arm64: dts: qcom: qrb5165-rb5: correct LED panic indicator
| * ba31bb08c1 scsi: fnic: Return error if vmalloc() failed
| * 2757f17972 bpf: fix check for attempt to corrupt spilled pointer
| * 7e98bbeb07 arm64: dts: ti: k3-am65-main: Fix DSS irq trigger type
| * d49863ed28 wifi: rtlwifi: rtl8821ae: phy: fix an undefined bitwise shift behavior
| * 2799324d92 firmware: meson_sm: populate platform devices from sm device tree data
| * 3959dbb375 firmware: ti_sci: Fix an off-by-one in ti_sci_debugfs_create()
| * 01f9feb5ed net/ncsi: Fix netlink major/minor version numbers
| * 7bcddd12a9 ncsi: internal.h: Fix a spello
| * ae98b7f7bb ARM: dts: qcom: apq8064: correct XOADC register address
| * 4675cacd6b wifi: libertas: stop selecting wext
| * 24d8aef5d9 wifi: ath11k: Defer on rproc_get failure
| * 91f3111558 bpf: Add crosstask check to __bpf_get_stack
| * d5d181df8d bpf, lpm: Fix check prefixlen before walking trie
| * eb0eac5736 wifi: rtw88: fix RX filter in FIF_ALLMULTI flag
| * d8caf15ab1 NFSv4.1/pnfs: Ensure we handle the error NFS4ERR_RETURNCONFLICT
| * 722c700dd8 blocklayoutdriver: Fix reference leak of pnfs_device_node
| * 4518dc468c crypto: scomp - fix req->dst buffer overflow
| * 1798c8fd00 crypto: sahara - do not resize req->src when doing hash operations
| * 67ae336f3b crypto: sahara - fix processing hash requests with req->nbytes < sg->length
| * b3287c8a7e crypto: sahara - improve error handling in sahara_sha_process()
| * 98985edab6 crypto: sahara - fix wait_for_completion_timeout() error handling
| * 69451bf97c crypto: sahara - fix ahash reqsize
| * d8d9580831 crypto: sahara - handle zero-length aes requests
| * b839648d05 crypto: sahara - avoid skcipher fallback code duplication
| * 14f57a013a crypto: virtio - Wait for tasklet to complete on device remove
| * 5c28478af3 gfs2: Fix kernel NULL pointer dereference in gfs2_rgrp_dump
| * 57c7b331f0 gfs2: Also reflect single-block allocations in rgd->rd_extfail_pt
| * 22f63f9bc8 Revert "gfs2: Don't reject a supposedly full bitmap if we have blocks reserved"
| * cd48d2a8e6 fs: indicate request originates from old mount API
| * acd413da3e pstore: ram_core: fix possible overflow in persistent_ram_init_ecc()
| * c60fd7a663 crypto: sahara - fix error handling in sahara_hw_descriptor_create()
| * 25b7ca747b crypto: sahara - fix processing requests with cryptlen < sg->length
| * fc91d32c7d crypto: sahara - fix ahash selftest failure
| * 4f4786b818 crypto: sahara - fix cbc selftest failure
| * ccdb86c339 crypto: sahara - remove FLAGS_NEW_KEY logic
| * 7f807dc073 crypto: af_alg - Disallow multiple in-flight AIO requests
| * 97f9d0455b crypto: ccp - fix memleak in ccp_init_dm_workarea
| * 95586bb74b crypto: sa2ul - Return crypto_aead_setkey to transfer the error
| * 0eb69890e8 crypto: virtio - Handle dataq logic with tasklet
| * 0dee72f9b7 virtio-crypto: wait ctrl queue instead of busy polling
| * 4ee475e76b virtio-crypto: use private buffer for control request
| * 7d386768ef virtio-crypto: change code style
| * 1ff5742889 virtio-crypto: implement RSA algorithm
| * f32dfee5a6 virtio-crypto: introduce akcipher service
| * b2092cdcda virtio_crypto: Introduce VIRTIO_CRYPTO_NOSPC
| * 1bc7a682ed selinux: Fix error priority for bind with AF_UNSPEC on PF_INET6 socket
| * 001a3f59d8 mtd: Fix gluebi NULL pointer dereference caused by ftl notifier
| * 1d7b39c842 ACPI: extlog: Clear Extended Error Log status when RAS_CEC handled the error
| * 46e3dc02a4 spi: sh-msiof: Enforce fixed DTDL for R-Car H3
| * 94c742324e efivarfs: force RO when remounting if SetVariable is not supported
| * 44a88650ba calipso: fix memory leak in netlbl_calipso_add_pass()
| * 5ac84b01a0 netlabel: remove unused parameter in netlbl_netlink_auditinfo()
| * 47210a5754 net: netlabel: Fix kerneldoc warnings
| * d5ce66bdf6 cpufreq: scmi: process the result of devm_of_clk_add_hw_provider()
| * fda1309205 cpufreq: Use of_property_present() for testing DT property presence
| * ba7c7e3530 of: Add of_property_present() helper
| * ded221bf4c of: property: define of_property_read_u{8,16,32,64}_array() unconditionally
| * f39c3d578c ACPI: LPIT: Avoid u32 multiplication overflow
| * c4e1a0ef0b ACPI: video: check for error while searching for backlight device parent
| * 11ac297aba mtd: rawnand: Increment IFC_TIMEOUT_MSECS for nand controller response
| * f8df7c9886 spi: spi-zynqmp-gqspi: fix driver kconfig dependencies
| * 5a669f3511 powerpc/imc-pmu: Add a null pointer check in update_events_in_group()
| * a67a04ad05 powerpc/powernv: Add a null pointer check in opal_powercap_init()
| * e6ad05e3ae powerpc/powernv: Add a null pointer check in opal_event_init()
| * 1eefa93faf powerpc/powernv: Add a null pointer check to scom_debug_init_one()
| * 428ab6a9dd selftests/powerpc: Fix error handling in FPU/VMX preemption tests
| * b582aa1f66 powerpc/pseries/memhp: Fix access beyond end of drmem array
| * f2ec41874b powerpc/pseries/memhotplug: Quieten some DLPAR operations
| * 81dce186f1 powerpc/44x: select I2C for CURRITUCK
| * d67339e9c8 powerpc: Remove in_kernel_text()
| * ecbbd90e70 powerpc: add crtsavres.o to always-y instead of extra-y
| * 6aa7865ba7 EDAC/thunderx: Fix possible out-of-bounds string access
| * 36dbbfff28 x86/lib: Fix overflow when counting digits
| * 83da4fc5aa coresight: etm4x: Fix width of CCITMIN field
| * 67d3d17e31 PCI: Add ACS quirk for more Zhaoxin Root Ports
| * e5457b54ad parport: parport_serial: Add Brainboxes device IDs and geometry
| * 937293ff64 parport: parport_serial: Add Brainboxes BAR details
| * 5e0be1229a uio: Fix use-after-free in uio_open
| * 214aac202d binder: fix comment on binder_alloc_new_buf() return value
| * 689f13128f binder: fix trivial typo of binder_free_buf_locked()
| * e18d60757b binder: use EPOLLERR from eventpoll.h
| * 19d949b37e ACPI: resource: Add another DMI match for the TongFang GMxXGxx
| * f138fb6e64 drm/crtc: fix uninitialized variable use
| * 7ba78e0823 ARM: sun9i: smp: fix return code check of of_property_match_string
| * 01d8918415 net: qrtr: ns: Return 0 if server port is not present
| * dbf8b0d938 ida: Fix crash in ida_free when the bitmap is empty
| * d8a07ba130 i2c: rk3x: fix potential spinlock recursion on poll
| * a57c59c85c Input: xpad - add Razer Wolverine V2 support
| * 86e4e2eea4 ARC: fix spare error
| * 9700ff5a4f s390/scm: fix virtual vs physical address confusion
| * 6e17155869 Input: i8042 - add nomux quirk for Acer P459-G2-M
| * b0e82ef611 Input: atkbd - skip ATKBD_CMD_GETID in translated mode
| * 6f1614080f reset: hisilicon: hi6220: fix Wvoid-pointer-to-enum-cast warning
| * 8d6913d050 ring-buffer: Do not record in NMI if the arch does not support cmpxchg in NMI
| * 439f3bbf75 tracing: Add size check when printing trace_marker output
| * bc6619c9aa tracing: Have large events show up as '[LINE TOO BIG]' instead of nothing
| * c0f1db7380 jbd2: fix soft lockup in journal_finish_inode_data_buffers()
| * cd94f81f77 neighbour: Don't let neigh_forced_gc() disable preemption for long
| * 3887ba7198 drm/crtc: Fix uninit-value bug in drm_mode_setcrtc
| * 956b740f3e jbd2: correct the printing of write_flags in jbd2_write_superblock()
| * 656d684109 clk: rockchip: rk3128: Fix HCLK_OTG gate register
| * 05644e6365 drm/exynos: fix a wrong error checking
| * 0fc35b0d9f drm/exynos: fix a potential error pointer dereference
| * fb2f34d939 nvme: introduce helper function to get ctrl state
| * a8b1ddeeac ASoC: da7219: Support low DC impedance headset
| * 1059aa41c5 net/tg3: fix race condition in tg3_reset_task()
| * 44ad1b9eb3 nouveau/tu102: flush all pdbs on vmm flush
| * ffe13302b8 ASoC: rt5650: add mutex to avoid the jack detection failure
| * 8b50b177bf ASoC: cs43130: Fix incorrect frame delay configuration
| * 921ff9f2b1 ASoC: cs43130: Fix the position of const qualifier
| * 304529d564 ASoC: Intel: Skylake: mem leak in skl register function
| * 1e31b47b2e ASoC: nau8822: Fix incorrect type in assignment and cast to restricted __be16
| * 9c89777c7d ASoC: Intel: Skylake: Fix mem leak in few functions
| * c78083013b ASoC: wm8974: Correct boost mixer inputs
| * 06a33eec1d nvme-core: check for too small lba shift
| * 974f127fcf drm/amdgpu: Fix cat debugfs amdgpu_regs_didt causes kernel null pointer
| * 52a33dbeac debugfs: fix automount d_fsdata usage
| * 65bde47aad mptcp: fix uninit-value in mptcp_incoming_options
| * 6b00598b64 ALSA: hda - Fix speaker and headset mic pin config for CHUWI CoreBook XPro
| * 94e192054f pinctrl: lochnagar: Don't build on MIPS
| * 3e47740091 f2fs: explicitly null-terminate the xattr list
* | 62f62d810d Revert "ipv6: remove max_size check inline with ipv4"
* |   08731a14db Merge "Merge 5.10.208 into android12-5.10-lts" into android12-5.10-lts
|\ \
| * | 680475ab46 Merge 5.10.208 into android12-5.10-lts
| |\|
| | * 3fee45ee55 Linux 5.10.208
| | * 929ba86476 Revert "nvme: use command_id instead of req->tag in trace_nvme_complete_rq()"
| | * 8a10841c1a PCI: Disable ATS for specific Intel IPU E2000 devices
| | * b74a0c4ddf PCI: Extract ATS disabling to a helper function
| | * 25d1e7be85 netfilter: nf_tables: Reject tables of unsupported family
| | * 03585b18b7 drm/qxl: fix UAF on handle creation
| | * dd56c5790d ipv6: remove max_size check inline with ipv4
| | * c6b2a6b827 net: tls, update curr on splice as well
| | * d36b6b152f powerpc: update ppc_save_regs to save current r1 in pt_regs
| | * ae64985e0e mmc: sdhci-sprd: Fix eMMC init failure after hw reset
| | * f7796d76bd mmc: core: Cancel delayed work before releasing host
| | * c29da60e1f mmc: rpmb: fixes pause retune on all RPMB partitions.
| | * c82efcaad2 mmc: meson-mx-sdhc: Fix initialization frozen issue
| | * 73704c6b35 mm: fix unmap_mapping_range high bits shift bug
| | * 4aca0af447 i2c: core: Fix atomic xfer check for non-preempt config
| | * 0ba8c7ef19 x86/kprobes: fix incorrect return address calculation in kprobe_emulate_call_indirect
| | * 23c006a760 firewire: ohci: suppress unexpected system reboot in AMD Ryzen machines and ASM108x/VT630x PCIe cards
| | * 70168fdc74 mm/memory-failure: check the mapcount of the precise page
| | * 18203c4484 net: Implement missing SO_TIMESTAMPING_NEW cmsg support
| | * 701b03fc14 bnxt_en: Remove mis-applied code from bnxt_cfg_ntp_filters()
| | * cfbf618e9a asix: Add check for usbnet_get_endpoints
| | * 0fc5fe6e41 net/qla3xxx: fix potential memleak in ql_alloc_buffer_queues
| | * c492f9c7d3 net/qla3xxx: switch from 'pci_' to 'dma_' API
| | * 53e92564c5 i40e: Restore VF MSI-X state during PCI reset
| | * 6d3465c3dd ASoC: meson: g12a-tohdmitx: Fix event generation for S/PDIF mux
| | * e08399e812 ASoC: meson: g12a-toacodec: Fix event generation
| | * 51e88b2cef ASoC: meson: g12a-tohdmitx: Validate written enum values
| | * 85f8d007de ASoC: meson: g12a-toacodec: Validate written enum values
| | * 21ecce2456 i40e: fix use-after-free in i40e_aqc_add_filters()
| | * f8c03fd826 net: Save and restore msg_namelen in sock_sendmsg
| | * 06ce3b8ec4 netfilter: nft_immediate: drop chain reference counter on error
| | * cf3c516dec netfilter: nftables: add loop check helper function
| | * 4366b7e1f5 net: bcmgenet: Fix FCS generation for fragmented skbuffs
| | * d1eb795385 sfc: fix a double-free bug in efx_probe_filters
| | * cb69cad457 ARM: sun9i: smp: Fix array-index-out-of-bounds read in sunxi_mc_smp_init
| | * 8abb7ab7cf net: sched: em_text: fix possible memory leak in em_text_destroy()
| | * ecdfb0970c i40e: Fix filter input checks to prevent config with invalid values
| | * 8d4ae760c7 drm/i915/dp: Fix passing the correct DPCD_REV for drm_dp_set_phy_test_pattern
| | * 3ff482518b octeontx2-af: Fix marking couple of structure as __packed
| | * 6adeb15cb6 nfc: llcp_core: Hold a ref to llcp_local->dev when holding a ref to llcp_local
| | * 74c9135d16 ALSA: hda/realtek: Fix mute and mic-mute LEDs for HP ProBook 440 G6
| | * 132ba71b4e block: Don't invalidate pagecache for invalid falloc modes
| | * ef1d1d7c10 keys, dns: Fix missing size check of V1 server-list header
* | | 274aa12c42 ANDROID: db845c: Enable device tree overlay support
|/ /
* | c925f18af7 Merge 5.10.207 into android12-5.10-lts
|\|
| * 03a0e87f70 Linux 5.10.207
| * a7fd5c7ba4 scsi: core: Always send batch on reset or error handling command
| * e30419672e Revert "scsi: core: Add scsi_prot_ref_tag() helper"
| * f60f60e1de Revert "scsi: core: Introduce scsi_get_sector()"
| * a5edb40702 Revert "scsi: core: Make scsi_get_lba() return the LBA"
| * 2129297760 Revert "scsi: core: Use scsi_cmd_to_rq() instead of scsi_cmnd.request"
| * cea19678bf Revert "scsi: core: Use a structure member to track the SCSI command submitter"
| * 6963d049a4 Revert "scsi: core: Always send batch on reset or error handling command"
* | 3d0828af52 Revert "ANDROID: GKI: Fix abi break in struct scsi_cmd"
* | ffc061ba7d ANDROID: GKI: Fix abi break in struct scsi_cmd
* | 8a9d593fd6 Merge 5.10.206 into android12-5.10-lts
|/
* cf13ba74e8 Linux 5.10.206
* 2df1e1887c spi: atmel: Fix PDC transfer setup bug
* 2a0a658ed6 Bluetooth: SMP: Fix crash when receiving new connection when debug is enabled
* ecd50f820d Revert "MIPS: Loongson64: Enable DMA noncoherent support"
* 9175341bd8 dm-integrity: don't modify bio's immutable bio_vec in integrity_metadata()
* 73117ea033 netfilter: nf_tables: skip set commit for deleted/destroyed sets
* 8bf79dec73 tracing: Fix blocked reader of snapshot buffer
* 0afe420228 ring-buffer: Fix wake ups when buffer_percent is set to 100
* 9db5239d75 scsi: core: Always send batch on reset or error handling command
* f2d30198c0 scsi: core: Use a structure member to track the SCSI command submitter
* df83ca8e98 scsi: core: Use scsi_cmd_to_rq() instead of scsi_cmnd.request
* d054858a9c scsi: core: Make scsi_get_lba() return the LBA
* f230e6d424 scsi: core: Introduce scsi_get_sector()
* 294d66c35a scsi: core: Add scsi_prot_ref_tag() helper
* 929f475eba spi: atmel: Fix CS and initialization bug
* 23d9267c54 spi: atmel: Switch to transfer_one transfer method
* db1b14eec8 Bluetooth: af_bluetooth: Fix Use-After-Free in bt_sock_recvmsg
* 0c54b79d1d smb: client: fix OOB in smbCalcSize()
* 203a412e52 smb: client: fix OOB in SMB2_query_info_init()
* 79e158ddc3 usb: fotg210-hcd: delete an incorrect bounds test
* da448f145f Bluetooth: MGMT/SMP: Fix address type when using SMP over BREDR/LE
* 4bc912140b Bluetooth: use inclusive language in SMP
* e219c3110a Bluetooth: SMP: Convert BT_ERR/BT_DBG to bt_dev_err/bt_dev_dbg
* cdbc4a1115 ARM: dts: Fix occasional boot hang for am3 usb
* 1e2db0124c 9p/net: fix possible memory leak in p9_check_errors()
* c4a22227f7 x86/alternatives: Sync core before enabling interrupts
* 7d407ef183 lib/vsprintf: Fix %pfwf when current node refcount == 0
* 565fadc3ea bus: ti-sysc: Flush posted write only after srst_udelay
* e50cfb5447 tracing / synthetic: Disable events after testing in synth_event_gen_test_init()
* cd6e41593e dt-bindings: nvmem: mxs-ocotp: Document fsl,ocotp
* 786788bb13 net: ks8851: Fix TX stall caused by TX buffer overrun
* 391c1019a0 net: rfkill: gpio: set GPIO direction
* 6d7b8e5a6d net: 9p: avoid freeing uninit memory in p9pdu_vreadf
* 45b63f09ba Input: soc_button_array - add mapping for airplane mode button
* 2aa744ad0e Bluetooth: L2CAP: Send reject on command corrupted request
* 25a6fdd26d Bluetooth: hci_event: Fix not checking if HCI_OP_INQUIRY has been sent
* 71e1c76540 USB: serial: option: add Quectel RM500Q R13 firmware support
* d521896bcc USB: serial: option: add Foxconn T99W265 with new baseline
* d0cf8a4bee USB: serial: option: add Quectel EG912Y module support
* f41f44cea9 USB: serial: ftdi_sio: update Actisense PIDs constant names
* 20d84a1946 wifi: cfg80211: fix certs build to not depend on file order
* 7a0a5cbfea wifi: cfg80211: Add my certificate
* 9dcf50da59 iio: adc: ti_am335x_adc: Fix return value check of tiadc_request_dma()
* abbebddb19 iio: common: ms_sensors: ms_sensors_i2c: fix humidity conversion time table
* c40db29812 scsi: bnx2fc: Fix skb double free in bnx2fc_rcv()
* e3749f85fd Input: ipaq-micro-keys - add error handling for devm_kmemdup
* b5f67cea27 iio: imu: inv_mpu6050: fix an error code problem in inv_mpu6050_read_raw
* 505df1c0ab interconnect: Treat xlate() returning NULL node as an error
* cc5eec86a4 btrfs: do not allow non subvolume root targets for snapshot
* bd267af18f smb: client: fix NULL deref in asn1_ber_decoder()
* 41350e813a ALSA: hda/hdmi: add force-connect quirk for NUC5CPYB
* a4692c38cd ALSA: hda/hdmi: Add quirk to force pin connectivity on NUC10
* e032ddb0e3 pinctrl: at91-pio4: use dedicated lock class for IRQ
* 0b85149a9d i2c: aspeed: Handle the coalesced stop conditions with the start conditions.
* 3dce7a52b2 afs: Fix overwriting of result of DNS query
* 97be1e865e keys, dns: Allow key types (eg. DNS) to be reclaimed immediately on expiry
* 9e0d18f946 net: check dev->gso_max_size in gso_features_check()
* 59dc16ce09 net: warn if gso_type isn't set for a GSO SKB
* 63ad66d484 afs: Fix dynamic root lookup DNS check
* 65d2c287fc afs: Fix the dynamic root's d_delete to always delete unused dentries
* a3218319ee net: check vlan filter feature in vlan_vids_add_by_dev() and vlan_vids_del_by_dev()
* 12e5a4719c net/rose: fix races in rose_kill_by_device()
* be0988c9b0 ethernet: atheros: fix a memleak in atl1e_setup_ring_resources
* 8b6f8bfe3a net: sched: ife: fix potential use-after-free
* f245312e9f net/mlx5e: Correct snprintf truncation handling for fw_version buffer used by representors
* e8ba688a64 net/mlx5: Fix fw tracer first block check
* fc4c53f8e9 net/mlx5e: Fix slab-out-of-bounds in mlx5_query_nic_vport_mac_list()
* 50aa92e699 Revert "net/mlx5e: fix double free of encap_header"
* b851889e91 wifi: mac80211: mesh_plink: fix matches_local logic
* 717f08fb51 s390/vx: fix save/restore of fpu kernel context
* c48219fad1 reset: Fix crash when freeing non-existent optional resets
* c999682ce8 ARM: OMAP2+: Fix null pointer dereference and memory leak in omap_soc_device_init
* dbc8edb80f smb: client: fix OOB in smb2_query_reparse_point()
* bc3c57493b ksmbd: fix wrong name of SMB2_CREATE_ALLOCATION_SIZE

Updates the .xml file to track the new symbol that is required:

Leaf changes summary: 1 artifact changed
Changed leaf types summary: 0 leaf type changed
Removed/Changed/Added functions summary: 0 Removed, 0 Changed, 1 Added function
Removed/Changed/Added variables summary: 0 Removed, 0 Changed, 0 Added variable

1 Added function:

  [A] 'function int devm_pm_runtime_enable(device*)'

Change-Id: I68d1c499f716926e1e84a98895170b9d192019e8
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
Greg Kroah-Hartman
2024-03-06 16:10:12 +00:00
committed by Treehugger Robot
366 changed files with 4940 additions and 3216 deletions

View File

@@ -14,9 +14,11 @@ allOf:
properties: properties:
compatible: compatible:
enum: items:
- fsl,imx23-ocotp - enum:
- fsl,imx28-ocotp - fsl,imx23-ocotp
- fsl,imx28-ocotp
- const: fsl,ocotp
"#address-cells": "#address-cells":
const: 1 const: 1
@@ -40,7 +42,7 @@ additionalProperties: false
examples: examples:
- | - |
ocotp: efuse@8002c000 { ocotp: efuse@8002c000 {
compatible = "fsl,imx28-ocotp"; compatible = "fsl,imx28-ocotp", "fsl,ocotp";
#address-cells = <1>; #address-cells = <1>;
#size-cells = <1>; #size-cells = <1>;
reg = <0x8002c000 0x2000>; reg = <0x8002c000 0x2000>;

View File

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0 # SPDX-License-Identifier: GPL-2.0
VERSION = 5 VERSION = 5
PATCHLEVEL = 10 PATCHLEVEL = 10
SUBLEVEL = 205 SUBLEVEL = 209
EXTRAVERSION = EXTRAVERSION =
NAME = Dare mighty things NAME = Dare mighty things

File diff suppressed because it is too large Load Diff

View File

@@ -1086,6 +1086,7 @@
devfreq_suspend_device devfreq_suspend_device
devm_clk_register devm_clk_register
devm_devfreq_add_device devm_devfreq_add_device
devm_pm_runtime_enable
devm_regulator_get_exclusive devm_regulator_get_exclusive
dev_pm_opp_find_freq_floor dev_pm_opp_find_freq_floor
dev_pm_opp_get_freq dev_pm_opp_get_freq
@@ -1177,7 +1178,6 @@
drm_dp_link_rate_to_bw_code drm_dp_link_rate_to_bw_code
drm_dp_link_train_channel_eq_delay drm_dp_link_train_channel_eq_delay
drm_dp_link_train_clock_recovery_delay drm_dp_link_train_clock_recovery_delay
drm_edid_block_valid
drm_encoder_cleanup drm_encoder_cleanup
drm_encoder_init drm_encoder_init
drm_flip_work_cleanup drm_flip_work_cleanup
@@ -1812,6 +1812,7 @@
# preserved by --additions-only # preserved by --additions-only
drm_connector_init_with_ddc drm_connector_init_with_ddc
drm_edid_block_valid
gpiod_direction_input gpiod_direction_input
idr_alloc_u32 idr_alloc_u32
of_clk_get_by_name of_clk_get_by_name

View File

@@ -61,7 +61,7 @@ struct rt_sigframe {
unsigned int sigret_magic; unsigned int sigret_magic;
}; };
static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs) static int save_arcv2_regs(struct sigcontext __user *mctx, struct pt_regs *regs)
{ {
int err = 0; int err = 0;
#ifndef CONFIG_ISA_ARCOMPACT #ifndef CONFIG_ISA_ARCOMPACT
@@ -74,12 +74,12 @@ static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
#else #else
v2abi.r58 = v2abi.r59 = 0; v2abi.r58 = v2abi.r59 = 0;
#endif #endif
err = __copy_to_user(&mctx->v2abi, &v2abi, sizeof(v2abi)); err = __copy_to_user(&mctx->v2abi, (void const *)&v2abi, sizeof(v2abi));
#endif #endif
return err; return err;
} }
static int restore_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs) static int restore_arcv2_regs(struct sigcontext __user *mctx, struct pt_regs *regs)
{ {
int err = 0; int err = 0;
#ifndef CONFIG_ISA_ARCOMPACT #ifndef CONFIG_ISA_ARCOMPACT

View File

@@ -347,6 +347,7 @@
<SYSC_IDLE_NO>, <SYSC_IDLE_NO>,
<SYSC_IDLE_SMART>, <SYSC_IDLE_SMART>,
<SYSC_IDLE_SMART_WKUP>; <SYSC_IDLE_SMART_WKUP>;
ti,sysc-delay-us = <2>;
clocks = <&l3s_clkctrl AM3_L3S_USB_OTG_HS_CLKCTRL 0>; clocks = <&l3s_clkctrl AM3_L3S_USB_OTG_HS_CLKCTRL 0>;
clock-names = "fck"; clock-names = "fck";
#address-cells = <1>; #address-cells = <1>;

View File

@@ -760,7 +760,7 @@
xoadc: xoadc@197 { xoadc: xoadc@197 {
compatible = "qcom,pm8921-adc"; compatible = "qcom,pm8921-adc";
reg = <197>; reg = <0x197>;
interrupts-extended = <&pmicintc 78 IRQ_TYPE_EDGE_RISING>; interrupts-extended = <&pmicintc 78 IRQ_TYPE_EDGE_RISING>;
#address-cells = <2>; #address-cells = <2>;
#size-cells = <0>; #size-cells = <0>;

View File

@@ -3,6 +3,7 @@
menuconfig ARCH_DAVINCI menuconfig ARCH_DAVINCI
bool "TI DaVinci" bool "TI DaVinci"
depends on ARCH_MULTI_V5 depends on ARCH_MULTI_V5
select CPU_ARM926T
select DAVINCI_TIMER select DAVINCI_TIMER
select ZONE_DMA select ZONE_DMA
select PM_GENERIC_DOMAINS if PM select PM_GENERIC_DOMAINS if PM

View File

@@ -793,11 +793,16 @@ void __init omap_soc_device_init(void)
soc_dev_attr->machine = soc_name; soc_dev_attr->machine = soc_name;
soc_dev_attr->family = omap_get_family(); soc_dev_attr->family = omap_get_family();
if (!soc_dev_attr->family) {
kfree(soc_dev_attr);
return;
}
soc_dev_attr->revision = soc_rev; soc_dev_attr->revision = soc_rev;
soc_dev_attr->custom_attr_group = omap_soc_groups[0]; soc_dev_attr->custom_attr_group = omap_soc_groups[0];
soc_dev = soc_device_register(soc_dev_attr); soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) { if (IS_ERR(soc_dev)) {
kfree(soc_dev_attr->family);
kfree(soc_dev_attr); kfree(soc_dev_attr);
return; return;
} }

View File

@@ -804,16 +804,16 @@ static int __init sunxi_mc_smp_init(void)
for (i = 0; i < ARRAY_SIZE(sunxi_mc_smp_data); i++) { for (i = 0; i < ARRAY_SIZE(sunxi_mc_smp_data); i++) {
ret = of_property_match_string(node, "enable-method", ret = of_property_match_string(node, "enable-method",
sunxi_mc_smp_data[i].enable_method); sunxi_mc_smp_data[i].enable_method);
if (!ret) if (ret >= 0)
break; break;
} }
is_a83t = sunxi_mc_smp_data[i].is_a83t;
of_node_put(node); of_node_put(node);
if (ret) if (ret < 0)
return -ENODEV; return -ENODEV;
is_a83t = sunxi_mc_smp_data[i].is_a83t;
if (!sunxi_mc_smp_cpu_table_init()) if (!sunxi_mc_smp_cpu_table_init())
return -EINVAL; return -EINVAL;

View File

@@ -129,7 +129,7 @@
compatible = "microchip,mcp7940x"; compatible = "microchip,mcp7940x";
reg = <0x6f>; reg = <0x6f>;
interrupt-parent = <&gpiosb>; interrupt-parent = <&gpiosb>;
interrupts = <5 0>; /* GPIO2_5 */ interrupts = <5 IRQ_TYPE_EDGE_FALLING>; /* GPIO2_5 */
}; };
}; };

View File

@@ -38,8 +38,8 @@
user4 { user4 {
label = "green:user4"; label = "green:user4";
gpios = <&pm8150_gpios 10 GPIO_ACTIVE_HIGH>; gpios = <&pm8150_gpios 10 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "panic-indicator";
default-state = "off"; default-state = "off";
panic-indicator;
}; };
wlan { wlan {

View File

@@ -55,8 +55,8 @@
user4 { user4 {
label = "green:user4"; label = "green:user4";
gpios = <&pm8998_gpio 13 GPIO_ACTIVE_HIGH>; gpios = <&pm8998_gpio 13 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "panic-indicator";
default-state = "off"; default-state = "off";
panic-indicator;
}; };
wlan { wlan {

View File

@@ -865,7 +865,7 @@
assigned-clocks = <&k3_clks 67 2>; assigned-clocks = <&k3_clks 67 2>;
assigned-clock-parents = <&k3_clks 67 5>; assigned-clock-parents = <&k3_clks 67 5>;
interrupts = <GIC_SPI 166 IRQ_TYPE_EDGE_RISING>; interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled"; status = "disabled";

View File

@@ -584,7 +584,11 @@ static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db,
unsigned long flags; unsigned long flags;
raw_spin_lock_irqsave(&dist->lpi_list_lock, flags); raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
irq = __vgic_its_check_cache(dist, db, devid, eventid); irq = __vgic_its_check_cache(dist, db, devid, eventid);
if (irq)
vgic_get_irq_kref(irq);
raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags); raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
return irq; return irq;
@@ -763,6 +767,7 @@ int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi)
raw_spin_lock_irqsave(&irq->irq_lock, flags); raw_spin_lock_irqsave(&irq->irq_lock, flags);
irq->pending_latch = true; irq->pending_latch = true;
vgic_queue_irq_unlock(kvm, irq, flags); vgic_queue_irq_unlock(kvm, irq, flags);
vgic_put_irq(kvm, irq);
return 0; return 0;
} }

View File

@@ -356,19 +356,26 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
raw_spin_lock_irqsave(&irq->irq_lock, flags); raw_spin_lock_irqsave(&irq->irq_lock, flags);
if (test_bit(i, &val)) {
/* /*
* pending_latch is set irrespective of irq type * pending_latch is set irrespective of irq type
* (level or edge) to avoid dependency that VM should * (level or edge) to avoid dependency that VM should
* restore irq config before pending info. * restore irq config before pending info.
*/ */
irq->pending_latch = true; irq->pending_latch = test_bit(i, &val);
vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
} else { if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
irq_set_irqchip_state(irq->host_irq,
IRQCHIP_STATE_PENDING,
irq->pending_latch);
irq->pending_latch = false; irq->pending_latch = false;
raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
} }
if (irq->pending_latch)
vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
else
raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq); vgic_put_irq(vcpu->kvm, irq);
} }

View File

@@ -468,7 +468,6 @@ config MACH_LOONGSON2EF
config MACH_LOONGSON64 config MACH_LOONGSON64
bool "Loongson 64-bit family of machines" bool "Loongson 64-bit family of machines"
select ARCH_DMA_DEFAULT_COHERENT
select ARCH_SPARSEMEM_ENABLE select ARCH_SPARSEMEM_ENABLE
select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO select ARCH_MIGHT_HAVE_PC_SERIO
@@ -1380,7 +1379,6 @@ config CPU_LOONGSON64
select CPU_SUPPORTS_MSA select CPU_SUPPORTS_MSA
select CPU_DIEI_BROKEN if !LOONGSON3_ENHANCEMENT select CPU_DIEI_BROKEN if !LOONGSON3_ENHANCEMENT
select CPU_MIPSR2_IRQ_VI select CPU_MIPSR2_IRQ_VI
select DMA_NONCOHERENT
select WEAK_ORDERING select WEAK_ORDERING
select WEAK_REORDERING_BEYOND_LLSC select WEAK_REORDERING_BEYOND_LLSC
select MIPS_ASID_BITS_VARIABLE select MIPS_ASID_BITS_VARIABLE

View File

@@ -847,7 +847,7 @@ int __init db1200_dev_setup(void)
i2c_register_board_info(0, db1200_i2c_devs, i2c_register_board_info(0, db1200_i2c_devs,
ARRAY_SIZE(db1200_i2c_devs)); ARRAY_SIZE(db1200_i2c_devs));
spi_register_board_info(db1200_spi_devs, spi_register_board_info(db1200_spi_devs,
ARRAY_SIZE(db1200_i2c_devs)); ARRAY_SIZE(db1200_spi_devs));
/* SWITCHES: S6.8 I2C/SPI selector (OFF=I2C ON=SPI) /* SWITCHES: S6.8 I2C/SPI selector (OFF=I2C ON=SPI)
* S6.7 AC97/I2S selector (OFF=AC97 ON=I2S) * S6.7 AC97/I2S selector (OFF=AC97 ON=I2S)

View File

@@ -588,7 +588,7 @@ int __init db1550_dev_setup(void)
i2c_register_board_info(0, db1550_i2c_devs, i2c_register_board_info(0, db1550_i2c_devs,
ARRAY_SIZE(db1550_i2c_devs)); ARRAY_SIZE(db1550_i2c_devs));
spi_register_board_info(db1550_spi_devs, spi_register_board_info(db1550_spi_devs,
ARRAY_SIZE(db1550_i2c_devs)); ARRAY_SIZE(db1550_spi_devs));
c = clk_get(NULL, "psc0_intclk"); c = clk_get(NULL, "psc0_intclk");
if (!IS_ERR(c)) { if (!IS_ERR(c)) {

View File

@@ -5,7 +5,7 @@
#include <linux/io.h> #include <linux/io.h>
#include <linux/memblock.h> #include <linux/memblock.h>
#define dmi_early_remap(x, l) ioremap_cache(x, l) #define dmi_early_remap(x, l) ioremap(x, l)
#define dmi_early_unmap(x, l) iounmap(x) #define dmi_early_unmap(x, l) iounmap(x)
#define dmi_remap(x, l) ioremap_cache(x, l) #define dmi_remap(x, l) ioremap_cache(x, l)
#define dmi_unmap(x) iounmap(x) #define dmi_unmap(x) iounmap(x)

View File

@@ -117,8 +117,7 @@ struct irq_source_routing_table {
u64 pci_io_start_addr; u64 pci_io_start_addr;
u64 pci_io_end_addr; u64 pci_io_end_addr;
u64 pci_config_addr; u64 pci_config_addr;
u16 dma_mask_bits; u32 dma_mask_bits;
u16 dma_noncoherent;
} __packed; } __packed;
struct interface_info { struct interface_info {

View File

@@ -322,11 +322,11 @@ static void __init bootmem_init(void)
panic("Incorrect memory mapping !!!"); panic("Incorrect memory mapping !!!");
if (max_pfn > PFN_DOWN(HIGHMEM_START)) { if (max_pfn > PFN_DOWN(HIGHMEM_START)) {
max_low_pfn = PFN_DOWN(HIGHMEM_START);
#ifdef CONFIG_HIGHMEM #ifdef CONFIG_HIGHMEM
highstart_pfn = PFN_DOWN(HIGHMEM_START); highstart_pfn = max_low_pfn;
highend_pfn = max_pfn; highend_pfn = max_pfn;
#else #else
max_low_pfn = PFN_DOWN(HIGHMEM_START);
max_pfn = max_low_pfn; max_pfn = max_low_pfn;
#endif #endif
} }

View File

@@ -13,8 +13,6 @@
* Copyright (C) 2009 Lemote Inc. * Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com * Author: Wu Zhangjin, wuzhangjin@gmail.com
*/ */
#include <linux/dma-map-ops.h>
#include <linux/export.h> #include <linux/export.h>
#include <linux/pci_ids.h> #include <linux/pci_ids.h>
#include <asm/bootinfo.h> #include <asm/bootinfo.h>
@@ -133,14 +131,8 @@ void __init prom_init_env(void)
loongson_sysconf.pci_io_base = eirq_source->pci_io_start_addr; loongson_sysconf.pci_io_base = eirq_source->pci_io_start_addr;
loongson_sysconf.dma_mask_bits = eirq_source->dma_mask_bits; loongson_sysconf.dma_mask_bits = eirq_source->dma_mask_bits;
if (loongson_sysconf.dma_mask_bits < 32 || if (loongson_sysconf.dma_mask_bits < 32 ||
loongson_sysconf.dma_mask_bits > 64) { loongson_sysconf.dma_mask_bits > 64)
loongson_sysconf.dma_mask_bits = 32; loongson_sysconf.dma_mask_bits = 32;
dma_default_coherent = true;
} else {
dma_default_coherent = !eirq_source->dma_noncoherent;
}
pr_info("Firmware: Coherent DMA: %s\n", dma_default_coherent ? "on" : "off");
loongson_sysconf.restart_addr = boot_p->reset_system.ResetWarm; loongson_sysconf.restart_addr = boot_p->reset_system.ResetWarm;
loongson_sysconf.poweroff_addr = boot_p->reset_system.Shutdown; loongson_sysconf.poweroff_addr = boot_p->reset_system.Shutdown;

View File

@@ -38,14 +38,6 @@ extern char start_virt_trampolines[];
extern char end_virt_trampolines[]; extern char end_virt_trampolines[];
#endif #endif
static inline int in_kernel_text(unsigned long addr)
{
if (addr >= (unsigned long)_stext && addr < (unsigned long)__init_end)
return 1;
return 0;
}
static inline unsigned long kernel_toc_addr(void) static inline unsigned long kernel_toc_addr(void)
{ {
/* Defined by the linker, see vmlinux.lds.S */ /* Defined by the linker, see vmlinux.lds.S */

View File

@@ -58,10 +58,10 @@ _GLOBAL(ppc_save_regs)
lbz r0,PACAIRQSOFTMASK(r13) lbz r0,PACAIRQSOFTMASK(r13)
PPC_STL r0,SOFTE-STACK_FRAME_OVERHEAD(r3) PPC_STL r0,SOFTE-STACK_FRAME_OVERHEAD(r3)
#endif #endif
/* go up one stack frame for SP */ /* store current SP */
PPC_LL r4,0(r1) PPC_STL r1,1*SZL(r3)
PPC_STL r4,1*SZL(r3)
/* get caller's LR */ /* get caller's LR */
PPC_LL r4,0(r1)
PPC_LL r0,LRSAVE(r4) PPC_LL r0,LRSAVE(r4)
PPC_STL r0,_LINK-STACK_FRAME_OVERHEAD(r3) PPC_STL r0,_LINK-STACK_FRAME_OVERHEAD(r3)
mflr r0 mflr r0

View File

@@ -38,7 +38,7 @@ obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
# so it is only needed for modules, and only for older linkers which # so it is only needed for modules, and only for older linkers which
# do not support --save-restore-funcs # do not support --save-restore-funcs
ifeq ($(call ld-ifversion, -lt, 225000000, y),y) ifeq ($(call ld-ifversion, -lt, 225000000, y),y)
extra-$(CONFIG_PPC64) += crtsavres.o always-$(CONFIG_PPC64) += crtsavres.o
endif endif
obj-$(CONFIG_PPC_BOOK3S_64) += copyuser_power7.o copypage_power7.o \ obj-$(CONFIG_PPC_BOOK3S_64) += copyuser_power7.o copypage_power7.o \

View File

@@ -292,6 +292,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
attr_group->attrs = attrs; attr_group->attrs = attrs;
do { do {
ev_val_str = kasprintf(GFP_KERNEL, "event=0x%x", pmu->events[i].value); ev_val_str = kasprintf(GFP_KERNEL, "event=0x%x", pmu->events[i].value);
if (!ev_val_str)
continue;
dev_str = device_str_attr_create(pmu->events[i].name, ev_val_str); dev_str = device_str_attr_create(pmu->events[i].name, ev_val_str);
if (!dev_str) if (!dev_str)
continue; continue;
@@ -299,6 +301,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
attrs[j++] = dev_str; attrs[j++] = dev_str;
if (pmu->events[i].scale) { if (pmu->events[i].scale) {
ev_scale_str = kasprintf(GFP_KERNEL, "%s.scale", pmu->events[i].name); ev_scale_str = kasprintf(GFP_KERNEL, "%s.scale", pmu->events[i].name);
if (!ev_scale_str)
continue;
dev_str = device_str_attr_create(ev_scale_str, pmu->events[i].scale); dev_str = device_str_attr_create(ev_scale_str, pmu->events[i].scale);
if (!dev_str) if (!dev_str)
continue; continue;
@@ -308,6 +312,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
if (pmu->events[i].unit) { if (pmu->events[i].unit) {
ev_unit_str = kasprintf(GFP_KERNEL, "%s.unit", pmu->events[i].name); ev_unit_str = kasprintf(GFP_KERNEL, "%s.unit", pmu->events[i].name);
if (!ev_unit_str)
continue;
dev_str = device_str_attr_create(ev_unit_str, pmu->events[i].unit); dev_str = device_str_attr_create(ev_unit_str, pmu->events[i].unit);
if (!dev_str) if (!dev_str)
continue; continue;

View File

@@ -177,6 +177,7 @@ config ISS4xx
config CURRITUCK config CURRITUCK
bool "IBM Currituck (476fpe) Support" bool "IBM Currituck (476fpe) Support"
depends on PPC_47x depends on PPC_47x
select I2C
select SWIOTLB select SWIOTLB
select 476FPE select 476FPE
select FORCE_PCI select FORCE_PCI

View File

@@ -278,6 +278,8 @@ int __init opal_event_init(void)
else else
name = kasprintf(GFP_KERNEL, "opal"); name = kasprintf(GFP_KERNEL, "opal");
if (!name)
continue;
/* Install interrupt handler */ /* Install interrupt handler */
rc = request_irq(r->start, opal_interrupt, r->flags & IRQD_TRIGGER_MASK, rc = request_irq(r->start, opal_interrupt, r->flags & IRQD_TRIGGER_MASK,
name, NULL); name, NULL);

View File

@@ -196,6 +196,12 @@ void __init opal_powercap_init(void)
j = 0; j = 0;
pcaps[i].pg.name = kasprintf(GFP_KERNEL, "%pOFn", node); pcaps[i].pg.name = kasprintf(GFP_KERNEL, "%pOFn", node);
if (!pcaps[i].pg.name) {
kfree(pcaps[i].pattrs);
kfree(pcaps[i].pg.attrs);
goto out_pcaps_pattrs;
}
if (has_min) { if (has_min) {
powercap_add_attr(min, "powercap-min", powercap_add_attr(min, "powercap-min",
&pcaps[i].pattrs[j]); &pcaps[i].pattrs[j]);

View File

@@ -165,6 +165,11 @@ static int scom_debug_init_one(struct dentry *root, struct device_node *dn,
ent->chip = chip; ent->chip = chip;
snprintf(ent->name, 16, "%08x", chip); snprintf(ent->name, 16, "%08x", chip);
ent->path.data = (void *)kasprintf(GFP_KERNEL, "%pOF", dn); ent->path.data = (void *)kasprintf(GFP_KERNEL, "%pOF", dn);
if (!ent->path.data) {
kfree(ent);
return -ENOMEM;
}
ent->path.size = strlen((char *)ent->path.data); ent->path.size = strlen((char *)ent->path.data);
dir = debugfs_create_dir(ent->name, root); dir = debugfs_create_dir(ent->name, root);

View File

@@ -481,7 +481,7 @@ static int dlpar_memory_remove_by_index(u32 drc_index)
int lmb_found; int lmb_found;
int rc; int rc;
pr_info("Attempting to hot-remove LMB, drc index %x\n", drc_index); pr_debug("Attempting to hot-remove LMB, drc index %x\n", drc_index);
lmb_found = 0; lmb_found = 0;
for_each_drmem_lmb(lmb) { for_each_drmem_lmb(lmb) {
@@ -495,14 +495,15 @@ static int dlpar_memory_remove_by_index(u32 drc_index)
} }
} }
if (!lmb_found) if (!lmb_found) {
pr_debug("Failed to look up LMB for drc index %x\n", drc_index);
rc = -EINVAL; rc = -EINVAL;
} else if (rc) {
if (rc) pr_debug("Failed to hot-remove memory at %llx\n",
pr_info("Failed to hot-remove memory at %llx\n", lmb->base_addr);
lmb->base_addr); } else {
else pr_debug("Memory at %llx was hot-removed\n", lmb->base_addr);
pr_info("Memory at %llx was hot-removed\n", lmb->base_addr); }
return rc; return rc;
} }
@@ -719,8 +720,8 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add)
if (!drmem_lmb_reserved(lmb)) if (!drmem_lmb_reserved(lmb))
continue; continue;
pr_info("Memory at %llx (drc index %x) was hot-added\n", pr_debug("Memory at %llx (drc index %x) was hot-added\n",
lmb->base_addr, lmb->drc_index); lmb->base_addr, lmb->drc_index);
drmem_remove_lmb_reservation(lmb); drmem_remove_lmb_reservation(lmb);
} }
rc = 0; rc = 0;

View File

@@ -76,7 +76,7 @@ static inline int test_fp_ctl(u32 fpc)
#define KERNEL_VXR_HIGH (KERNEL_VXR_V16V23|KERNEL_VXR_V24V31) #define KERNEL_VXR_HIGH (KERNEL_VXR_V16V23|KERNEL_VXR_V24V31)
#define KERNEL_VXR (KERNEL_VXR_LOW|KERNEL_VXR_HIGH) #define KERNEL_VXR (KERNEL_VXR_LOW|KERNEL_VXR_HIGH)
#define KERNEL_FPR (KERNEL_FPC|KERNEL_VXR_V0V7) #define KERNEL_FPR (KERNEL_FPC|KERNEL_VXR_LOW)
struct kernel_fpu; struct kernel_fpu;

View File

@@ -11,6 +11,8 @@
/* I/O size constraints */ /* I/O size constraints */
#define ZPCI_MAX_READ_SIZE 8 #define ZPCI_MAX_READ_SIZE 8
#define ZPCI_MAX_WRITE_SIZE 128 #define ZPCI_MAX_WRITE_SIZE 128
#define ZPCI_BOUNDARY_SIZE (1 << 12)
#define ZPCI_BOUNDARY_MASK (ZPCI_BOUNDARY_SIZE - 1)
/* I/O Map */ /* I/O Map */
#define ZPCI_IOMAP_SHIFT 48 #define ZPCI_IOMAP_SHIFT 48
@@ -125,16 +127,18 @@ out:
int zpci_write_block(volatile void __iomem *dst, const void *src, int zpci_write_block(volatile void __iomem *dst, const void *src,
unsigned long len); unsigned long len);
static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max) static inline int zpci_get_max_io_size(u64 src, u64 dst, int len, int max)
{ {
int count = len > max ? max : len, size = 1; int offset = dst & ZPCI_BOUNDARY_MASK;
int size;
while (!(src & 0x1) && !(dst & 0x1) && ((size << 1) <= count)) { size = min3(len, ZPCI_BOUNDARY_SIZE - offset, max);
dst = dst >> 1; if (IS_ALIGNED(src, 8) && IS_ALIGNED(dst, 8) && IS_ALIGNED(size, 8))
src = src >> 1; return size;
size = size << 1;
} if (size >= 8)
return size; return 8;
return rounddown_pow_of_two(size);
} }
static inline int zpci_memcpy_fromio(void *dst, static inline int zpci_memcpy_fromio(void *dst,
@@ -144,9 +148,9 @@ static inline int zpci_memcpy_fromio(void *dst,
int size, rc = 0; int size, rc = 0;
while (n > 0) { while (n > 0) {
size = zpci_get_max_write_size((u64 __force) src, size = zpci_get_max_io_size((u64 __force) src,
(u64) dst, n, (u64) dst, n,
ZPCI_MAX_READ_SIZE); ZPCI_MAX_READ_SIZE);
rc = zpci_read_single(dst, src, size); rc = zpci_read_single(dst, src, size);
if (rc) if (rc)
break; break;
@@ -166,9 +170,9 @@ static inline int zpci_memcpy_toio(volatile void __iomem *dst,
return -EINVAL; return -EINVAL;
while (n > 0) { while (n > 0) {
size = zpci_get_max_write_size((u64 __force) dst, size = zpci_get_max_io_size((u64 __force) dst,
(u64) src, n, (u64) src, n,
ZPCI_MAX_WRITE_SIZE); ZPCI_MAX_WRITE_SIZE);
if (size > 8) /* main path */ if (size > 8) /* main path */
rc = zpci_write_block(dst, src, size); rc = zpci_write_block(dst, src, size);
else else

View File

@@ -100,9 +100,9 @@ static inline int __memcpy_toio_inuser(void __iomem *dst,
old_fs = enable_sacf_uaccess(); old_fs = enable_sacf_uaccess();
while (n > 0) { while (n > 0) {
size = zpci_get_max_write_size((u64 __force) dst, size = zpci_get_max_io_size((u64 __force) dst,
(u64 __force) src, n, (u64 __force) src, n,
ZPCI_MAX_WRITE_SIZE); ZPCI_MAX_WRITE_SIZE);
if (size > 8) /* main path */ if (size > 8) /* main path */
rc = __pcistb_mio_inuser(dst, src, size, &status); rc = __pcistb_mio_inuser(dst, src, size, &status);
else else
@@ -252,9 +252,9 @@ static inline int __memcpy_fromio_inuser(void __user *dst,
old_fs = enable_sacf_uaccess(); old_fs = enable_sacf_uaccess();
while (n > 0) { while (n > 0) {
size = zpci_get_max_write_size((u64 __force) src, size = zpci_get_max_io_size((u64 __force) src,
(u64 __force) dst, n, (u64 __force) dst, n,
ZPCI_MAX_READ_SIZE); ZPCI_MAX_READ_SIZE);
rc = __pcilg_mio_inuser(dst, src, size, &status); rc = __pcilg_mio_inuser(dst, src, size, &status);
if (rc) if (rc)
break; break;

View File

@@ -1080,8 +1080,8 @@ void __init_or_module text_poke_early(void *addr, const void *opcode,
} else { } else {
local_irq_save(flags); local_irq_save(flags);
memcpy(addr, opcode, len); memcpy(addr, opcode, len);
local_irq_restore(flags);
sync_core(); sync_core();
local_irq_restore(flags);
/* /*
* Could also do a CLFLUSH here to speed up CPU recovery; but * Could also do a CLFLUSH here to speed up CPU recovery; but

View File

@@ -569,7 +569,8 @@ static void kprobe_emulate_call_indirect(struct kprobe *p, struct pt_regs *regs)
{ {
unsigned long offs = addrmode_regoffs[p->ainsn.indirect.reg]; unsigned long offs = addrmode_regoffs[p->ainsn.indirect.reg];
int3_emulate_call(regs, regs_get_register(regs, offs)); int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + p->ainsn.size);
int3_emulate_jmp(regs, regs_get_register(regs, offs));
} }
NOKPROBE_SYMBOL(kprobe_emulate_call_indirect); NOKPROBE_SYMBOL(kprobe_emulate_call_indirect);

View File

@@ -24,8 +24,8 @@
static int kvmclock __initdata = 1; static int kvmclock __initdata = 1;
static int kvmclock_vsyscall __initdata = 1; static int kvmclock_vsyscall __initdata = 1;
static int msr_kvm_system_time __ro_after_init = MSR_KVM_SYSTEM_TIME; static int msr_kvm_system_time __ro_after_init;
static int msr_kvm_wall_clock __ro_after_init = MSR_KVM_WALL_CLOCK; static int msr_kvm_wall_clock __ro_after_init;
static u64 kvm_sched_clock_offset __ro_after_init; static u64 kvm_sched_clock_offset __ro_after_init;
static int __init parse_no_kvmclock(char *arg) static int __init parse_no_kvmclock(char *arg)
@@ -196,7 +196,8 @@ static void kvm_setup_secondary_clock(void)
void kvmclock_disable(void) void kvmclock_disable(void)
{ {
native_write_msr(msr_kvm_system_time, 0, 0); if (msr_kvm_system_time)
native_write_msr(msr_kvm_system_time, 0, 0);
} }
static void __init kvmclock_init_mem(void) static void __init kvmclock_init_mem(void)
@@ -292,7 +293,10 @@ void __init kvmclock_init(void)
if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE2)) { if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE2)) {
msr_kvm_system_time = MSR_KVM_SYSTEM_TIME_NEW; msr_kvm_system_time = MSR_KVM_SYSTEM_TIME_NEW;
msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK_NEW; msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK_NEW;
} else if (!kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) { } else if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
msr_kvm_system_time = MSR_KVM_SYSTEM_TIME;
msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK;
} else {
return; return;
} }

View File

@@ -6,7 +6,7 @@
*/ */
int num_digits(int val) int num_digits(int val)
{ {
int m = 10; long long m = 10;
int d = 1; int d = 1;
if (val < 0) { if (val < 0) {

View File

@@ -7,6 +7,8 @@ FRAGMENT_CONFIG=${KERNEL_DIR}/arch/arm64/configs/db845c_gki.fragment
PRE_DEFCONFIG_CMDS="KCONFIG_CONFIG=${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/${DEFCONFIG} ${ROOT_DIR}/${KERNEL_DIR}/scripts/kconfig/merge_config.sh -m -r ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/gki_defconfig ${ROOT_DIR}/${FRAGMENT_CONFIG}" PRE_DEFCONFIG_CMDS="KCONFIG_CONFIG=${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/${DEFCONFIG} ${ROOT_DIR}/${KERNEL_DIR}/scripts/kconfig/merge_config.sh -m -r ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/gki_defconfig ${ROOT_DIR}/${FRAGMENT_CONFIG}"
POST_DEFCONFIG_CMDS="rm ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/${DEFCONFIG}" POST_DEFCONFIG_CMDS="rm ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/${DEFCONFIG}"
DTC_FLAGS="${DTC_FLAGS} -@"
MAKE_GOALS="${MAKE_GOALS} MAKE_GOALS="${MAKE_GOALS}
qcom/sdm845-db845c.dtb qcom/sdm845-db845c.dtb
Image.gz Image.gz

View File

@@ -1039,9 +1039,13 @@ EXPORT_SYMBOL_GPL(af_alg_sendpage);
void af_alg_free_resources(struct af_alg_async_req *areq) void af_alg_free_resources(struct af_alg_async_req *areq)
{ {
struct sock *sk = areq->sk; struct sock *sk = areq->sk;
struct af_alg_ctx *ctx;
af_alg_free_areq_sgls(areq); af_alg_free_areq_sgls(areq);
sock_kfree_s(sk, areq, areq->areqlen); sock_kfree_s(sk, areq, areq->areqlen);
ctx = alg_sk(sk)->private;
ctx->inflight = false;
} }
EXPORT_SYMBOL_GPL(af_alg_free_resources); EXPORT_SYMBOL_GPL(af_alg_free_resources);
@@ -1105,11 +1109,19 @@ EXPORT_SYMBOL_GPL(af_alg_poll);
struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk, struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
unsigned int areqlen) unsigned int areqlen)
{ {
struct af_alg_async_req *areq = sock_kmalloc(sk, areqlen, GFP_KERNEL); struct af_alg_ctx *ctx = alg_sk(sk)->private;
struct af_alg_async_req *areq;
/* Only one AIO request can be in flight. */
if (ctx->inflight)
return ERR_PTR(-EBUSY);
areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
if (unlikely(!areq)) if (unlikely(!areq))
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
ctx->inflight = true;
areq->areqlen = areqlen; areq->areqlen = areqlen;
areq->sk = sk; areq->sk = sk;
areq->last_rsgl = NULL; areq->last_rsgl = NULL;

View File

@@ -124,6 +124,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
struct crypto_scomp *scomp = *tfm_ctx; struct crypto_scomp *scomp = *tfm_ctx;
void **ctx = acomp_request_ctx(req); void **ctx = acomp_request_ctx(req);
struct scomp_scratch *scratch; struct scomp_scratch *scratch;
unsigned int dlen;
int ret; int ret;
if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
@@ -135,6 +136,8 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE) if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
req->dlen = SCOMP_SCRATCH_SIZE; req->dlen = SCOMP_SCRATCH_SIZE;
dlen = req->dlen;
scratch = raw_cpu_ptr(&scomp_scratch); scratch = raw_cpu_ptr(&scomp_scratch);
spin_lock(&scratch->lock); spin_lock(&scratch->lock);
@@ -152,6 +155,9 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
ret = -ENOMEM; ret = -ENOMEM;
goto out; goto out;
} }
} else if (req->dlen > dlen) {
ret = -ENOSPC;
goto out;
} }
scatterwalk_map_and_copy(scratch->dst, req->dst, 0, req->dlen, scatterwalk_map_and_copy(scratch->dst, req->dst, 0, req->dlen,
1); 1);

View File

@@ -145,9 +145,14 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
static u32 err_seq; static u32 err_seq;
estatus = extlog_elog_entry_check(cpu, bank); estatus = extlog_elog_entry_check(cpu, bank);
if (estatus == NULL || (mce->kflags & MCE_HANDLED_CEC)) if (!estatus)
return NOTIFY_DONE; return NOTIFY_DONE;
if (mce->kflags & MCE_HANDLED_CEC) {
estatus->block_status = 0;
return NOTIFY_DONE;
}
memcpy(elog_buf, (void *)estatus, ELOG_ENTRY_LEN); memcpy(elog_buf, (void *)estatus, ELOG_ENTRY_LEN);
/* clear record status to enable BIOS to update it again */ /* clear record status to enable BIOS to update it again */
estatus->block_status = 0; estatus->block_status = 0;

View File

@@ -98,7 +98,7 @@ static void lpit_update_residency(struct lpit_residency_info *info,
struct acpi_lpit_native *lpit_native) struct acpi_lpit_native *lpit_native)
{ {
info->frequency = lpit_native->counter_frequency ? info->frequency = lpit_native->counter_frequency ?
lpit_native->counter_frequency : tsc_khz * 1000; lpit_native->counter_frequency : mul_u32_u32(tsc_khz, 1000U);
if (!info->frequency) if (!info->frequency)
info->frequency = 1; info->frequency = 1;

View File

@@ -1788,12 +1788,12 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device)
return; return;
count++; count++;
acpi_get_parent(device->dev->handle, &acpi_parent); if (ACPI_SUCCESS(acpi_get_parent(device->dev->handle, &acpi_parent))) {
pdev = acpi_get_pci_dev(acpi_parent);
pdev = acpi_get_pci_dev(acpi_parent); if (pdev) {
if (pdev) { parent = &pdev->dev;
parent = &pdev->dev; pci_dev_put(pdev);
pci_dev_put(pdev); }
} }
memset(&props, 0, sizeof(struct backlight_properties)); memset(&props, 0, sizeof(struct backlight_properties));

View File

@@ -639,6 +639,7 @@ acpi_fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
* @index: Index of the reference to return * @index: Index of the reference to return
* @num_args: Maximum number of arguments after each reference * @num_args: Maximum number of arguments after each reference
* @args: Location to store the returned reference with optional arguments * @args: Location to store the returned reference with optional arguments
* (may be NULL)
* *
* Find property with @name, verifify that it is a package containing at least * Find property with @name, verifify that it is a package containing at least
* one object reference and if so, store the ACPI device object pointer to the * one object reference and if so, store the ACPI device object pointer to the
@@ -697,6 +698,9 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
if (ret) if (ret)
return ret == -ENODEV ? -EINVAL : ret; return ret == -ENODEV ? -EINVAL : ret;
if (!args)
return 0;
args->fwnode = acpi_fwnode_handle(device); args->fwnode = acpi_fwnode_handle(device);
args->nargs = 0; args->nargs = 0;
return 0; return 0;

View File

@@ -455,6 +455,13 @@ static const struct dmi_system_id asus_laptop[] = {
DMI_MATCH(DMI_BOARD_NAME, "B1402CVA"), DMI_MATCH(DMI_BOARD_NAME, "B1402CVA"),
}, },
}, },
{
/* TongFang GMxXGxx sold as Eluktronics Inc. RP-15 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Eluktronics Inc."),
DMI_MATCH(DMI_BOARD_NAME, "RP-15"),
},
},
{ {
/* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */ /* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */
.matches = { .matches = {

View File

@@ -5111,7 +5111,7 @@ static __poll_t binder_poll(struct file *filp,
thread = binder_get_thread(proc); thread = binder_get_thread(proc);
if (!thread) if (!thread)
return POLLERR; return EPOLLERR;
binder_inner_proc_lock(thread->proc); binder_inner_proc_lock(thread->proc);
thread->looper |= BINDER_LOOPER_STATE_POLL; thread->looper |= BINDER_LOOPER_STATE_POLL;

View File

@@ -272,7 +272,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
} }
if (mm) { if (mm) {
mmap_write_unlock(mm); mmap_write_unlock(mm);
mmput(mm); mmput_async(mm);
} }
return 0; return 0;
@@ -305,7 +305,7 @@ err_page_ptr_cleared:
err_no_vma: err_no_vma:
if (mm) { if (mm) {
mmap_write_unlock(mm); mmap_write_unlock(mm);
mmput(mm); mmput_async(mm);
} }
return vma ? -ENOMEM : -ESRCH; return vma ? -ENOMEM : -ESRCH;
} }
@@ -360,8 +360,7 @@ static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
continue; continue;
if (!buffer->async_transaction) if (!buffer->async_transaction)
continue; continue;
total_alloc_size += binder_alloc_buffer_size(alloc, buffer) total_alloc_size += binder_alloc_buffer_size(alloc, buffer);
+ sizeof(struct binder_buffer);
num_buffers++; num_buffers++;
} }
@@ -422,6 +421,9 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
alloc->pid, extra_buffers_size); alloc->pid, extra_buffers_size);
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
/* Pad 0-size buffers so they get assigned unique addresses */
size = max(size, sizeof(void *));
trace_android_vh_binder_alloc_new_buf_locked(size, alloc, is_async); trace_android_vh_binder_alloc_new_buf_locked(size, alloc, is_async);
if (is_async && if (is_async &&
alloc->free_async_space < size + sizeof(struct binder_buffer)) { alloc->free_async_space < size + sizeof(struct binder_buffer)) {
@@ -431,9 +433,6 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
return ERR_PTR(-ENOSPC); return ERR_PTR(-ENOSPC);
} }
/* Pad 0-size buffers so they get assigned unique addresses */
size = max(size, sizeof(void *));
while (n) { while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node); buffer = rb_entry(n, struct binder_buffer, rb_node);
BUG_ON(!buffer->free); BUG_ON(!buffer->free);
@@ -535,7 +534,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
buffer->pid = pid; buffer->pid = pid;
buffer->oneway_spam_suspect = false; buffer->oneway_spam_suspect = false;
if (is_async) { if (is_async) {
alloc->free_async_space -= size + sizeof(struct binder_buffer); alloc->free_async_space -= size;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
"%d: binder_alloc_buf size %zd async free %zd\n", "%d: binder_alloc_buf size %zd async free %zd\n",
alloc->pid, size, alloc->free_async_space); alloc->pid, size, alloc->free_async_space);
@@ -573,7 +572,7 @@ err_alloc_buf_struct_failed:
* is the sum of the three given sizes (each rounded up to * is the sum of the three given sizes (each rounded up to
* pointer-sized boundary) * pointer-sized boundary)
* *
* Return: The allocated buffer or %NULL if error * Return: The allocated buffer or %ERR_PTR(-errno) if error
*/ */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t data_size, size_t data_size,
@@ -673,8 +672,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size); BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
if (buffer->async_transaction) { if (buffer->async_transaction) {
alloc->free_async_space += buffer_size + sizeof(struct binder_buffer); alloc->free_async_space += buffer_size;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
"%d: binder_free_buf size %zd async free %zd\n", "%d: binder_free_buf size %zd async free %zd\n",
alloc->pid, size, alloc->free_async_space); alloc->pid, size, alloc->free_async_space);
@@ -722,7 +720,7 @@ void binder_alloc_free_buf(struct binder_alloc *alloc,
/* /*
* We could eliminate the call to binder_alloc_clear_buf() * We could eliminate the call to binder_alloc_clear_buf()
* from binder_alloc_deferred_release() by moving this to * from binder_alloc_deferred_release() by moving this to
* binder_alloc_free_buf_locked(). However, that could * binder_free_buf_locked(). However, that could
* increase contention for the alloc mutex if clear_on_free * increase contention for the alloc mutex if clear_on_free
* is used frequently for large buffers. The mutex is not * is used frequently for large buffers. The mutex is not
* needed for correctness here. * needed for correctness here.
@@ -1013,7 +1011,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
goto err_mmget; goto err_mmget;
if (!mmap_read_trylock(mm)) if (!mmap_read_trylock(mm))
goto err_mmap_read_lock_failed; goto err_mmap_read_lock_failed;
vma = binder_alloc_get_vma(alloc); vma = find_vma(mm, page_addr);
if (vma && vma != binder_alloc_get_vma(alloc))
goto err_invalid_vma;
list_lru_isolate(lru, item); list_lru_isolate(lru, item);
spin_unlock(lock); spin_unlock(lock);
@@ -1039,6 +1039,8 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
mutex_unlock(&alloc->mutex); mutex_unlock(&alloc->mutex);
return LRU_REMOVED_RETRY; return LRU_REMOVED_RETRY;
err_invalid_vma:
mmap_read_unlock(mm);
err_mmap_read_lock_failed: err_mmap_read_lock_failed:
mmput_async(mm); mmput_async(mm);
err_mmget: err_mmget:

View File

@@ -1479,6 +1479,28 @@ void pm_runtime_enable(struct device *dev)
} }
EXPORT_SYMBOL_GPL(pm_runtime_enable); EXPORT_SYMBOL_GPL(pm_runtime_enable);
static void pm_runtime_disable_action(void *data)
{
pm_runtime_dont_use_autosuspend(data);
pm_runtime_disable(data);
}
/**
* devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable.
*
* NOTE: this will also handle calling pm_runtime_dont_use_autosuspend() for
* you at driver exit time if needed.
*
* @dev: Device to handle.
*/
int devm_pm_runtime_enable(struct device *dev)
{
pm_runtime_enable(dev);
return devm_add_action_or_reset(dev, pm_runtime_disable_action, dev);
}
EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
/** /**
* pm_runtime_forbid - Block runtime PM of a device. * pm_runtime_forbid - Block runtime PM of a device.
* @dev: Device to handle. * @dev: Device to handle.

View File

@@ -544,6 +544,9 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
if (nargs > NR_FWNODE_REFERENCE_ARGS) if (nargs > NR_FWNODE_REFERENCE_ARGS)
return -EINVAL; return -EINVAL;
if (!args)
return 0;
args->fwnode = software_node_get(refnode); args->fwnode = software_node_get(refnode);
args->nargs = nargs; args->nargs = nargs;

View File

@@ -471,7 +471,7 @@ mtk_stp_split(struct btmtkuart_dev *bdev, const unsigned char *data, int count,
return data; return data;
} }
static int btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count) static void btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
{ {
struct btmtkuart_dev *bdev = hci_get_drvdata(hdev); struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
const unsigned char *p_left = data, *p_h4; const unsigned char *p_left = data, *p_h4;
@@ -510,25 +510,20 @@ static int btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
bt_dev_err(bdev->hdev, bt_dev_err(bdev->hdev,
"Frame reassembly failed (%d)", err); "Frame reassembly failed (%d)", err);
bdev->rx_skb = NULL; bdev->rx_skb = NULL;
return err; return;
} }
sz_left -= sz_h4; sz_left -= sz_h4;
p_left += sz_h4; p_left += sz_h4;
} }
return 0;
} }
static int btmtkuart_receive_buf(struct serdev_device *serdev, const u8 *data, static int btmtkuart_receive_buf(struct serdev_device *serdev, const u8 *data,
size_t count) size_t count)
{ {
struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev); struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
int err;
err = btmtkuart_recv(bdev->hdev, data, count); btmtkuart_recv(bdev->hdev, data, count);
if (err < 0)
return err;
bdev->hdev->stat.byte_rx += count; bdev->hdev->stat.byte_rx += count;

View File

@@ -2093,13 +2093,23 @@ static int sysc_reset(struct sysc *ddata)
sysc_val = sysc_read_sysconfig(ddata); sysc_val = sysc_read_sysconfig(ddata);
sysc_val |= sysc_mask; sysc_val |= sysc_mask;
sysc_write(ddata, sysc_offset, sysc_val); sysc_write(ddata, sysc_offset, sysc_val);
/* Flush posted write */
/*
* Some devices need a delay before reading registers
* after reset. Presumably a srst_udelay is not needed
* for devices that use a rstctrl register reset.
*/
if (ddata->cfg.srst_udelay)
fsleep(ddata->cfg.srst_udelay);
/*
* Flush posted write. For devices needing srst_udelay
* this should trigger an interconnect error if the
* srst_udelay value is needed but not configured.
*/
sysc_val = sysc_read_sysconfig(ddata); sysc_val = sysc_read_sysconfig(ddata);
} }
if (ddata->cfg.srst_udelay)
fsleep(ddata->cfg.srst_udelay);
if (ddata->post_reset_quirk) if (ddata->post_reset_quirk)
ddata->post_reset_quirk(ddata); ddata->post_reset_quirk(ddata);

View File

@@ -888,10 +888,8 @@ static int si5341_output_clk_set_rate(struct clk_hw *hw, unsigned long rate,
r[0] = r_div ? (r_div & 0xff) : 1; r[0] = r_div ? (r_div & 0xff) : 1;
r[1] = (r_div >> 8) & 0xff; r[1] = (r_div >> 8) & 0xff;
r[2] = (r_div >> 16) & 0xff; r[2] = (r_div >> 16) & 0xff;
err = regmap_bulk_write(output->data->regmap, return regmap_bulk_write(output->data->regmap,
SI5341_OUT_R_REG(output), r, 3); SI5341_OUT_R_REG(output), r, 3);
return 0;
} }
static int si5341_output_reparent(struct clk_si5341_output *output, u8 index) static int si5341_output_reparent(struct clk_si5341_output *output, u8 index)

View File

@@ -38,8 +38,8 @@ static struct alpha_pll_config gpu_cc_pll1_config = {
.config_ctl_hi_val = 0x00002267, .config_ctl_hi_val = 0x00002267,
.config_ctl_hi1_val = 0x00000024, .config_ctl_hi1_val = 0x00000024,
.test_ctl_val = 0x00000000, .test_ctl_val = 0x00000000,
.test_ctl_hi_val = 0x00000002, .test_ctl_hi_val = 0x00000000,
.test_ctl_hi1_val = 0x00000000, .test_ctl_hi1_val = 0x00000020,
.user_ctl_val = 0x00000000, .user_ctl_val = 0x00000000,
.user_ctl_hi_val = 0x00000805, .user_ctl_hi_val = 0x00000805,
.user_ctl_hi1_val = 0x000000d0, .user_ctl_hi1_val = 0x000000d0,

View File

@@ -37,6 +37,7 @@ static struct alpha_pll_config video_pll0_config = {
.config_ctl_val = 0x20485699, .config_ctl_val = 0x20485699,
.config_ctl_hi_val = 0x00002267, .config_ctl_hi_val = 0x00002267,
.config_ctl_hi1_val = 0x00000024, .config_ctl_hi1_val = 0x00000024,
.test_ctl_hi1_val = 0x00000020,
.user_ctl_val = 0x00000000, .user_ctl_val = 0x00000000,
.user_ctl_hi_val = 0x00000805, .user_ctl_hi_val = 0x00000805,
.user_ctl_hi1_val = 0x000000D0, .user_ctl_hi1_val = 0x000000D0,
@@ -218,6 +219,10 @@ static const struct regmap_config video_cc_sm8150_regmap_config = {
static const struct qcom_reset_map video_cc_sm8150_resets[] = { static const struct qcom_reset_map video_cc_sm8150_resets[] = {
[VIDEO_CC_MVSC_CORE_CLK_BCR] = { 0x850, 2 }, [VIDEO_CC_MVSC_CORE_CLK_BCR] = { 0x850, 2 },
[VIDEO_CC_INTERFACE_BCR] = { 0x8f0 },
[VIDEO_CC_MVS0_BCR] = { 0x870 },
[VIDEO_CC_MVS1_BCR] = { 0x8b0 },
[VIDEO_CC_MVSC_BCR] = { 0x810 },
}; };
static const struct qcom_cc_desc video_cc_sm8150_desc = { static const struct qcom_cc_desc video_cc_sm8150_desc = {

View File

@@ -489,7 +489,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
GATE(HCLK_I2S_2CH, "hclk_i2s_2ch", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 2, GFLAGS), GATE(HCLK_I2S_2CH, "hclk_i2s_2ch", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 2, GFLAGS),
GATE(0, "hclk_usb_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 13, GFLAGS), GATE(0, "hclk_usb_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 13, GFLAGS),
GATE(HCLK_HOST2, "hclk_host2", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS), GATE(HCLK_HOST2, "hclk_host2", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS),
GATE(HCLK_OTG, "hclk_otg", "hclk_peri", 0, RK2928_CLKGATE_CON(3), 13, GFLAGS), GATE(HCLK_OTG, "hclk_otg", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 13, GFLAGS),
GATE(0, "hclk_peri_ahb", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 14, GFLAGS), GATE(0, "hclk_peri_ahb", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 14, GFLAGS),
GATE(HCLK_SPDIF, "hclk_spdif", "hclk_peri", 0, RK2928_CLKGATE_CON(10), 9, GFLAGS), GATE(HCLK_SPDIF, "hclk_spdif", "hclk_peri", 0, RK2928_CLKGATE_CON(10), 9, GFLAGS),
GATE(HCLK_TSP, "hclk_tsp", "hclk_peri", 0, RK2928_CLKGATE_CON(10), 12, GFLAGS), GATE(HCLK_TSP, "hclk_tsp", "hclk_peri", 0, RK2928_CLKGATE_CON(10), 12, GFLAGS),

View File

@@ -83,7 +83,7 @@ static int zynqmp_clk_mux_set_parent(struct clk_hw *hw, u8 index)
static const struct clk_ops zynqmp_clk_mux_ops = { static const struct clk_ops zynqmp_clk_mux_ops = {
.get_parent = zynqmp_clk_mux_get_parent, .get_parent = zynqmp_clk_mux_get_parent,
.set_parent = zynqmp_clk_mux_set_parent, .set_parent = zynqmp_clk_mux_set_parent,
.determine_rate = __clk_mux_determine_rate, .determine_rate = __clk_mux_determine_rate_closest,
}; };
static const struct clk_ops zynqmp_clk_mux_ro_ops = { static const struct clk_ops zynqmp_clk_mux_ro_ops = {

View File

@@ -109,49 +109,6 @@ static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw,
return DIV_ROUND_UP_ULL(parent_rate, value); return DIV_ROUND_UP_ULL(parent_rate, value);
} }
static void zynqmp_get_divider2_val(struct clk_hw *hw,
unsigned long rate,
struct zynqmp_clk_divider *divider,
int *bestdiv)
{
int div1;
int div2;
long error = LONG_MAX;
unsigned long div1_prate;
struct clk_hw *div1_parent_hw;
struct clk_hw *div2_parent_hw = clk_hw_get_parent(hw);
struct zynqmp_clk_divider *pdivider =
to_zynqmp_clk_divider(div2_parent_hw);
if (!pdivider)
return;
div1_parent_hw = clk_hw_get_parent(div2_parent_hw);
if (!div1_parent_hw)
return;
div1_prate = clk_hw_get_rate(div1_parent_hw);
*bestdiv = 1;
for (div1 = 1; div1 <= pdivider->max_div;) {
for (div2 = 1; div2 <= divider->max_div;) {
long new_error = ((div1_prate / div1) / div2) - rate;
if (abs(new_error) < abs(error)) {
*bestdiv = div2;
error = new_error;
}
if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
div2 = div2 << 1;
else
div2++;
}
if (pdivider->flags & CLK_DIVIDER_POWER_OF_TWO)
div1 = div1 << 1;
else
div1++;
}
}
/** /**
* zynqmp_clk_divider_round_rate() - Round rate of divider clock * zynqmp_clk_divider_round_rate() - Round rate of divider clock
* @hw: handle between common and hardware-specific interfaces * @hw: handle between common and hardware-specific interfaces
@@ -170,6 +127,7 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
u32 div_type = divider->div_type; u32 div_type = divider->div_type;
u32 bestdiv; u32 bestdiv;
int ret; int ret;
u8 width;
/* if read only, just return current value */ /* if read only, just return current value */
if (divider->flags & CLK_DIVIDER_READ_ONLY) { if (divider->flags & CLK_DIVIDER_READ_ONLY) {
@@ -189,23 +147,12 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
return DIV_ROUND_UP_ULL((u64)*prate, bestdiv); return DIV_ROUND_UP_ULL((u64)*prate, bestdiv);
} }
bestdiv = zynqmp_divider_get_val(*prate, rate, divider->flags); width = fls(divider->max_div);
/* rate = divider_round_rate(hw, rate, prate, NULL, width, divider->flags);
* In case of two divisors, compute best divider values and return
* divider2 value based on compute value. div1 will be automatically
* set to optimum based on required total divider value.
*/
if (div_type == TYPE_DIV2 &&
(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
zynqmp_get_divider2_val(hw, rate, divider, &bestdiv);
}
if ((clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && divider->is_frac) if (divider->is_frac && (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && (rate % *prate))
bestdiv = rate % *prate ? 1 : bestdiv; *prate = rate;
bestdiv = min_t(u32, bestdiv, divider->max_div);
*prate = rate * bestdiv;
return rate; return rate;
} }

View File

@@ -163,7 +163,7 @@ static bool __init cpu0_node_has_opp_v2_prop(void)
struct device_node *np = of_cpu_device_node_get(0); struct device_node *np = of_cpu_device_node_get(0);
bool ret = false; bool ret = false;
if (of_get_property(np, "operating-points-v2", NULL)) if (of_property_present(np, "operating-points-v2"))
ret = true; ret = true;
of_node_put(np); of_node_put(np);

View File

@@ -89,7 +89,7 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
cpu_dev = get_cpu_device(0); cpu_dev = get_cpu_device(0);
if (!of_find_property(cpu_dev->of_node, "cpu-supply", NULL)) if (!of_property_present(cpu_dev->of_node, "cpu-supply"))
return -ENODEV; return -ENODEV;
if (of_machine_is_compatible("fsl,imx7ulp")) { if (of_machine_is_compatible("fsl,imx7ulp")) {

View File

@@ -230,7 +230,7 @@ static int imx6q_opp_check_speed_grading(struct device *dev)
u32 val; u32 val;
int ret; int ret;
if (of_find_property(dev->of_node, "nvmem-cells", NULL)) { if (of_property_present(dev->of_node, "nvmem-cells")) {
ret = nvmem_cell_read_u32(dev, "speed_grade", &val); ret = nvmem_cell_read_u32(dev, "speed_grade", &val);
if (ret) if (ret)
return ret; return ret;
@@ -285,7 +285,7 @@ static int imx6ul_opp_check_speed_grading(struct device *dev)
u32 val; u32 val;
int ret = 0; int ret = 0;
if (of_find_property(dev->of_node, "nvmem-cells", NULL)) { if (of_property_present(dev->of_node, "nvmem-cells")) {
ret = nvmem_cell_read_u32(dev, "speed_grade", &val); ret = nvmem_cell_read_u32(dev, "speed_grade", &val);
if (ret) if (ret)
return ret; return ret;

View File

@@ -244,8 +244,11 @@ static int scmi_cpufreq_probe(struct scmi_device *sdev)
#ifdef CONFIG_COMMON_CLK #ifdef CONFIG_COMMON_CLK
/* dummy clock provider as needed by OPP if clocks property is used */ /* dummy clock provider as needed by OPP if clocks property is used */
if (of_find_property(dev->of_node, "#clock-cells", NULL)) if (of_property_present(dev->of_node, "#clock-cells")) {
devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL); ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL);
if (ret)
return dev_err_probe(dev, ret, "%s: registering clock provider failed\n", __func__);
}
#endif #endif
ret = cpufreq_register_driver(&scmi_cpufreq_driver); ret = cpufreq_register_driver(&scmi_cpufreq_driver);

View File

@@ -25,7 +25,7 @@ static bool cpu0_node_has_opp_v2_prop(void)
struct device_node *np = of_cpu_device_node_get(0); struct device_node *np = of_cpu_device_node_get(0);
bool ret = false; bool ret = false;
if (of_get_property(np, "operating-points-v2", NULL)) if (of_property_present(np, "operating-points-v2"))
ret = true; ret = true;
of_node_put(np); of_node_put(np);

View File

@@ -179,8 +179,11 @@ static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
wa->dma.address = dma_map_single(wa->dev, wa->address, len, wa->dma.address = dma_map_single(wa->dev, wa->address, len,
dir); dir);
if (dma_mapping_error(wa->dev, wa->dma.address)) if (dma_mapping_error(wa->dev, wa->dma.address)) {
kfree(wa->address);
wa->address = NULL;
return -ENOMEM; return -ENOMEM;
}
wa->dma.length = len; wa->dma.length = len;
} }

View File

@@ -1848,9 +1848,8 @@ static int sa_aead_setkey(struct crypto_aead *authenc,
crypto_aead_set_flags(ctx->fallback.aead, crypto_aead_set_flags(ctx->fallback.aead,
crypto_aead_get_flags(authenc) & crypto_aead_get_flags(authenc) &
CRYPTO_TFM_REQ_MASK); CRYPTO_TFM_REQ_MASK);
crypto_aead_setkey(ctx->fallback.aead, key, keylen);
return 0; return crypto_aead_setkey(ctx->fallback.aead, key, keylen);
} }
static int sa_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize) static int sa_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)

View File

@@ -43,7 +43,6 @@
#define FLAGS_MODE_MASK 0x000f #define FLAGS_MODE_MASK 0x000f
#define FLAGS_ENCRYPT BIT(0) #define FLAGS_ENCRYPT BIT(0)
#define FLAGS_CBC BIT(1) #define FLAGS_CBC BIT(1)
#define FLAGS_NEW_KEY BIT(3)
#define SAHARA_HDR_BASE 0x00800000 #define SAHARA_HDR_BASE 0x00800000
#define SAHARA_HDR_SKHA_ALG_AES 0 #define SAHARA_HDR_SKHA_ALG_AES 0
@@ -141,8 +140,6 @@ struct sahara_hw_link {
}; };
struct sahara_ctx { struct sahara_ctx {
unsigned long flags;
/* AES-specific context */ /* AES-specific context */
int keylen; int keylen;
u8 key[AES_KEYSIZE_128]; u8 key[AES_KEYSIZE_128];
@@ -151,6 +148,7 @@ struct sahara_ctx {
struct sahara_aes_reqctx { struct sahara_aes_reqctx {
unsigned long mode; unsigned long mode;
u8 iv_out[AES_BLOCK_SIZE];
struct skcipher_request fallback_req; // keep at the end struct skcipher_request fallback_req; // keep at the end
}; };
@@ -446,27 +444,24 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
int ret; int ret;
int i, j; int i, j;
int idx = 0; int idx = 0;
u32 len;
/* Copy new key if necessary */ memcpy(dev->key_base, ctx->key, ctx->keylen);
if (ctx->flags & FLAGS_NEW_KEY) {
memcpy(dev->key_base, ctx->key, ctx->keylen);
ctx->flags &= ~FLAGS_NEW_KEY;
if (dev->flags & FLAGS_CBC) { if (dev->flags & FLAGS_CBC) {
dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE; dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
dev->hw_desc[idx]->p1 = dev->iv_phys_base; dev->hw_desc[idx]->p1 = dev->iv_phys_base;
} else { } else {
dev->hw_desc[idx]->len1 = 0; dev->hw_desc[idx]->len1 = 0;
dev->hw_desc[idx]->p1 = 0; dev->hw_desc[idx]->p1 = 0;
}
dev->hw_desc[idx]->len2 = ctx->keylen;
dev->hw_desc[idx]->p2 = dev->key_phys_base;
dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
idx++;
} }
dev->hw_desc[idx]->len2 = ctx->keylen;
dev->hw_desc[idx]->p2 = dev->key_phys_base;
dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
idx++;
dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total); dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
if (dev->nb_in_sg < 0) { if (dev->nb_in_sg < 0) {
@@ -488,24 +483,27 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
DMA_TO_DEVICE); DMA_TO_DEVICE);
if (ret != dev->nb_in_sg) { if (ret != dev->nb_in_sg) {
dev_err(dev->device, "couldn't map in sg\n"); dev_err(dev->device, "couldn't map in sg\n");
goto unmap_in; return -EINVAL;
} }
ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg, ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
if (ret != dev->nb_out_sg) { if (ret != dev->nb_out_sg) {
dev_err(dev->device, "couldn't map out sg\n"); dev_err(dev->device, "couldn't map out sg\n");
goto unmap_out; goto unmap_in;
} }
/* Create input links */ /* Create input links */
dev->hw_desc[idx]->p1 = dev->hw_phys_link[0]; dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
sg = dev->in_sg; sg = dev->in_sg;
len = dev->total;
for (i = 0; i < dev->nb_in_sg; i++) { for (i = 0; i < dev->nb_in_sg; i++) {
dev->hw_link[i]->len = sg->length; dev->hw_link[i]->len = min(len, sg->length);
dev->hw_link[i]->p = sg->dma_address; dev->hw_link[i]->p = sg->dma_address;
if (i == (dev->nb_in_sg - 1)) { if (i == (dev->nb_in_sg - 1)) {
dev->hw_link[i]->next = 0; dev->hw_link[i]->next = 0;
} else { } else {
len -= min(len, sg->length);
dev->hw_link[i]->next = dev->hw_phys_link[i + 1]; dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
sg = sg_next(sg); sg = sg_next(sg);
} }
@@ -514,12 +512,14 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
/* Create output links */ /* Create output links */
dev->hw_desc[idx]->p2 = dev->hw_phys_link[i]; dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
sg = dev->out_sg; sg = dev->out_sg;
len = dev->total;
for (j = i; j < dev->nb_out_sg + i; j++) { for (j = i; j < dev->nb_out_sg + i; j++) {
dev->hw_link[j]->len = sg->length; dev->hw_link[j]->len = min(len, sg->length);
dev->hw_link[j]->p = sg->dma_address; dev->hw_link[j]->p = sg->dma_address;
if (j == (dev->nb_out_sg + i - 1)) { if (j == (dev->nb_out_sg + i - 1)) {
dev->hw_link[j]->next = 0; dev->hw_link[j]->next = 0;
} else { } else {
len -= min(len, sg->length);
dev->hw_link[j]->next = dev->hw_phys_link[j + 1]; dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
sg = sg_next(sg); sg = sg_next(sg);
} }
@@ -538,9 +538,6 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
return 0; return 0;
unmap_out:
dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
DMA_FROM_DEVICE);
unmap_in: unmap_in:
dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg, dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
DMA_TO_DEVICE); DMA_TO_DEVICE);
@@ -548,8 +545,24 @@ unmap_in:
return -EINVAL; return -EINVAL;
} }
static void sahara_aes_cbc_update_iv(struct skcipher_request *req)
{
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
/* Update IV buffer to contain the last ciphertext block */
if (rctx->mode & FLAGS_ENCRYPT) {
sg_pcopy_to_buffer(req->dst, sg_nents(req->dst), req->iv,
ivsize, req->cryptlen - ivsize);
} else {
memcpy(req->iv, rctx->iv_out, ivsize);
}
}
static int sahara_aes_process(struct skcipher_request *req) static int sahara_aes_process(struct skcipher_request *req)
{ {
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
struct sahara_dev *dev = dev_ptr; struct sahara_dev *dev = dev_ptr;
struct sahara_ctx *ctx; struct sahara_ctx *ctx;
struct sahara_aes_reqctx *rctx; struct sahara_aes_reqctx *rctx;
@@ -571,8 +584,17 @@ static int sahara_aes_process(struct skcipher_request *req)
rctx->mode &= FLAGS_MODE_MASK; rctx->mode &= FLAGS_MODE_MASK;
dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode; dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
if ((dev->flags & FLAGS_CBC) && req->iv) if ((dev->flags & FLAGS_CBC) && req->iv) {
memcpy(dev->iv_base, req->iv, AES_KEYSIZE_128); unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
memcpy(dev->iv_base, req->iv, ivsize);
if (!(dev->flags & FLAGS_ENCRYPT)) {
sg_pcopy_to_buffer(req->src, sg_nents(req->src),
rctx->iv_out, ivsize,
req->cryptlen - ivsize);
}
}
/* assign new context to device */ /* assign new context to device */
dev->ctx = ctx; dev->ctx = ctx;
@@ -585,16 +607,20 @@ static int sahara_aes_process(struct skcipher_request *req)
timeout = wait_for_completion_timeout(&dev->dma_completion, timeout = wait_for_completion_timeout(&dev->dma_completion,
msecs_to_jiffies(SAHARA_TIMEOUT_MS)); msecs_to_jiffies(SAHARA_TIMEOUT_MS));
if (!timeout) {
dev_err(dev->device, "AES timeout\n");
return -ETIMEDOUT;
}
dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg, dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg, dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
DMA_TO_DEVICE); DMA_TO_DEVICE);
if (!timeout) {
dev_err(dev->device, "AES timeout\n");
return -ETIMEDOUT;
}
if ((dev->flags & FLAGS_CBC) && req->iv)
sahara_aes_cbc_update_iv(req);
return 0; return 0;
} }
@@ -608,7 +634,6 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
/* SAHARA only supports 128bit keys */ /* SAHARA only supports 128bit keys */
if (keylen == AES_KEYSIZE_128) { if (keylen == AES_KEYSIZE_128) {
memcpy(ctx->key, key, keylen); memcpy(ctx->key, key, keylen);
ctx->flags |= FLAGS_NEW_KEY;
return 0; return 0;
} }
@@ -624,12 +649,40 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
return crypto_skcipher_setkey(ctx->fallback, key, keylen); return crypto_skcipher_setkey(ctx->fallback, key, keylen);
} }
static int sahara_aes_fallback(struct skcipher_request *req, unsigned long mode)
{
struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
struct sahara_ctx *ctx = crypto_skcipher_ctx(
crypto_skcipher_reqtfm(req));
skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
skcipher_request_set_callback(&rctx->fallback_req,
req->base.flags,
req->base.complete,
req->base.data);
skcipher_request_set_crypt(&rctx->fallback_req, req->src,
req->dst, req->cryptlen, req->iv);
if (mode & FLAGS_ENCRYPT)
return crypto_skcipher_encrypt(&rctx->fallback_req);
return crypto_skcipher_decrypt(&rctx->fallback_req);
}
static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode) static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
{ {
struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req); struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
struct sahara_ctx *ctx = crypto_skcipher_ctx(
crypto_skcipher_reqtfm(req));
struct sahara_dev *dev = dev_ptr; struct sahara_dev *dev = dev_ptr;
int err = 0; int err = 0;
if (!req->cryptlen)
return 0;
if (unlikely(ctx->keylen != AES_KEYSIZE_128))
return sahara_aes_fallback(req, mode);
dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n", dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC)); req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
@@ -652,81 +705,21 @@ static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
static int sahara_aes_ecb_encrypt(struct skcipher_request *req) static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
{ {
struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
struct sahara_ctx *ctx = crypto_skcipher_ctx(
crypto_skcipher_reqtfm(req));
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
skcipher_request_set_callback(&rctx->fallback_req,
req->base.flags,
req->base.complete,
req->base.data);
skcipher_request_set_crypt(&rctx->fallback_req, req->src,
req->dst, req->cryptlen, req->iv);
return crypto_skcipher_encrypt(&rctx->fallback_req);
}
return sahara_aes_crypt(req, FLAGS_ENCRYPT); return sahara_aes_crypt(req, FLAGS_ENCRYPT);
} }
static int sahara_aes_ecb_decrypt(struct skcipher_request *req) static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
{ {
struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
struct sahara_ctx *ctx = crypto_skcipher_ctx(
crypto_skcipher_reqtfm(req));
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
skcipher_request_set_callback(&rctx->fallback_req,
req->base.flags,
req->base.complete,
req->base.data);
skcipher_request_set_crypt(&rctx->fallback_req, req->src,
req->dst, req->cryptlen, req->iv);
return crypto_skcipher_decrypt(&rctx->fallback_req);
}
return sahara_aes_crypt(req, 0); return sahara_aes_crypt(req, 0);
} }
static int sahara_aes_cbc_encrypt(struct skcipher_request *req) static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
{ {
struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
struct sahara_ctx *ctx = crypto_skcipher_ctx(
crypto_skcipher_reqtfm(req));
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
skcipher_request_set_callback(&rctx->fallback_req,
req->base.flags,
req->base.complete,
req->base.data);
skcipher_request_set_crypt(&rctx->fallback_req, req->src,
req->dst, req->cryptlen, req->iv);
return crypto_skcipher_encrypt(&rctx->fallback_req);
}
return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC); return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
} }
static int sahara_aes_cbc_decrypt(struct skcipher_request *req) static int sahara_aes_cbc_decrypt(struct skcipher_request *req)
{ {
struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
struct sahara_ctx *ctx = crypto_skcipher_ctx(
crypto_skcipher_reqtfm(req));
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
skcipher_request_set_callback(&rctx->fallback_req,
req->base.flags,
req->base.complete,
req->base.data);
skcipher_request_set_crypt(&rctx->fallback_req, req->src,
req->dst, req->cryptlen, req->iv);
return crypto_skcipher_decrypt(&rctx->fallback_req);
}
return sahara_aes_crypt(req, FLAGS_CBC); return sahara_aes_crypt(req, FLAGS_CBC);
} }
@@ -783,6 +776,7 @@ static int sahara_sha_hw_links_create(struct sahara_dev *dev,
int start) int start)
{ {
struct scatterlist *sg; struct scatterlist *sg;
unsigned int len;
unsigned int i; unsigned int i;
int ret; int ret;
@@ -804,12 +798,14 @@ static int sahara_sha_hw_links_create(struct sahara_dev *dev,
if (!ret) if (!ret)
return -EFAULT; return -EFAULT;
len = rctx->total;
for (i = start; i < dev->nb_in_sg + start; i++) { for (i = start; i < dev->nb_in_sg + start; i++) {
dev->hw_link[i]->len = sg->length; dev->hw_link[i]->len = min(len, sg->length);
dev->hw_link[i]->p = sg->dma_address; dev->hw_link[i]->p = sg->dma_address;
if (i == (dev->nb_in_sg + start - 1)) { if (i == (dev->nb_in_sg + start - 1)) {
dev->hw_link[i]->next = 0; dev->hw_link[i]->next = 0;
} else { } else {
len -= min(len, sg->length);
dev->hw_link[i]->next = dev->hw_phys_link[i + 1]; dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
sg = sg_next(sg); sg = sg_next(sg);
} }
@@ -890,24 +886,6 @@ static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
return 0; return 0;
} }
static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
{
if (!sg || !sg->length)
return nbytes;
while (nbytes && sg) {
if (nbytes <= sg->length) {
sg->length = nbytes;
sg_mark_end(sg);
break;
}
nbytes -= sg->length;
sg = sg_next(sg);
}
return nbytes;
}
static int sahara_sha_prepare_request(struct ahash_request *req) static int sahara_sha_prepare_request(struct ahash_request *req)
{ {
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -944,36 +922,20 @@ static int sahara_sha_prepare_request(struct ahash_request *req)
hash_later, 0); hash_later, 0);
} }
/* nbytes should now be multiple of blocksize */ rctx->total = len - hash_later;
req->nbytes = req->nbytes - hash_later;
sahara_walk_and_recalc(req->src, req->nbytes);
/* have data from previous operation and current */ /* have data from previous operation and current */
if (rctx->buf_cnt && req->nbytes) { if (rctx->buf_cnt && req->nbytes) {
sg_init_table(rctx->in_sg_chain, 2); sg_init_table(rctx->in_sg_chain, 2);
sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt); sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
sg_chain(rctx->in_sg_chain, 2, req->src); sg_chain(rctx->in_sg_chain, 2, req->src);
rctx->total = req->nbytes + rctx->buf_cnt;
rctx->in_sg = rctx->in_sg_chain; rctx->in_sg = rctx->in_sg_chain;
req->src = rctx->in_sg_chain;
/* only data from previous operation */ /* only data from previous operation */
} else if (rctx->buf_cnt) { } else if (rctx->buf_cnt) {
if (req->src) rctx->in_sg = rctx->in_sg_chain;
rctx->in_sg = req->src;
else
rctx->in_sg = rctx->in_sg_chain;
/* buf was copied into rembuf above */
sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt); sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
rctx->total = rctx->buf_cnt;
/* no data from previous operation */ /* no data from previous operation */
} else { } else {
rctx->in_sg = req->src; rctx->in_sg = req->src;
rctx->total = req->nbytes;
req->src = rctx->in_sg;
} }
/* on next call, we only have the remaining data in the buffer */ /* on next call, we only have the remaining data in the buffer */
@@ -994,7 +956,10 @@ static int sahara_sha_process(struct ahash_request *req)
return ret; return ret;
if (rctx->first) { if (rctx->first) {
sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0); ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
if (ret)
return ret;
dev->hw_desc[0]->next = 0; dev->hw_desc[0]->next = 0;
rctx->first = 0; rctx->first = 0;
} else { } else {
@@ -1002,7 +967,10 @@ static int sahara_sha_process(struct ahash_request *req)
sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0); sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
dev->hw_desc[0]->next = dev->hw_phys_desc[1]; dev->hw_desc[0]->next = dev->hw_phys_desc[1];
sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1); ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
if (ret)
return ret;
dev->hw_desc[1]->next = 0; dev->hw_desc[1]->next = 0;
} }
@@ -1015,18 +983,19 @@ static int sahara_sha_process(struct ahash_request *req)
timeout = wait_for_completion_timeout(&dev->dma_completion, timeout = wait_for_completion_timeout(&dev->dma_completion,
msecs_to_jiffies(SAHARA_TIMEOUT_MS)); msecs_to_jiffies(SAHARA_TIMEOUT_MS));
if (!timeout) {
dev_err(dev->device, "SHA timeout\n");
return -ETIMEDOUT;
}
if (rctx->sg_in_idx) if (rctx->sg_in_idx)
dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg, dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
DMA_TO_DEVICE); DMA_TO_DEVICE);
if (!timeout) {
dev_err(dev->device, "SHA timeout\n");
return -ETIMEDOUT;
}
memcpy(rctx->context, dev->context_base, rctx->context_size); memcpy(rctx->context, dev->context_base, rctx->context_size);
if (req->result) if (req->result && rctx->last)
memcpy(req->result, rctx->context, rctx->digest_size); memcpy(req->result, rctx->context, rctx->digest_size);
return 0; return 0;
@@ -1170,8 +1139,7 @@ static int sahara_sha_import(struct ahash_request *req, const void *in)
static int sahara_sha_cra_init(struct crypto_tfm *tfm) static int sahara_sha_cra_init(struct crypto_tfm *tfm)
{ {
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct sahara_sha_reqctx) + sizeof(struct sahara_sha_reqctx));
SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
return 0; return 0;
} }

View File

@@ -3,8 +3,11 @@ config CRYPTO_DEV_VIRTIO
tristate "VirtIO crypto driver" tristate "VirtIO crypto driver"
depends on VIRTIO depends on VIRTIO
select CRYPTO_AEAD select CRYPTO_AEAD
select CRYPTO_AKCIPHER2
select CRYPTO_SKCIPHER select CRYPTO_SKCIPHER
select CRYPTO_ENGINE select CRYPTO_ENGINE
select CRYPTO_RSA
select MPILIB
help help
This driver provides support for virtio crypto device. If you This driver provides support for virtio crypto device. If you
choose 'M' here, this module will be called virtio_crypto. choose 'M' here, this module will be called virtio_crypto.

View File

@@ -2,5 +2,6 @@
obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio_crypto.o obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio_crypto.o
virtio_crypto-objs := \ virtio_crypto-objs := \
virtio_crypto_algs.o \ virtio_crypto_algs.o \
virtio_crypto_akcipher_algs.o \
virtio_crypto_mgr.o \ virtio_crypto_mgr.o \
virtio_crypto_core.o virtio_crypto_core.o

View File

@@ -0,0 +1,591 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* Asymmetric algorithms supported by virtio crypto device
*
* Authors: zhenwei pi <pizhenwei@bytedance.com>
* lei he <helei.sig11@bytedance.com>
*
* Copyright 2022 Bytedance CO., LTD.
*/
#include <linux/mpi.h>
#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/rsa.h>
#include <linux/err.h>
#include <crypto/scatterwalk.h>
#include <linux/atomic.h>
#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"
/* RSA-specific per-transform state; only the modulus is kept guest-side. */
struct virtio_crypto_rsa_ctx {
	MPI n;	/* modulus parsed from the key; backs virtio_crypto_rsa_max_size() */
};
/* Per-transform (tfm) context shared by all virtio akcipher algorithms. */
struct virtio_crypto_akcipher_ctx {
	struct crypto_engine_ctx enginectx;	/* crypto-engine callbacks (do_one_request) */
	struct virtio_crypto *vcrypto;		/* backing device; bound on first setkey */
	struct crypto_akcipher *tfm;
	bool session_valid;			/* true while session_id names a live device session */
	__u64 session_id;			/* session handle returned by the device */
	union {
		struct virtio_crypto_rsa_ctx rsa_ctx;	/* algorithm-specific state */
	};
};
/* Per-request state, stored in the akcipher request context. */
struct virtio_crypto_akcipher_request {
	struct virtio_crypto_request base;	/* embedded base; the dataq callback recovers this struct via container_of() */
	struct virtio_crypto_akcipher_ctx *akcipher_ctx;
	struct akcipher_request *akcipher_req;	/* original request from the crypto API */
	void *src_buf;				/* linear bounce buffer for source data */
	void *dst_buf;				/* linear bounce buffer for result (NULL for verify) */
	uint32_t opcode;			/* VIRTIO_CRYPTO_AKCIPHER_* operation code */
};
/* Registration record tying one akcipher_alg to its virtio service/algonum. */
struct virtio_crypto_akcipher_algo {
	uint32_t algonum;		/* VIRTIO_CRYPTO_AKCIPHER_* algorithm id */
	uint32_t service;		/* VIRTIO_CRYPTO_SERVICE_* id */
	unsigned int active_devs;	/* devices currently offering this algo; guarded by algs_lock */
	struct akcipher_alg algo;
};

/* Serializes registration/unregistration and active_devs accounting. */
static DEFINE_MUTEX(algs_lock);
static void virtio_crypto_akcipher_finalize_req(
struct virtio_crypto_akcipher_request *vc_akcipher_req,
struct akcipher_request *req, int err)
{
kfree(vc_akcipher_req->src_buf);
kfree(vc_akcipher_req->dst_buf);
vc_akcipher_req->src_buf = NULL;
vc_akcipher_req->dst_buf = NULL;
virtcrypto_clear_request(&vc_akcipher_req->base);
crypto_finalize_akcipher_request(vc_akcipher_req->base.dataq->engine, req, err);
}
/*
 * Data-queue completion callback: translate the device status byte into an
 * errno, copy the result back to the caller (except for verify, which has
 * no device-written payload), and finalize the request.
 */
static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request *vc_req, int len)
{
	struct virtio_crypto_akcipher_request *vc_akcipher_req =
		container_of(vc_req, struct virtio_crypto_akcipher_request, base);
	struct akcipher_request *akcipher_req = vc_akcipher_req->akcipher_req;
	int error;

	/* Map the device status onto a kernel error code. */
	if (vc_req->status == VIRTIO_CRYPTO_OK)
		error = 0;
	else if (vc_req->status == VIRTIO_CRYPTO_INVSESS ||
		 vc_req->status == VIRTIO_CRYPTO_ERR)
		error = -EINVAL;
	else if (vc_req->status == VIRTIO_CRYPTO_BADMSG)
		error = -EBADMSG;
	else if (vc_req->status == VIRTIO_CRYPTO_KEY_REJECTED)
		error = -EKEYREJECTED;
	else
		error = -EIO;

	/* Verify produces no output; all other ops copy dst_buf back. */
	if (vc_akcipher_req->opcode != VIRTIO_CRYPTO_AKCIPHER_VERIFY)
		sg_copy_from_buffer(akcipher_req->dst, sg_nents(akcipher_req->dst),
				    vc_akcipher_req->dst_buf, akcipher_req->dst_len);

	virtio_crypto_akcipher_finalize_req(vc_akcipher_req, akcipher_req, error);
}
/*
 * Create an akcipher session on the device through the control virtqueue.
 *
 * @header and @para are copied into a freshly allocated control request and
 * the key is duplicated, so no DMA touches caller-owned (possibly stack)
 * memory.  On success the device-assigned session id is stored in @ctx and
 * the session is marked valid.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher_ctx *ctx,
		struct virtio_crypto_ctrl_header *header, void *para,
		const uint8_t *key, unsigned int keylen)
{
	struct scatterlist outhdr_sg, key_sg, inhdr_sg, *sgs[3];
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	uint8_t *pkey;
	int err;
	unsigned int num_out = 0, num_in = 0;
	struct virtio_crypto_op_ctrl_req *ctrl;
	struct virtio_crypto_session_input *input;
	struct virtio_crypto_ctrl_request *vc_ctrl_req;

	/*
	 * Duplicate the key into DMA-able kernel memory.
	 * NOTE(review): GFP_ATOMIC looks unnecessary — the kzalloc just below
	 * sleeps with GFP_KERNEL — confirm before changing.
	 */
	pkey = kmemdup(key, keylen, GFP_ATOMIC);
	if (!pkey)
		return -ENOMEM;

	vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
	if (!vc_ctrl_req) {
		err = -ENOMEM;
		goto out;
	}

	/*
	 * Build the control request from the caller-provided header/params.
	 * NOTE(review): sizeof(ctrl->u) bytes are copied from @para, which the
	 * caller declares as a (smaller) session_para struct — looks like a
	 * possible over-read of caller stack; verify against the uapi layout.
	 */
	ctrl = &vc_ctrl_req->ctrl;
	memcpy(&ctrl->header, header, sizeof(ctrl->header));
	memcpy(&ctrl->u, para, sizeof(ctrl->u));
	input = &vc_ctrl_req->input;
	/* Pre-set to error so a device that writes nothing reads as failure. */
	input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);

	/* OUT: ctrl request, then the key material. */
	sg_init_one(&outhdr_sg, ctrl, sizeof(*ctrl));
	sgs[num_out++] = &outhdr_sg;

	sg_init_one(&key_sg, pkey, keylen);
	sgs[num_out++] = &key_sg;

	/* IN: session id + status written back by the device. */
	sg_init_one(&inhdr_sg, input, sizeof(*input));
	sgs[num_out + num_in++] = &inhdr_sg;

	err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
	if (err < 0)
		goto out;

	if (le32_to_cpu(input->status) != VIRTIO_CRYPTO_OK) {
		pr_err("virtio_crypto: Create session failed status: %u\n",
			le32_to_cpu(input->status));
		err = -EINVAL;
		goto out;
	}

	ctx->session_id = le64_to_cpu(input->session_id);
	ctx->session_valid = true;
	err = 0;

out:
	kfree(vc_ctrl_req);
	/* The key copy may hold private material — wipe before freeing. */
	kfree_sensitive(pkey);

	return err;
}
/*
 * Destroy the device-side akcipher session, if one is active.
 *
 * A no-op returning 0 when no valid session exists (e.g. setkey never
 * succeeded).  Returns negative errno on transport or device failure.
 */
static int virtio_crypto_alg_akcipher_close_session(struct virtio_crypto_akcipher_ctx *ctx)
{
	struct scatterlist outhdr_sg, inhdr_sg, *sgs[2];
	struct virtio_crypto_destroy_session_req *destroy_session;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	unsigned int num_out = 0, num_in = 0;
	int err;
	struct virtio_crypto_op_ctrl_req *ctrl;
	struct virtio_crypto_inhdr *ctrl_status;
	struct virtio_crypto_ctrl_request *vc_ctrl_req;

	if (!ctx->session_valid)
		return 0;

	vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
	if (!vc_ctrl_req)
		return -ENOMEM;

	/* Pre-set to error so a device that writes nothing reads as failure. */
	ctrl_status = &vc_ctrl_req->ctrl_status;
	ctrl_status->status = VIRTIO_CRYPTO_ERR;

	ctrl = &vc_ctrl_req->ctrl;
	ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION);
	ctrl->header.queue_id = 0;

	destroy_session = &ctrl->u.destroy_session;
	destroy_session->session_id = cpu_to_le64(ctx->session_id);

	/* OUT: ctrl request; IN: single status byte. */
	sg_init_one(&outhdr_sg, ctrl, sizeof(*ctrl));
	sgs[num_out++] = &outhdr_sg;

	sg_init_one(&inhdr_sg, &ctrl_status->status, sizeof(ctrl_status->status));
	sgs[num_out + num_in++] = &inhdr_sg;

	err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
	if (err < 0)
		goto out;

	if (ctrl_status->status != VIRTIO_CRYPTO_OK) {
		pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
			ctrl_status->status, destroy_session->session_id);
		err = -EINVAL;
		goto out;
	}

	err = 0;
	/* Only mark the session gone once the device confirmed destruction. */
	ctx->session_valid = false;

out:
	kfree(vc_ctrl_req);
	return err;
}
/*
 * Build and submit one akcipher data request on @data_vq.
 *
 * The caller's scatterlists are bounced through linear kernel buffers.
 * For VERIFY, signature and digest both flow toward the device (OUT) and
 * there is no result buffer; for all other ops a dst bounce buffer is
 * allocated for the device to write into (IN).
 *
 * On success the buffers are owned by @vc_akcipher_req and released in
 * virtio_crypto_akcipher_finalize_req().  On failure they are freed here
 * and the pointers cleared.
 *
 * Returns 0 on success or a negative errno (allocation failure, or the
 * error reported by virtqueue_add_sgs()).
 */
static int __virtio_crypto_akcipher_do_req(struct virtio_crypto_akcipher_request *vc_akcipher_req,
		struct akcipher_request *req, struct data_queue *data_vq)
{
	struct virtio_crypto_akcipher_ctx *ctx = vc_akcipher_req->akcipher_ctx;
	struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	struct virtio_crypto_op_data_req *req_data = vc_req->req_data;
	struct scatterlist *sgs[4], outhdr_sg, inhdr_sg, srcdata_sg, dstdata_sg;
	void *src_buf = NULL, *dst_buf = NULL;
	unsigned int num_out = 0, num_in = 0;
	int node = dev_to_node(&vcrypto->vdev->dev);
	unsigned long flags;
	int ret;
	bool verify = vc_akcipher_req->opcode == VIRTIO_CRYPTO_AKCIPHER_VERIFY;
	/* For verify, src carries signature + digest, so both lengths count. */
	unsigned int src_len = verify ? req->src_len + req->dst_len : req->src_len;

	/* out header */
	sg_init_one(&outhdr_sg, req_data, sizeof(*req_data));
	sgs[num_out++] = &outhdr_sg;

	/* src data: linearize the caller's source scatterlist (OUT) */
	ret = -ENOMEM;
	src_buf = kcalloc_node(src_len, 1, GFP_KERNEL, node);
	if (!src_buf)
		goto err;

	sg_copy_to_buffer(req->src, sg_nents(req->src), src_buf, src_len);
	sg_init_one(&srcdata_sg, src_buf, src_len);
	sgs[num_out++] = &srcdata_sg;

	if (!verify) {
		/* dst data: buffer the device writes the result into (IN) */
		dst_buf = kcalloc_node(req->dst_len, 1, GFP_KERNEL, node);
		if (!dst_buf)
			goto err;

		sg_init_one(&dstdata_sg, dst_buf, req->dst_len);
		sgs[num_out + num_in++] = &dstdata_sg;
	}

	/* Must be stashed before kick: the callback may fire immediately. */
	vc_akcipher_req->src_buf = src_buf;
	vc_akcipher_req->dst_buf = dst_buf;

	/* in header: device status byte */
	sg_init_one(&inhdr_sg, &vc_req->status, sizeof(vc_req->status));
	sgs[num_out + num_in++] = &inhdr_sg;

	spin_lock_irqsave(&data_vq->lock, flags);
	ret = virtqueue_add_sgs(data_vq->vq, sgs, num_out, num_in, vc_req, GFP_ATOMIC);
	virtqueue_kick(data_vq->vq);
	spin_unlock_irqrestore(&data_vq->lock, flags);
	if (ret)
		goto err;

	return 0;

err:
	/*
	 * Nothing was queued: reclaim the bounce buffers and clear the
	 * stashed pointers so the request holds no dangling references.
	 */
	kfree(src_buf);
	kfree(dst_buf);
	vc_akcipher_req->src_buf = NULL;
	vc_akcipher_req->dst_buf = NULL;

	/* Propagate the real error instead of a hard-coded -ENOMEM. */
	return ret;
}
/*
 * crypto-engine do_one_request callback for RSA: allocate and populate the
 * per-request virtio operation header, then queue the data request on the
 * data virtqueue chosen by virtio_crypto_rsa_req().
 */
static int virtio_crypto_rsa_do_req(struct crypto_engine *engine, void *vreq)
{
	struct akcipher_request *req = container_of(vreq, struct akcipher_request, base);
	struct virtio_crypto_akcipher_request *vc_akcipher_req = akcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
	struct virtio_crypto_akcipher_ctx *ctx = vc_akcipher_req->akcipher_ctx;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	struct data_queue *data_vq = vc_req->dataq;
	struct virtio_crypto_op_header *header;
	struct virtio_crypto_akcipher_data_req *akcipher_req;
	int ret;

	vc_req->sgs = NULL;
	/* Allocate near the device's NUMA node; freed on completion/error. */
	vc_req->req_data = kzalloc_node(sizeof(*vc_req->req_data),
		GFP_KERNEL, dev_to_node(&vcrypto->vdev->dev));
	if (!vc_req->req_data)
		return -ENOMEM;

	/* build request header */
	header = &vc_req->req_data->header;
	header->opcode = cpu_to_le32(vc_akcipher_req->opcode);
	header->algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
	header->session_id = cpu_to_le64(ctx->session_id);

	/* build request akcipher data */
	akcipher_req = &vc_req->req_data->u.akcipher_req;
	akcipher_req->para.src_data_len = cpu_to_le32(req->src_len);
	akcipher_req->para.dst_data_len = cpu_to_le32(req->dst_len);

	ret = __virtio_crypto_akcipher_do_req(vc_akcipher_req, req, data_vq);
	if (ret < 0) {
		/* Header holds the session id — wipe before freeing. */
		kfree_sensitive(vc_req->req_data);
		vc_req->req_data = NULL;
		return ret;
	}

	return 0;
}
/*
 * Common entry point for every RSA operation: record the per-request state
 * and hand the request to the crypto engine of the chosen data queue.
 */
static int virtio_crypto_rsa_req(struct akcipher_request *req, uint32_t opcode)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct virtio_crypto_akcipher_ctx *akcipher_ctx = akcipher_tfm_ctx(tfm);
	struct virtio_crypto_akcipher_request *vc_akcipher_req = akcipher_request_ctx(req);
	/* Always dispatch on the first data virtqueue for now. */
	struct data_queue *dataq = &akcipher_ctx->vcrypto->data_vq[0];

	vc_akcipher_req->base.dataq = dataq;
	vc_akcipher_req->base.alg_cb = virtio_crypto_dataq_akcipher_callback;
	vc_akcipher_req->akcipher_ctx = akcipher_ctx;
	vc_akcipher_req->akcipher_req = req;
	vc_akcipher_req->opcode = opcode;

	return crypto_transfer_akcipher_request_to_engine(dataq->engine, req);
}
static int virtio_crypto_rsa_encrypt(struct akcipher_request *req)
{
return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_ENCRYPT);
}
static int virtio_crypto_rsa_decrypt(struct akcipher_request *req)
{
return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_DECRYPT);
}
static int virtio_crypto_rsa_sign(struct akcipher_request *req)
{
return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_SIGN);
}
static int virtio_crypto_rsa_verify(struct akcipher_request *req)
{
return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_VERIFY);
}
/*
 * Common setkey path for all RSA variants.
 *
 * Parses the encoded key, caches the modulus (for max_size()), binds the
 * transform to a virtio device on first use, and (re)creates the
 * device-side session with the given padding/hash parameters.
 *
 * @private: true selects rsa_parse_priv_key(), false rsa_parse_pub_key().
 * Returns 0 on success, negative errno otherwise.
 */
static int virtio_crypto_rsa_set_key(struct crypto_akcipher *tfm,
				     const void *key,
				     unsigned int keylen,
				     bool private,
				     int padding_algo,
				     int hash_algo)
{
	struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;
	struct virtio_crypto *vcrypto;
	struct virtio_crypto_ctrl_header header;
	struct virtio_crypto_akcipher_session_para para;
	struct rsa_key rsa_key = {0};
	int node = virtio_crypto_get_current_node();
	uint32_t keytype;
	int ret;

	/* mpi_free will test n, just free it. */
	mpi_free(rsa_ctx->n);
	rsa_ctx->n = NULL;

	if (private) {
		keytype = VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE;
		ret = rsa_parse_priv_key(&rsa_key, key, keylen);
	} else {
		keytype = VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC;
		ret = rsa_parse_pub_key(&rsa_key, key, keylen);
	}

	if (ret)
		return ret;

	/* Cache the modulus; virtio_crypto_rsa_max_size() reports its size. */
	rsa_ctx->n = mpi_read_raw_data(rsa_key.n, rsa_key.n_sz);
	if (!rsa_ctx->n)
		return -ENOMEM;

	if (!ctx->vcrypto) {
		/* First key on this tfm: find a device offering akcipher RSA. */
		vcrypto = virtcrypto_get_dev_node(node, VIRTIO_CRYPTO_SERVICE_AKCIPHER,
						VIRTIO_CRYPTO_AKCIPHER_RSA);
		if (!vcrypto) {
			pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
			return -ENODEV;
		}

		ctx->vcrypto = vcrypto;
	} else {
		/* Re-keying: drop the old device session before creating a new one. */
		virtio_crypto_alg_akcipher_close_session(ctx);
	}

	/* set ctrl header */
	header.opcode = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION);
	header.algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
	header.queue_id = 0;

	/* set RSA para */
	para.algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
	para.keytype = cpu_to_le32(keytype);
	para.keylen = cpu_to_le32(keylen);
	para.u.rsa.padding_algo = cpu_to_le32(padding_algo);
	para.u.rsa.hash_algo = cpu_to_le32(hash_algo);

	return virtio_crypto_alg_akcipher_init_session(ctx, &header, &para, key, keylen);
}
static int virtio_crypto_rsa_raw_set_priv_key(struct crypto_akcipher *tfm,
const void *key,
unsigned int keylen)
{
return virtio_crypto_rsa_set_key(tfm, key, keylen, 1,
VIRTIO_CRYPTO_RSA_RAW_PADDING,
VIRTIO_CRYPTO_RSA_NO_HASH);
}
static int virtio_crypto_p1pad_rsa_sha1_set_priv_key(struct crypto_akcipher *tfm,
const void *key,
unsigned int keylen)
{
return virtio_crypto_rsa_set_key(tfm, key, keylen, 1,
VIRTIO_CRYPTO_RSA_PKCS1_PADDING,
VIRTIO_CRYPTO_RSA_SHA1);
}
static int virtio_crypto_rsa_raw_set_pub_key(struct crypto_akcipher *tfm,
const void *key,
unsigned int keylen)
{
return virtio_crypto_rsa_set_key(tfm, key, keylen, 0,
VIRTIO_CRYPTO_RSA_RAW_PADDING,
VIRTIO_CRYPTO_RSA_NO_HASH);
}
static int virtio_crypto_p1pad_rsa_sha1_set_pub_key(struct crypto_akcipher *tfm,
const void *key,
unsigned int keylen)
{
return virtio_crypto_rsa_set_key(tfm, key, keylen, 0,
VIRTIO_CRYPTO_RSA_PKCS1_PADDING,
VIRTIO_CRYPTO_RSA_SHA1);
}
/* akcipher .max_size hook: byte length of the cached modulus. */
static unsigned int virtio_crypto_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct virtio_crypto_akcipher_ctx *akcipher_ctx = akcipher_tfm_ctx(tfm);

	return mpi_get_size(akcipher_ctx->rsa_ctx.n);
}
/* akcipher .init hook: wire up the crypto-engine callbacks. */
static int virtio_crypto_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);

	ctx->tfm = tfm;

	/* Only do_one_request is used; no prepare/unprepare stages. */
	ctx->enginectx.op.prepare_request = NULL;
	ctx->enginectx.op.unprepare_request = NULL;
	ctx->enginectx.op.do_one_request = virtio_crypto_rsa_do_req;

	return 0;
}
/* akcipher .exit hook: tear down session, device ref and cached modulus. */
static void virtio_crypto_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);

	/* Close the device session first, then drop our device reference. */
	virtio_crypto_alg_akcipher_close_session(ctx);
	virtcrypto_dev_put(ctx->vcrypto);

	/* mpi_free() tolerates NULL, so no guard is needed. */
	mpi_free(ctx->rsa_ctx.n);
	ctx->rsa_ctx.n = NULL;
}
/*
 * Two RSA flavours are exposed: raw ("rsa") and PKCS#1 v1.5 with SHA-1
 * ("pkcs1pad(rsa,sha1)").  Only the padded variant provides sign/verify.
 */
static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = {
	{
		/* Raw RSA: encrypt/decrypt only. */
		.algonum = VIRTIO_CRYPTO_AKCIPHER_RSA,
		.service = VIRTIO_CRYPTO_SERVICE_AKCIPHER,
		.algo = {
			.encrypt = virtio_crypto_rsa_encrypt,
			.decrypt = virtio_crypto_rsa_decrypt,
			.set_pub_key = virtio_crypto_rsa_raw_set_pub_key,
			.set_priv_key = virtio_crypto_rsa_raw_set_priv_key,
			.max_size = virtio_crypto_rsa_max_size,
			.init = virtio_crypto_rsa_init_tfm,
			.exit = virtio_crypto_rsa_exit_tfm,
			.reqsize = sizeof(struct virtio_crypto_akcipher_request),
			.base = {
				.cra_name = "rsa",
				.cra_driver_name = "virtio-crypto-rsa",
				.cra_priority = 150,
				.cra_module = THIS_MODULE,
				.cra_ctxsize = sizeof(struct virtio_crypto_akcipher_ctx),
			},
		},
	},
	{
		/* PKCS#1 v1.5 + SHA-1: adds sign/verify. */
		.algonum = VIRTIO_CRYPTO_AKCIPHER_RSA,
		.service = VIRTIO_CRYPTO_SERVICE_AKCIPHER,
		.algo = {
			.encrypt = virtio_crypto_rsa_encrypt,
			.decrypt = virtio_crypto_rsa_decrypt,
			.sign = virtio_crypto_rsa_sign,
			.verify = virtio_crypto_rsa_verify,
			.set_pub_key = virtio_crypto_p1pad_rsa_sha1_set_pub_key,
			.set_priv_key = virtio_crypto_p1pad_rsa_sha1_set_priv_key,
			.max_size = virtio_crypto_rsa_max_size,
			.init = virtio_crypto_rsa_init_tfm,
			.exit = virtio_crypto_rsa_exit_tfm,
			.reqsize = sizeof(struct virtio_crypto_akcipher_request),
			.base = {
				.cra_name = "pkcs1pad(rsa,sha1)",
				.cra_driver_name = "virtio-pkcs1-rsa-with-sha1",
				.cra_priority = 150,
				.cra_module = THIS_MODULE,
				.cra_ctxsize = sizeof(struct virtio_crypto_akcipher_ctx),
			},
		},
	},
};
int virtio_crypto_akcipher_algs_register(struct virtio_crypto *vcrypto)
{
int ret = 0;
int i = 0;
mutex_lock(&algs_lock);
for (i = 0; i < ARRAY_SIZE(virtio_crypto_akcipher_algs); i++) {
uint32_t service = virtio_crypto_akcipher_algs[i].service;
uint32_t algonum = virtio_crypto_akcipher_algs[i].algonum;
if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
continue;
if (virtio_crypto_akcipher_algs[i].active_devs == 0) {
ret = crypto_register_akcipher(&virtio_crypto_akcipher_algs[i].algo);
if (ret)
goto unlock;
}
virtio_crypto_akcipher_algs[i].active_devs++;
dev_info(&vcrypto->vdev->dev, "Registered akcipher algo %s\n",
virtio_crypto_akcipher_algs[i].algo.base.cra_name);
}
unlock:
mutex_unlock(&algs_lock);
return ret;
}
void virtio_crypto_akcipher_algs_unregister(struct virtio_crypto *vcrypto)
{
int i = 0;
mutex_lock(&algs_lock);
for (i = 0; i < ARRAY_SIZE(virtio_crypto_akcipher_algs); i++) {
uint32_t service = virtio_crypto_akcipher_algs[i].service;
uint32_t algonum = virtio_crypto_akcipher_algs[i].algonum;
if (virtio_crypto_akcipher_algs[i].active_devs == 0 ||
!virtcrypto_algo_is_supported(vcrypto, service, algonum))
continue;
if (virtio_crypto_akcipher_algs[i].active_devs == 1)
crypto_unregister_akcipher(&virtio_crypto_akcipher_algs[i].algo);
virtio_crypto_akcipher_algs[i].active_devs--;
}
mutex_unlock(&algs_lock);
}

View File

@@ -118,11 +118,14 @@ static int virtio_crypto_alg_skcipher_init_session(
int encrypt) int encrypt)
{ {
struct scatterlist outhdr, key_sg, inhdr, *sgs[3]; struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
unsigned int tmp;
struct virtio_crypto *vcrypto = ctx->vcrypto; struct virtio_crypto *vcrypto = ctx->vcrypto;
int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT; int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
int err; int err;
unsigned int num_out = 0, num_in = 0; unsigned int num_out = 0, num_in = 0;
struct virtio_crypto_op_ctrl_req *ctrl;
struct virtio_crypto_session_input *input;
struct virtio_crypto_sym_create_session_req *sym_create_session;
struct virtio_crypto_ctrl_request *vc_ctrl_req;
/* /*
* Avoid to do DMA from the stack, switch to using * Avoid to do DMA from the stack, switch to using
@@ -133,26 +136,29 @@ static int virtio_crypto_alg_skcipher_init_session(
if (!cipher_key) if (!cipher_key)
return -ENOMEM; return -ENOMEM;
spin_lock(&vcrypto->ctrl_lock); vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
if (!vc_ctrl_req) {
err = -ENOMEM;
goto out;
}
/* Pad ctrl header */ /* Pad ctrl header */
vcrypto->ctrl.header.opcode = ctrl = &vc_ctrl_req->ctrl;
cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION); ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
vcrypto->ctrl.header.algo = cpu_to_le32(alg); ctrl->header.algo = cpu_to_le32(alg);
/* Set the default dataqueue id to 0 */ /* Set the default dataqueue id to 0 */
vcrypto->ctrl.header.queue_id = 0; ctrl->header.queue_id = 0;
vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR); input = &vc_ctrl_req->input;
input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
/* Pad cipher's parameters */ /* Pad cipher's parameters */
vcrypto->ctrl.u.sym_create_session.op_type = sym_create_session = &ctrl->u.sym_create_session;
cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER); sym_create_session->op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo = sym_create_session->u.cipher.para.algo = ctrl->header.algo;
vcrypto->ctrl.header.algo; sym_create_session->u.cipher.para.keylen = cpu_to_le32(keylen);
vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen = sym_create_session->u.cipher.para.op = cpu_to_le32(op);
cpu_to_le32(keylen);
vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
cpu_to_le32(op);
sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl)); sg_init_one(&outhdr, ctrl, sizeof(*ctrl));
sgs[num_out++] = &outhdr; sgs[num_out++] = &outhdr;
/* Set key */ /* Set key */
@@ -160,45 +166,30 @@ static int virtio_crypto_alg_skcipher_init_session(
sgs[num_out++] = &key_sg; sgs[num_out++] = &key_sg;
/* Return status and session id back */ /* Return status and session id back */
sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input)); sg_init_one(&inhdr, input, sizeof(*input));
sgs[num_out + num_in++] = &inhdr; sgs[num_out + num_in++] = &inhdr;
err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out, err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
num_in, vcrypto, GFP_ATOMIC); if (err < 0)
if (err < 0) { goto out;
spin_unlock(&vcrypto->ctrl_lock);
kfree_sensitive(cipher_key);
return err;
}
virtqueue_kick(vcrypto->ctrl_vq);
/* if (le32_to_cpu(input->status) != VIRTIO_CRYPTO_OK) {
* Trapping into the hypervisor, so the request should be
* handled immediately.
*/
while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
!virtqueue_is_broken(vcrypto->ctrl_vq))
cpu_relax();
if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
spin_unlock(&vcrypto->ctrl_lock);
pr_err("virtio_crypto: Create session failed status: %u\n", pr_err("virtio_crypto: Create session failed status: %u\n",
le32_to_cpu(vcrypto->input.status)); le32_to_cpu(input->status));
kfree_sensitive(cipher_key); err = -EINVAL;
return -EINVAL; goto out;
} }
if (encrypt) if (encrypt)
ctx->enc_sess_info.session_id = ctx->enc_sess_info.session_id = le64_to_cpu(input->session_id);
le64_to_cpu(vcrypto->input.session_id);
else else
ctx->dec_sess_info.session_id = ctx->dec_sess_info.session_id = le64_to_cpu(input->session_id);
le64_to_cpu(vcrypto->input.session_id);
spin_unlock(&vcrypto->ctrl_lock);
err = 0;
out:
kfree(vc_ctrl_req);
kfree_sensitive(cipher_key); kfree_sensitive(cipher_key);
return 0; return err;
} }
static int virtio_crypto_alg_skcipher_close_session( static int virtio_crypto_alg_skcipher_close_session(
@@ -206,60 +197,56 @@ static int virtio_crypto_alg_skcipher_close_session(
int encrypt) int encrypt)
{ {
struct scatterlist outhdr, status_sg, *sgs[2]; struct scatterlist outhdr, status_sg, *sgs[2];
unsigned int tmp;
struct virtio_crypto_destroy_session_req *destroy_session; struct virtio_crypto_destroy_session_req *destroy_session;
struct virtio_crypto *vcrypto = ctx->vcrypto; struct virtio_crypto *vcrypto = ctx->vcrypto;
int err; int err;
unsigned int num_out = 0, num_in = 0; unsigned int num_out = 0, num_in = 0;
struct virtio_crypto_op_ctrl_req *ctrl;
struct virtio_crypto_inhdr *ctrl_status;
struct virtio_crypto_ctrl_request *vc_ctrl_req;
spin_lock(&vcrypto->ctrl_lock); vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR; if (!vc_ctrl_req)
return -ENOMEM;
ctrl_status = &vc_ctrl_req->ctrl_status;
ctrl_status->status = VIRTIO_CRYPTO_ERR;
/* Pad ctrl header */ /* Pad ctrl header */
vcrypto->ctrl.header.opcode = ctrl = &vc_ctrl_req->ctrl;
cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION); ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
/* Set the default virtqueue id to 0 */ /* Set the default virtqueue id to 0 */
vcrypto->ctrl.header.queue_id = 0; ctrl->header.queue_id = 0;
destroy_session = &vcrypto->ctrl.u.destroy_session; destroy_session = &ctrl->u.destroy_session;
if (encrypt) if (encrypt)
destroy_session->session_id = destroy_session->session_id = cpu_to_le64(ctx->enc_sess_info.session_id);
cpu_to_le64(ctx->enc_sess_info.session_id);
else else
destroy_session->session_id = destroy_session->session_id = cpu_to_le64(ctx->dec_sess_info.session_id);
cpu_to_le64(ctx->dec_sess_info.session_id);
sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl)); sg_init_one(&outhdr, ctrl, sizeof(*ctrl));
sgs[num_out++] = &outhdr; sgs[num_out++] = &outhdr;
/* Return status and session id back */ /* Return status and session id back */
sg_init_one(&status_sg, &vcrypto->ctrl_status.status, sg_init_one(&status_sg, &ctrl_status->status, sizeof(ctrl_status->status));
sizeof(vcrypto->ctrl_status.status));
sgs[num_out + num_in++] = &status_sg; sgs[num_out + num_in++] = &status_sg;
err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out, err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
num_in, vcrypto, GFP_ATOMIC); if (err < 0)
if (err < 0) { goto out;
spin_unlock(&vcrypto->ctrl_lock);
return err;
}
virtqueue_kick(vcrypto->ctrl_vq);
while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) && if (ctrl_status->status != VIRTIO_CRYPTO_OK) {
!virtqueue_is_broken(vcrypto->ctrl_vq))
cpu_relax();
if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
spin_unlock(&vcrypto->ctrl_lock);
pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n", pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
vcrypto->ctrl_status.status, ctrl_status->status, destroy_session->session_id);
destroy_session->session_id);
return -EINVAL; err = -EINVAL;
goto out;
} }
spin_unlock(&vcrypto->ctrl_lock);
return 0; err = 0;
out:
kfree(vc_ctrl_req);
return err;
} }
static int virtio_crypto_alg_skcipher_init_sessions( static int virtio_crypto_alg_skcipher_init_sessions(

View File

@@ -10,9 +10,11 @@
#include <linux/virtio.h> #include <linux/virtio.h>
#include <linux/crypto.h> #include <linux/crypto.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <crypto/aead.h> #include <crypto/aead.h>
#include <crypto/aes.h> #include <crypto/aes.h>
#include <crypto/engine.h> #include <crypto/engine.h>
#include <uapi/linux/virtio_crypto.h>
/* Internal representation of a data virtqueue */ /* Internal representation of a data virtqueue */
@@ -27,6 +29,7 @@ struct data_queue {
char name[32]; char name[32];
struct crypto_engine *engine; struct crypto_engine *engine;
struct tasklet_struct done_task;
}; };
struct virtio_crypto { struct virtio_crypto {
@@ -56,6 +59,7 @@ struct virtio_crypto {
u32 mac_algo_l; u32 mac_algo_l;
u32 mac_algo_h; u32 mac_algo_h;
u32 aead_algo; u32 aead_algo;
u32 akcipher_algo;
/* Maximum length of cipher key */ /* Maximum length of cipher key */
u32 max_cipher_key_len; u32 max_cipher_key_len;
@@ -64,11 +68,6 @@ struct virtio_crypto {
/* Maximum size of per request */ /* Maximum size of per request */
u64 max_size; u64 max_size;
/* Control VQ buffers: protected by the ctrl_lock */
struct virtio_crypto_op_ctrl_req ctrl;
struct virtio_crypto_session_input input;
struct virtio_crypto_inhdr ctrl_status;
unsigned long status; unsigned long status;
atomic_t ref_count; atomic_t ref_count;
struct list_head list; struct list_head list;
@@ -84,6 +83,18 @@ struct virtio_crypto_sym_session_info {
__u64 session_id; __u64 session_id;
}; };
/*
* Note: there are padding fields in request, clear them to zero before
* sending to host to avoid to divulge any information.
* Ex, virtio_crypto_ctrl_request::ctrl::u::destroy_session::padding[48]
*/
struct virtio_crypto_ctrl_request {
struct virtio_crypto_op_ctrl_req ctrl;
struct virtio_crypto_session_input input;
struct virtio_crypto_inhdr ctrl_status;
struct completion compl;
};
struct virtio_crypto_request; struct virtio_crypto_request;
typedef void (*virtio_crypto_data_callback) typedef void (*virtio_crypto_data_callback)
(struct virtio_crypto_request *vc_req, int len); (struct virtio_crypto_request *vc_req, int len);
@@ -131,5 +142,10 @@ static inline int virtio_crypto_get_current_node(void)
int virtio_crypto_algs_register(struct virtio_crypto *vcrypto); int virtio_crypto_algs_register(struct virtio_crypto *vcrypto);
void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto); void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto);
int virtio_crypto_akcipher_algs_register(struct virtio_crypto *vcrypto);
void virtio_crypto_akcipher_algs_unregister(struct virtio_crypto *vcrypto);
int virtio_crypto_ctrl_vq_request(struct virtio_crypto *vcrypto, struct scatterlist *sgs[],
unsigned int out_sgs, unsigned int in_sgs,
struct virtio_crypto_ctrl_request *vc_ctrl_req);
#endif /* _VIRTIO_CRYPTO_COMMON_H */ #endif /* _VIRTIO_CRYPTO_COMMON_H */

View File

@@ -22,27 +22,78 @@ virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
} }
} }
static void virtcrypto_dataq_callback(struct virtqueue *vq) static void virtio_crypto_ctrlq_callback(struct virtio_crypto_ctrl_request *vc_ctrl_req)
{
complete(&vc_ctrl_req->compl);
}
static void virtcrypto_ctrlq_callback(struct virtqueue *vq)
{ {
struct virtio_crypto *vcrypto = vq->vdev->priv; struct virtio_crypto *vcrypto = vq->vdev->priv;
struct virtio_crypto_request *vc_req; struct virtio_crypto_ctrl_request *vc_ctrl_req;
unsigned long flags; unsigned long flags;
unsigned int len; unsigned int len;
unsigned int qid = vq->index;
spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags); spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
do {
virtqueue_disable_cb(vq);
while ((vc_ctrl_req = virtqueue_get_buf(vq, &len)) != NULL) {
spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
virtio_crypto_ctrlq_callback(vc_ctrl_req);
spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
}
if (unlikely(virtqueue_is_broken(vq)))
break;
} while (!virtqueue_enable_cb(vq));
spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
}
int virtio_crypto_ctrl_vq_request(struct virtio_crypto *vcrypto, struct scatterlist *sgs[],
unsigned int out_sgs, unsigned int in_sgs,
struct virtio_crypto_ctrl_request *vc_ctrl_req)
{
int err;
unsigned long flags;
init_completion(&vc_ctrl_req->compl);
spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, out_sgs, in_sgs, vc_ctrl_req, GFP_ATOMIC);
if (err < 0) {
spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
return err;
}
virtqueue_kick(vcrypto->ctrl_vq);
spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
wait_for_completion(&vc_ctrl_req->compl);
return 0;
}
static void virtcrypto_done_task(unsigned long data)
{
struct data_queue *data_vq = (struct data_queue *)data;
struct virtqueue *vq = data_vq->vq;
struct virtio_crypto_request *vc_req;
unsigned int len;
do { do {
virtqueue_disable_cb(vq); virtqueue_disable_cb(vq);
while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) { while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
spin_unlock_irqrestore(
&vcrypto->data_vq[qid].lock, flags);
if (vc_req->alg_cb) if (vc_req->alg_cb)
vc_req->alg_cb(vc_req, len); vc_req->alg_cb(vc_req, len);
spin_lock_irqsave(
&vcrypto->data_vq[qid].lock, flags);
} }
} while (!virtqueue_enable_cb(vq)); } while (!virtqueue_enable_cb(vq));
spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags); }
static void virtcrypto_dataq_callback(struct virtqueue *vq)
{
struct virtio_crypto *vcrypto = vq->vdev->priv;
struct data_queue *dq = &vcrypto->data_vq[vq->index];
tasklet_schedule(&dq->done_task);
} }
static int virtcrypto_find_vqs(struct virtio_crypto *vi) static int virtcrypto_find_vqs(struct virtio_crypto *vi)
@@ -73,7 +124,7 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi)
goto err_names; goto err_names;
/* Parameters for control virtqueue */ /* Parameters for control virtqueue */
callbacks[total_vqs - 1] = NULL; callbacks[total_vqs - 1] = virtcrypto_ctrlq_callback;
names[total_vqs - 1] = "controlq"; names[total_vqs - 1] = "controlq";
/* Allocate/initialize parameters for data virtqueues */ /* Allocate/initialize parameters for data virtqueues */
@@ -99,6 +150,8 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi)
ret = -ENOMEM; ret = -ENOMEM;
goto err_engine; goto err_engine;
} }
tasklet_init(&vi->data_vq[i].done_task, virtcrypto_done_task,
(unsigned long)&vi->data_vq[i]);
} }
kfree(names); kfree(names);
@@ -297,6 +350,7 @@ static int virtcrypto_probe(struct virtio_device *vdev)
u32 mac_algo_l = 0; u32 mac_algo_l = 0;
u32 mac_algo_h = 0; u32 mac_algo_h = 0;
u32 aead_algo = 0; u32 aead_algo = 0;
u32 akcipher_algo = 0;
u32 crypto_services = 0; u32 crypto_services = 0;
if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
@@ -348,6 +402,9 @@ static int virtcrypto_probe(struct virtio_device *vdev)
mac_algo_h, &mac_algo_h); mac_algo_h, &mac_algo_h);
virtio_cread_le(vdev, struct virtio_crypto_config, virtio_cread_le(vdev, struct virtio_crypto_config,
aead_algo, &aead_algo); aead_algo, &aead_algo);
if (crypto_services & (1 << VIRTIO_CRYPTO_SERVICE_AKCIPHER))
virtio_cread_le(vdev, struct virtio_crypto_config,
akcipher_algo, &akcipher_algo);
/* Add virtio crypto device to global table */ /* Add virtio crypto device to global table */
err = virtcrypto_devmgr_add_dev(vcrypto); err = virtcrypto_devmgr_add_dev(vcrypto);
@@ -374,7 +431,7 @@ static int virtcrypto_probe(struct virtio_device *vdev)
vcrypto->mac_algo_h = mac_algo_h; vcrypto->mac_algo_h = mac_algo_h;
vcrypto->hash_algo = hash_algo; vcrypto->hash_algo = hash_algo;
vcrypto->aead_algo = aead_algo; vcrypto->aead_algo = aead_algo;
vcrypto->akcipher_algo = akcipher_algo;
dev_info(&vdev->dev, dev_info(&vdev->dev,
"max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n", "max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
@@ -431,11 +488,14 @@ static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
static void virtcrypto_remove(struct virtio_device *vdev) static void virtcrypto_remove(struct virtio_device *vdev)
{ {
struct virtio_crypto *vcrypto = vdev->priv; struct virtio_crypto *vcrypto = vdev->priv;
int i;
dev_info(&vdev->dev, "Start virtcrypto_remove.\n"); dev_info(&vdev->dev, "Start virtcrypto_remove.\n");
if (virtcrypto_dev_started(vcrypto)) if (virtcrypto_dev_started(vcrypto))
virtcrypto_dev_stop(vcrypto); virtcrypto_dev_stop(vcrypto);
for (i = 0; i < vcrypto->max_data_queues; i++)
tasklet_kill(&vcrypto->data_vq[i].done_task);
vdev->config->reset(vdev); vdev->config->reset(vdev);
virtcrypto_free_unused_reqs(vcrypto); virtcrypto_free_unused_reqs(vcrypto);
virtcrypto_clear_crypto_engines(vcrypto); virtcrypto_clear_crypto_engines(vcrypto);

View File

@@ -242,6 +242,12 @@ int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
return -EFAULT; return -EFAULT;
} }
if (virtio_crypto_akcipher_algs_register(vcrypto)) {
pr_err("virtio_crypto: Failed to register crypto akcipher algs\n");
virtio_crypto_algs_unregister(vcrypto);
return -EFAULT;
}
return 0; return 0;
} }
@@ -258,6 +264,7 @@ int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
void virtcrypto_dev_stop(struct virtio_crypto *vcrypto) void virtcrypto_dev_stop(struct virtio_crypto *vcrypto)
{ {
virtio_crypto_algs_unregister(vcrypto); virtio_crypto_algs_unregister(vcrypto);
virtio_crypto_akcipher_algs_unregister(vcrypto);
} }
/* /*
@@ -312,6 +319,10 @@ bool virtcrypto_algo_is_supported(struct virtio_crypto *vcrypto,
case VIRTIO_CRYPTO_SERVICE_AEAD: case VIRTIO_CRYPTO_SERVICE_AEAD:
algo_mask = vcrypto->aead_algo; algo_mask = vcrypto->aead_algo;
break; break;
case VIRTIO_CRYPTO_SERVICE_AKCIPHER:
algo_mask = vcrypto->akcipher_algo;
break;
} }
if (!(algo_mask & (1u << algo))) if (!(algo_mask & (1u << algo)))

View File

@@ -1133,7 +1133,7 @@ static irqreturn_t thunderx_ocx_com_threaded_isr(int irq, void *irq_id)
decode_register(other, OCX_OTHER_SIZE, decode_register(other, OCX_OTHER_SIZE,
ocx_com_errors, ctx->reg_com_int); ocx_com_errors, ctx->reg_com_int);
strncat(msg, other, OCX_MESSAGE_SIZE); strlcat(msg, other, OCX_MESSAGE_SIZE);
for (lane = 0; lane < OCX_RX_LANES; lane++) for (lane = 0; lane < OCX_RX_LANES; lane++)
if (ctx->reg_com_int & BIT(lane)) { if (ctx->reg_com_int & BIT(lane)) {
@@ -1142,12 +1142,12 @@ static irqreturn_t thunderx_ocx_com_threaded_isr(int irq, void *irq_id)
lane, ctx->reg_lane_int[lane], lane, ctx->reg_lane_int[lane],
lane, ctx->reg_lane_stat11[lane]); lane, ctx->reg_lane_stat11[lane]);
strncat(msg, other, OCX_MESSAGE_SIZE); strlcat(msg, other, OCX_MESSAGE_SIZE);
decode_register(other, OCX_OTHER_SIZE, decode_register(other, OCX_OTHER_SIZE,
ocx_lane_errors, ocx_lane_errors,
ctx->reg_lane_int[lane]); ctx->reg_lane_int[lane]);
strncat(msg, other, OCX_MESSAGE_SIZE); strlcat(msg, other, OCX_MESSAGE_SIZE);
} }
if (ctx->reg_com_int & OCX_COM_INT_CE) if (ctx->reg_com_int & OCX_COM_INT_CE)
@@ -1217,7 +1217,7 @@ static irqreturn_t thunderx_ocx_lnk_threaded_isr(int irq, void *irq_id)
decode_register(other, OCX_OTHER_SIZE, decode_register(other, OCX_OTHER_SIZE,
ocx_com_link_errors, ctx->reg_com_link_int); ocx_com_link_errors, ctx->reg_com_link_int);
strncat(msg, other, OCX_MESSAGE_SIZE); strlcat(msg, other, OCX_MESSAGE_SIZE);
if (ctx->reg_com_link_int & OCX_COM_LINK_INT_UE) if (ctx->reg_com_link_int & OCX_COM_LINK_INT_UE)
edac_device_handle_ue(ocx->edac_dev, 0, 0, msg); edac_device_handle_ue(ocx->edac_dev, 0, 0, msg);
@@ -1896,7 +1896,7 @@ static irqreturn_t thunderx_l2c_threaded_isr(int irq, void *irq_id)
decode_register(other, L2C_OTHER_SIZE, l2_errors, ctx->reg_int); decode_register(other, L2C_OTHER_SIZE, l2_errors, ctx->reg_int);
strncat(msg, other, L2C_MESSAGE_SIZE); strlcat(msg, other, L2C_MESSAGE_SIZE);
if (ctx->reg_int & mask_ue) if (ctx->reg_int & mask_ue)
edac_device_handle_ue(l2c->edac_dev, 0, 0, msg); edac_device_handle_ue(l2c->edac_dev, 0, 0, msg);

View File

@@ -279,6 +279,51 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
#define QUIRK_TI_SLLZ059 0x20 #define QUIRK_TI_SLLZ059 0x20
#define QUIRK_IR_WAKE 0x40 #define QUIRK_IR_WAKE 0x40
// On PCI Express Root Complex in any type of AMD Ryzen machine, VIA VT6306/6307/6308 with Asmedia
// ASM1083/1085 brings an inconvenience that the read accesses to 'Isochronous Cycle Timer' register
// (at offset 0xf0 in PCI I/O space) often causes unexpected system reboot. The mechanism is not
// clear, since the read access to the other registers is enough safe; e.g. 'Node ID' register,
// while it is probable due to detection of any type of PCIe error.
#define QUIRK_REBOOT_BY_CYCLE_TIMER_READ 0x80000000
#if IS_ENABLED(CONFIG_X86)
static bool has_reboot_by_cycle_timer_read_quirk(const struct fw_ohci *ohci)
{
return !!(ohci->quirks & QUIRK_REBOOT_BY_CYCLE_TIMER_READ);
}
#define PCI_DEVICE_ID_ASMEDIA_ASM108X 0x1080
static bool detect_vt630x_with_asm1083_on_amd_ryzen_machine(const struct pci_dev *pdev)
{
const struct pci_dev *pcie_to_pci_bridge;
// Detect any type of AMD Ryzen machine.
if (!static_cpu_has(X86_FEATURE_ZEN))
return false;
// Detect VIA VT6306/6307/6308.
if (pdev->vendor != PCI_VENDOR_ID_VIA)
return false;
if (pdev->device != PCI_DEVICE_ID_VIA_VT630X)
return false;
// Detect Asmedia ASM1083/1085.
pcie_to_pci_bridge = pdev->bus->self;
if (pcie_to_pci_bridge->vendor != PCI_VENDOR_ID_ASMEDIA)
return false;
if (pcie_to_pci_bridge->device != PCI_DEVICE_ID_ASMEDIA_ASM108X)
return false;
return true;
}
#else
#define has_reboot_by_cycle_timer_read_quirk(ohci) false
#define detect_vt630x_with_asm1083_on_amd_ryzen_machine(pdev) false
#endif
/* In case of multiple matches in ohci_quirks[], only the first one is used. */ /* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct { static const struct {
unsigned short vendor, device, revision, flags; unsigned short vendor, device, revision, flags;
@@ -1713,6 +1758,9 @@ static u32 get_cycle_time(struct fw_ohci *ohci)
s32 diff01, diff12; s32 diff01, diff12;
int i; int i;
if (has_reboot_by_cycle_timer_read_quirk(ohci))
return 0;
c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
if (ohci->quirks & QUIRK_CYCLE_TIMER) { if (ohci->quirks & QUIRK_CYCLE_TIMER) {
@@ -3615,6 +3663,9 @@ static int pci_probe(struct pci_dev *dev,
if (param_quirks) if (param_quirks)
ohci->quirks = param_quirks; ohci->quirks = param_quirks;
if (detect_vt630x_with_asm1083_on_amd_ryzen_machine(dev))
ohci->quirks |= QUIRK_REBOOT_BY_CYCLE_TIMER_READ;
/* /*
* Because dma_alloc_coherent() allocates at least one page, * Because dma_alloc_coherent() allocates at least one page,
* we save space by using a common buffer for the AR request/ * we save space by using a common buffer for the AR request/

View File

@@ -313,11 +313,14 @@ static int __init meson_sm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fw); platform_set_drvdata(pdev, fw);
pr_info("secure-monitor enabled\n"); if (devm_of_platform_populate(dev))
goto out_in_base;
if (sysfs_create_group(&pdev->dev.kobj, &meson_sm_sysfs_attr_group)) if (sysfs_create_group(&pdev->dev.kobj, &meson_sm_sysfs_attr_group))
goto out_in_base; goto out_in_base;
pr_info("secure-monitor enabled\n");
return 0; return 0;
out_in_base: out_in_base:

View File

@@ -161,7 +161,7 @@ static int ti_sci_debugfs_create(struct platform_device *pdev,
{ {
struct device *dev = &pdev->dev; struct device *dev = &pdev->dev;
struct resource *res; struct resource *res;
char debug_name[50] = "ti_sci_debug@"; char debug_name[50];
/* Debug region is optional */ /* Debug region is optional */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -178,10 +178,10 @@ static int ti_sci_debugfs_create(struct platform_device *pdev,
/* Setup NULL termination */ /* Setup NULL termination */
info->debug_buffer[info->debug_region_size] = 0; info->debug_buffer[info->debug_region_size] = 0;
info->d = debugfs_create_file(strncat(debug_name, dev_name(dev), snprintf(debug_name, sizeof(debug_name), "ti_sci_debug@%s",
sizeof(debug_name) - dev_name(dev));
sizeof("ti_sci_debug@")), info->d = debugfs_create_file(debug_name, 0444, NULL, info,
0444, NULL, info, &ti_sci_debug_fops); &ti_sci_debug_fops);
if (IS_ERR(info->d)) if (IS_ERR(info->d))
return PTR_ERR(info->d); return PTR_ERR(info->d);

View File

@@ -459,6 +459,9 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
if (size & 0x3 || *pos & 0x3) if (size & 0x3 || *pos & 0x3)
return -EINVAL; return -EINVAL;
if (!adev->didt_rreg)
return -EOPNOTSUPP;
r = pm_runtime_get_sync(adev_to_drm(adev)->dev); r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) { if (r < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -518,6 +521,9 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
if (size & 0x3 || *pos & 0x3) if (size & 0x3 || *pos & 0x3)
return -EINVAL; return -EINVAL;
if (!adev->didt_wreg)
return -EOPNOTSUPP;
r = pm_runtime_get_sync(adev_to_drm(adev)->dev); r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) { if (r < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -576,7 +582,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
int r; int r;
if (!adev->smc_rreg) if (!adev->smc_rreg)
return -EPERM; return -EOPNOTSUPP;
if (size & 0x3 || *pos & 0x3) if (size & 0x3 || *pos & 0x3)
return -EINVAL; return -EINVAL;
@@ -638,7 +644,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
int r; int r;
if (!adev->smc_wreg) if (!adev->smc_wreg)
return -EPERM; return -EOPNOTSUPP;
if (size & 0x3 || *pos & 0x3) if (size & 0x3 || *pos & 0x3)
return -EINVAL; return -EINVAL;

View File

@@ -2733,10 +2733,8 @@ static int kv_parse_power_table(struct amdgpu_device *adev)
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index]; &non_clock_info_array->nonClockInfo[non_clock_array_index];
ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL); ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
if (ps == NULL) { if (ps == NULL)
kfree(adev->pm.dpm.ps);
return -ENOMEM; return -ENOMEM;
}
adev->pm.dpm.ps[i].ps_priv = ps; adev->pm.dpm.ps[i].ps_priv = ps;
k = 0; k = 0;
idx = (u8 *)&power_state->v2.clockInfoIndex[0]; idx = (u8 *)&power_state->v2.clockInfoIndex[0];

View File

@@ -7349,10 +7349,9 @@ static int si_dpm_init(struct amdgpu_device *adev)
kcalloc(4, kcalloc(4,
sizeof(struct amdgpu_clock_voltage_dependency_entry), sizeof(struct amdgpu_clock_voltage_dependency_entry),
GFP_KERNEL); GFP_KERNEL);
if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) { if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries)
amdgpu_free_extended_power_table(adev);
return -ENOMEM; return -ENOMEM;
}
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4; adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0; adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0; adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;

View File

@@ -1637,7 +1637,7 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
} else { } else {
if (tc->hpd_pin < 0 || tc->hpd_pin > 1) { if (tc->hpd_pin < 0 || tc->hpd_pin > 1) {
dev_err(dev, "failed to parse HPD number\n"); dev_err(dev, "failed to parse HPD number\n");
return ret; return -EINVAL;
} }
} }

View File

@@ -179,7 +179,7 @@ static int tpd12s015_probe(struct platform_device *pdev)
return 0; return 0;
} }
static int __exit tpd12s015_remove(struct platform_device *pdev) static int tpd12s015_remove(struct platform_device *pdev)
{ {
struct tpd12s015_device *tpd = platform_get_drvdata(pdev); struct tpd12s015_device *tpd = platform_get_drvdata(pdev);
@@ -197,7 +197,7 @@ MODULE_DEVICE_TABLE(of, tpd12s015_of_match);
static struct platform_driver tpd12s015_driver = { static struct platform_driver tpd12s015_driver = {
.probe = tpd12s015_probe, .probe = tpd12s015_probe,
.remove = __exit_p(tpd12s015_remove), .remove = tpd12s015_remove,
.driver = { .driver = {
.name = "tpd12s015", .name = "tpd12s015",
.of_match_table = tpd12s015_of_match, .of_match_table = tpd12s015_of_match,

View File

@@ -562,8 +562,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
struct drm_mode_set set; struct drm_mode_set set;
uint32_t __user *set_connectors_ptr; uint32_t __user *set_connectors_ptr;
struct drm_modeset_acquire_ctx ctx; struct drm_modeset_acquire_ctx ctx;
int ret; int ret, i, num_connectors = 0;
int i;
if (!drm_core_check_feature(dev, DRIVER_MODESET)) if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP; return -EOPNOTSUPP;
@@ -721,6 +720,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
connector->name); connector->name);
connector_set[i] = connector; connector_set[i] = connector;
num_connectors++;
} }
} }
@@ -729,7 +729,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
set.y = crtc_req->y; set.y = crtc_req->y;
set.mode = mode; set.mode = mode;
set.connectors = connector_set; set.connectors = connector_set;
set.num_connectors = crtc_req->count_connectors; set.num_connectors = num_connectors;
set.fb = fb; set.fb = fb;
if (drm_drv_uses_atomic_modeset(dev)) if (drm_drv_uses_atomic_modeset(dev))
@@ -742,7 +742,7 @@ out:
drm_framebuffer_put(fb); drm_framebuffer_put(fb);
if (connector_set) { if (connector_set) {
for (i = 0; i < crtc_req->count_connectors; i++) { for (i = 0; i < num_connectors; i++) {
if (connector_set[i]) if (connector_set[i])
drm_connector_put(connector_set[i]); drm_connector_put(connector_set[i]);
} }

View File

@@ -892,8 +892,11 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
goto err_minors; goto err_minors;
} }
if (drm_core_check_feature(dev, DRIVER_MODESET)) if (drm_core_check_feature(dev, DRIVER_MODESET)) {
drm_modeset_register_all(dev); ret = drm_modeset_register_all(dev);
if (ret)
goto err_unload;
}
ret = 0; ret = 0;
@@ -905,6 +908,9 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
goto out_unlock; goto out_unlock;
err_unload:
if (dev->driver->unload)
dev->driver->unload(dev);
err_minors: err_minors:
remove_compat_control_link(dev); remove_compat_control_link(dev);
drm_minor_unregister(dev, DRM_MINOR_PRIMARY); drm_minor_unregister(dev, DRM_MINOR_PRIMARY);

View File

@@ -108,18 +108,16 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
return 0; return 0;
if (!priv->mapping) { if (!priv->mapping) {
void *mapping; void *mapping = NULL;
if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
mapping = arm_iommu_create_mapping(&platform_bus_type, mapping = arm_iommu_create_mapping(&platform_bus_type,
EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE); EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE);
else if (IS_ENABLED(CONFIG_IOMMU_DMA)) else if (IS_ENABLED(CONFIG_IOMMU_DMA))
mapping = iommu_get_domain_for_dev(priv->dma_dev); mapping = iommu_get_domain_for_dev(priv->dma_dev);
else
mapping = ERR_PTR(-ENODEV);
if (IS_ERR(mapping)) if (!mapping)
return PTR_ERR(mapping); return -ENODEV;
priv->mapping = mapping; priv->mapping = mapping;
} }

View File

@@ -1849,6 +1849,8 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
return ret; return ret;
crtc = exynos_drm_crtc_get_by_type(drm_dev, EXYNOS_DISPLAY_TYPE_HDMI); crtc = exynos_drm_crtc_get_by_type(drm_dev, EXYNOS_DISPLAY_TYPE_HDMI);
if (IS_ERR(crtc))
return PTR_ERR(crtc);
crtc->pipe_clk = &hdata->phy_clk; crtc->pipe_clk = &hdata->phy_clk;
ret = hdmi_create_connector(encoder); ret = hdmi_create_connector(encoder);

View File

@@ -5584,7 +5584,7 @@ void intel_dp_process_phy_request(struct intel_dp *intel_dp)
intel_dp_autotest_phy_ddi_enable(intel_dp, data->num_lanes); intel_dp_autotest_phy_ddi_enable(intel_dp, data->num_lanes);
drm_dp_set_phy_test_pattern(&intel_dp->aux, data, drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
link_status[DP_DPCD_REV]); intel_dp->dpcd[DP_DPCD_REV]);
} }
static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)

View File

@@ -268,6 +268,7 @@ static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
{ {
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc); struct mdp4_kms *mdp4_kms = get_kms(crtc);
unsigned long flags;
DBG("%s", mdp4_crtc->name); DBG("%s", mdp4_crtc->name);
@@ -280,6 +281,14 @@ static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err); mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
mdp4_disable(mdp4_kms); mdp4_disable(mdp4_kms);
if (crtc->state->event && !crtc->state->active) {
WARN_ON(mdp4_crtc->event);
spin_lock_irqsave(&mdp4_kms->dev->event_lock, flags);
drm_crtc_send_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
spin_unlock_irqrestore(&mdp4_kms->dev->event_lock, flags);
}
mdp4_crtc->enabled = false; mdp4_crtc->enabled = false;
} }

View File

@@ -558,7 +558,9 @@ static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
struct device *dev = &phy->pdev->dev; struct device *dev = &phy->pdev->dev;
int ret; int ret;
pm_runtime_get_sync(dev); ret = pm_runtime_resume_and_get(dev);
if (ret)
return ret;
ret = clk_prepare_enable(phy->ahb_clk); ret = clk_prepare_enable(phy->ahb_clk);
if (ret) { if (ret) {
@@ -708,6 +710,10 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
goto fail; goto fail;
} }
ret = devm_pm_runtime_enable(&pdev->dev);
if (ret)
return ret;
/* PLL init will call into clk_register which requires /* PLL init will call into clk_register which requires
* register access, so we need to enable power and ahb clock. * register access, so we need to enable power and ahb clock.
*/ */

View File

@@ -39,7 +39,7 @@ struct nv04_fence_priv {
static int static int
nv04_fence_emit(struct nouveau_fence *fence) nv04_fence_emit(struct nouveau_fence *fence)
{ {
struct nvif_push *push = fence->channel->chan.push; struct nvif_push *push = unrcu_pointer(fence->channel)->chan.push;
int ret = PUSH_WAIT(push, 2); int ret = PUSH_WAIT(push, 2);
if (ret == 0) { if (ret == 0) {
PUSH_NVSQ(push, NV_SW, 0x0150, fence->base.seqno); PUSH_NVSQ(push, NV_SW, 0x0150, fence->base.seqno);

View File

@@ -32,7 +32,7 @@ tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
type |= 0x00000001; /* PAGE_ALL */ type |= 0x00000001; /* PAGE_ALL */
if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR])) if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
type |= 0x00000004; /* HUB_ONLY */ type |= 0x00000006; /* HUB_ONLY | ALL PDB (hack) */
mutex_lock(&subdev->mutex); mutex_lock(&subdev->mutex);

View File

@@ -111,6 +111,8 @@ static int kd35t133_unprepare(struct drm_panel *panel)
return ret; return ret;
} }
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
regulator_disable(ctx->iovcc); regulator_disable(ctx->iovcc);
regulator_disable(ctx->vdd); regulator_disable(ctx->vdd);

View File

@@ -329,7 +329,7 @@ int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
u32 domain, u32 domain,
size_t size, size_t size,
struct qxl_surface *surf, struct qxl_surface *surf,
struct qxl_bo **qobj, struct drm_gem_object **gobj,
uint32_t *handle); uint32_t *handle);
void qxl_gem_object_free(struct drm_gem_object *gobj); void qxl_gem_object_free(struct drm_gem_object *gobj);
int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv); int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv);

View File

@@ -34,6 +34,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
{ {
struct qxl_device *qdev = to_qxl(dev); struct qxl_device *qdev = to_qxl(dev);
struct qxl_bo *qobj; struct qxl_bo *qobj;
struct drm_gem_object *gobj;
uint32_t handle; uint32_t handle;
int r; int r;
struct qxl_surface surf; struct qxl_surface surf;
@@ -62,11 +63,13 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
r = qxl_gem_object_create_with_handle(qdev, file_priv, r = qxl_gem_object_create_with_handle(qdev, file_priv,
QXL_GEM_DOMAIN_SURFACE, QXL_GEM_DOMAIN_SURFACE,
args->size, &surf, &qobj, args->size, &surf, &gobj,
&handle); &handle);
if (r) if (r)
return r; return r;
qobj = gem_to_qxl_bo(gobj);
qobj->is_dumb = true; qobj->is_dumb = true;
drm_gem_object_put(gobj);
args->pitch = pitch; args->pitch = pitch;
args->handle = handle; args->handle = handle;
return 0; return 0;

View File

@@ -72,32 +72,41 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size,
return 0; return 0;
} }
/*
* If the caller passed a valid gobj pointer, it is responsible to call
* drm_gem_object_put() when it no longer needs to acess the object.
*
* If gobj is NULL, it is handled internally.
*/
int qxl_gem_object_create_with_handle(struct qxl_device *qdev, int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
struct drm_file *file_priv, struct drm_file *file_priv,
u32 domain, u32 domain,
size_t size, size_t size,
struct qxl_surface *surf, struct qxl_surface *surf,
struct qxl_bo **qobj, struct drm_gem_object **gobj,
uint32_t *handle) uint32_t *handle)
{ {
struct drm_gem_object *gobj;
int r; int r;
struct drm_gem_object *local_gobj;
BUG_ON(!qobj);
BUG_ON(!handle); BUG_ON(!handle);
r = qxl_gem_object_create(qdev, size, 0, r = qxl_gem_object_create(qdev, size, 0,
domain, domain,
false, false, surf, false, false, surf,
&gobj); &local_gobj);
if (r) if (r)
return -ENOMEM; return -ENOMEM;
r = drm_gem_handle_create(file_priv, gobj, handle); r = drm_gem_handle_create(file_priv, local_gobj, handle);
if (r) if (r)
return r; return r;
/* drop reference from allocate - handle holds it now */
*qobj = gem_to_qxl_bo(gobj); if (gobj)
drm_gem_object_put(gobj); *gobj = local_gobj;
else
/* drop reference from allocate - handle holds it now */
drm_gem_object_put(local_gobj);
return 0; return 0;
} }

View File

@@ -39,7 +39,6 @@ static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
struct qxl_device *qdev = to_qxl(dev); struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_alloc *qxl_alloc = data; struct drm_qxl_alloc *qxl_alloc = data;
int ret; int ret;
struct qxl_bo *qobj;
uint32_t handle; uint32_t handle;
u32 domain = QXL_GEM_DOMAIN_VRAM; u32 domain = QXL_GEM_DOMAIN_VRAM;
@@ -51,7 +50,7 @@ static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
domain, domain,
qxl_alloc->size, qxl_alloc->size,
NULL, NULL,
&qobj, &handle); NULL, &handle);
if (ret) { if (ret) {
DRM_ERROR("%s: failed to create gem ret=%d\n", DRM_ERROR("%s: failed to create gem ret=%d\n",
__func__, ret); __func__, ret);
@@ -393,7 +392,6 @@ static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
{ {
struct qxl_device *qdev = to_qxl(dev); struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_alloc_surf *param = data; struct drm_qxl_alloc_surf *param = data;
struct qxl_bo *qobj;
int handle; int handle;
int ret; int ret;
int size, actual_stride; int size, actual_stride;
@@ -413,7 +411,7 @@ static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
QXL_GEM_DOMAIN_SURFACE, QXL_GEM_DOMAIN_SURFACE,
size, size,
&surf, &surf,
&qobj, &handle); NULL, &handle);
if (ret) { if (ret) {
DRM_ERROR("%s: failed to create gem ret=%d\n", DRM_ERROR("%s: failed to create gem ret=%d\n",
__func__, ret); __func__, ret);

View File

@@ -2313,7 +2313,7 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
switch (prim_walk) { switch (prim_walk) {
case 1: case 1:
for (i = 0; i < track->num_arrays; i++) { for (i = 0; i < track->num_arrays; i++) {
size = track->arrays[i].esize * track->max_indx * 4; size = track->arrays[i].esize * track->max_indx * 4UL;
if (track->arrays[i].robj == NULL) { if (track->arrays[i].robj == NULL) {
DRM_ERROR("(PW %u) Vertex array %u no buffer " DRM_ERROR("(PW %u) Vertex array %u no buffer "
"bound\n", prim_walk, i); "bound\n", prim_walk, i);
@@ -2332,7 +2332,7 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
break; break;
case 2: case 2:
for (i = 0; i < track->num_arrays; i++) { for (i = 0; i < track->num_arrays; i++) {
size = track->arrays[i].esize * (nverts - 1) * 4; size = track->arrays[i].esize * (nverts - 1) * 4UL;
if (track->arrays[i].robj == NULL) { if (track->arrays[i].robj == NULL) {
DRM_ERROR("(PW %u) Vertex array %u no buffer " DRM_ERROR("(PW %u) Vertex array %u no buffer "
"bound\n", prim_walk, i); "bound\n", prim_walk, i);

View File

@@ -1276,7 +1276,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
return -EINVAL; return -EINVAL;
} }
tmp = (reg - CB_COLOR0_BASE) / 4; tmp = (reg - CB_COLOR0_BASE) / 4;
track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8; track->cb_color_bo_offset[tmp] = (u64)radeon_get_ib_value(p, idx) << 8;
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
track->cb_color_base_last[tmp] = ib[idx]; track->cb_color_base_last[tmp] = ib[idx];
track->cb_color_bo[tmp] = reloc->robj; track->cb_color_bo[tmp] = reloc->robj;
@@ -1303,7 +1303,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
"0x%04X\n", reg); "0x%04X\n", reg);
return -EINVAL; return -EINVAL;
} }
track->htile_offset = radeon_get_ib_value(p, idx) << 8; track->htile_offset = (u64)radeon_get_ib_value(p, idx) << 8;
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
track->htile_bo = reloc->robj; track->htile_bo = reloc->robj;
track->db_dirty = true; track->db_dirty = true;

View File

@@ -689,11 +689,16 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
if (radeon_crtc == NULL) if (radeon_crtc == NULL)
return; return;
radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0);
if (!radeon_crtc->flip_queue) {
kfree(radeon_crtc);
return;
}
drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs); drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs);
drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256); drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
radeon_crtc->crtc_id = index; radeon_crtc->crtc_id = index;
radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0);
rdev->mode_info.crtcs[index] = radeon_crtc; rdev->mode_info.crtcs[index] = radeon_crtc;
if (rdev->family >= CHIP_BONAIRE) { if (rdev->family >= CHIP_BONAIRE) {

Some files were not shown because too many files have changed in this diff Show More