
This merges up to the 5.10.223 LTS release into the android12-5.10 branch.

Included in here are the following commits:

Revert "scsi: core: Fix a use-after-free" *dc67fccdbe
ANDROID: properly backport filelock fix in 5.10.223 *72f4574b8d
Revert "ext4: Send notifications on error" *306e16d49c
Revert "net: mac802154: Fix racy device stats updates by DEV_STATS_INC() and DEV_STATS_ADD()" *8c417688f0
Merge 5.10.223 into android12-5.10-lts |\ | *b15dc4170c
Linux 5.10.223 | *7431144b40
tap: add missing verification for short frame | *6100e02372
tun: add missing verification for short frame | *911cc83e56
filelock: Fix fcntl/close race recovery compat path | *7fa9d1d252
ALSA: pcm_dmaengine: Don't synchronize DMA channel when DMA is paused | *ddf0caf012
arm64: dts: qcom: msm8996: Disable SS instance in Parkmode for USB | *a7ec8a5a7f
ALSA: hda/realtek: Fix the speaker output on Samsung Galaxy Book Pro 360 | *74c6b151a8
ALSA: hda/realtek: Enable headset mic on Positivo SU C1400 | *6386f1b6a1
jfs: don't walk off the end of ealist | *77495e5da5
ocfs2: add bounds checking to ocfs2_check_dir_entry() | *6e03006548
net: relax socket state check at accept time. | *a5224e2123
drm/amdgpu: Fix signedness bug in sdma_v4_0_process_trap_irq() | *9760c6ceb2
ext4: Send notifications on error | *88e44424a6
ext4: fix error code saved on super block during file system abort | *5ce8fad941
scsi: core: Fix a use-after-free | *c0809c128d
bpf, skmsg: Fix NULL pointer dereference in sk_psock_skb_ingress_enqueue | *be35504b95
bpf: Fix overrunning reservations in ringbuf | *9e2b0a5e25
ACPI: processor_idle: Fix invalid comparison with insertion sort for latency | *c85e6b7d9e
ARM: 9324/1: fix get_user() broken with veneer | *727ed4810c
spi: mux: set ctlr->bits_per_word_mask | *34f8efd274
hfsplus: fix uninit-value in copy_name | *f236af7561
selftests/vDSO: fix clang build errors and warnings | *38c2028bb3
spi: imx: Don't expect DMA for i.MX{25,35,50,51,53} cspi devices | *f65bffb464
fs: better handle deep ancestor chains in is_subdir() | *ddeda6ca5f
Bluetooth: hci_core: cancel all works upon hci_unregister_dev() | *739d8d0082
scsi: libsas: Fix exp-attached device scan after probe failure scanned in again after probe failed | *033c51dfdb
powerpc/eeh: avoid possible crash when edev->pdev changes | *6b16098148
powerpc/pseries: Whitelist dtl slub object for copying to userspace | *d1e4e94cb8
net: mac802154: Fix racy device stats updates by DEV_STATS_INC() and DEV_STATS_ADD() | *909f4c2fc9
net: usb: qmi_wwan: add Telit FN912 compositions | *8acf8801f3
ALSA: dmaengine_pcm: terminate dmaengine before synchronize | *2a28531dd0
ALSA: hda/relatek: Enable Mute LED on HP Laptop 15-gw0xxx | *94818bdb00
btrfs: qgroup: fix quota root leak after quota disable failure | *2e51db7ab7
s390/sclp: Fix sclp_init() cleanup on failure | *71db8dc6f8
can: kvaser_usb: fix return value for hif_usb_send_regout | *6e90cd1696
ASoC: ti: omap-hdmi: Fix too long driver name | *96414bf037
ASoC: ti: davinci-mcasp: Set min period size using FIFO config | *072f6348c5
ALSA: dmaengine: Synchronize dma channel after drop() | *73bb3e0194
bytcr_rt5640 : inverse jack detect for Archos 101 cesium | *a87d15d1a3
Input: i8042 - add Ayaneo Kun to i8042 quirk table | *9b32a13486
Input: elantech - fix touchpad state on resume for Lenovo N24 | *cf704e7d04
mips: fix compat_sys_lseek syscall | *134b12f0c5
ALSA: hda/realtek: Add more codec ID to no shutup pins list | *4cdf6926f4
KVM: PPC: Book3S HV: Prevent UAF in kvm_spapr_tce_attach_iommu_group() | *6295bad58f
wifi: cfg80211: wext: add extra SIOCSIWSCAN data check | *9774641b25
mei: demote client disconnect warning on suspend to debug | *229bce543b
fs/file: fix the check in find_next_fd() | *ffe47bf986
kconfig: remove wrong expr_trans_bool() | *4beba24085
kconfig: gconf: give a proper initial state to the Save button | *9625afe1dd
null_blk: fix validation of block size | *9934cda0e7
arm64: armv8_deprecated: Fix warning in isndep cpuhp starting process | *a0cafb7b0b
ila: block BH in ila_output() | *34eb7ab9af
net: ipv6: rpl_iptunnel: block BH in rpl_output() and rpl_input() | *fe855e5b16
Input: silead - Always support 10 fingers | *42e60f3bde
selftests/openat2: Fix build warnings on ppc64 | *bb8ace6794
wifi: mac80211: fix UBSAN noise in ieee80211_prep_hw_scan() | *60cf36f290
wifi: mac80211: mesh: init nonpeer_pm to active by default in mesh sdata | *6df01b7eab
ACPI: EC: Avoid returning AE_OK on errors in address space handler | *fd57dbffd9
ACPI: EC: Abort address space access upon error | *cd9472c43f
scsi: qedf: Set qed_slowpath_params to zero before use | *5661b9c7ec
filelock: Remove locks reliably when fcntl/close race is detected | *2e272e7d71
gcc-plugins: Rename last_stmt() for GCC 14+ * |b7647fb740
* |b7647fb740 Merge branch 'android12-5.10' into branch 'android12-5.10-lts'
* |875057880e Merge 5.10.222 into android12-5.10-lts
|\|
| *83a48a4503 Linux 5.10.222
| *f52913e5d6 i2c: rcar: fix error code in probe()
| *2907dd5855 i2c: rcar: clear NO_RXDMA flag after resetting
| *41f62c95e0 i2c: rcar: ensure Gen3+ reset does not disturb local targets
| *88046f94cc i2c: rcar: introduce Gen4 devices
| *a720e2e42f i2c: rcar: reset controller is mandatory for Gen3+
| *b4c11a53e1 i2c: rcar: Add R-Car Gen4 support
| *785290cb16 i2c: mark HostNotify target address as used
| *8d99f26b55 i2c: rcar: bring hardware to known state when probing
| *a9a466a69b nilfs2: fix kernel bug on rename operation of broken directory
| *ca42be8dd1 bpf: Allow reads from uninit stack
| *9df3b2474a ipv6: prevent NULL dereference in ip6_output()
| *5edef79864 ipv6: annotate data-races around cnf.disable_ipv6
| *96c58b0966 efi: ia64: move IA64-only declarations to new asm/efi.h header
| *596dedc6fa x86/retpoline: Move a NOENDBR annotation to the SRSO dummy return thunk
| *b6d942365d wireguard: send: annotate intentional data race in checking empty queue
| *0bdb5a7444 wireguard: queueing: annotate intentional data race in cpu round robin
| *ae630de24e wireguard: allowedips: avoid unaligned 64-bit memory accesses
| *34b76d1922 libceph: fix race between delayed_work() and ceph_monc_stop()
| *f70b51a365 ALSA: hda/realtek: Limit mic boost on VAIO PRO PX
| *4d62aa6247 ALSA: hda/realtek: Enable Mute LED on HP 250 G7
| *7810928842 nvmem: meson-efuse: Fix return value of nvmem callbacks
| *bdb9c58e80 hpet: Support 32-bit userspace
| *d09dd21bb5 USB: core: Fix duplicate endpoint bug by clearing reserved bits in the descriptor
| *e8474a10c5 usb: gadget: configfs: Prevent OOB read/write in usb_string_copy()
| *10ae6b364b USB: Add USB_QUIRK_NO_SET_INTF quirk for START BP-850k
| *932a86a711 USB: serial: mos7840: fix crash on resume
| *868bc44086 USB: serial: option: add Rolling RW350-GL variants
| *2dc6aad6ea USB: serial: option: add Netprisma LCUK54 series modules
| *fb9ff51396 USB: serial: option: add support for Foxconn T99W651
| *c9e1030198 USB: serial: option: add Fibocom FM350-GL
| *9fb7367423 USB: serial: option: add Telit FN912 rmnet compositions
| *8e4e917f9d USB: serial: option: add Telit generic core-dump composition
| *26b4d6802e net: ks8851: Fix potential TX stall after interface reopen
| *5d7e64d70a tcp: avoid too many retransmit packets
| *24b9fafe34 tcp: use signed arithmetic in tcp_rtx_probe0_timed_out()
| *b4e9f8905d octeontx2-af: fix detection of IP layer
| *7e0297c80f ARM: davinci: Convert comma to semicolon
| *148d549425 s390: Mark psw in __load_psw_mask() as __unitialized
| *b81a523d54 net/sched: Fix UAF when resolving a clash
| *9f965684c5 udp: Set SOCK_RCU_FREE earlier in udp_lib_get_port().
| *c184be30b1 ethtool: netlink: do not return SQI value if link is down
| *3ba12c2afd ppp: reject claimed-as-LCP but actually malformed packets
| *22b16618a8 net: ethernet: lantiq_etop: fix double free in detach
| *b4ac93b041 net: lantiq_etop: add blank line after declaration
| *efc05a5fdc octeontx2-af: Fix incorrect value output on error path in rvu_check_rsrc_availability()
| *893e140dcc tcp: fix incorrect undo caused by DSACK of TLP retransmit
| *1b95de9433 vfs: don't mod negative dentry count when on shrinker list
| *7092f1e582 fs/dcache: Re-use value stored to dentry->d_flags instead of re-reading
| *7d4c14f4b5 filelock: fix potential use-after-free in posix_lock_inode
| *0100aeb8a1 mm: prevent derefencing NULL ptr in pfn_section_valid()
| *1e99ce37e9 nilfs2: fix incorrect inode allocation from reserved inodes
| *3affee779b kbuild: fix short log for AS in link-vmlinux.sh
| *2f3c22b1d3 nvmet: fix a possible leak when destroy a ctrl during qp establishment
| *1fa5c6eef4 platform/x86: touchscreen_dmi: Add info for the EZpad 6s Pro
| *8802d23350 platform/x86: touchscreen_dmi: Add info for GlobalSpace SolT IVW 11.6" tablet
| *560eaa1af0 nvme: adjust multiples of NVME_CTRL_PAGE_SIZE in offset
| *2d428a07e8 nvme-multipath: find NUMA path only for online numa-node
| *97982c3106 ALSA: hda/realtek: Enable headset mic of JP-IK LEAP W502 with ALC897
| *2849a1b747 i2c: pnx: Fix potential deadlock warning from del_timer_sync() call in isr
| *2032e5dfae media: dw2102: fix a potential buffer overflow
| *a6176a802c ima: Avoid blocking in RCU read-side critical section
| *37c59198bc bpf, sockmap: Fix sk->sk_forward_alloc warn_on in sk_stream_kill_queues
| *8b17cec338 bnx2x: Fix multiple UBSAN array-index-out-of-bounds
| *55d6a97cf0 mtd: rawnand: Bypass a couple of sanity checks during NAND identification
| *fac2544b8c drm/amdgpu/atomfirmware: silence UBSAN warning
| *274cba8d2d drm/nouveau: fix null pointer dereference in nouveau_connector_get_modes
| *145faa3d03 Revert "mm/writeback: fix possible divide-by-zero in wb_dirty_limits(), again"
| *c9f715f1b4 fsnotify: Do not generate events for O_PATH file descriptors
| *9528e95d6e can: kvaser_usb: Explicitly initialize family in leafimx driver_info struct
| *215a26c240 Bluetooth: qca: Fix BT enable failure again for QCA6390 after warm reboot
| *7a49389771 mm: avoid overflows in dirty throttling logic
| *f033241a7c mm: optimize the redundant loop of mm_update_owner_next()
| *2f2fa9cf7c nilfs2: add missing check for inode numbers on directory entries
| *731011ac6c nilfs2: fix inode number range checks
| *7ef519c8ef inet_diag: Initialize pad field in struct inet_diag_req_v2
| *3908637dce selftests: make order checking verbose in msg_zerocopy selftest
| *1782a42ca2 selftests: fix OOM in msg_zerocopy selftest
| *707c85ba35 bonding: Fix out-of-bounds read in bond_option_arp_ip_targets_set()
| *df76fb67ea wifi: wilc1000: fix ies_len type in connect path
| *cdffc35871 tcp_metrics: validate source addr length
| *febed740a3 UPSTREAM: tcp: fix DSACK undo in fast recovery to call tcp_try_to_open()
| *93c034c431 s390/pkey: Wipe sensitive data on failure
| *6d6d94287f jffs2: Fix potential illegal address access in jffs2_free_inode
| *b694989bb1 bpf: Avoid uninitialized value in BPF_CORE_READ_BITFIELD
| *6b84e9d53b powerpc/xmon: Check cpu id in commands "c#", "dp#" and "dx#"
| *9d046f697e kunit: Fix timeout message
| *1617249e24 orangefs: fix out-of-bounds fsid access
| *19cd1d96d6 powerpc/64: Set _IO_BASE to POISON_POINTER_DELTA not 0 for CONFIG_PCI=n
| *158bcaa2e3 i2c: i801: Annotate apanel_addr as __ro_after_init
| *e1ba226187 media: dvb-frontends: tda10048: Fix integer overflow
| *7d2fbd822d media: s2255: Use refcount_t instead of atomic_t for num_channels
| *39e7a27813 media: dvb-frontends: tda18271c2dd: Remove casting during div
| *2a2fe25a10 net: dsa: mv88e6xxx: Correct check for empty list
| *8eac1cc159 Input: ff-core - prefer struct_size over open coded arithmetic
| *402825a23a firmware: dmi: Stop decoding on broken entry
| *5a18ea7d86 sctp: prefer struct_size over open coded arithmetic
| *4dcce63a6f media: dw2102: Don't translate i2c read into write
| *ffa7bd3ca9 drm/amd/display: Skip finding free audio for unknown engine_id
| *b2e9abc955 drm/amd/display: Check pipe offset before setting vblank
| *b5b8837d06 drm/amd/display: Check index msg_id before read or write
| *f0645c99c2 drm/amdgpu: Initialize timestamp for some legacy SOCs
| *9db8c299a5 crypto: aead,cipher - zeroize key buffer after use
| *5ceb40cdee scsi: qedf: Make qedf_execute_tmf() non-preemptible
| *62349fbf86 IB/core: Implement a limit on UMAD receive List
| *167afd3fed media: dvb-usb: dib0700_devices: Add missing release_firmware()
| *82ef3fa640 media: dvb: as102-fe: Fix as10x_register_addr packing
| *25d0d9b83d drm/lima: fix shared irq handling on driver remove
| *94ffdde326 Compiler Attributes: Add __uninitialized macro
* |a297eae7e6 ANDROID: fix build error in ksz9477.c
* |2ebd481b31 Merge 5.10.221 into android12-5.10-lts
|\|
| *6ab8b697d7 Linux 5.10.221
| *ec3adc2af0 tracing/net_sched: NULL pointer dereference in perf_trace_qdisc_reset()
| *0a95f0f6d6 serial: 8250_omap: Fix Errata i2310 with RX FIFO level check
| *1bd2dc7702 xdp: xdp_mem_allocator can be NULL in trace_mem_connect().
| *4686892f61 arm64: dts: rockchip: Add sound-dai-cells for RK3368
| *e9918954e3 ARM: dts: rockchip: rk3066a: add #sound-dai-cells to hdmi node
| *b63d015b7a KVM: arm64: vgic-v4: Make the doorbell request robust w.r.t preemption
| *91efb15b5a efi/x86: Free EFI memory map only when installing a new one.
| *e5d730882d efi: xen: Set EFI_PARAVIRT for Xen dom0 boot on all architectures
| *31e0721aea efi: memmap: Move manipulation routines into x86 arch tree
| *52dc463a76 efi: Correct comment on efi_memmap_alloc
| *d204beedc8 drivers: fix typo in firmware/efi/memmap.c
| *3b32f26580 tcp: Fix data races around icsk->icsk_af_ops.
| *ea2ed3f78a ipv6: Fix data races around sk->sk_prot.
| *d3bf338e9c ipv6: annotate some data-races around sk->sk_prot
| *ed07b26c54 nfs: Leave pages in the pagecache if readpage failed
| *2e6bbfa1ab pwm: stm32: Refuse too small period requests
| *2c43adf364 mtd: spinand: macronix: Add support for serial NAND flash
| *d5f75f0199 syscalls: fix compat_sys_io_pgetevents_time64 usage
| *84bf6b64a1 ftruncate: pass a signed offset
| *010de9acbe ata: libata-core: Fix double free on error
| *be5016ae5a ata: ahci: Clean up sysfs file on error
| *692858d9ed batman-adv: Don't accept TT entries for out-of-spec VIDs
| *56fc4d3b0b drm/nouveau/dispnv04: fix null pointer dereference in nv17_tv_get_hd_modes
| *f771b91f21 drm/i915/gt: Fix potential UAF by revoke of fence registers
| *259549b2cc drm/nouveau/dispnv04: fix null pointer dereference in nv17_tv_get_ld_modes
| *9ec84770e4 hexagon: fix fadvise64_64 calling conventions
| *6906335529 csky, hexagon: fix broken sys_sync_file_range
| *bf4a43c533 kbuild: Install dtb files as 0644 in Makefile.dtbinst
| *f6c839e717 net: can: j1939: enhanced error handling for tightly received RTS messages in xtp_rx_rts_session_new
| *3f177e46c9 net: can: j1939: recover socket queue on CAN bus error during BAM transmission
| *a2a0ebff7f net: can: j1939: Initialize unused data in j1939_send_one()
| *44add57b5b tty: mcf: MCF54418 has 10 UARTS
| *cb87930066 serial: 8250_omap: Implementation of Errata i2310
| *75ddbf776d usb: atm: cxacru: fix endpoint checking in cxacru_bind()
| *621e90201c usb: musb: da8xx: fix a resource leak in probe()
| *2798fc1560 usb: gadget: printer: fix races against disable
| *84ca47192f usb: gadget: printer: SS+ support
| *ee88636607 net: usb: ax88179_178a: improve link status logs
| *59a84bcf1c iio: chemical: bme680: Fix sensor data read operation
| *c326551e99 iio: chemical: bme680: Fix overflows in compensate() functions
| *3d78fc351b iio: chemical: bme680: Fix calibration data variable
| *44f04b1a88 iio: chemical: bme680: Fix pressure value output
| *28f6d0b5ff iio: adc: ad7266: Fix variable checking bug
| *78ece307f8 counter: ti-eqep: enable clock at probe
| *76da476a4c mmc: sdhci: Do not lock spinlock around mmc_gpio_get_ro()
| *803835fda3 mmc: sdhci: Do not invert write-protect twice
| *5048a44a25 mmc: sdhci-pci: Convert PCIBIOS_* return codes to errnos
| *a68b896aa5 ocfs2: fix DIO failure due to insufficient transaction credits
| *49c09ca35a x86: stop playing stack games in profile_pc()
| *38ce307939 gpiolib: cdev: Disallow reconfiguration without direction (uAPI v1)
| *e44a83bf15 gpio: davinci: Validate the obtained number of IRQs
| *98eae65cb5 drm/panel: simple: Add missing display timing flags for KOE TX26D202VM0BWA
| *cb4e7a8f39 nvme: fixup comment for nvme RDMA Provider Type
| *1a7a494184 drm/radeon/radeon_display: Decrease the size of allocated memory
| *a45c45767b soc: ti: wkup_m3_ipc: Send NULL dummy message instead of pointer message
| *e47d3babaa media: dvbdev: Initialize sbuf
| *d23982ea9a ALSA: emux: improve patch ioctl data validation
| *763896ab62 net/dpaa2: Avoid explicit cpumask var allocation on stack
| *9dadab0db7 net/iucv: Avoid explicit cpumask var allocation on stack
| *3d6432f20f bpf: Add a check for struct bpf_fib_lookup size
| *37f646c604 mtd: partitions: redboot: Added conversion of operands to a larger type
| *cae52f61fd drm/panel: ilitek-ili9881c: Fix warning with GPIO controllers that sleep
| *5d43d789b5 netfilter: nf_tables: fully validate NFT_DATA_VALUE on store to data registers
| *4e6367fe32 parisc: use correct compat recv/recvfrom syscalls
| *7620738513 sparc: fix compat recv/recvfrom syscalls
| *2a700b8de5 sparc: fix old compat_sys_select()
| *1095b8efbb xdp: Remove WARN() from __xdp_reg_mem_model()
| *5a3035306a xdp: Allow registering memory model without rxq reference
| *f4aa8268d7 xdp: Move the rxq_info.mem clearing to unreg_mem_model()
| *0427f74a79 net: phy: micrel: add Microchip KSZ 9477 to the device table
| *65a9383389 net: dsa: microchip: fix initial port flush problem
| *3662eb2170 ASoC: fsl-asoc-card: set priv->pdev before using it
| *229e145a81 nfsd: hold a lighter-weight client reference over CB_RECALL_ANY
| *a4f3907ab5 SUNRPC: Fix svcxdr_init_encode's buflen calculation
| *f1ef3dc758 SUNRPC: Fix svcxdr_init_decode's end-of-buffer calculation
| *be20af2458 SUNRPC: Fix a NULL pointer deref in trace_svc_stats_latency()
| *3de81c1e84 SUNRPC: Fix null pointer dereference in svc_rqst_free()
| *310dee7235 netfilter: nf_tables: validate family when identifying table via handle
| *d8a04a6bfa drm/amdgpu: fix UBSAN warning in kv_dpm.c
| *52af94393d pinctrl: rockchip: fix pinmux reset in rockchip_pmx_set
| *6531f8c666 pinctrl: rockchip: use dedicated pinctrl type for RK3328
| *926cb583b9 pinctrl/rockchip: separate struct rockchip_pin_bank to a head file
| *cfa2527ac8 pinctrl: rockchip: fix pinmux bits for RK3328 GPIO3-B pins
| *6ff152b2be pinctrl: rockchip: fix pinmux bits for RK3328 GPIO2-B pins
| *b813e3fd10 pinctrl: fix deadlock in create_pinctrl() when handling -EPROBE_DEFER
| *b6be2b025c Input: ili210x - fix ili251x_read_touch_data() return value
| *f0ef5ca85f ACPI: x86: Force StorageD3Enable on more products
| *3f830c2484 ACPI: x86: utils: Add Picasso to the list for forcing StorageD3Enable
| *bb1758cc4a ACPI: x86: utils: Add Cezanne to the list for forcing StorageD3Enable
| *c2a6ab506f ACPI: x86: Add another system to quirk list for forcing StorageD3Enable
| *fe73b1d080 ACPI: x86: Add a quirk for Dell Inspiron 14 2-in-1 for StorageD3Enable
| *83f6522210 ACPI: Add quirks for AMD Renoir/Lucienne CPUs to force the D3 hint
| *b055752675 smb: client: fix deadlock in smb2_find_smb_tcon()
| *78ebec450e cifs: missed ref-counting smb session in find
| *b03555a8fa x86/amd_nb: Check for invalid SMN reads
| *0caf70a8e8 PCI: Add PCI_ERROR_RESPONSE and related definitions
| *a335ad77bd perf/core: Fix missing wakeup when waiting for context reference
| *695f20c678 kheaders: explicitly define file modes for archived headers
| *247c3f8958 Revert "kheaders: substituting --sort in archive creation"
| *61c1c98e26 r8169: Fix possible ring buffer corruption on fragmented Tx packets.
| *5c88f4f634 r8169: remove not needed check in rtl8169_start_xmit
| *48833226fb r8169: remove nr_frags argument from rtl_tx_slots_avail
| *41eeb13459 r8169: improve rtl8169_start_xmit
| *04f9d0cd39 r8169: improve rtl_tx
| *6d3eb1658b r8169: remove unneeded memory barrier in rtl_tx
| *40a697e345 x86/cpu: Fix x86_match_cpu() to match just X86_VENDOR_INTEL
| *50b1b4e4f3 x86/cpu/vfm: Add new macros to work with (vendor/family/model) values
| *0e84701753 tracing: Add MODULE_DESCRIPTION() to preemptirq_delay_test
| *2c3d7b03b6 bcache: fix variable length array abuse in btree_iter
| *6337072467 spmi: hisi-spmi-controller: Do not override device identifier
| *90551062fd knfsd: LOOKUP can return an illegal error value
| *f77c8a2ce2 pmdomain: ti-sci: Fix duplicate PD referrals
| *5fe1b2c72e wifi: rtlwifi: rtl8192de: Fix 5 GHz TX power
| *33628b6ed3 rtlwifi: rtl8192de: Style clean-ups
| *77942a0272 ARM: dts: samsung: smdk4412: fix keypad no-autorepeat
| *1fdaecc326 ARM: dts: samsung: exynos4412-origen: fix keypad no-autorepeat
| *b263a895d8 ARM: dts: samsung: smdkv310: fix keypad no-autorepeat
| *cc255080c1 drm/amd/display: revert Exit idle optimizations before HDCP execution
| *5d7fef7522 dt-bindings: i2c: google,cros-ec-i2c-tunnel: correct path to i2c-controller schema
| *7884f4afec i2c: ocores: set IACK bit after core is enabled
| *7879b54f0b kcov: don't lose track of remote references during softirqs
| *b1684798a3 gcov: add support for GCC 14
| *febe794b83 drm/radeon: fix UBSAN warning in kv_dpm.c
| *71bea3e648 ALSA: hda/realtek: Limit mic boost on N14AP7
| *7186b81c1f RDMA/mlx5: Add check for srq max_sge attribute
| *6eca23100e ACPICA: Revert "ACPICA: avoid Info: mapping multiple BARs. Your kernel is fine."
| *f3d17826d6 dmaengine: ioatdma: Fix missing kmem_cache_destroy()
| *34cc20a544 dmaengine: ioatdma: Fix kmemleak in ioat_pci_probe()
| *768ae5e025 dmaengine: ioatdma: Fix error path in ioat3_dma_probe()
| *a486fca282 dmaengine: ioat: use PCI core macros for PCIe Capability
| *c017a8e3e3 dmaengine: ioatdma: Fix leaking on version mismatch
| *f99b00ed9b dmaengine: ioat: Drop redundant pci_enable_pcie_error_reporting()
| *d293db11cb dmaengine: ioat: switch from 'pci_' to 'dma_' API
| *97509608b7 regulator: core: Fix modpost error "regulator_get_regmap" undefined
| *6a0f5d540f net: usb: rtl8150 fix unintiatilzed variables in rtl8150_get_link_ksettings
| *72d9611968 netfilter: ipset: Fix suspicious rcu_dereference_protected()
| *333c0a1f7d virtio_net: checksum offloading handling fix
| *b4bca4722f net: stmmac: No need to calculate speed divider when offload is disabled
| *03f625505e sched: act_ct: add netns into the key of tcf_ct_flow_table
| *b4899d75b8 net/sched: act_ct: set 'net' pointer when creating new nf_flow_table
| *3eb1b39627 tipc: force a dst refcount before doing decryption
| *c6a7da65a2 net/sched: act_api: fix possible infinite loop in tcf_idr_check_alloc()
| *66c7aa157a net/sched: act_api: rely on rcu in tcf_idr_check_alloc
| *fb910ac2d3 qca_spi: Make interrupt remembering atomic
| *2b82028a1f netns: Make get_net_ns() handle zero refcount net
| *20427b8578 xfrm6: check ip6_dst_idev() return value in xfrm6_get_saddr()
| *1ed9849fdf ipv6: prevent possible NULL dereference in rt6_probe()
| *de5ad4d45c ipv6: prevent possible NULL deref in fib6_nh_init()
| *5391f9db2c netrom: Fix a memory leak in nr_heartbeat_expiry()
| *1aabe0f850 cipso: fix total option length computation
| *a85bae262c tracing: Build event generation tests only as modules
| *36d771ce60 mips: bmips: BCM6358: make sure CBR is correctly set
| *7117969bff MIPS: Routerboard 532: Fix vendor retry check code
| *15c8b2e1d6 serial: exar: adding missing CTI and Exar PCI ids
| *6c1b9fe148 MIPS: Octeon: Add PCIe link status check
| *c59f79e2b4 PCI/PM: Avoid D3cold for HP Pavilion 17 PC/1972 PCIe Ports
| *bffff80d10 udf: udftime: prevent overflow in udf_disk_stamp_to_time()
| *04736c1bc3 usb: misc: uss720: check for incompatible versions of the Belkin F5U002
| *38a82c8d00 f2fs: remove clear SB_INLINECRYPT flag in default_options
| *d8481016c2 iommu/arm-smmu-v3: Free MSIs in case of ENOMEM
| *449d55871c power: supply: cros_usbpd: provide ID table for avoiding fallback match
| *1939648b3a powerpc/io: Avoid clang null pointer arithmetic warnings
| *a8c988d752 powerpc/pseries: Enforce hcall result buffer validity and size
| *03e7b2f7ae drm/lima: mask irqs in timeout path before hard reset
| *e12c363cf5 drm/lima: add mask irq callback to gp and pp
| *2db63bf7d8 ASoC: Intel: sof_sdw: add JD2 quirk for HP Omen 14
| *b4291f58a9 drm/amd/display: Exit idle optimizations before HDCP execution
| *52d4cfa56b Bluetooth: ath3k: Fix multiple issues reported by checkpatch.pl
| *6fdc98bcc6 ACPI: video: Add backlight=native quirk for Lenovo Slim 7 16ARH7
| *6f6cb07482 HID: Add quirk for Logitech Casa touchpad
| *43c0ca793a netpoll: Fix race condition in netpoll_owner_active
| *1b577bb1cb kselftest: arm64: Add a null pointer check
| *144d76a676 scsi: qedi: Fix crash while reading debugfs attribute
| *96941f29eb drop_monitor: replace spin_lock by raw_spin_lock
| *a720d71dd4 af_packet: avoid a false positive warning in packet_setsockopt()
| *b5a53d14dd wifi: ath9k: work around memset overflow warning
| *82cdea8f3a batman-adv: bypass empty buckets in batadv_purge_orig_ref()
| *e1c3f5fb1b selftests/bpf: Fix flaky test btf_map_in_map/lookup_update
| *973b32034c selftests/bpf: Prevent client connect before server bind in test_tc_tunnel.sh
| *58706e482b block/ioctl: prefer different overflow check
| *c15df6f498 rcutorture: Fix invalid context warning when enable srcu barrier testing
| *dd2cb39afc rcutorture: Fix rcu_torture_one_read() pipe_count overflow comment
| *ec58e6ff29 padata: Disable BH when taking works lock on MT path
| *82c7acf9a1 zap_pid_ns_processes: clear TIF_NOTIFY_SIGNAL along with TIF_SIGPENDING
| *864963d269 i2c: designware: Fix the functionality flags of the slave-only interface
| *0f37d22a62 i2c: at91: Fix the functionality flags of the slave-only interface
| *f68820f125 usb-storage: alauda: Check whether the media is initialized
| *2b6bb0b4ab greybus: Fix use-after-free bug in gb_interface_release due to race condition.
| *d6c26a59e6 remoteproc: k3-r5: Jump to error handling labels in start/stop errors
| *990d071010 mptcp: pm: update add_addr counters after connect
| *5a4efafcf8 mptcp: pm: inc RmAddr MIB counter once per RM_ADDR ID
| *208cd22ef5 mptcp: ensure snd_una is properly initialized on connect
| *73014c77ec hugetlb_encode.h: fix undefined behaviour (34 << 26)
| *0047568dbd serial: 8250_pxa: Configure tx_loadsz to match FIFO IRQ level
| *33eae51f65 tick/nohz_full: Don't abuse smp_call_function_single() in tick_setup_device()
| *0ecfe3a928 nilfs2: fix potential kernel bug due to lack of writeback flag waiting
| *f699f9f8b2 intel_th: pci: Add Lunar Lake support
| *31f3136fd6 intel_th: pci: Add Meteor Lake-S support
| *c02003a97a intel_th: pci: Add Sapphire Rapids SOC support
| *dbfe50b50e intel_th: pci: Add Granite Rapids SOC support
| *78a41b1614 intel_th: pci: Add Granite Rapids support
| *02d3b5e48d remoteproc: k3-r5: Do not allow core1 to power up before core0 via sysfs
| *fe5b53c602 dmaengine: axi-dmac: fix possible race in remove()
| *42ed6bfc2d PCI: rockchip-ep: Remove wrong mask on subsys_vendor_id
| *050ce8af68 ocfs2: fix races between hole punching and AIO+DIO
| *11a075a1c8 ocfs2: use coarse time for new created files
| *70c1835e77 fs/proc: fix softlockup in __read_vmcore
| *f70ff73734 vmci: prevent speculation leaks by sanitizing event in event_deliver()
| *4dfffb5031 drm/exynos: hdmi: report safe 640x480 mode as a fallback when no EDID found
| *0acc356da8 drm/exynos/vidi: fix memory leak in .get_modes()
| *760603e30b drivers: core: synchronize really_probe() and dev_uevent()
| *fd45d6f194 iio: imu: inv_icm42600: delete unneeded update watermark call
| *9d4dce5870 iio: dac: ad5592r: fix temperature channel scaling value
| *e4ce76890e iio: adc: ad9467: fix scan type sign
| *ff9c2a9426 ionic: fix use after netif_napi_del()
| *b278f9b458 net/ipv6: Fix the RT cache flush via sysctl using a previous delay
| *01ce5bdfdf net: stmmac: replace priv->speed with the portTransmitRate from the tc-cbs parameters
| *93b53c202b netfilter: ipset: Fix race between namespace cleanup and gc in the list:set type
| *ea1a98c9a3 Bluetooth: L2CAP: Fix rejecting L2CAP_CONN_PARAM_UPDATE_REQ
| *dfd7f46707 net/mlx5e: Fix features validation check for tunneled UDP (non-VXLAN) packets
| *330c8661c9 tcp: fix race in tcp_v6_syn_recv_sock()
| *9b164605c1 drm/bridge/panel: Fix runtime warning on panel bridge release
| *bda7cdaeeb drm/komeda: check for error-valued pointer
| *cbf18d8128 liquidio: Adjust a NULL pointer handling path in lio_vf_rep_copy_packet
| *187e293c82 net: hns3: add cond_resched() to hns3 ring buffer init process
| *bd8e1e6af6 net: sfp: Always call `sfp_sm_mod_remove()` on remove
| *abc55e738b drm/vmwgfx: 3D disabled should not effect STDU memory limits
| *caa9c9acb9 HID: logitech-dj: Fix memory leak in logi_dj_recv_switch_to_dj_mode()
| *cf34f8f669 iommu: Return right value in iommu_sva_bind_device()
| *d4673a34d8 iommu/amd: Fix sysfs leak in iommu init
| *c0f1bd317b iommu/amd: Introduce pci segment structure
| *a843c0e9da gpio: tqmx86: store IRQ trigger type and unmask status separately
| *33f6832798 HID: core: remove unnecessary WARN_ON() in implement()
| *544015b945 gpio: tqmx86: fix typo in Kconfig label
| *66c79c5acc SUNRPC: return proper error from gss_wrap_req_priv
| *b6a204f937 Input: try trimming too long modalias strings
| *20b3f435b7 powerpc/uaccess: Fix build errors seen with GCC 13/14
| *0081d2b3ae scsi: mpt3sas: Avoid test/set_bit() operating in non-allocated memory
| *baeae72258 xhci: Apply broken streams quirk to Etron EJ188 xHCI host
| *0b05b12e2d xhci: Apply reset resume quirk to Etron EJ188 xHCI host
| *22de7c9cba xhci: Set correct transferred length for cancelled bulk transfers
| *fc745f6e83 jfs: xattr: fix buffer overflow for invalid xattr
| *498ff29800 mei: me: release irq in mei_me_pci_resume error path
| *c0747d76eb USB: class: cdc-wdm: Fix CPU lockup caused by excessive log messages
| *c77ad608df nilfs2: fix nilfs_empty_dir() misjudgment and long loop on I/O errors
| *adf1b931d5 nilfs2: return the mapped address from nilfs_get_page()
| *8b56df81b3 nilfs2: Remove check for PageError
| *05544fd3f1 btrfs: fix leak of qgroup extent records after transaction abort
| *79bf1ea0d5 selftests/mm: compaction_test: fix bogus test success on Aarch64
| *7c1cc0a5d4 selftests/mm: conform test to TAP format output
| *9d3886a160 selftests/mm: compaction_test: fix incorrect write of zero to nr_hugepages
| *6ff7cfa02b mmc: davinci: Don't strip remove function when driver is builtin
| *b5a2a69081 serial: sc16is7xx: fix bug in sc16is7xx_set_baud() when using prescaler
| *9a2e0aa9a8 serial: sc16is7xx: replace hardcoded divisor value with BIT() macro
| *e8b8054f5e drm/amd/display: Handle Y carry-over in VCP X.Y calculation
| *e500b1c4e2 usb: gadget: f_fs: Fix race between aio_cancel() and AIO request complete
| *c693698787 ipv6: fix possible race in __fib6_drop_pcpu_from()
| *74c97c8003 af_unix: Annotate data-race of sk->sk_shutdown in sk_diag_fill().
| *35a69f9e5d af_unix: Use skb_queue_len_lockless() in sk_diag_show_rqlen().
| *a64e4b8f9b af_unix: Use unix_recvq_full_lockless() in unix_stream_connect().
| *f70ef84b82 af_unix: Annotate data-race of net->unx.sysctl_max_dgram_qlen.
| *44a2437c60 af_unix: Annotate data-races around sk->sk_state in UNIX_DIAG.
| *b5a6507c61 af_unix: Annotate data-races around sk->sk_state in sendmsg() and recvmsg().
| *cc5d123ce4 af_unix: Annotate data-races around sk->sk_state in unix_write_space() and poll().
| *d2c53bedeb af_unix: Annotate data-race of sk->sk_state in unix_inq_len().
| *13f61e503e ptp: Fix error message on failed pin verification
| *6db4af0998 net/sched: taprio: always validate TCA_TAPRIO_ATTR_PRIOMAP
| *d8c79ae03e tcp: count CLOSE-WAIT sockets for TCP_MIB_CURRESTAB
| *dd254cde57 vxlan: Fix regression when dropping packets due to invalid src addresses
| *52b1aa07cd net: sched: sch_multiq: fix possible OOB write in multiq_tune()
| *f9f69e3f69 ipv6: sr: block BH in seg6_output_core() and seg6_input_core()
| *3cbb2ba0a0 net/ncsi: Fix the multi thread manner of NCSI driver
| *7329bc66b4 net/ncsi: Simplify Kconfig/dts control flow
| *f40cac4e70 net/ncsi: add NCSI Intel OEM command to keep PHY up
| *7c9b9f822e wifi: mac80211: correctly parse Spatial Reuse Parameter Set element
| *46c59a2533 wifi: iwlwifi: mvm: don't read past the mfuart notifcation
| *3c4771091e wifi: iwlwifi: mvm: check n_ssids before accessing the ssids
| *2c80bd07c1 wifi: iwlwifi: dbg_ini: move iwl_dbg_tlv_free outside of debugfs ifdef
| *99c4903dce wifi: iwlwifi: mvm: revert gen2 TX A-MPDU size to 64
| *8d5c7d7bfd wifi: cfg80211: pmsr: use correct nla_get_uX functions
| *e7e916d693 wifi: mac80211: Fix deadlock in ieee80211_sta_ps_deliver_wakeup()
| *7518e20a18 wifi: mac80211: mesh: Fix leak of mesh_preq_queue objects
| *f64d566f43 null_blk: Print correct max open zones limit in null_init_zoned_dev()
| *8f48a7f8b9 tracing/selftests: Fix kprobe event name test for .isra. functions
* |88eb084d18 Revert "Merge 5.10.220 into android12-5.10-lts"
* |87a7f35a24 Merge 5.10.220 into android12-5.10-lts
|\|
| *3a3877de44 Linux 5.10.220
| *9444ce5cd4 nfsd: Fix a regression in nfsd_setattr()
| *a1a153fc73 nfsd: don't call locks_release_private() twice concurrently
| *feb3352af7 nfsd: don't take fi_lock in nfsd_break_deleg_cb()
| *99fb654d01 nfsd: fix RELEASE_LOCKOWNER
| *ca791e1a31 nfsd: drop the nfsd_put helper
| *838a602db7 nfsd: call nfsd_last_thread() before final nfsd_put()
| *e35cb663a4 NFSD: fix possible oops when nfsd/pool_stats is closed.
| *3add01e067 Documentation: Add missing documentation for EXPORT_OP flags
| *d31cd25f55 nfsd: separate nfsd_last_thread() from nfsd_put()
| *987c0e1028 nfsd: Simplify code around svc_exit_thread() call in nfsd()
| *7229200f68 nfsd: don't allow nfsd threads to be signalled.
| *8ef87fe6e8 nfsd: Fix creation time serialization order
| *72f28b5ad0 NFSD: Add an nfsd4_encode_nfstime4() helper
| *b4417c53d4 lockd: drop inappropriate svc_get() from locked_get()
| *b28b5c726e nfsd: fix double fget() bug in __write_ports_addfd()
| *8157832461 nfsd: make a copy of struct iattr before calling notify_change
| *05f45f3981 NFSD: Fix problem of COMMIT and NFS4ERR_DELAY in infinite loop
| *6c05d25ca8 nfsd: simplify the delayed disposal list code
| *56b36b8960 NFSD: Convert filecache to rhltable
| *5a132ffa76 nfsd: allow reaping files still under writeback
| *f7b157737c nfsd: update comment over __nfsd_file_cache_purge
| *f593ea1423 nfsd: don't take/put an extra reference when putting a file
| *c3677c14b3 nfsd: add some comments to nfsd_file_do_acquire
| *c9e8ed6efa nfsd: don't kill nfsd_files because of lease break error
| *2c95ad0a0c nfsd: simplify test_bit return in NFSD_FILE_KEY_FULL comparator
| *e378da8357 nfsd: NFSD_FILE_KEY_INODE only needs to find GC'ed entries
| *9c599dee87 nfsd: don't open-code clear_and_wake_up_bit
| *65a33135e9 nfsd: call op_release, even when op_func returns an error
| *50827896c3 NFSD: Avoid calling OPDESC() with ops->opnum == OP_ILLEGAL
| *8235cd619d nfsd: don't replace page in rq_pages if it's a continuation of last page
| *37b34eb567 lockd: set file_lock start and end when decoding nlm4 testargs
| *b0f3373279 NFSD: Protect against filesystem freezing
| *37cd49faaa NFSD: copy the whole verifier in nfsd_copy_write_verifier
| *dd7d50c695 nfsd: don't fsync nfsd_files on last close
| *1178547637 nfsd: fix courtesy client with deny mode handling in nfs4_upgrade_open
| *3db6c79de9 NFSD: fix problems with cleanup on errors in nfsd4_copy
| *e5e1dc8284 nfsd: don't hand out delegation on setuid files being opened for write
| *2da5014998 NFSD: fix leaked reference count of nfsd4_ssc_umount_item
| *fd63299db8 nfsd: clean up potential nfsd_file refcount leaks in COPY codepath
| *3c7b9b3487 nfsd: allow nfsd_file_get to sanely handle a NULL pointer
| *9d7608dc4b NFSD: enhance inter-server copy cleanup
| *6856f1385d nfsd: don't destroy global nfs4_file table in per-net shutdown
| *e997a230d8 nfsd: don't free files unconditionally in __nfsd_file_cache_purge
| *2bbf10861d NFSD: replace delayed_work with work_struct for nfsd_client_shrinker
| *438ef64bbf NFSD: register/unregister of nfsd-client shrinker at nfsd startup/shutdown time
| *6ac4c383c3 NFSD: fix use-after-free in nfsd4_ssc_setup_dul()
| *2ecc439931 NFSD: Use set_bit(RQ_DROPME)
| *115b58b56f Revert "SUNRPC: Use RMW bitops in single-threaded hot paths"
| *45c08a7529 nfsd: fix handling of cached open files in nfsd4_open codepath
| *f31bc0bc12 nfsd: rework refcounting in filecache
| *dfbf3066d9 NFSD: Avoid clashing function prototypes
| *ea46809860 NFSD: Use only RQ_DROPME to signal the need to drop a reply
| *71a98737cd NFSD: add delegation reaper to react to low memory condition
| *80a81db01a NFSD: add support for sending CB_RECALL_ANY
| *87098b663f NFSD: refactoring courtesy_client_reaper to a generic low memory shrinker
| *35a48412f6 NFSD: pass range end to vfs_fsync_range() instead of count
| *0d5f3de2b4 lockd: fix file selection in nlmsvc_cancel_blocked
| *7ecaa9aff9 lockd: ensure we use the correct file descriptor when unlocking
| *781c3f3d18 lockd: set missing fl_flags field when retrieving args
| *ae8f2bb3dd NFSD: Use struct_size() helper in alloc_session()
| *e2505cb851 nfsd: return error if nfs4_setacl fails
| *31c93ee5f1 lockd: set other missing fields when unlocking files
| *739202b2b9 NFSD: Add an nfsd_file_fsync tracepoint
| *4453e0c1bb nfsd: fix up the filecache laundrette scheduling
| *3d479899f4 nfsd: reorganize filecache.c
| *605a5acd6f nfsd: remove the pages_flushed statistic from filecache
| *384b23f136 NFSD: Fix licensing header in filecache.c
| *56eedeaf71 NFSD: Use rhashtable for managing nfs4_file objects
| *8fdef89612 NFSD: Refactor find_file()
| *5e92a16849 NFSD: Clean up find_or_add_file()
| *5aa2c4a1fe NFSD: Add a nfsd4_file_hash_remove() helper
| *e77b1d63c0 NFSD: Clean up nfsd4_init_file()
| *c152e4ffb9 NFSD: Update file_hashtbl() helpers
| *b0952d4948 NFSD: Use const pointers as parameters to fh_ helpers
| *a10d111fd0 NFSD: Trace delegation revocations
| *88cf6a1e76 NFSD: Trace stateids returned via DELEGRETURN
| *14c9c091f2 NFSD: Clean up nfs4_preprocess_stateid_op() call sites
| *d9991b0b9d NFSD: Flesh out a documenting comment for filecache.c
| *5f866f5a86 NFSD: Add an NFSD_FILE_GC flag to enable nfsd_file garbage collection
| *c09b456a81 NFSD: Revert "NFSD: NFSv4 CLOSE should release an nfsd_file immediately"
| *caa6270201 NFSD: Pass the target nfsd_file to nfsd_commit()
| *599d5c2291 exportfs: use pr_debug for unreachable debug statements
| *4ab1211c28 nfsd: allow disabling NFSv2 at compile time
| *68f7bd7f29 nfsd: move nfserrno() to vfs.c
| *abbd1215c3 nfsd: ignore requests to disable unsupported versions
| *81714ef8e3 NFSD: Finish converting the NFSv3 GETACL result encoder
| *a20b0abab9 NFSD: Finish converting the NFSv2 GETACL result encoder
| *1dd04600f6 NFSD: Remove redundant assignment to variable host_err
| *48a237cb5e NFSD: Simplify READ_PLUS
| *10727ce312 nfsd: use locks_inode_context helper
| *32c59062f8 lockd: use locks_inode_context helper
| *70ffaa7896 filelock: add a new locks_inode_context accessor function
| *7ea635fc47 NFSD: Fix reads with a non-zero offset that don't end on a page boundary
| *7d867c6c30 nfsd: put the export reference in nfsd4_verify_deleg_dentry
| *551f17db65 nfsd: fix use-after-free in nfsd_file_do_acquire tracepoint
| *31268eb457 nfsd: fix net-namespace logic in __nfsd_file_cache_purge
| *5428383c6f NFSD: unregister shrinker when nfsd_init_net() fails
| *1bb3349257 nfsd: rework hashtable handling in nfsd_do_file_acquire
| *2db3e73f9a nfsd: fix nfsd_file_unhash_and_dispose
| *683fb922e7 fanotify: Remove obsoleted fanotify_event_has_path()
| *229e73a0f4 fsnotify: remove unused declaration
| *a2d440dce6 fs/notify: constify path
| *241685bab2 nfsd: extra checks when freeing delegation stateids
| *345e3bb5e8 nfsd: make nfsd4_run_cb a bool return function
| *d7f2774d8c nfsd: fix comments about spinlock handling with delegations
| *89b6362704 nfsd: only fill out return pointer on success in nfsd4_lookup_stateid
| *31b16e6b0b NFSD: Cap rsize_bop result based on send buffer size
| *60b46564e0 NFSD: Rename the fields in copy_stateid_t
| *b7aea45a67 nfsd: use DEFINE_SHOW_ATTRIBUTE to define nfsd_file_cache_stats_fops
| *21e18dd5eb nfsd: use DEFINE_SHOW_ATTRIBUTE to define nfsd_reply_cache_stats_fops
| *443e648425 nfsd: use DEFINE_SHOW_ATTRIBUTE to define client_info_fops
| *615d761a6b nfsd: use DEFINE_SHOW_ATTRIBUTE to define export_features_fops and supported_enctypes_fops
| *a063abefc6 nfsd: use DEFINE_PROC_SHOW_ATTRIBUTE to define nfsd_proc_ops
| *cda3e9b8cd NFSD: Pack struct nfsd4_compoundres
| *a54822e64d NFSD: Remove unused nfsd4_compoundargs::cachetype field
| *17bb698078 NFSD: Remove "inline" directives on op_rsize_bop helpers
| *f533a01b09 NFSD: Clean up nfs4svc_encode_compoundres()
| *918054d2d8 NFSD: Clean up WRITE arg decoders
| *c92e8b295a NFSD: Use xdr_inline_decode() to decode NFSv3 symlinks
| *d08acee648 NFSD: Refactor common code out of dirlist helpers
| *5e76b25d7c NFSD: Reduce amount of struct nfsd4_compoundargs that needs clearing
| *5ed2524893 SUNRPC: Parametrize how much of argsize should be zeroed
| *6e50de3b3a NFSD: add shrinker to reap courtesy clients on low memory condition
| *67302ef04e NFSD: keep track of the number of courtesy clients in the system
| *1022fe63c5 NFSD: Make nfsd4_remove() wait before returning NFS4ERR_DELAY
| *235738ccea NFSD: Make nfsd4_rename() wait before returning NFS4ERR_DELAY
| *b6c6c7153b NFSD: Make nfsd4_setattr() wait before returning NFS4ERR_DELAY
| *f326970df1 NFSD: Refactor nfsd_setattr()
| *95dce2279c NFSD: Add a mechanism to wait for a DELEGRETURN
| *3c0e831b87 NFSD: Add tracepoints to report NFSv4 callback completions
| *bc6bead0af nfsd: remove nfsd4_prepare_cb_recall() declaration
| *330914c342 nfsd: clean up mounted_on_fileid handling
| *f574d41b1b NFSD: Fix handling of oversized NFSv4 COMPOUND requests
| *b0062184a1 NFSD: drop fname and flen args from nfsd_create_locked()
| *c23687911f NFSD: Protect against send buffer overflow in NFSv3 READ
| *2007867c58 NFSD: Protect against send buffer overflow in NFSv2 READ
| *57774b1526 NFSD: Protect against send buffer overflow in NFSv3 READDIR
| *0e57d696f6 NFSD: Protect against send buffer overflow in NFSv2 READDIR
| *2bd6f95ff9 NFSD: Increase NFSD_MAX_OPS_PER_COMPOUND
| *d40bef3801 nfsd: Propagate some error code returned by memdup_user()
| *490af5b07d nfsd: Avoid some useless tests
| *cef1ab71ae NFSD: remove redundant variable status
| *30b0e49a95 NFSD enforce filehandle check for source file in COPY
| *9dc20a662f lockd: move from strlcpy with unused retval to strscpy
| *91eebaa181 NFSD: move from strlcpy with unused retval to strscpy
| *57afda7bf2 nfsd_splice_actor(): handle compound pages
| *c7d320e620 NFSD: fix regression with setting ACLs.
| *1f87122d34 lockd: detect and reject lock arguments that overflow
| *b15656dfa2 NFSD: discard fh_locked flag and fh_lock/fh_unlock
| *5a8d428f5e NFSD: use (un)lock_inode instead of fh_(un)lock for file operations
| *9ef325edea NFSD: use explicit lock/unlock for directory ops
| *203f09fae4 NFSD: reduce locking in nfsd_lookup()
| *bedd266b1f NFSD: only call fh_unlock() once in nfsd_link()
| *77f83bc2ed NFSD: always drop directory lock in nfsd_unlink()
| *617f72a1aa NFSD: change nfsd_create()/nfsd_symlink() to unlock directory before returning.
| *c5409ce523 NFSD: add posix ACLs to struct nfsd_attrs
| *18ee0869d6 NFSD: add security label to struct nfsd_attrs
| *2a5642abeb NFSD: set attributes when creating symlinks
| *45cf4b1bb1 NFSD: introduce struct nfsd_attrs
| *3aac39eaa6 NFSD: verify the opened dentry after setting a delegation
| *820bf1383d NFSD: drop fh argument from alloc_init_deleg
| *c62dcf8633 NFSD: Move copy offload callback arguments into a separate structure
| *e1d1b6574e NFSD: Add nfsd4_send_cb_offload()
| *d87486acbd NFSD: Remove kmalloc from nfsd4_do_async_copy()
| *a860bd179e NFSD: Refactor nfsd4_do_copy()
| *8153ed38cc NFSD: Refactor nfsd4_cleanup_inter_ssc() (2/2)
| *0d592d96d6 NFSD: Refactor nfsd4_cleanup_inter_ssc() (1/2)
| *ac774e1eeb NFSD: Replace boolean fields in struct nfsd4_copy
| *627b896c52 NFSD: Make nfs4_put_copy() static
| *0d7e3df76b NFSD: Reorder the fields in struct nfsd4_op
| *94fd87568e NFSD: Shrink size of struct nfsd4_copy
| *7c6fd14057 NFSD: Shrink size of struct nfsd4_copy_notify
| *02bc4d514c NFSD: nfserrno(-ENOMEM) is nfserr_jukebox
| *8ce03085cc NFSD: Fix strncpy() fortify warning
| *0a1b9a216f NFSD: Clean up nfsd4_encode_readlink()
| *c7863472e5 NFSD: Use xdr_pad_size()
| *c587004a76 NFSD: Simplify starting_len
| *e77d3f5ee5 NFSD: Optimize nfsd4_encode_readv()
| *d176e7348b NFSD: Add an nfsd4_read::rd_eof field
| *427bd174a4 NFSD: Clean up SPLICE_OK in nfsd4_encode_read()
| *8fd87bf897 NFSD: Optimize nfsd4_encode_fattr()
| *d8c3d70408 NFSD: Optimize nfsd4_encode_operation()
| *3b5dcf6b46 nfsd: silence extraneous printk on nfsd.ko insertion
| *f81ab23756 NFSD: limit the number of v4 clients to 1024 per 1GB of system memory
| *ec16f5f7fa NFSD: keep track of the number of v4 clients in the system
| *4e7a739f63 NFSD: refactoring v4 specific code to a helper in nfs4state.c
| *705e2cb1fe NFSD: Ensure nf_inode is never dereferenced
| *451b2c2125 NFSD: NFSv4 CLOSE should release an nfsd_file immediately
| *c553e79c08 NFSD: Move nfsd_file_trace_alloc() tracepoint
| *26664203dd NFSD: Separate tracepoints for acquire and create
| *de070f66d2 NFSD: Clean up unused code after rhashtable conversion
| *a174ce98b3 NFSD: Convert the filecache to use rhashtable
| *ebe886ac37 NFSD: Set up an rhashtable for the filecache
| *1ea9b51f73 NFSD: Replace the "init once" mechanism
| *bbb260f3ce NFSD: Remove nfsd_file::nf_hashval
| *12494d98fe NFSD: nfsd_file_hash_remove can compute hashval
| *10ba39f788 NFSD: Refactor __nfsd_file_close_inode()
| *a86953523e NFSD: nfsd_file_unhash can compute hashval from nf->nf_inode
| *ef7fe4908a NFSD: Remove lockdep assertion from unhash_and_release_locked()
| *525c2c81fd NFSD: No longer record nf_hashval in the trace log
| *99735b8d82 NFSD: Never call nfsd_file_gc() in foreground paths
| *586e8d6c3d NFSD: Fix the filecache LRU shrinker
| *51fc2b2c79 NFSD: Leave open files out of the filecache LRU
| *c15db0869e NFSD: Trace filecache LRU activity
| *7cca6908fa NFSD: WARN when freeing an item still linked via nf_lru
| *0c426d4621 NFSD: Hook up the filecache stat file
| *6dc5cab808 NFSD: Zero counters when the filecache is re-initialized
| *04b9376a10 NFSD: Record number of flush calls
| *2cba48b3d0 NFSD: Report the number of items evicted by the LRU walk
| *af057e5884 NFSD: Refactor nfsd_file_lru_scan()
| *e7d5efd20e NFSD: Refactor nfsd_file_gc()
| *8d038e72e7 NFSD: Add nfsd_file_lru_dispose_list() helper
| *d176e98400 NFSD: Report average age of filecache items
| *ca9cc17ec0 NFSD: Report count of freed filecache items
| *a38dff5964 NFSD: Report count of calls to nfsd_file_acquire()
| *91c03a6124 NFSD: Report filecache LRU size
| *4ff0e22e54 NFSD: Demote a WARN to a pr_warn()
| *cc3b111e3b nfsd: remove redundant assignment to variable len
| *0a18cd2b94 NFSD: Fix space and spelling mistake
| *b5b79fc3ff NLM: Defend against file_lock changes after vfs_test_lock()
| *16acc0677f SUNRPC: Fix xdr_encode_bool()
| *bcaac325dd nfsd: eliminate the NFSD_FILE_BREAK_* flags
| *302ae1fb80 fsnotify: Fix comment typo
| *85c640adf9 fanotify: introduce FAN_MARK_IGNORE
| *99a022c4bc fanotify: cleanups for fanotify_mark() input validations
| *b8d06d1187 fanotify: prepare for setting event flags in ignore mask
| *71860cc4e4 fs: inotify: Fix typo in inotify comment
| *795f9fa1b5 lockd: fix nlm_close_files
| *486c1acf14 lockd: set fl_owner when unlocking files
| *845b309cf5 NFSD: Decode NFSv4 birth time attribute
| *58f985d688 NFS: restore module put when manager exits.
| *e9156a2431 fanotify: refine the validation checks on non-dir inode mask
| *6943f1073a SUNRPC: Optimize xdr_reserve_space()
| *ada1757b25 NFSD: Fix potential use-after-free in nfsd_file_put()
| *4862b61886 NFSD: nfsd_file_put() can sleep
| *06252d1bd5 NFSD: Add documenting comment for nfsd4_release_lockowner()
| *345e2e48d8 NFSD: Modernize nfsd4_release_lockowner()
| *13459d2225 nfsd: destroy percpu stats counters after reply cache shutdown
| *15081df04a nfsd: Fix null-ptr-deref in nfsd_fill_super()
| *ff4e7a4b49 nfsd: Unregister the cld notifier when laundry_wq create failed
| *e1e87709c4 SUNRPC: Use RMW bitops in single-threaded hot paths
| *f7a1ecf2aa NFSD: Clean up the show_nf_flags() macro
| *7b8462f22a NFSD: Trace filecache opens
| *a38be00474 NFSD: Move documenting comment for nfsd4_process_open2()
| *bfe9aab120 NFSD: Fix whitespace
| *2805c5439c NFSD: Remove dprintk call sites from tail of nfsd4_open()
| *c20097329d NFSD: Instantiate a struct file when creating a regular NFSv4 file
| *d8714bda3f NFSD: Clean up nfsd_open_verified()
| *274fd0f9c2 NFSD: Remove do_nfsd_create()
| *66af1db0cc NFSD: Refactor NFSv4 OPEN(CREATE)
| *a019add1b4 NFSD: Refactor NFSv3 CREATE
| *a132795b61 NFSD: Refactor nfsd_create_setattr()
| *ee0742a93c NFSD: Avoid calling fh_drop_write() twice in do_nfsd_create()
| *304505e2e8 NFSD: Clean up nfsd3_proc_create()
| *c6207942b2 NFSD: Show state of courtesy client in client info
| *4a39f029e7 NFSD: add support for lock conflict to courteous server
| *97f77d7d50 fs/lock: add 2 callbacks to lock_manager_operations to resolve conflict
| *eb2eb6b6af fs/lock: add helper locks_owner_has_blockers to check for blockers
| *461d0b57c9 NFSD: move create/destroy of laundry_wq to init_nfsd and exit_nfsd
| *a26848e2bc NFSD: add support for share reservation conflict to courteous server
| *67ef9e5fd7 NFSD: add courteous server support for thread with only delegation
| *bf1cbe2f36 NFSD: Clean up nfsd_splice_actor()
| *2723d479f5 fanotify: fix incorrect fmode_t casts
| *4cd725129e fsnotify: consistent behavior for parent not watching children
| *e3bce57ffc fsnotify: introduce mark type iterator
| *f6017a718b fanotify: enable "evictable" inode marks
| *3083d602ba fanotify: use fsnotify group lock helpers
| *f85d590059 fanotify: implement "evictable" inode marks
| *80fb0ae4b1 fanotify: factor out helper fanotify_mark_update_flags()
| *b9576077ee fanotify: create helper fanotify_mark_user_flags()
| *ff34ebaa6f fsnotify: allow adding an inode mark without pinning inode
| *3bd557cfdf dnotify: use fsnotify group lock helpers
| *cc1c875b69 nfsd: use fsnotify group lock helpers
| *c2c6ced500 inotify: use fsnotify group lock helpers
| *f91ba4a49b fsnotify: create helpers for group mark_mutex lock
| *74f9be7f64 fsnotify: make allow_dups a property of the group
| *4dc30393bd fsnotify: pass flags argument to fsnotify_alloc_group()
| *1c47d87317 inotify: move control flags from mask to mark flags
| *aecfd231bf fs/lock: documentation cleanup. Replace inode->i_lock with flc_lock.
| *d71ea54835 fanotify: do not allow setting dirent events in mask of non-dir
| *9862064ca8 nfsd: Clean up nfsd_file_put()
| *cf04df21a4 nfsd: Fix a write performance regression
| *997575f1a1 SUNRPC: Return true/false (not 1/0) from bool functions
| *a5fa9c824d fsnotify: remove redundant parameter judgment
| *552c24a32c fsnotify: optimize FS_MODIFY events with no ignored masks
| *5e84e33832 fsnotify: fix merge with parent's ignored mask
| *62fa144b85 nfsd: fix using the correct variable for sizeof()
| *e96076f579 NFSD: Clean up _lm_ operation names
| *ec3b252a55 NFSD: Remove CONFIG_NFSD_V3
| *7e4328b3b9 NFSD: Move svc_serv_ops::svo_function into struct svc_serv
| *9802c57460 NFSD: Remove svc_serv_ops::svo_module
| *36c57b27a7 SUNRPC: Remove svc_shutdown_net()
| *a4bbb1ab69 SUNRPC: Rename svc_close_xprt()
| *c58a9cfd20 SUNRPC: Rename svc_create_xprt()
| *9a43ddd6b6 SUNRPC: Remove svo_shutdown method
| *8c60a47670 SUNRPC: Merge svc_do_enqueue_xprt() into svc_enqueue_xprt()
| *99ab6abc88 SUNRPC: Remove the .svo_enqueue_xprt method
| *194071d46c NFSD: Streamline the rare "found" case
| *3304d16c24 NFSD: Skip extra computation for RC_NOCACHE case
| *4aa8dac58c NFSD: De-duplicate hash bucket indexing
| *ca6761d39a nfsd: Add support for the birth time attribute
| *0d1bbb0efe NFSD: Deprecate NFS_OFFSET_MAX
| *70a80c7e8d NFSD: COMMIT operations must not return NFS?ERR_INVAL
| *a231ae6bb5 NFSD: Fix NFSv3 SETATTR/CREATE's handling of large file sizes
| *38d02ba22e NFSD: Fix ia_size underflow
| *1726a39b08 NFSD: Fix the behavior of READ near OFFSET_MAX
| *fc2d8c153d lockd: fix failure to cleanup client locks
| *20a74a6911 lockd: fix server crash on reboot of client holding lock
| *a667e1df40 fanotify: remove variable set but not used
| *11bcfabf24 nfsd: fix crash on COPY_NOTIFY with special stateid
| *4eefd1125b NFSD: Move fill_pre_wcc() and fill_post_wcc()
| *695719e5e6 Revert "nfsd: skip some unnecessary stats in the v4 case"
| *5e07d49f4a NFSD: Trace boot verifier resets
| *a1c9bcfd16 NFSD: Rename boot verifier functions
| *e49677ff33 NFSD: Clean up the nfsd_net::nfssvc_boot field
| *083d44094f NFSD: Write verifier might go backwards
| *306d2c1c08 nfsd: Add a tracepoint for errors in nfsd4_clone_file_range()
| *45ef8b7aea NFSD: De-duplicate net_generic(nf->nf_net, nfsd_net_id)
| *5a1575c02b NFSD: De-duplicate net_generic(SVC_NET(rqstp), nfsd_net_id)
| *aa9ea9ec29 NFSD: Clean up nfsd_vfs_write()
| *30282a70aa nfsd: Retry once in nfsd_open on an -EOPENSTALE return
| *3128aa9c98 nfsd: Add errno mapping for EREMOTEIO
| *f12557372b nfsd: map EBADF
| *9175fcf39c NFSD: Fix zero-length NFSv3 WRITEs
| *fab02e9799 nfsd4: add refcount for nfsd4_blocked_lock
| *535204ecae nfs: block notification on fs with its own ->lock
| *bf5e7e1fa1 NFSD: De-duplicate nfsd4_decode_bitmap4()
| *5a0710a6b4 nfsd: improve stateid access bitmask documentation
| *f0dbe05f6d NFSD: Combine XDR error tracepoints
| *e8f923e1e9 NFSD: simplify per-net file cache management
| *677fd67d8b NFSD: Fix inconsistent indenting
| *0bc12c1289 NFSD: Remove be32_to_cpu() from DRC hash function
| *e072a635c1 NFS: switch the callback service back to non-pooled.
| *948e4664cc lockd: use svc_set_num_threads() for thread start and stop
| *deeda24a67 SUNRPC: always treat sv_nrpools==1 as "not pooled"
| *74a0e37a20 SUNRPC: move the pool_map definitions (back) into svc.c
| *9fe19a48a3 lockd: rename lockd_create_svc() to lockd_get()
| *e5087b3d58 lockd: introduce lockd_put()
| *8304dd04fb lockd: move svc_exit_thread() into the thread
| *7077a00703 lockd: move lockd_start_svc() call into lockd_create_svc()
| *a389baad91 lockd: simplify management of network status notifiers
| *32f3e5a70f lockd: introduce nlmsvc_serv
| *d95899dadb NFSD: simplify locking for network notifier.
| *7149250bee SUNRPC: discard svo_setup and rename svc_set_num_threads_sync()
| *3614523741 NFSD: Make it possible to use svc_set_num_threads_sync
| *6343271d53 NFSD: narrow nfsd_mutex protection in nfsd thread
| *61d12fc30a SUNRPC: use sv_lock to protect updates to sv_nrthreads.
| *4efe0b9d11 nfsd: make nfsd_stats.th_cnt atomic_t
| *17041f0140 SUNRPC: stop using ->sv_nrthreads as a refcount
| *64312a7c9f SUNRPC/NFSD: clean up get/put functions.
| *e9a4156137 SUNRPC: change svc_get() to return the svc.
| *e0bf899352 NFSD: handle errors better in write_ports_addfd()
| *307b391221 NFSD: Fix sparse warning
| *c59dc174b2 exit: Rename module_put_and_exit to module_put_and_kthread_exit
| *15606c8d52 exit: Implement kthread_exit
| *63b8c19231 fanotify: wire up FAN_RENAME event
| *a860dd8bf5 fanotify: report old and/or new parent+name in FAN_RENAME event
| *c76fa85159 fanotify: record either old name new name or both for FAN_RENAME
| *da527da33b fanotify: record old and new parent and name in FAN_RENAME event
| *f59e978cfa fanotify: support secondary dir fh and name in fanotify_info
| *967ae13720 fanotify: use helpers to parcel fanotify_info buffer
| *4e63ce9199 fanotify: use macros to get the offset to fanotify_info buffer
| *580eb8de84 fsnotify: generate FS_RENAME event with rich information
| *4e59c7b3e3 fanotify: introduce group flag FAN_REPORT_TARGET_FID
| *be14cab43d fsnotify: separate mark iterator type from object type enum
| *c0a5f0b561 fsnotify: clarify object type argument
| *9e291a6a28 NFSD: Fix READDIR buffer overflow
| *1abf3ec558 NFSD: Fix exposure in nfsd4_decode_bitmap()
| *88ccda1a81 nfsd4: remove obselete comment
| *f4e9e9565e NFSD:fix boolreturn.cocci warning
| *022723fe15 nfsd: update create verifier comment
| *c7b0a9c75d SUNRPC: Change return value type of .pc_encode
| *61cf681507 SUNRPC: Replace the "__be32 *p" parameter to .pc_encode
| *47047d40af NFSD: Save location of NFSv4 COMPOUND status
| *f747ce574c SUNRPC: Change return value type of .pc_decode
| *0696b6b513 SUNRPC: Replace the "__be32 *p" parameter to .pc_decode
| *396b359832 NFSD: Have legacy NFSD WRITE decoders use xdr_stream_subsegment()
| *c23b25dd19 NFSD: Initialize pointer ni with NULL and not plain integer 0
| *6784188090 NFSD: simplify struct nfsfh
| *25054b04ec NFSD: drop support for ancient filehandles
| *918bc45a57 NFSD: move filehandle format declarations out of "uapi".
| *d2815110a7 NFSD: Optimize DRC bucket pruning
| *2b2963c72c SUNRPC: Trace calls to .rpc_call_done
| *2eda014477 fanotify: Allow users to request FAN_FS_ERROR events
| *b0f01b7c08 fanotify: Emit generic error info for error event
| *aefd9029fa fanotify: Report fid info for file related file system errors
| *bb247feb22 fanotify: WARN_ON against too large file handles
| *7fa20568b6 fanotify: Add helpers to decide whether to report FID/DFID
| *7935cf4070 fanotify: Wrap object_fh inline space in a creator macro
| *b974c8aa00 fanotify: Support merging of error events
| *9b98f4ff51 fanotify: Support enqueueing of error events
| *68aacb60a7 fanotify: Pre-allocate pool of error events
| *eec22d03a9 fanotify: Reserve UAPI bits for FAN_FS_ERROR
| *badbf879de fsnotify: Support FS_ERROR event type
| *8ccc724f50 fanotify: Require fid_mode for any non-fd event
| *2f65be6209 fanotify: Encode empty file handle when no inode is provided
| *86bda2d752 fanotify: Allow file handle encoding for unhashed events
| *44ce59c254 fanotify: Support null inode event in fanotify_dfid_inode
| *313234a93e fsnotify: Pass group argument to free_event
| *c9f9d99ea4 fsnotify: Protect fsnotify_handle_inode_event from no-inode events
| *5c4ce075c9 fsnotify: Retrieve super block from the data field
| *44844158ee fsnotify: Add wrapper around fsnotify_add_event
| *24eda1b5e6 fsnotify: Add helper to detect overflow_event
| *7c9ba74cb3 inotify: Don't force FS_IN_IGNORED
| *9539a89f28 fanotify: Split fsid check from other fid mode checks
| *326be73a59 fanotify: Fold event size calculation to its own function
| *7fee789540 fsnotify: Don't insert unmergeable events in hashtable
| *60b6dab8c8 fsnotify: clarify contract for create event hooks
| *9601d20734 fsnotify: pass dentry instead of inode data
| *f114860f72 fsnotify: pass data_type to fsnotify_name()
| *6719531e67 nfsd: Fix a warning for nfsd_file_close_inode
| *7918a95bc2 NLM: Fix svcxdr_encode_owner()
| *b801327ba3 fsnotify: fix sb_connectors leak
| *1773901afb NFS: Remove unused callback void decoder
| *edf220fe15 NFS: Add a private local dispatcher for NFSv4 callback operations
| *91bbbffece SUNRPC: Eliminate the RQ_AUTHERR flag
| *febf43bcdc SUNRPC: Set rq_auth_stat in the pg_authenticate() callout
| *a96da583ff SUNRPC: Add svc_rqst::rq_auth_stat
| *efea5d558e nfs: don't allow reexport reclaims
| *bd5b3deed0 lockd: don't attempt blocking locks on nfs reexports
| *5ea5be84dd nfs: don't atempt blocking locks on nfs reexports
| *e580323ac0 Keep read and write fds with each nlm_file
| *b4bf52174b lockd: update nlm_lookup_file reexport comment
| *14c2a0fad5 nlm: minor refactoring
| *3fbc744783 nlm: minor nlm_lookup_file argument change
| *860f01260e lockd: change the proc_handler for nsm_use_hostnames
| *f469e60f9a sysctl: introduce new proc handler proc_dobool
| *130dcbf77a NFSD: remove vanity comments
| *86df138e8d NFSD: Batch release pages during splice read
| *a4f616afb4 SUNRPC: Add svc_rqst_replace_page() API
| *9e5f2e0ae0 NFSD: Clean up splice actor
| *860893f9e3 fsnotify: optimize the case of no marks of any type
| *9917e1bda3 fsnotify: count all objects with attached connectors
| *44858a3488 fsnotify: count s_fsnotify_inode_refs for attached connectors
| *cdbf9c5f81 fsnotify: replace igrab() with ihold() on attach connector
| *cde8883b0b fanotify: add pidfd support to the fanotify API
| *77bc7f529a fanotify: introduce a generic info record copying helper
| *3ddcb19396 fanotify: minor cosmetic adjustments to fid labels
| *03b5d3ee50
kernel/pid.c: implement additional checks upon pidfd_create() parameters | *774c2dbca7
kernel/pid.c: remove static qualifier from pidfd_create() | *e79057d15d
nfsd: fix NULL dereference in nfs3svc_encode_getaclres | *5610ed80e8
NFSD: Prevent a possible oops in the nfs_dirent() tracepoint | *17600880e1
nfsd: remove redundant assignment to pointer 'this' | *ce18198762
lockd: Update the NLMv4 SHARE results encoder to use struct xdr_stream | *fec0730992
lockd: Update the NLMv4 nlm_res results encoder to use struct xdr_stream | *e1e61d647f
lockd: Update the NLMv4 TEST results encoder to use struct xdr_stream | *4f5ba2e6b4
lockd: Update the NLMv4 void results encoder to use struct xdr_stream | *0add7c13bf
lockd: Update the NLMv4 FREE_ALL arguments decoder to use struct xdr_stream | *604c8a432c
lockd: Update the NLMv4 SHARE arguments decoder to use struct xdr_stream | *300a4b1632
lockd: Update the NLMv4 SM_NOTIFY arguments decoder to use struct xdr_stream | *33f31f6e85
lockd: Update the NLMv4 nlm_res arguments decoder to use struct xdr_stream | *9e1daae630
lockd: Update the NLMv4 UNLOCK arguments decoder to use struct xdr_stream | *0652983fbe
lockd: Update the NLMv4 CANCEL arguments decoder to use struct xdr_stream | *101d45274a
lockd: Update the NLMv4 LOCK arguments decoder to use struct xdr_stream | *360159aafa
lockd: Update the NLMv4 TEST arguments decoder to use struct xdr_stream | *c8f4048250
lockd: Update the NLMv4 void arguments decoder to use struct xdr_stream | *45c1384bd7
lockd: Update the NLMv1 SHARE results encoder to use struct xdr_stream | *b049476790
lockd: Update the NLMv1 nlm_res results encoder to use struct xdr_stream | *d0ddd21bd5
lockd: Update the NLMv1 TEST results encoder to use struct xdr_stream | *e6c92714e9
lockd: Update the NLMv1 void results encoder to use struct xdr_stream | *02a3c81665
lockd: Update the NLMv1 FREE_ALL arguments decoder to use struct xdr_stream | *6c522daf60
lockd: Update the NLMv1 SHARE arguments decoder to use struct xdr_stream | *56c936af53
lockd: Update the NLMv1 SM_NOTIFY arguments decoder to use struct xdr_stream | *90f483a775
lockd: Update the NLMv1 nlm_res arguments decoder to use struct xdr_stream | *b4ea38d69d
lockd: Update the NLMv1 UNLOCK arguments decoder to use struct xdr_stream | *2025b3acf6
lockd: Update the NLMv1 CANCEL arguments decoder to use struct xdr_stream | *3e8675ff1e
lockd: Update the NLMv1 LOCK arguments decoder to use struct xdr_stream | *8f9f41ebfa
lockd: Update the NLMv1 TEST arguments decoder to use struct xdr_stream | *4c3f448aaa
lockd: Update the NLMv1 void argument decoder to use struct xdr_stream | *fa4b890c0d
lockd: Common NLM XDR helpers | *3595ff1c2c
lockd: Create a simplified .vs_dispatch method for NLM requests | *eeea3b96d1
lockd: Remove stale comments | *c58120ab47
nfsd: rpc_peeraddr2str needs rcu lock | *2983611a66
NFSD: Fix error return code in nfsd4_interssc_connect() | *c5a305d93e
nfsd: fix kernel test robot warning in SSC code | *22b7c93d96
nfsd4: Expose the callback address and state of each NFS4 client | *dbc0aa4795
nfsd: move fsnotify on client creation outside spinlock | *a4bc287943
NFSD: delay unmount source's export after inter-server copy completed. | *817c6eb975
NFSD add vfs_fsync after async copy is done | *94a8924701
nfsd: move some commit_metadata()s outside the inode lock | *f666a75ccd
nfsd: Prevent truncation of an unlinked inode from blocking access to its directory | *e7bbdd7dee
NFSD: Update nfsd_cb_args tracepoint | *3e8aeb13a7
NFSD: Remove the nfsd_cb_work and nfsd_cb_done tracepoints | *3a63aa2459
NFSD: Add an nfsd_cb_probe tracepoint | *a577eb06de
NFSD: Replace the nfsd_deleg_break tracepoint | *9f76187f0a
NFSD: Add an nfsd_cb_offload tracepoint | *60aac21534
NFSD: Add an nfsd_cb_lm_notify tracepoint | *59ddc5a82b
NFSD: Enhance the nfsd_cb_setup tracepoint | *fc3b4f0188
NFSD: Adjust cb_shutdown tracepoint | *634816f9d3
NFSD: Add cb_lost tracepoint | *3076ede3fc
NFSD: Drop TRACE_DEFINE_ENUM for NFSD4_CB_<state> macros | *2be1f22751
NFSD: Capture every CB state transition | *b6ba775ccc
NFSD: Constify @fh argument of knfsd_fh_hash() | *88b3cdfd48
NFSD: Add tracepoints for EXCHANGEID edge cases | *5070351cdc
NFSD: Add tracepoints for SETCLIENTID edge cases | *650530d522
NFSD: Add a couple more nfsd_clid_expired call sites | *056332823c
NFSD: Add nfsd_clid_destroyed tracepoint | *580ec8b653
NFSD: Add nfsd_clid_reclaim_complete tracepoint | *3b6808c793
NFSD: Add nfsd_clid_confirmed tracepoint | *c6889b75a6
NFSD: Remove trace_nfsd_clid_inuse_err | *8da1871206
NFSD: Add nfsd_clid_verf_mismatch tracepoint | *c8493d7308
NFSD: Add nfsd_clid_cred_mismatch tracepoint | *b00bb7dfe2
NFSD: Add an RPC authflavor tracepoint display helper | *a4d250f510
fanotify: fix permission model of unprivileged group | *0245993ace
NFS: fix nfs_fetch_iversion() | *b2c0c7cb7f
NFSv4.2: Remove ifdef CONFIG_NFSD from NFSv4.2 client SSC code. | *3793f28102
nfsd: Fix fall-through warnings for Clang | *39ab09108e
nfsd: grant read delegations to clients holding writes | *d2431cc967
nfsd: reshuffle some code | *ee548b1629
nfsd: track filehandle aliasing in nfs4_files | *cc6d658669
nfsd: hash nfs4_files by inode number | *e63b956b2d
nfsd: removed unused argument in nfsd_startup_generic() | *856b0c4979
nfsd: remove unused function | *bd373a90d0
fanotify_user: use upper_32_bits() to verify mask | *4ac0ad2372
fanotify: support limited functionality for unprivileged users | *3e441a872a
fanotify: configurable limits via sysfs | *7df80a90e1
fanotify: limit number of event merge attempts | *40e1e98c1b
fsnotify: use hash table for faster events merge | *ae7fd89dae
fanotify: mix event info and pid into merge key hash | *5b57a2b74d
fanotify: reduce event objectid to 29-bit hash | *4f14948942
Revert "fanotify: limit number of event merge attempts" | *62b7f38473
fsnotify: allow fsnotify_{peek,remove}_first_event with empty queue | *d9168ab8d7
NFSD: Use DEFINE_SPINLOCK() for spinlock | *b20d88bf1e
UAPI: nfsfh.h: Replace one-element array with flexible-array member | *117dac268d
SUNRPC: Export svc_xprt_received() | *289adc864d
nfsd: report client confirmation status in "info" file | *14b13e0603
nfsd: don't ignore high bits of copy count | *1f76b1e659
nfsd: COPY with length 0 should copy to end of file | *ed01819390
nfsd: Fix typo "accesible" | *2a5df97ba4
nfsd: Log client tracking type log message as info instead of warning | *0fa20162bf
nfsd: helper for laundromat expiry calculations | *aab7be2475
NFSD: Clean up NFSDDBG_FACILITY macro | *e7dac943b4
NFSD: Add a tracepoint to record directory entry encoding | *a6d9f6f371
NFSD: Clean up after updating NFSv3 ACL encoders | *857a37235c
NFSD: Update the NFSv3 SETACL result encoder to use struct xdr_stream | *d505e66191
NFSD: Update the NFSv3 GETACL result encoder to use struct xdr_stream | *67d4f36707
NFSD: Clean up after updating NFSv2 ACL encoders | *3d2033a58c
NFSD: Update the NFSv2 ACL ACCESS result encoder to use struct xdr_stream | *6ef7a56fd7
NFSD: Update the NFSv2 ACL GETATTR result encoder to use struct xdr_stream | *82ac35b167
NFSD: Update the NFSv2 SETACL result encoder to use struct xdr_stream | *6677b0d16a
NFSD: Update the NFSv2 GETACL result encoder to use struct xdr_stream | *89ac9a8101
NFSD: Add an xdr_stream-based encoder for NFSv2/3 ACLs | *93584780eb
NFSD: Remove unused NFSv2 directory entry encoders | *b8658c947d
NFSD: Update the NFSv2 READDIR entry encoder to use struct xdr_stream | *801e4d79b7
NFSD: Update the NFSv2 READDIR result encoder to use struct xdr_stream | *bc17759a4e
NFSD: Count bytes instead of pages in the NFSv2 READDIR encoder | *c4e2727589
NFSD: Add a helper that encodes NFSv3 directory offset cookies | *60bc5af5b8
NFSD: Update the NFSv2 STATFS result encoder to use struct xdr_stream | *ad0614d3a8
NFSD: Update the NFSv2 READ result encoder to use struct xdr_stream | *27909a583c
NFSD: Update the NFSv2 READLINK result encoder to use struct xdr_stream | *9aab4f03e8
NFSD: Update the NFSv2 diropres encoder to use struct xdr_stream | *c64d5d0ca9
NFSD: Update the NFSv2 attrstat encoder to use struct xdr_stream | *816c23c911
NFSD: Update the NFSv2 stat encoder to use struct xdr_stream | *e4e6019ce5
NFSD: Reduce svc_rqst::rq_pages churn during READDIR operations | *d855480201
NFSD: Remove unused NFSv3 directory entry encoders | *37aa5e6402
NFSD: Update NFSv3 READDIR entry encoders to use struct xdr_stream | *7cbec0dc09
NFSD: Update the NFSv3 READDIR3res encoder to use struct xdr_stream | *cacfe8f6d8
NFSD: Count bytes instead of pages in the NFSv3 READDIR encoder | *3b2fef48b7
NFSD: Add a helper that encodes NFSv3 directory offset cookies | *30dabf1d4f
NFSD: Update the NFSv3 COMMIT3res encoder to use struct xdr_stream | *349d96b070
NFSD: Update the NFSv3 PATHCONF3res encoder to use struct xdr_stream | *4c06f831d2
NFSD: Update the NFSv3 FSINFO3res encoder to use struct xdr_stream | *f6908e2bcd
NFSD: Update the NFSv3 FSSTAT3res encoder to use struct xdr_stream | *066dc317fa
NFSD: Update the NFSv3 LINK3res encoder to use struct xdr_stream | *0404cffec4
NFSD: Update the NFSv3 RENAMEv3res encoder to use struct xdr_stream | *1863ca4c9e
NFSD: Update the NFSv3 CREATE family of encoders to use struct xdr_stream | *8737a75f26
NFSD: Update the NFSv3 WRITE3res encoder to use struct xdr_stream | *b241ab9823
NFSD: Update the NFSv3 READ3res encode to use struct xdr_stream | *170e6bd25e
NFSD: Update the NFSv3 READLINK3res encoder to use struct xdr_stream | *c3995f8be1
NFSD: Update the NFSv3 wccstat result encoder to use struct xdr_stream | *f74e0652a6
NFSD: Update the NFSv3 LOOKUP3res encoder to use struct xdr_stream | *fd9e183df6
NFSD: Update the NFSv3 ACCESS3res encoder to use struct xdr_stream | *0ef12d755c
NFSD: Update the GETATTR3res encoder to use struct xdr_stream | *48aadfa75b
NFSD: Extract the svcxdr_init_encode() helper | *e864d4d834
namei: introduce struct renamedata | *b0fa673c8c
fs: add file and path permissions helpers | *666a413295
kallsyms: only build {,module_}kallsyms_on_each_symbol when required | *f8d8568627
kallsyms: refactor {,module_}kallsyms_on_each_symbol | *bef9d8b4f8
module: use RCU to synchronize find_module | *32edffff86
module: unexport find_module and module_mutex | *51f620fcc4
inotify, memcg: account inotify instances to kmemcg | *c1fe2bb305
nfsd: skip some unnecessary stats in the v4 case | *0220d51186
nfs: use change attribute for NFS re-exports | *5a0b45626f
NFSv4_2: SSC helper should use its own config. | *b267f61182
nfsd: cstate->session->se_client -> cstate->clp | *bc6015541c
nfsd: simplify nfsd4_check_open_reclaim | *25ac4fdbdc
nfsd: remove unused set_client argument | *87ab73c1cc
nfsd: find_cpntf_state cleanup | *1d4ccfdc7d
nfsd: refactor set_client | *1700657468
nfsd: rename lookup_clientid->set_client | *ea92c0768f
nfsd: simplify nfsd_renew | *52923f25be
nfsd: simplify process_lock | *4f26b1747a
nfsd4: simplify process_lookup1 | *42cf742d86
nfsd: report per-export stats | *65b1df1358
nfsd: protect concurrent access to nfsd stats counters | *d1344de0d6
nfsd: remove unused stats counters | *0a13baa6ab
NFSD: Clean up after updating NFSv3 ACL decoders | *22af3dfbe6
NFSD: Update the NFSv2 SETACL argument decoder to use struct xdr_stream | *f89e3fa89e
NFSD: Update the NFSv3 GETACL argument decoder to use struct xdr_stream | *5ea5e56cfb
NFSD: Clean up after updating NFSv2 ACL decoders | *81f79eb223
NFSD: Update the NFSv2 ACL ACCESS argument decoder to use struct xdr_stream | *9eea3915dd
NFSD: Update the NFSv2 ACL GETATTR argument decoder to use struct xdr_stream | *508a791fbe
NFSD: Update the NFSv2 SETACL argument decoder to use struct xdr_stream | *e077857ef0
NFSD: Add an xdr_stream-based decoder for NFSv2/3 ACLs | *ea6b0e02dc
NFSD: Update the NFSv2 GETACL argument decoder to use struct xdr_stream | *e440613886
NFSD: Remove argument length checking in nfsd_dispatch() | *7e6746027b
NFSD: Update the NFSv2 SYMLINK argument decoder to use struct xdr_stream | *1db54ce543
NFSD: Update the NFSv2 CREATE argument decoder to use struct xdr_stream | *40de4113f8
NFSD: Update the NFSv2 SETATTR argument decoder to use struct xdr_stream | *ebfb21605f
NFSD: Update the NFSv2 LINK argument decoder to use struct xdr_stream | *a362dd478b
NFSD: Update the NFSv2 RENAME argument decoder to use struct xdr_stream | *0047abd4c4
NFSD: Update NFSv2 diropargs decoding to use struct xdr_stream | *7d9ab8ee57
NFSD: Update the NFSv2 READDIR argument decoder to use struct xdr_stream | *672111a408
NFSD: Add helper to set up the pages where the dirlist is encoded | *365835d2ff
NFSD: Update the NFSv2 READLINK argument decoder to use struct xdr_stream | *ecee6ba592
NFSD: Update the NFSv2 WRITE argument decoder to use struct xdr_stream | *6e88b7ec6c
NFSD: Update the NFSv2 READ argument decoder to use struct xdr_stream | *ba7e0412fb
NFSD: Update the NFSv2 GETATTR argument decoder to use struct xdr_stream | *9ceeee0ec8
NFSD: Update the MKNOD3args decoder to use struct xdr_stream | *8841760f68
NFSD: Update the SYMLINK3args decoder to use struct xdr_stream | *b5d1ae6cc4
NFSD: Update the MKDIR3args decoder to use struct xdr_stream | *bd54084b58
NFSD: Update the CREATE3args decoder to use struct xdr_stream | *48ea0cb79b
NFSD: Update the SETATTR3args decoder to use struct xdr_stream | *71d7e7c6a6
NFSD: Update the LINK3args decoder to use struct xdr_stream | *e84af23391
NFSD: Update the RENAME3args decoder to use struct xdr_stream | *69e54a4470
NFSD: Update the NFSv3 DIROPargs decoder to use struct xdr_stream | *47614a374e
NFSD: Update COMMIT3arg decoder to use struct xdr_stream | *fbcd668016
NFSD: Update READDIR3args decoders to use struct xdr_stream | *e0ddafcc25
NFSD: Add helper to set up the pages where the dirlist is encoded | *29270d477f
NFSD: Fix returned READDIR offset cookie | *19285d319f
NFSD: Update READLINK3arg decoder to use struct xdr_stream | *5f36ae59d6
NFSD: Update WRITE3arg decoder to use struct xdr_stream | *b77a4a968d
NFSD: Update READ3arg decoder to use struct xdr_stream | *7bb23be450
NFSD: Update ACCESS3arg decoder to use struct xdr_stream | *d668aa92a6
NFSD: Update GETATTR3args decoder to use struct xdr_stream | *22b19656ea
SUNRPC: Move definition of XDR_UNIT | *97d254cba3
SUNRPC: Display RPC procedure names instead of proc numbers | *c336597d03
SUNRPC: Make trace_svc_process() display the RPC procedure symbolically | *5b82798f78
NFSD: Restore NFSv4 decoding's SAVEMEM functionality | *bca0057f68
NFSD: Fix sparse warning in nfssvc.c | *131676b824
fs/lockd: convert comma to semicolon | *185e81a977
inotify: Increase default inotify.max_user_watches limit to 1048576 | *1aecdaa7e2
file: Replace ksys_close with close_fd | *6d256a904c
file: Rename __close_fd to close_fd and remove the files parameter | *7458c5ae46
file: Merge __alloc_fd into alloc_fd | *9e8ef54ca8
file: In f_dupfd read RLIMIT_NOFILE once. | *89f9e52964
file: Merge __fd_install into fd_install | *b4b827da90
proc/fd: In fdinfo seq_show don't use get_files_struct | *c0e3f6df04
proc/fd: In proc_readfd_common use task_lookup_next_fd_rcu | *a6da7536e4
file: Implement task_lookup_next_fd_rcu | *6007aeeaef
kcmp: In get_file_raw_ptr use task_lookup_fd_rcu | *c2291f7bdf
proc/fd: In tid_fd_mode use task_lookup_fd_rcu | *32ac87287d
file: Implement task_lookup_fd_rcu | *c4716bb296
file: Rename fcheck lookup_fd_rcu | *23f5564992
file: Replace fcheck_files with files_lookup_fd_rcu | *9080557c56
file: Factor files_lookup_fd_locked out of fcheck_files | *ddb21f9984
file: Rename __fcheck_files to files_lookup_fd_raw | *e6f42bc11a
Revert "fget: clarify and improve __fget_files() implementation" | *4d037e1173
proc/fd: In proc_fd_link use fget_task | *c874ec02cb
bpf: In bpf_task_fd_query use fget_task | *fe1722255e
kcmp: In kcmp_epoll_target use fget_task | *ba7aac19b4
exec: Remove reset_files_struct | *44f79df28b
exec: Simplify unshare_files | *5091d051c5
exec: Move unshare_files to fix posix file locking during exec | *527c9b6eb1
exec: Don't open code get_close_on_exec | *8f1df3d0c1
nfsd: Record NFSv4 pre/post-op attributes as non-atomic | *0750e494c7
nfsd: Set PF_LOCAL_THROTTLE on local filesystems only | *f3056a0ac2
nfsd: Fix up nfsd to ensure that timeout errors don't result in ESTALE | *38e213c1e4
exportfs: Add a function to return the raw output from fh_to_dentry() | *93f7d515d8
nfsd: close cached files prior to a REMOVE or RENAME that would replace target | *203ca3253b
nfsd: allow filesystems to opt out of subtree checking | *d5314c9bb7
nfsd: add a new EXPORT_OP_NOWCC flag to struct export_operations | *34de27ed84
Revert "nfsd4: support change_attr_type attribute" | *b720ceec88
nfsd4: don't query change attribute in v2/v3 case | *796785a79b
nfsd: minor nfsd4_change_attribute cleanup | *88dea0f92b
nfsd: simplify nfsd4_change_info | *f8032b859d
nfsd: only call inode_query_iversion in the I_VERSION case | *3aea16e6b7
NFSD: Remove macros that are no longer used | *b24e6a40ee
NFSD: Replace READ* macros in nfsd4_decode_compound() | *6b48808835
NFSD: Make nfsd4_ops::opnum a u32 | *c2d0c16990
NFSD: Replace READ* macros in nfsd4_decode_listxattrs() | *8e1b8a78a9
NFSD: Replace READ* macros in nfsd4_decode_setxattr() | *9bc67df0f9
NFSD: Replace READ* macros in nfsd4_decode_xattr_name() | *b719fc9375
NFSD: Replace READ* macros in nfsd4_decode_clone() | *a2f6c16ad1
NFSD: Replace READ* macros in nfsd4_decode_seek() | *f8eb5424e3
NFSD: Replace READ* macros in nfsd4_decode_offload_status() | *c2d2a919b2
NFSD: Replace READ* macros in nfsd4_decode_copy_notify() | *8604d294c1
NFSD: Replace READ* macros in nfsd4_decode_copy() | *dc1a31ca8e
NFSD: Replace READ* macros in nfsd4_decode_nl4_server() | *a0b8dabc59
NFSD: Replace READ* macros in nfsd4_decode_fallocate() | *de0dc37a79
NFSD: Replace READ* macros in nfsd4_decode_reclaim_complete() | *093f9d2c8f
NFSD: Replace READ* macros in nfsd4_decode_destroy_clientid() | *7675420fde
NFSD: Replace READ* macros in nfsd4_decode_test_stateid() | *f0de0b6895
NFSD: Replace READ* macros in nfsd4_decode_sequence() | *1ea743dc48
NFSD: Replace READ* macros in nfsd4_decode_secinfo_no_name() | *b63e313dce
NFSD: Replace READ* macros in nfsd4_decode_layoutreturn() | *40e627c502
NFSD: Replace READ* macros in nfsd4_decode_layoutget() | *40770a0f8e
NFSD: Replace READ* macros in nfsd4_decode_layoutcommit() | *c0a4c4e46b
NFSD: Replace READ* macros in nfsd4_decode_getdeviceinfo() | *5f892c1178
NFSD: Replace READ* macros in nfsd4_decode_free_stateid() | *92ae309a99
NFSD: Replace READ* macros in nfsd4_decode_destroy_session() | *73684a8118
NFSD: Replace READ* macros in nfsd4_decode_create_session() | *2bd9ef494a
NFSD: Add a helper to decode channel_attrs4 | *d01f41320d
NFSD: Add a helper to decode nfs_impl_id4 | *d50a76f1f3
NFSD: Add a helper to decode state_protect4_a | *0c935af3cf
NFSD: Add a separate decoder for ssv_sp_parms | *cb568dbdef
NFSD: Add a separate decoder to handle state_protect_ops | *b736338042
NFSD: Replace READ* macros in nfsd4_decode_bind_conn_to_session() | *7d21084074
NFSD: Replace READ* macros in nfsd4_decode_backchannel_ctl() | *5658ca0651
NFSD: Replace READ* macros in nfsd4_decode_cb_sec() | *79f1a8323a
NFSD: Replace READ* macros in nfsd4_decode_release_lockowner() | *eeab2f3bf2
NFSD: Replace READ* macros in nfsd4_decode_write() | *b1af8f131e
NFSD: Replace READ* macros in nfsd4_decode_verify() | *19a4c05e81
NFSD: Replace READ* macros in nfsd4_decode_setclientid_confirm() | *2503dcf0f6
NFSD: Replace READ* macros in nfsd4_decode_setclientid() | *7c06ba5c8b
NFSD: Replace READ* macros in nfsd4_decode_setattr() | *5277d60346
NFSD: Replace READ* macros in nfsd4_decode_secinfo() | *2cef1009f8
NFSD: Replace READ* macros in nfsd4_decode_renew() | *e2b287a53c
NFSD: Replace READ* macros in nfsd4_decode_rename() | *274b8f0597
NFSD: Replace READ* macros in nfsd4_decode_remove() | *c24e2a4943
NFSD: Replace READ* macros in nfsd4_decode_readdir() | *d0a0219a35
NFSD: Replace READ* macros in nfsd4_decode_read() | *4b28cd7e83
NFSD: Replace READ* macros in nfsd4_decode_putfh() | *ad1ea32c97
NFSD: Replace READ* macros in nfsd4_decode_open_downgrade() | *e557a2eabb
NFSD: Replace READ* macros in nfsd4_decode_open_confirm() | *f6eb911d79
NFSD: Replace READ* macros in nfsd4_decode_open() | *4507c23e42
NFSD: Add helper to decode OPEN's open_claim4 argument | *48385b58bc
NFSD: Replace READ* macros in nfsd4_decode_share_deny() | *fa60cc6971
NFSD: Replace READ* macros in nfsd4_decode_share_access() | *070df4a4e9
NFSD: Add helper to decode OPEN's openflag4 argument | *c1ea8812d4
NFSD: Add helper to decode OPEN's createhow4 argument | *11ea3e65f0
NFSD: Add helper to decode NFSv4 verifiers | *def95074db
NFSD: Replace READ* macros in nfsd4_decode_lookup() | *340878b2e0
NFSD: Replace READ* macros in nfsd4_decode_locku() | *3625de1522
NFSD: Replace READ* macros in nfsd4_decode_lockt() | *8357985d21
NFSD: Replace READ* macros in nfsd4_decode_lock() | *d27f2dceda
NFSD: Add helper for decoding locker4 | *0c281b7083
NFSD: Add helpers to decode a clientid4 and an NFSv4 state owner | *753bb6b0e7
NFSD: Relocate nfsd4_decode_opaque() | *84bc365eee
NFSD: Replace READ* macros in nfsd4_decode_link() | *42c4437d78
NFSD: Replace READ* macros in nfsd4_decode_getattr() | *42e319695e
NFSD: Replace READ* macros in nfsd4_decode_delegreturn() | *3012fe5fea
NFSD: Replace READ* macros in nfsd4_decode_create() | *57516a96ca
NFSD: Replace READ* macros in nfsd4_decode_fattr() | *9737a9a8f9
NFSD: Replace READ* macros that decode the fattr4 umask attribute | *91a6752dad
NFSD: Replace READ* macros that decode the fattr4 security label attribute | *064e439bef
NFSD: Replace READ* macros that decode the fattr4 time_set attributes | *df42ebb61b
NFSD: Replace READ* macros that decode the fattr4 owner_group attribute | *dec78fb66d
NFSD: Replace READ* macros that decode the fattr4 owner attribute | *8801b0c284
NFSD: Replace READ* macros that decode the fattr4 mode attribute | *3d3690b662
NFSD: Replace READ* macros that decode the fattr4 acl attribute | *ee02662724
NFSD: Replace READ* macros that decode the fattr4 size attribute | *2a8ae03957
NFSD: Change the way the expected length of a fattr4 is checked | *f82c6ad7e2
NFSD: Replace READ* macros in nfsd4_decode_commit() | *c701c0e5a9
NFSD: Replace READ* macros in nfsd4_decode_close() | *9921353a52
NFSD: Replace READ* macros in nfsd4_decode_access() | *bbb0a710a2
NFSD: Replace the internals of the READ_BUF() macro | *2994c88884
NFSD: Add tracepoints in nfsd4_decode/encode_compound() | *568f9ca73d
NFSD: Add tracepoints in nfsd_dispatch() | *fbffaddb76
NFSD: Add common helpers to decode void args and encode void results | *79e4e0d489
SUNRPC: Prepare for xdr_stream-style decoding on the server-side | *2f46cc8141
SUNRPC: Add xdr_set_scratch_page() and xdr_reset_scratch_buffer() | *164937edca
nfsd: Fix error return code in nfsd_file_cache_init() | *9393f1628f
NFSD: Add SPDX header for fs/nfsd/trace.c | *a7b8e883ce
NFSD: Remove extra "0x" in tracepoint format specifier | *9f8405182b
NFSD: Clean up the show_nf_may macro | *e513685101
nfsd/nfs3: remove unused macro nfsd3_fhandleres | *92f59545b9
NFSD: A semicolon is not needed after a switch statement. | *a2f25c3208
NFSD: Invoke svc_encode_result_payload() in "read" NFSD encoders | *9aa0a43a55
SUNRPC: Rename svc_encode_read_payload() * |640645c85b
Revert "drm/mipi-dsi: use correct return type for the DSC functions" * |e4f3376872
Revert "media: cec: fix a deadlock situation" * |12d97237e4
Revert "media: cec: call enable_adap on s_log_addrs" * |f257da513d
Revert "media: cec: abort if the current transmit was canceled" * |baa6c4164b
Revert "media: cec: correctly pass on reply results" * |802e36bc55
Revert "media: cec: use call_op and check for !unregistered" * |590dc9d34f
Revert "media: cec-adap.c: drop activate_cnt, use state info instead" * |c0342019d8
Revert "media: cec: core: avoid confusing "transmit timed out" message" * |8047831dc6
Revert "media: cec: core: avoid recursive cec_claim_log_addrs" * |0546f6a05d
Revert "media: cec: core: add adap_nb_transmit_canceled() callback" * |054258ff89
ANDROID: ABI fixup for abi break in struct dst_ops * |fedef46c69
Merge 5.10.219 into android12-5.10-lts |/ *a2ed160621
Linux 5.10.219 *487489c4c8
RDMA/hns: Fix CQ and QP cache affinity *68a9559376
RDMA/hns: Use mutex instead of spinlock for ida allocation *5fe764c781
f2fs: compress: fix compression chksum *d7ae4792b5
scsi: ufs: ufs-qcom: Clear qunipro_g4_sel for HW major version > 5 *6285d50a23
NFS: Fix READ_PLUS when server doesn't support OP_READ_PLUS *3e41609e62
nfs: fix undefined behavior in nfs_block_bits() *7360cef95a
s390/ap: Fix crash in AP internal function modify_bitmap() *76dc776153
ext4: fix mb_cache_entry's e_refcnt leak in ext4_xattr_block_cache_find() *08018302f2
sparc: move struct termio to asm/termios.h *2295a7ef5c
net: fix __dst_negative_advice() race *51664ef6ac
kdb: Use format-specifiers rather than memset() for padding in kdb_read() *e3d11ff45f
kdb: Merge identical case statements in kdb_read() *2b5e1534df
kdb: Fix console handling when editing and tab-completing commands *7c19e28f3a
kdb: Use format-strings rather than '\0' injection in kdb_read() *cfdc2fa4db
kdb: Fix buffer overflow during tab-complete *b487b48efd
watchdog: rti_wdt: Set min_hw_heartbeat_ms to accommodate a safety margin *161f5a1189
sparc64: Fix number of online CPUs *6e7dd338c0
intel_th: pci: Add Meteor Lake-S CPU support *1249478555
net/9p: fix uninit-value in p9_client_rpc() *3cc7687f7f
net/ipv6: Fix route deleting failure when metric equals 0 *a718b6d2a3
crypto: qat - Fix ADF_DEV_RESET_SYNC memory leak *6815376b7f
crypto: ecrdsa - Fix module auto-load on add_key *4d8226bc7e
KVM: arm64: Allow AArch32 PSTATE.M to be restored as System mode *32f92b0078
fbdev: savage: Handle err return when savagefb_check_var failed *68447c350f
mmc: sdhci-acpi: Disable write protect detection on Toshiba WT10-A *63eda0f3eb
mmc: sdhci-acpi: Fix Lenovo Yoga Tablet 2 Pro 1380 sdcard slot not working *b3418751cc
mmc: sdhci-acpi: Sort DMI quirks alphabetically *24b7af86a8
mmc: core: Add mmc_gpiod_set_cd_config() function *e6823bb7f4
media: v4l2-core: hold videodev_lock until dev reg, finishes *567d3a4959
media: mxl5xx: Move xpt structures off stack *1514e1fb2a
media: mc: mark the media devnode as registered from the, start *82e6eba1a5
arm64: dts: hi3798cv200: fix the size of GICR *c539721e90
wifi: rtlwifi: rtl8192de: Fix endianness issue in RX path *6d5bfcd2cc
wifi: rtlwifi: rtl8192de: Fix low speed with WPA3-SAE *8dffc574c7
wifi: rtl8xxxu: Fix the TX power of RTL8192CU, RTL8723AU *aa64464c8f
md/raid5: fix deadlock that raid5d() wait for itself to clear MD_SB_CHANGE_PENDING *1f26711c08
arm64: dts: qcom: qcs404: fix bluetooth device address *5cd0428356
arm64: tegra: Correct Tegra132 I2C alias *e2c6a9b342
ACPI: resource: Do IRQ override on TongFang GXxHRXx and GMxHGxx *73485d6bd9
ata: pata_legacy: make legacy_exit() work again *8112fa72b7
drm/amdgpu: add error handle to avoid out-of-bounds *b479fd59a1
media: lgdt3306a: Add a check against null-pointer-def *75c87e2ac6
f2fs: fix to do sanity check on i_xattr_nid in sanity_check_inode() *cade34279c
netfilter: nf_tables: Fix potential data-race in __nft_obj_type_get() *9c1c2ea099
netfilter: nf_tables: restrict tunnel object to NFPROTO_NETDEV *a447f26830
x86/mm: Remove broken vsyscall emulation code from the page fault code *3ee36f0048
vxlan: Fix regression when dropping packets due to invalid src addresses *67fa90d4a2
nilfs2: fix use-after-free of timer for log writer thread *e31fe702ed
afs: Don't cross .backup mountpoint from backup volume *b6920325ac
io_uring: fail NOP if non-zero op flags is passed in *54e8f88d2b
mmc: core: Do not force a retune before RPMB switch *75805481c3
binder: fix max_thread type inconsistency *4cefcd0af7
SUNRPC: Fix loop termination condition in gss_free_in_token_pages() *0cf6693d3f
media: cec: core: add adap_nb_transmit_canceled() callback *6752dfcfff
genirq/cpuhotplug, x86/vector: Prevent vector leak during CPU offline *bdd0aa055b
ALSA: timer: Set lower bound of start tick time *1f4b848935
hwmon: (shtc1) Fix property misspelling *1abbf079da
ipvlan: Dont Use skb->sk in ipvlan_process_v{4,6}_outbound *3c5caaef46
spi: stm32: Don't warn about spurious interrupts *19e5a3d771
kconfig: fix comparison to constant symbols, 'm', 'n' *07eeedafc5
netfilter: tproxy: bail out if IP has been disabled on the device *ddd2912a94
net:fec: Add fec_enet_deinit() *29467edc23
bpf: Allow delete from sockmap/sockhash only if update is allowed *117cacd72c
net: usb: smsc95xx: fix changing LED_SEL bit value updated from EEPROM *3c0d36972e
enic: Validate length of nl attributes in enic_set_vf_port *540d73a5c0
bpf: Fix potential integer overflow in resolve_btfids *ae6fc4e6a3
dma-buf/sw-sync: don't enable IRQ from sync_print_obj() *72c6038d23
net/mlx5e: Use rx_missed_errors instead of rx_dropped for reporting buffer exhaustion *82fdfbf242
nvmet: fix ns enable/disable possible hang *5f72ba46f1
spi: Don't mark message DMA mapped when no transfer in it is *7ca9cf24b0
netfilter: nft_payload: restore vlan q-in-q match support *e01065b339
netfilter: nfnetlink_queue: acquire rcu_read_lock() in instance_destroy_rcu() *0f26983c24
nfc: nci: Fix handling of zero-length payload packets in nci_rx_work() *728fb8b3b5
nfc: nci: Fix kcov check in nci_rx_work() *d72e126e9a
tls: fix missing memory barrier in tls_init *cb95173e6c
net: fec: avoid lock evasion when reading pps_enable *7fbe54f02a
virtio: delete vq in vp_find_vqs_msix() when request_irq() fails *461a760d57
arm64: asm-bug: Add .align 2 to the end of __BUG_ENTRY *8cae65ace4
openvswitch: Set the skbuff pkt_type for proper pmtud support. *e9b2f60636
tcp: Fix shift-out-of-bounds in dctcp_update_alpha(). *42bd4e491c
params: lift param_set_uint_minmax to common code *daf341e0a2
ipv6: sr: fix memleak in seg6_hmac_init_algo *1c65ebce7d
rpcrdma: fix handling for RDMA_CM_EVENT_DEVICE_REMOVAL *f2b326b774
sunrpc: fix NFSACL RPC retry on soft mount *6f39d5aae6
ASoC: tas2552: Add TX path for capturing AUDIO-OUT data *f80b786ab0
nfc: nci: Fix uninit-value in nci_rx_work *ee6a497844
x86/kconfig: Select ARCH_WANT_FRAME_POINTERS again when UNWINDER_FRAME_POINTER=y *8fb8be0e3b
regulator: bd71828: Don't overwrite runtime voltages *a2b0c3a6d4
null_blk: Fix the WARNING: modpost: missing MODULE_DESCRIPTION() *3166b2dffa
media: cec: core: avoid confusing "transmit timed out" message *5103090f4e
media: cec: core: avoid recursive cec_claim_log_addrs *3e938b7d40
media: cec-adap.c: drop activate_cnt, use state info instead *73ef9ae980
media: cec: use call_op and check for !unregistered *8fa7e4896f
media: cec: correctly pass on reply results *b64cb24a9e
media: cec: abort if the current transmit was canceled *2c67f3634f
media: cec: call enable_adap on s_log_addrs *0ab74ae99f
media: cec: fix a deadlock situation *ca55f013be
media: core headers: fix kernel-doc warnings *9f6da5da3d
media: cec: cec-api: add locking in cec_release() *a0ca5ff242
media: cec: cec-adap: always cancel work in cec_transmit_msg_fh *6be4923ade
um: Fix the -Wmissing-prototypes warning for __switch_mm *12ea1ec137
powerpc/pseries: Add failure related checks for h_get_mpp and h_get_ppp *a2d61b328e
media: flexcop-usb: fix sanity check of bNumEndpoints *c11caf1339
media: flexcop-usb: clean up endpoint sanity checks *ca17da9000
scsi: qla2xxx: Replace all non-returning strlcpy() with strscpy() *a16775828a
media: stk1160: fix bounds checking in stk1160_copy_video() *f4b3d2585b
drm/bridge: tc358775: fix support for jeida-18 and jeida-24 *4d5ef7face
um: vector: fix bpfflash parameter evaluation *351d1a6454
um: Add winch to winch_handlers before registering winch IRQ *1ef5d235be
um: Fix return value in ubd_init() *96b9ed94dc
drm/msm/dpu: Always flush the slave INTF on the CTL *a3bb8070b7
Input: pm8xxx-vibrator - correct VIB_MAX_LEVELS calculation *580e47c282
mmc: sdhci_am654: Fix ITAPDLY for HS400 timing *2621bf50f5
mmc: sdhci_am654: Add ITAPDLYSEL in sdhci_j721e_4bit_set_clock *b55d988df1
mmc: sdhci_am654: Add OTAP/ITAP delay enable *76f2b3ccbd
mmc: sdhci_am654: Drop lookup for deprecated ti,otap-del-sel *8dcfbb27e4
mmc: sdhci_am654: Write ITAPDLY for DDR52 timing *e7a444a35e
mmc: sdhci_am654: Add tuning algorithm for delay chain *52f8d76769
Input: ioc3kbd - add device table *0096d223f7
Input: ioc3kbd - convert to platform remove callback returning void *abeaeaee7f
Input: ims-pcu - fix printf string overflow *adc7dc29b7
s390/ipl: Fix incorrect initialization of nvme dump block *66a02effb8
s390/ipl: Fix incorrect initialization of len fields in nvme reipl block *ec43f32f66
libsubcmd: Fix parse-options memory leak *158adcb7fd
serial: sh-sci: protect invalidating RXDMA on shutdown *b8962cf985
f2fs: compress: don't allow unaligned truncation on released compress inode *c1958b978d
f2fs: fix to release node block count in error path of f2fs_new_node_page() *a6e1f7744e
f2fs: compress: fix to cover {reserve,release}_compress_blocks() w/ cp_rwsem lock *4b6e5edefd
PCI/EDR: Align EDR_PORT_LOCATE_DSM with PCI Firmware r3.3 *d2e2e90c76
PCI/EDR: Align EDR_PORT_DPC_ENABLE_DSM with PCI Firmware r3.3 *9ca02da316
extcon: max8997: select IRQ_DOMAIN instead of depending on it *d32caf5137
ppdev: Add an error check in register_device *9349e1f2c9
ppdev: Remove usage of the deprecated ida_simple_xx() API *713fc00c57
stm class: Fix a double free in stm_register_device() *7716b201d2
usb: gadget: u_audio: Clear uac pointer when freed. *23209f947d
microblaze: Remove early printk call from cpuinfo-static.c *34ff72bb5d
microblaze: Remove gcc flag for non existing early_printk.c file *26e6e25d74
fpga: region: add owner module and take its refcount *af02dec83a
fpga: region: Use standard dev_release for class driver *b089bb733c
docs: driver-api: fpga: avoid using UTF-8 chars *9fdd3d1cd0
fpga: region: Rename dev to parent for parent device *840c9c7d6a
fpga: region: change FPGA indirect article to an *be76107dc4
iio: pressure: dps310: support negative temperature values *cdbe0477a0
f2fs: fix to check pinfile flag in f2fs_move_file_range() *2b16554fb2
f2fs: fix to relocate check condition in f2fs_fallocate() *f1169d2b2a
f2fs: fix typos in comments *bdca4b6786
f2fs: do not allow partial truncation on pinned file *d992b78026
f2fs: fix to force keeping write barrier for strict fsync mode *eebbc4eb7e
f2fs: add cp_error check in f2fs_write_compressed_pages *431ecafbff
f2fs: compress: fix to relocate check condition in f2fs_{release,reserve}_compress_blocks() *8e1651cd66
f2fs: introduce FI_COMPRESS_RELEASED instead of using IMMUTABLE bit *486009bc2f
f2fs: compress: remove unneeded preallocation *df4978d968
f2fs: compress: clean up parameter of __f2fs_cluster_blocks() *5b09d2e790
f2fs: add compress_mode mount option *70fb69e05a
f2fs: compress: support chksum *1ada965692
serial: sc16is7xx: add proper sched.h include for sched_set_fifo() *276bc8a7dc
greybus: arche-ctrl: move device table to its right location *d1f67d1d8c
serial: max3100: Fix bitwise types *e8e2a4339d
serial: max3100: Update uart_driver_registered on driver removal *cc121e3722
serial: max3100: Lock port->lock when calling uart_handle_cts_change() *bc40d0e356
firmware: dmi-id: add a release callback function *83e078085f
dmaengine: idma64: Add check for dma_set_max_seg_size *fd4bcb991e
soundwire: cadence: fix invalid PDI offset *7b98f1493a
f2fs: fix to wait on page writeback in __clone_blkaddrs() *eac10cf3a9
greybus: lights: check return of get_channel_from_mode *ae20865fe6
sched/fair: Allow disabling sched_balance_newidle with sched_relax_domain_level *e892f9932d
af_packet: do not call packet_read_pending() from tpacket_destruct_skb() *b117e5b4f2
netrom: fix possible dead-lock in nr_rt_ioctl() *cafccde429
net: qrtr: ns: Fix module refcnt *66fd37d0a8
net: qrtr: fix null-ptr-deref in qrtr_ns_remove *3e83903cd4
RDMA/IPoIB: Fix format truncation compilation errors *68e8c44c0d
selftests/kcmp: remove unused open mode *08df7b006c
selftests/kcmp: Make the test output consistent and clear *f148a95f68
SUNRPC: Fix gss_free_in_token_pages() *e28a16af4c
ext4: fix potential unnitialized variable *4deaa4d5f8
ext4: remove unused parameter from ext4_mb_new_blocks_simple() *fdbce45449
ext4: try all groups in ext4_mb_new_blocks_simple *e498c2f441
ext4: fix unit mismatch in ext4_mb_new_blocks_simple *f49c865d5b
ext4: simplify calculation of blkoff in ext4_mb_new_blocks_simple *2a2bba3cbd
sunrpc: removed redundant procp check *7c9ab0a449
ext4: avoid excessive credit estimate in ext4_tmpfile() *4d693ca24a
x86/insn: Fix PUSH instruction in x86 instruction decoder opcode map *9ff328de02
clk: qcom: mmcc-msm8998: fix venus clock issue *45b31be4dd
RDMA/hns: Modify the print level of CQE error *9cce44567f
RDMA/hns: Use complete parentheses in macros *9efed7448b
RDMA/hns: Fix return value in hns_roce_map_mr_sg *4c91ad5ed5
RDMA/hns: Fix incorrect symbol types *e612e695d3
RDMA/hns: Create QP with selected QPN for bank load balance *3c3744c309
RDMA/hns: Refactor the hns_roce_buf allocation flow *eb9635b4a9
drm/mipi-dsi: use correct return type for the DSC functions *5d344b3089
drm/panel: simple: Add missing Innolux G121X1-L03 format, flags, connector *f10aa595ee
drm/bridge: tc358775: Don't log an error when DSI host can't be found *ee751403fb
drm/bridge: lt9611: Don't log an error when DSI host can't be found *3ce31a0e37
ASoC: tracing: Export SND_SOC_DAPM_DIR_OUT to its value *2d9adecc88
drm: vc4: Fix possible null pointer dereference *a5fa5b40a2
drm/arm/malidp: fix a possible null pointer dereference *4b68b861b5
media: atomisp: ssh_css: Fix a null-pointer dereference in load_video_binaries *08ce354f3d
fbdev: sh7760fb: allow modular build *85d1a27402
drm: bridge: cdns-mhdp8546: Fix possible null pointer dereference *552280a992
media: radio-shark2: Avoid led_names truncations *409f20085d
media: ngene: Add dvb_ca_en50221_init return value check *bb3c425921
fbdev: sisfb: hide unused variables *94349e015c
powerpc/fsl-soc: hide unused const variable *d17b75ee9c
drm/mediatek: Add 0 size check to mtk_drm_gem_obj *7441f9e056
drm/meson: vclk: fix calculation of 59.94 fractional rates *078e02dcb4
fbdev: shmobile: fix snprintf truncation *d3727d6e2b
mtd: rawnand: hynix: fixed typo *613a349cbf
ASoC: Intel: Disable route checks for Skylake boards *ece8098579
ASoC: soc-acpi: add helper to identify parent driver. *04bc4d1090
drm/amd/display: Fix potential index out of bounds in color transformation function *f6fbb8535e
net/mlx5: Discard command completions in internal error *00e6335329
ipv6: sr: fix invalid unregister error path *d33327a7c0
ipv6: sr: fix incorrect unregister order *1ba1b4cc3a
ipv6: sr: add missing seg6_local_exit *5ab6aecbed
net: openvswitch: fix overwriting ct original tuple for ICMPv6 *c3dc80f633
net: usb: smsc95xx: stop lying about skb->truesize *4d51845d73
af_unix: Fix data races in unix_release_sock/unix_stream_sendmsg *161e43067b
net: ethernet: cortina: Locking fixes *e22b23f588
eth: sungem: remove .ndo_poll_controller to avoid deadlocks *ad31e0e765
net: ipv6: fix wrong start position when receive hop-by-hop fragment *3eccf76b57
m68k: mac: Fix reboot hang on Mac IIci *4eeffecc8e
m68k: Fix spinlock race in kernel thread creation *d50b11c21f
net: usb: sr9700: stop lying about skb->truesize *2093cc6e87
usb: aqc111: stop lying about skb->truesize *4cff6817ee
wifi: mwl8k: initialize cmd->addr[] properly *769b9fd2af
scsi: qedf: Ensure the copied buf is NUL terminated *00b425ff08
scsi: bfa: Ensure the copied buf is NUL terminated *2321281f19
HID: intel-ish-hid: ipc: Add check for pci_alloc_irq_vectors *2bd97a0868
selftests/binderfs: use the Makefile's rules, not Make's implicit rules *019ae041a5
Revert "sh: Handle calling csum_partial with misaligned data" *3557a7fc5c
sh: kprobes: Merge arch_copy_kprobe() into arch_prepare_kprobe() *ee25389df8
wifi: ar5523: enable proper endpoint verification *8650725bb0
wifi: carl9170: add a proper sanity check for endpoints *280619bbde
macintosh/via-macii: Fix "BUG: sleeping function called from invalid context" *9d13202922
net: give more chances to rcu in netdev_wait_allrefs_any() *bc7cae63fa
tcp: avoid premature drops in tcp_add_backlog() *88aa40df8e
wifi: ath10k: populate board data for WCN3990 *5c4756e0fb
wifi: ath10k: Fix an error code problem in ath10k_dbg_sta_write_peer_debug_trigger() *1b27468dbe
x86/purgatory: Switch to the position-independent small code model *176fb7770d
scsi: hpsa: Fix allocation size for Scsi_Host private data *0dc60ee1ed
scsi: libsas: Fix the failure of adding phy with zero-address to port *69fab9d2e2
udp: Avoid call to compute_score on multiple sites *452f8dc251
net: remove duplicate reuseport_lookup functions *0ce990e6ef
net: export inet_lookup_reuseport and inet6_lookup_reuseport *dfc56ff5ec
cpufreq: exit() callback is optional *92aca16797
cpufreq: Rearrange locking in cpufreq_remove_dev() *19b06dec36
cpufreq: Split cpufreq_offline() *3a28fbf533
cpufreq: Reorganize checks in cpufreq_offline() *7a4d18a27d
selftests/bpf: Fix umount cgroup2 error in test_sockmap *dd52e3bc4f
gfs2: Fix "ignore unlock failures after withdraw" *11f9bd1102
ACPI: disable -Wstringop-truncation *a8f0a14c3b
irqchip/loongson-pch-msi: Fix off-by-one on allocation error path *f31b49ba36
irqchip/alpine-msi: Fix off-by-one in allocation error path *bb291d4d08
scsi: ufs: core: Perform read back after disabling UIC_COMMAND_COMPL *3bbfbd5a36
scsi: ufs: core: Perform read back after disabling interrupts *bbc00d1b7a
scsi: ufs: cdns-pltfrm: Perform read back after writing HCLKDIV *dca9754276
scsi: ufs: qcom: Perform read back after writing CGC enable *c8f2eefc49
scsi: ufs: qcom: Perform read back after writing unipro mode *506f63e97d
scsi: ufs: ufs-qcom: Clear qunipro_g4_sel for HW version major 5 *ee4bf03d26
scsi: ufs: ufs-qcom: Fix the Qcom register name for offset 0xD0 *d113c66bb4
scsi: ufs: qcom: Perform read back after writing REG_UFS_SYS1CLK_1US *82783759e8
scsi: ufs-qcom: Fix ufs RST_n spec violation *d193f4a153
scsi: ufs: qcom: Perform read back after writing reset bit *742f580670
qed: avoid truncating work queue length *01ea6818fa
x86/boot: Ignore relocations in .notes sections in walk_relocs() too *5fb37c456d
wifi: ath10k: poll service ready message before failing *3f5b73ef8f
md: fix resync softlockup when bitmap size is less than array size *c62f315238
null_blk: Fix missing mutex_destroy() at module removal *b2f8354f73
soc: mediatek: cmdq: Fix typo of CMDQ_JUMP_RELATIVE *f0eea095ce
jffs2: prevent xattr node from overflowing the eraseblock *de1207e5fd
s390/cio: fix tracepoint subchannel type field *9f6dbd0aa1
crypto: x86/sha256-avx2 - add missing vzeroupper *0ceb0a40c5
crypto: x86/nh-avx2 - add missing vzeroupper *6cd2cbd553
crypto: ccp - drop platform ifdef checks *f24cac6459
parisc: add missing export of __cmpxchg_u8() *dae53e39cd
nilfs2: fix out-of-range warning *edbfc42ab0
ecryptfs: Fix buffer size for tag 66 packet *e05ee61361
firmware: raspberrypi: Use correct device for DMA mappings *ebed0d666f
crypto: bcm - Fix pointer arithmetic *a81f072e50
openpromfs: finish conversion to the new mount API *904a590dab
nvme: find numa distance only if controller has valid numa id *5b4d14a0bc
drm/amdkfd: Flush the process wq before creating a kfd_process *eb464a8d82
ASoC: da7219-aad: fix usage of device_get_named_child_node() *43ff957b96
ASoC: rt715: add vendor clear control register *0c71bfad14
regulator: vqmmc-ipq4019: fix module autoloading *fcc54151a9
ASoC: dt-bindings: rt5645: add cbj sleeve gpio property *6cd625926e
ASoC: rt5645: Fix the electric noise due to the CBJ contacts floating *c9c742eaa5
drm/amd/display: Set color_mgmt_changed to true on unsuspend *c393ce8157
net: usb: qmi_wwan: add Telit FN920C04 compositions *c7d22022ec
wifi: cfg80211: fix the order of arguments for trace events of the tx_rx_evt class *d7ff29a429
ALSA: core: Fix NULL module pointer assignment at card init *eff7cdf890
nilfs2: fix potential hang in nilfs_detach_log_writer() *89e07418a6
nilfs2: fix unexpected freezing of nilfs_segctor_sync() *0d0ecd841f
net: smc91x: Fix m68k kernel compilation for ColdFire CPU *1e16019604
ring-buffer: Fix a race between readers and resize checks *07ef95cc7a
speakup: Fix sizeof() vs ARRAY_SIZE() bug *0fb736c993
tty: n_gsm: fix possible out-of-bounds in gsm0_receive() *03a7939453
x86/tsc: Trust initial offset in architectural TSC-adjust MSRs

Change-Id: I1e3b04e5bcd7844daa82bc19f6db4faa4c8f9f7d
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
binder.c (6729 lines, 190 KiB, C)
// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires node->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
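/*
 * Illustration (not part of the upstream file): a minimal sketch of the
 * acquisition order documented above, assuming a caller that needs all
 * three locks for one proc/node pair. The helper names are the real ones
 * defined later in this file; the surrounding call site is hypothetical.
 *
 *	binder_proc_lock(proc);		// 1) proc->outer_lock
 *	binder_node_lock(node);		// 2) node->lock
 *	binder_inner_proc_lock(proc);	// 3) proc->inner_lock
 *	...touch proc->todo and binder_node fields...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */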

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>
#include <linux/android_vendor.h>

#include <uapi/linux/sched/types.h>
#include <uapi/linux/android/binder.h>

#include <asm/cacheflush.h>

#include "binder_internal.h"
#include "binder_trace.h"
#include <trace/hooks/binder.h>

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);
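/*
 * Illustration (not part of the upstream file): because the parameters
 * above are registered with non-zero permission bits, the kernel exposes
 * them through sysfs, so the debug mask can be adjusted at runtime, e.g.:
 *
 *	echo 0x3ff > /sys/module/binder/parameters/debug_mask
 *
 * The exact path assumes the driver is registered under the name "binder".
 */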

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info_ratelimited(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}
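/*
 * Illustration (not part of the upstream file): the two stats helpers are
 * meant to bracket an object's lifetime, assuming a stat type such as
 * BINDER_STAT_PROC from binder_internal.h:
 *
 *	binder_stats_created(BINDER_STAT_PROC);	// when the proc is created
 *	...
 *	binder_stats_deleted(BINDER_STAT_PROC);	// on final teardown
 */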

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	char context_name[BINDERFS_MAX_NAME + 1];
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
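/*
 * Illustration (not part of the upstream file): the smp_wmb() above is a
 * one-way publish barrier, so a reader of the log is expected to pair it
 * with a read barrier, along the lines of:
 *
 *	int debug_id = READ_ONCE(e->debug_id_done);
 *	smp_rmb();	// order the flag read before reading the fields
 *	...read the remaining entry fields...
 *
 * (The debugfs dump code elsewhere in this file follows this pattern.)
 */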

enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH        = 0x01,
	BINDER_DEFERRED_RELEASE      = 0x02,
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release spinlock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}
|
|
|
|
/**
|
|
* binder_node_lock() - Acquire spinlock for given binder_node
|
|
* @node: struct binder_node to acquire
|
|
*
|
|
* Acquires node->lock. Used to protect binder_node fields
|
|
*/
|
|
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
|
|
static void
|
|
_binder_node_lock(struct binder_node *node, int line)
|
|
__acquires(&node->lock)
|
|
{
|
|
binder_debug(BINDER_DEBUG_SPINLOCKS,
|
|
"%s: line=%d\n", __func__, line);
|
|
spin_lock(&node->lock);
|
|
}
|
|
|
|
/**
|
|
* binder_node_unlock() - Release spinlock for given binder_proc
|
|
* @node: struct binder_node to acquire
|
|
*
|
|
* Release lock acquired via binder_node_lock()
|
|
*/
|
|
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
|
|
static void
|
|
_binder_node_unlock(struct binder_node *node, int line)
|
|
__releases(&node->lock)
|
|
{
|
|
binder_debug(BINDER_DEBUG_SPINLOCKS,
|
|
"%s: line=%d\n", __func__, line);
|
|
spin_unlock(&node->lock);
|
|
}
|
|
|
|
/**
|
|
* binder_node_inner_lock() - Acquire node and inner locks
|
|
* @node: struct binder_node to acquire
|
|
*
|
|
* Acquires node->lock. If node->proc also acquires
|
|
* proc->inner_lock. Used to protect binder_node fields
|
|
*/
|
|
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
|
|
static void
|
|
_binder_node_inner_lock(struct binder_node *node, int line)
|
|
__acquires(&node->lock) __acquires(&node->proc->inner_lock)
|
|
{
|
|
binder_debug(BINDER_DEBUG_SPINLOCKS,
|
|
"%s: line=%d\n", __func__, line);
|
|
spin_lock(&node->lock);
|
|
if (node->proc)
|
|
binder_inner_proc_lock(node->proc);
|
|
else
|
|
/* annotation for sparse */
|
|
__acquire(&node->proc->inner_lock);
|
|
}
|
|
|
|
/**
|
|
* binder_node_unlock() - Release node and inner locks
|
|
* @node: struct binder_node to acquire
|
|
*
|
|
* Release lock acquired via binder_node_lock()
|
|
*/
|
|
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
|
|
static void
|
|
_binder_node_inner_unlock(struct binder_node *node, int line)
|
|
__releases(&node->lock) __releases(&node->proc->inner_lock)
|
|
{
|
|
struct binder_proc *proc = node->proc;
|
|
|
|
binder_debug(BINDER_DEBUG_SPINLOCKS,
|
|
"%s: line=%d\n", __func__, line);
|
|
if (proc)
|
|
binder_inner_proc_unlock(proc);
|
|
else
|
|
/* annotation for sparse */
|
|
__release(&node->proc->inner_lock);
|
|
spin_unlock(&node->lock);
|
|
}
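
/*
 * Editor's summary (not in the original source): the helpers above fix
 * the lock nesting order as node->lock -> proc->inner_lock.
 * _binder_node_inner_lock() takes node->lock before the inner lock, and
 * _binder_node_inner_unlock() releases them in the reverse order, so
 * these paths can never acquire the two locks in conflicting orders.
 */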

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc: binder_proc associated with list
 * @list: list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work: struct binder_work to add to list
 * @target_list: list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);

	/* (e)poll-based threads require an explicit wakeup signal when
	 * queuing their own work; they rely on these events to consume
	 * messages without blocking on I/O. Without it, threads risk
	 * waiting indefinitely without handling the work.
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL &&
	    thread->pid == current->pid && !thread->process_todo)
		wake_up_interruptible_sync(&thread->wait);

	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc: binder_proc associated with list
 * @work: struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	int ret = 0;

	trace_android_vh_binder_has_work_ilocked(thread, do_proc_work, &ret);
	if (ret)
		return true;
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			trace_android_vh_binder_wakeup_ilocked(thread->task, sync, proc);
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc: process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return: If there's a thread currently waiting for process work,
 * returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc: process to wake up a thread in
 * @thread: specific thread to wake-up (may be NULL)
 * @sync: whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		trace_android_vh_binder_wakeup_ilocked(thread->task, sync, proc);
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static bool is_rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static bool is_fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static bool binder_supported_policy(int policy)
{
	return is_fair_policy(policy) || is_rt_policy(policy);
}

static int to_userspace_prio(int policy, int kernel_priority)
{
	if (is_fair_policy(policy))
		return PRIO_TO_NICE(kernel_priority);
	else
		return MAX_USER_RT_PRIO - 1 - kernel_priority;
}

static int to_kernel_prio(int policy, int user_priority)
{
	if (is_fair_policy(policy))
		return NICE_TO_PRIO(user_priority);
	else
		return MAX_USER_RT_PRIO - 1 - user_priority;
}
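
/*
 * Worked example (editor's sketch, using the standard kernel macros
 * where NICE_TO_PRIO(nice) == 120 + nice and MAX_USER_RT_PRIO == 100):
 * for SCHED_NORMAL, nice 10 maps to kernel priority 130; for SCHED_FIFO,
 * rtprio 50 maps to 100 - 1 - 50 == 49. to_userspace_prio() inverts both
 * mappings.
 */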

static void binder_do_set_priority(struct task_struct *task,
				   struct binder_priority desired,
				   bool verify)
{
	int priority; /* user-space prio value */
	bool has_cap_nice;
	unsigned int policy = desired.sched_policy;

	if (task->policy == policy && task->normal_prio == desired.prio)
		return;

	has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);

	priority = to_userspace_prio(policy, desired.prio);

	if (verify && is_rt_policy(policy) && !has_cap_nice) {
		long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);

		if (max_rtprio == 0) {
			policy = SCHED_NORMAL;
			priority = MIN_NICE;
		} else if (priority > max_rtprio) {
			priority = max_rtprio;
		}
	}

	if (verify && is_fair_policy(policy) && !has_cap_nice) {
		long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE));

		if (min_nice > MAX_NICE) {
			binder_user_error("%d RLIMIT_NICE not set\n",
					  task->pid);
			return;
		} else if (priority < min_nice) {
			priority = min_nice;
		}
	}

	if (policy != desired.sched_policy ||
	    to_kernel_prio(policy, priority) != desired.prio)
		binder_debug(BINDER_DEBUG_PRIORITY_CAP,
			     "%d: priority %d not allowed, using %d instead\n",
			     task->pid, desired.prio,
			     to_kernel_prio(policy, priority));

	trace_binder_set_priority(task->tgid, task->pid, task->normal_prio,
				  to_kernel_prio(policy, priority),
				  desired.prio);

	/* Set the actual priority */
	if (task->policy != policy || is_rt_policy(policy)) {
		struct sched_param params;

		params.sched_priority = is_rt_policy(policy) ? priority : 0;

		sched_setscheduler_nocheck(task,
					   policy | SCHED_RESET_ON_FORK,
					   &params);
	}
	if (is_fair_policy(policy))
		set_user_nice(task, priority);
}

static void binder_set_priority(struct task_struct *task,
				struct binder_priority desired)
{
	binder_do_set_priority(task, desired, /* verify = */ true);
}

static void binder_restore_priority(struct task_struct *task,
				    struct binder_priority desired)
{
	binder_do_set_priority(task, desired, /* verify = */ false);
}

static void binder_transaction_priority(struct task_struct *task,
					struct binder_transaction *t,
					struct binder_priority node_prio,
					bool inherit_rt)
{
	struct binder_priority desired_prio = t->priority;
	bool skip = false;

	if (t->set_priority_called)
		return;

	t->set_priority_called = true;
	t->saved_priority.sched_policy = task->policy;
	t->saved_priority.prio = task->normal_prio;

	trace_android_vh_binder_priority_skip(task, &skip);
	if (skip)
		return;

	if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
		desired_prio.prio = NICE_TO_PRIO(0);
		desired_prio.sched_policy = SCHED_NORMAL;
	}

	if (node_prio.prio < t->priority.prio ||
	    (node_prio.prio == t->priority.prio &&
	     node_prio.sched_policy == SCHED_FIFO)) {
		/*
		 * In case the minimum priority on the node is
		 * higher (lower value), use that priority. If
		 * the priority is the same, but the node uses
		 * SCHED_FIFO, prefer SCHED_FIFO, since it can
		 * run unbounded, unlike SCHED_RR.
		 */
		desired_prio = node_prio;
	}

	binder_set_priority(task, desired_prio);
	trace_android_vh_binder_set_priority(t, task);
}
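
/*
 * Worked example (editor's sketch): kernel priority values are "lower is
 * higher". If the transaction carries prio 130 (nice 10) and the target
 * node's minimum is prio 120 (nice 0), node_prio.prio < t->priority.prio
 * holds above and the node's stricter 120 is applied to the target task.
 */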

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;
	s8 priority;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
		FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
	node->min_priority = to_kernel_prio(node->sched_policy, priority);
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node: node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (e.g. traversing the dead node list
 * to print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node: node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}
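
/*
 * Usage sketch (editor's illustration, mirroring how binder_get_node()
 * pairs with binder_put_node() elsewhere in this file):
 *
 *	struct binder_node *node = binder_get_node(proc, ptr);
 *
 *	if (node) {
 *		... use node; it cannot be freed here ...
 *		binder_put_node(node);	// drops the implicit tmp reference
 *	}
 */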

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc: binder_proc that owns the ref
 * @node: binder_node of target
 * @new_ref: newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return: the ref for node. It is possible that another thread
 * allocated/initialized the ref first in which case the
 * returned ref would be different than the passed-in
 * new_ref. new_ref must be kfree'd by the caller in
 * this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	trace_android_vh_binder_new_ref(proc->tsk, new_ref->data.desc, new_ref->node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}
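
/*
 * Worked example (editor's sketch) for the descriptor scan above: with
 * existing descriptors {0, 1, 2, 5}, the candidate starts at 1 and is
 * bumped to 2, then 3; the scan stops at 5 (5 > 3), so the new ref gets
 * desc 3 -- the smallest unused value above 0, which is reserved for the
 * context manager.
 */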

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref: ref to be incremented
 * @strong: if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref: ref to be decremented
 * @strong: if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}
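
/*
 * Editor's note (illustration): these counts are per-ref, not per-node.
 * E.g. a ref with data.strong == 1 and data.weak == 0 that takes a
 * strong decrement drops its node reference via binder_dec_node(); both
 * counts are then zero, so the ref is cleaned up and the caller is told
 * it can be freed.
 */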

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref: ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	trace_android_vh_binder_del_ref(ref->proc ? ref->proc->tsk : NULL,
					ref->data.desc);
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @increment: true=inc reference, false=dec reference
 * @strong: true=strong reference, false=weak reference
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @strong: true=strong reference, false=weak reference
 * @rdata: the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}

/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc: proc containing the ref
 * @node: target node
 * @strong: true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
				   struct binder_node *node,
				   bool strong,
				   struct list_head *target_list,
				   struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	if (ret && ref == new_ref) {
		/*
		 * Cleanup the failed reference here as the target
		 * could now be dead and have already released its
		 * references by now. Calling on the new reference
		 * with strong=0 and a tmp_refs will not decrement
		 * the node. The new_ref gets kfree'd below.
		 */
		binder_cleanup_ref_olocked(new_ref);
		ref = NULL;
	}

	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}
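
/*
 * Editor's note (design observation): the function above cannot allocate
 * while holding the proc->outer_lock spinlock, so it drops the lock
 * around the GFP_KERNEL allocation and looks the ref up again after
 * relocking; if another thread created the ref in that window, the
 * losing allocation is kfree'd at the end.
 */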

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread: thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc: proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
	    !proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t: binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}
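
/*
 * Usage sketch (editor's illustration, following the kerneldoc above):
 *
 *	struct binder_thread *from = binder_get_txn_from(t);
 *
 *	if (from) {
 *		... from cannot be freed while we use it ...
 *		binder_thread_dec_tmpref(from);
 *	}
 */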

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t: binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}

/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t: binder transaction whose fd fixups are to be freed
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}

static void binder_free_transaction(struct binder_transaction *t)
{
	struct binder_proc *target_proc = t->to_proc;

	trace_android_vh_free_oem_binder_struct(t);
	if (target_proc) {
		binder_inner_proc_lock(target_proc);
		target_proc->outstanding_txns--;
		if (target_proc->outstanding_txns < 0)
			pr_warn("%s: Unexpected outstanding_txns %d\n",
				__func__, target_proc->outstanding_txns);
		if (!target_proc->outstanding_txns && target_proc->is_frozen)
			wake_up_interruptible_all(&target_proc->freeze_wait);
		if (t->buffer)
			t->buffer->transaction = NULL;
		binder_inner_proc_unlock(target_proc);
	}
	/*
	 * If the transaction has no target_proc, then
	 * t->buffer->transaction has already been cleared.
	 */
	binder_free_txn_fixups(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		__release(&target_thread->proc->inner_lock);
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}

/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t: transaction that needs to be cleaned up
 * @reason: reason the transaction wasn't delivered
 * @error_code: error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "undelivered transaction %d, %s\n",
			     t->debug_id, reason);
		binder_free_transaction(t);
	}
}

/**
 * binder_get_object() - gets object and checks for valid metadata
 * @proc: binder_proc owning the buffer
 * @u: sender's user pointer to base of buffer
 * @buffer: binder_buffer that we're parsing.
 * @offset: offset in the @buffer at which to validate an object.
 * @object: struct binder_object to read into
 *
 * Copy the binder object at the given offset into @object. If @u is
 * provided then the copy is from the sender's buffer. If not, then
 * it is copied from the target's @buffer.
 *
 * Return: If there's a valid metadata object at @offset, the
 * size of that object. Otherwise, it returns zero. The object
 * is read into the struct binder_object pointed to by @object.
 */
static size_t binder_get_object(struct binder_proc *proc,
				const void __user *u,
				struct binder_buffer *buffer,
				unsigned long offset,
				struct binder_object *object)
{
	size_t read_size;
	struct binder_object_header *hdr;
	size_t object_size = 0;

	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
		return 0;

	if (u) {
		if (copy_from_user(object, u + offset, read_size))
			return 0;
	} else {
		if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
						  offset, read_size))
			return 0;
	}

	/* Ok, now see if we read a complete object. */
	hdr = &object->hdr;
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}
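
/*
 * Worked example (editor's sketch) for the final bounds check above:
 * with buffer->data_size == 64 and a 24-byte flat_binder_object at
 * offset 48, the test 48 <= 64 - 24 fails, so the truncated object is
 * rejected and 0 is returned.
 */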

/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @proc: binder_proc owning the buffer
 * @b: binder_buffer containing the object
 * @object: struct binder_object to read into
 * @index: index in offset array at which the binder_buffer_object is
 *         located
 * @start_offset: points to the start of the offset array
 * @object_offsetp: offset of @object read from @b
 * @num_valid: the number of valid offsets in the offset array
 *
 * Return: If @index is within the valid range of the offset array
 * described by @start_offset and @num_valid, and if there's a valid
 * binder_buffer_object at the offset found in index @index
 * of the offset array, that object is returned. Otherwise,
 * %NULL is returned.
 * Note that the offset found in index @index itself is not
 * verified; this function assumes that @num_valid elements
 * from @start_offset were previously verified to have valid offsets.
 * If @object_offsetp is non-NULL, then the offset within
 * @b is written to it.
 */
static struct binder_buffer_object *binder_validate_ptr(
						struct binder_proc *proc,
						struct binder_buffer *b,
						struct binder_object *object,
						binder_size_t index,
						binder_size_t start_offset,
						binder_size_t *object_offsetp,
						binder_size_t num_valid)
{
	size_t object_size;
	binder_size_t object_offset;
	unsigned long buffer_offset;

	if (index >= num_valid)
		return NULL;

	buffer_offset = start_offset + sizeof(binder_size_t) * index;
	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
					  b, buffer_offset,
					  sizeof(object_offset)))
		return NULL;
	object_size = binder_get_object(proc, NULL, b, object_offset, object);
	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
		return NULL;
	if (object_offsetp)
		*object_offsetp = object_offset;

	return &object->bbo;
}

/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @proc: binder_proc owning the buffer
 * @b: transaction buffer
 * @objects_start_offset: offset to start of objects buffer
 * @buffer_obj_offset: offset to binder_buffer_object in which to fix up
 * @fixup_offset: start offset in @buffer to fix up
 * @last_obj_offset: offset to last binder_buffer_object that we fixed
 * @last_min_offset: minimum fixup offset in object at @last_obj_offset
 *
 * Return: %true if a fixup in buffer @buffer at offset @offset is
 * allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_proc *proc,
				  struct binder_buffer *b,
				  binder_size_t objects_start_offset,
				  binder_size_t buffer_obj_offset,
				  binder_size_t fixup_offset,
				  binder_size_t last_obj_offset,
				  binder_size_t last_min_offset)
{
	if (!last_obj_offset) {
		/* No object verified yet, so nothing to fix up in */
		return false;
	}

	while (last_obj_offset != buffer_obj_offset) {
		unsigned long buffer_offset;
		struct binder_object last_object;
		struct binder_buffer_object *last_bbo;
		size_t object_size = binder_get_object(proc, NULL, b,
						       last_obj_offset,
						       &last_object);
		if (object_size != sizeof(*last_bbo))
			return false;

		last_bbo = &last_object.bbo;
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
		buffer_offset = objects_start_offset +
			sizeof(binder_size_t) * last_bbo->parent;
		if (binder_alloc_copy_from_buffer(&proc->alloc,
						  &last_obj_offset,
						  b, buffer_offset,
						  sizeof(last_obj_offset)))
			return false;
	}
	return (fixup_offset >= last_min_offset);
}

/**
 * struct binder_task_work_cb - for deferred close
 *
 * @twork: callback_head for task work
 * @file: file to close
 *
 * Structure to pass task work to be handled after
 * returning from binder_ioctl() via task_work_add().
 */
struct binder_task_work_cb {
	struct callback_head twork;
	struct file *file;
};

/**
 * binder_do_fd_close() - close a file for a deferred fd close
 * @twork: callback head for task work
 *
 * It is not safe to call ksys_close() during the binder_ioctl()
 * function if there is a chance that binder's own file descriptor
 * might be closed. This is to meet the requirements for using
 * fdget() (see comments for __fget_light()). Therefore use
 * task_work_add() to schedule the close operation once we have
 * returned from binder_ioctl(). This function is a callback
 * for that mechanism and does the actual ksys_close() on the
 * given file descriptor.
 */
static void binder_do_fd_close(struct callback_head *twork)
{
	struct binder_task_work_cb *twcb = container_of(twork,
			struct binder_task_work_cb, twork);

	fput(twcb->file);
	kfree(twcb);
}

/**
 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
 * @fd: file-descriptor to close
 *
 * See comments in binder_do_fd_close(). This function is used to schedule
 * a file-descriptor to be closed after returning from binder_ioctl().
 */
static void binder_deferred_fd_close(int fd)
{
	struct binder_task_work_cb *twcb;

	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
	if (!twcb)
		return;
	init_task_work(&twcb->twork, binder_do_fd_close);
	close_fd_get_file(fd, &twcb->file);
	if (twcb->file) {
		filp_close(twcb->file, current->files);
		task_work_add(current, &twcb->twork, TWA_RESUME);
	} else {
		kfree(twcb);
	}
}
|
|
|
|
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_thread *thread,
					      struct binder_buffer *buffer,
					      binder_size_t off_end_offset,
					      bool is_failure)
{
	int debug_id = buffer->debug_id;
	binder_size_t off_start_offset, buffer_offset;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size,
		     (unsigned long long)off_end_offset);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));

	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
	     buffer_offset += sizeof(binder_size_t)) {
		struct binder_object_header *hdr;
		size_t object_size = 0;
		struct binder_object object;
		binder_size_t object_offset;

		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
						   buffer, buffer_offset,
						   sizeof(object_offset)))
			object_size = binder_get_object(proc, NULL, buffer,
							object_offset, &object);
		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)object_offset, buffer->data_size);
			continue;
		}
		hdr = &object.hdr;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     " node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
			binder_put_node(node);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref_data rdata;
			int ret;

			fp = to_flat_binder_object(hdr);
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				       debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     " ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
		} break;

		case BINDER_TYPE_FD: {
			/*
			 * No need to close the file here since user-space
			 * closes it for successfully delivered
			 * transactions. For transactions that weren't
			 * delivered, the new fd was never allocated so
			 * there is no need to close and the fput on the
			 * file is done when the transaction is torn
			 * down.
			 */
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			struct binder_object ptr_object;
			binder_size_t fda_offset;
			size_t fd_index;
			binder_size_t fd_buf_size;
			binder_size_t num_valid;

			if (is_failure) {
				/*
				 * The fd fixups have not been applied so no
				 * fds need to be closed.
				 */
				continue;
			}

			num_valid = (buffer_offset - off_start_offset) /
						sizeof(binder_size_t);
			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(proc, buffer, &ptr_object,
						     fda->parent,
						     off_start_offset,
						     NULL,
						     num_valid);
			if (!parent) {
				pr_err("transaction release %d bad parent offset\n",
				       debug_id);
				continue;
			}
			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			/*
			 * the source data for binder_buffer_object is visible
			 * to user-space and the @buffer element is the user
			 * pointer to the buffer_object containing the fd_array.
			 * Convert the address to an offset relative to
			 * the base of the transaction buffer.
			 */
			fda_offset =
			    (parent->buffer - (uintptr_t)buffer->user_data) +
			    fda->parent_offset;
			for (fd_index = 0; fd_index < fda->num_fds;
			     fd_index++) {
				u32 fd;
				int err;
				binder_size_t offset = fda_offset +
						fd_index * sizeof(fd);

				err = binder_alloc_copy_from_buffer(
						&proc->alloc, &fd, buffer,
						offset, sizeof(fd));
				WARN_ON(err);
				if (!err) {
					binder_deferred_fd_close(fd);
					/*
					 * Need to make sure the thread goes
					 * back to userspace to complete the
					 * deferred close
					 */
					if (thread)
						thread->looper_need_return = true;
				}
			}
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, hdr->type);
			break;
		}
	}
}

/* Clean up all the objects in the buffer */
static inline void binder_release_entire_buffer(struct binder_proc *proc,
						struct binder_thread *thread,
						struct binder_buffer *buffer,
						bool is_failure)
{
	binder_size_t off_end_offset;

	off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
	off_end_offset += buffer->offsets_size;

	binder_transaction_buffer_release(proc, thread, buffer,
					  off_end_offset, is_failure);
}

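/*
 * Example (illustrative, not driver code): for a buffer with data_size = 100
 * and offsets_size = 16 on a 64-bit kernel, the offsets region starts at
 * ALIGN(100, 8) = 104 and ends at 104 + 16 = 120, i.e. it holds two
 * binder_size_t entries:
 *
 *	off_end_offset = ALIGN(100, sizeof(void *));	// 104
 *	off_end_offset += 16;				// 120
 */
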
static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_ref_data rdata;
	int ret = 0;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp);
		if (!node)
			return -ENOMEM;
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		ret = -EINVAL;
		goto done;
	}
	if (security_binder_transfer_binder(binder_get_cred(proc),
					    binder_get_cred(target_proc))) {
		ret = -EPERM;
		goto done;
	}

	ret = binder_inc_ref_for_node(target_proc, node,
			fp->hdr.type == BINDER_TYPE_BINDER,
			&thread->todo, &rdata);
	if (ret)
		goto done;

	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = rdata.desc;
	fp->cookie = 0;

	trace_binder_transaction_node_to_ref(t, node, &rdata);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     " node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     rdata.debug_id, rdata.desc);
done:
	binder_put_node(node);
	return ret;
}

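/*
 * Example (illustrative, not driver code): a sender flattens a local object
 * as BINDER_TYPE_BINDER carrying its user-space pointer and cookie; after
 * translation the receiver only ever sees a handle:
 *
 *	before:	fp->hdr.type == BINDER_TYPE_BINDER, fp->binder = <sender ptr>
 *	after:	fp->hdr.type == BINDER_TYPE_HANDLE, fp->handle = rdata.desc,
 *		fp->binder == 0 and fp->cookie == 0
 *
 * so raw sender addresses never leak into the target process.
 */
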
static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_node *node;
	struct binder_ref_data src_rdata;
	int ret = 0;

	node = binder_get_node_from_ref(proc, fp->handle,
			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
	if (!node) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(binder_get_cred(proc),
					    binder_get_cred(target_proc))) {
		ret = -EPERM;
		goto done;
	}

	binder_node_lock(node);
	if (node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = node->ptr;
		fp->cookie = node->cookie;
		if (node->proc)
			binder_inner_proc_lock(node->proc);
		else
			__acquire(&node->proc->inner_lock);
		binder_inc_node_nilocked(node,
					 fp->hdr.type == BINDER_TYPE_BINDER,
					 0, NULL);
		if (node->proc)
			binder_inner_proc_unlock(node->proc);
		else
			__release(&node->proc->inner_lock);
		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     " ref %d desc %d -> node %d u%016llx\n",
			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
			     (u64)node->ptr);
		binder_node_unlock(node);
	} else {
		struct binder_ref_data dest_rdata;

		binder_node_unlock(node);
		ret = binder_inc_ref_for_node(target_proc, node,
				fp->hdr.type == BINDER_TYPE_HANDLE,
				NULL, &dest_rdata);
		if (ret)
			goto done;

		fp->binder = 0;
		fp->handle = dest_rdata.desc;
		fp->cookie = 0;
		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
						    &dest_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     " ref %d desc %d -> ref %d desc %d (node %d)\n",
			     src_rdata.debug_id, src_rdata.desc,
			     dest_rdata.debug_id, dest_rdata.desc,
			     node->debug_id);
	}
done:
	binder_put_node(node);
	return ret;
}

static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_transaction *in_reply_to)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_txn_fd_fixup *fixup;
	struct file *file;
	int ret = 0;
	bool target_allows_fd;

	if (in_reply_to)
		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
	else
		target_allows_fd = t->buffer->target_node->accept_fds;
	if (!target_allows_fd) {
		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
				  proc->pid, thread->pid,
				  in_reply_to ? "reply" : "transaction",
				  fd);
		ret = -EPERM;
		goto err_fd_not_accepted;
	}

	file = fget(fd);
	if (!file) {
		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
				  proc->pid, thread->pid, fd);
		ret = -EBADF;
		goto err_fget;
	}
	ret = security_binder_transfer_file(binder_get_cred(proc),
					    binder_get_cred(target_proc), file);
	if (ret < 0) {
		ret = -EPERM;
		goto err_security;
	}

	/*
	 * Add fixup record for this transaction. The allocation
	 * of the fd in the target needs to be done from a
	 * target thread.
	 */
	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
	if (!fixup) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	fixup->file = file;
	fixup->offset = fd_offset;
	trace_binder_transaction_fd_send(t, fd, fixup->offset);
	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);

	return ret;

err_alloc:
err_security:
	fput(file);
err_fget:
err_fd_not_accepted:
	return ret;
}

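/*
 * Example (illustrative sketch, not driver code): the fixup queued above is
 * consumed later, from a thread of the *target* process, roughly as:
 *
 *	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
 *		int fd = get_unused_fd_flags(O_CLOEXEC);  // target's fd table
 *		fd_install(fd, fixup->file);              // visible to target
 *		// write fd into t->buffer at fixup->offset
 *	}
 *
 * Deferring the install is what makes the new descriptor land in the
 * receiver's file table instead of the sender's.
 */
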
/**
 * struct binder_ptr_fixup - data to be fixed-up in target buffer
 * @offset:	offset in target buffer to fixup
 * @skip_size:	bytes to skip in copy (fixup will be written later)
 * @fixup_data:	data to write at fixup offset
 * @node:	list node
 *
 * This is used for the pointer fixup list (pf) which is created and consumed
 * during binder_transaction() and is only accessed locally. No
 * locking is necessary.
 *
 * The list is ordered by @offset.
 */
struct binder_ptr_fixup {
	binder_size_t offset;
	size_t skip_size;
	binder_uintptr_t fixup_data;
	struct list_head node;
};

/**
 * struct binder_sg_copy - scatter-gather data to be copied
 * @offset:		offset in target buffer
 * @sender_uaddr:	user address in source buffer
 * @length:		bytes to copy
 * @node:		list node
 *
 * This is used for the sg copy list (sgc) which is created and consumed
 * during binder_transaction() and is only accessed locally. No
 * locking is necessary.
 *
 * The list is ordered by @offset.
 */
struct binder_sg_copy {
	binder_size_t offset;
	const void __user *sender_uaddr;
	size_t length;
	struct list_head node;
};

/**
 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
 * @alloc:	binder_alloc associated with @buffer
 * @buffer:	binder buffer in target process
 * @sgc_head:	list_head of scatter-gather copy list
 * @pf_head:	list_head of pointer fixup list
 *
 * Processes all elements of @sgc_head, applying fixups from @pf_head
 * and copying the scatter-gather data from the source process' user
 * buffer to the target's buffer. It is expected that the list creation
 * and processing all occurs during binder_transaction() so these lists
 * are only accessed in local context.
 *
 * Return: 0=success, else -errno
 */
static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
					 struct binder_buffer *buffer,
					 struct list_head *sgc_head,
					 struct list_head *pf_head)
{
	int ret = 0;
	struct binder_sg_copy *sgc, *tmpsgc;
	struct binder_ptr_fixup *tmppf;
	struct binder_ptr_fixup *pf =
		list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
					 node);

	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
		size_t bytes_copied = 0;

		while (bytes_copied < sgc->length) {
			size_t copy_size;
			size_t bytes_left = sgc->length - bytes_copied;
			size_t offset = sgc->offset + bytes_copied;

			/*
			 * We copy up to the fixup (pointed to by pf)
			 */
			copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
				       : bytes_left;
			if (!ret && copy_size)
				ret = binder_alloc_copy_user_to_buffer(
						alloc, buffer,
						offset,
						sgc->sender_uaddr + bytes_copied,
						copy_size);
			bytes_copied += copy_size;
			if (copy_size != bytes_left) {
				BUG_ON(!pf);
				/* we stopped at a fixup offset */
				if (pf->skip_size) {
					/*
					 * we are just skipping. This is for
					 * BINDER_TYPE_FDA where the translated
					 * fds will be fixed up when we get
					 * to target context.
					 */
					bytes_copied += pf->skip_size;
				} else {
					/* apply the fixup indicated by pf */
					if (!ret)
						ret = binder_alloc_copy_to_buffer(
							alloc, buffer,
							pf->offset,
							&pf->fixup_data,
							sizeof(pf->fixup_data));
					bytes_copied += sizeof(pf->fixup_data);
				}
				list_del(&pf->node);
				kfree(pf);
				pf = list_first_entry_or_null(pf_head,
						struct binder_ptr_fixup, node);
			}
		}
		list_del(&sgc->node);
		kfree(sgc);
	}
	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
		BUG_ON(pf->skip_size == 0);
		list_del(&pf->node);
		kfree(pf);
	}
	BUG_ON(!list_empty(sgc_head));

	return ret > 0 ? -EINVAL : ret;
}

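/*
 * Worked example (illustrative, not driver code): one 64-byte sg block at
 * target offset 0 with a single pointer fixup at offset 16. The loop copies
 * bytes [0,16) from the sender, writes the 8-byte translated pointer from
 * pf->fixup_data at [16,24), then resumes copying [24,64) from the sender,
 * so the untranslated sender pointer is never visible in the target buffer.
 * A BINDER_TYPE_FDA fixup would instead set skip_size and leave the fd array
 * region to be patched later from target context.
 */
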
/**
 * binder_cleanup_deferred_txn_lists() - free specified lists
 * @sgc_head:	list_head of scatter-gather copy list
 * @pf_head:	list_head of pointer fixup list
 *
 * Called to clean up @sgc_head and @pf_head if there is an
 * error.
 */
static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
					      struct list_head *pf_head)
{
	struct binder_sg_copy *sgc, *tmpsgc;
	struct binder_ptr_fixup *pf, *tmppf;

	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
		list_del(&sgc->node);
		kfree(sgc);
	}
	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
		list_del(&pf->node);
		kfree(pf);
	}
}

/**
 * binder_defer_copy() - queue a scatter-gather buffer for copy
 * @sgc_head:		list_head of scatter-gather copy list
 * @offset:		binder buffer offset in target process
 * @sender_uaddr:	user address in source process
 * @length:		bytes to copy
 *
 * Specify a scatter-gather block to be copied. The actual copy must
 * be deferred until all the needed fixups are identified and queued.
 * Then the copy and fixups are done together so un-translated values
 * from the source are never visible in the target buffer.
 *
 * We are guaranteed that repeated calls to this function will have
 * monotonically increasing @offset values so the list will naturally
 * be ordered.
 *
 * Return: 0=success, else -errno
 */
static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
			     const void __user *sender_uaddr, size_t length)
{
	struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);

	if (!bc)
		return -ENOMEM;

	bc->offset = offset;
	bc->sender_uaddr = sender_uaddr;
	bc->length = length;
	INIT_LIST_HEAD(&bc->node);

	/*
	 * We are guaranteed that the deferred copies are in-order
	 * so just add to the tail.
	 */
	list_add_tail(&bc->node, sgc_head);

	return 0;
}

/**
 * binder_add_fixup() - queue a fixup to be applied to sg copy
 * @pf_head:	list_head of binder ptr fixup list
 * @offset:	binder buffer offset in target process
 * @fixup:	bytes to be copied for fixup
 * @skip_size:	bytes to skip when copying (fixup will be applied later)
 *
 * Add the specified fixup to a list ordered by @offset. When copying
 * the scatter-gather buffers, the fixup will be copied instead of
 * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
 * will be applied later (in target process context), so we just skip
 * the bytes specified by @skip_size. If @skip_size is 0, we copy the
 * value in @fixup.
 *
 * This function is called *mostly* in @offset order, but there are
 * exceptions. Since out-of-order inserts are relatively uncommon,
 * we insert the new element by searching backward from the tail of
 * the list.
 *
 * Return: 0=success, else -errno
 */
static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
			    binder_uintptr_t fixup, size_t skip_size)
{
	struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
	struct binder_ptr_fixup *tmppf;

	if (!pf)
		return -ENOMEM;

	pf->offset = offset;
	pf->fixup_data = fixup;
	pf->skip_size = skip_size;
	INIT_LIST_HEAD(&pf->node);

	/* Fixups are *mostly* added in-order, but there are some
	 * exceptions. Look backwards through list for insertion point.
	 */
	list_for_each_entry_reverse(tmppf, pf_head, node) {
		if (tmppf->offset < pf->offset) {
			list_add(&pf->node, &tmppf->node);
			return 0;
		}
	}
	/*
	 * if we get here, then the new offset is the lowest so
	 * insert at the head
	 */
	list_add(&pf->node, pf_head);
	return 0;
}

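/*
 * Example (illustrative, not driver code): with fixups already queued at
 * offsets 8 and 32, adding one at offset 24 walks backward from the tail:
 * 32 is not < 24, 8 is < 24, so the new node is linked after the offset-8
 * entry, giving 8 -> 24 -> 32. Adding offset 4 exhausts the walk and falls
 * through to list_add() at the head. The backward scan is O(1) in the
 * common already-sorted case.
 */
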
static int binder_translate_fd_array(struct list_head *pf_head,
				     struct binder_fd_array_object *fda,
				     const void __user *sender_ubuffer,
				     struct binder_buffer_object *parent,
				     struct binder_buffer_object *sender_uparent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size;
	binder_size_t fda_offset;
	const void __user *sender_ufda_base;
	struct binder_proc *proc = thread->proc;
	int ret;

	if (fda->num_fds == 0)
		return 0;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * the source data for binder_buffer_object is visible
	 * to user-space and the @buffer element is the user
	 * pointer to the buffer_object containing the fd_array.
	 * Convert the address to an offset relative to
	 * the base of the transaction buffer.
	 */
	fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
		fda->parent_offset;
	sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
		fda->parent_offset;

	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
	    !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
	if (ret)
		return ret;

	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		u32 fd;
		binder_size_t offset = fda_offset + fdi * sizeof(fd);
		binder_size_t sender_uoffset = fdi * sizeof(fd);

		ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
		if (!ret)
			ret = binder_translate_fd(fd, offset, t, thread,
						  in_reply_to);
		if (ret)
			return ret > 0 ? -EINVAL : ret;
	}
	return 0;
}

static int binder_fixup_parent(struct list_head *pf_head,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t off_start_offset,
			       binder_size_t num_valid,
			       binder_size_t last_fixup_obj_off,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_object object;
	binder_size_t buffer_offset;
	binder_size_t parent_offset;

	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
		return 0;

	parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
				     off_start_offset, &parent_offset,
				     num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (!binder_validate_fixup(target_proc, b, off_start_offset,
				   parent_offset, bp->parent_offset,
				   last_fixup_obj_off,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	buffer_offset = bp->parent_offset +
			(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
	return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
}

/**
 * binder_can_update_transaction() - Can a txn be superseded by an updated one?
 * @t1: the pending async txn in the frozen process
 * @t2: the new async txn to supersede the outdated pending one
 *
 * Return: true if t2 can supersede t1
 *         false if t2 can not supersede t1
 */
static bool binder_can_update_transaction(struct binder_transaction *t1,
					  struct binder_transaction *t2)
{
	if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
	    (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
		return false;
	if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
	    t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
	    t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
	    t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
		return true;
	return false;
}

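/*
 * Example (illustrative, not driver code): both transactions must carry
 * TF_ONE_WAY | TF_UPDATE_TXN and address the same node with the same code.
 * A sender opting in to the supersede behaviour would flag its oneway call:
 *
 *	tr.flags = TF_ONE_WAY | TF_UPDATE_TXN;
 *	tr.code  = SET_STATUS;	// hypothetical method code
 *
 * so that, while the receiver is frozen, a newer SET_STATUS payload can
 * replace a stale one still sitting in the node's async_todo list.
 */
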
/**
 * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
 * @t:		 new async transaction
 * @target_list: list to find outdated transaction
 *
 * Return: the outdated transaction if found
 *         NULL if no outdated transaction can be found
 *
 * Requires the proc->inner_lock to be held.
 */
static struct binder_transaction *
binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
					 struct list_head *target_list)
{
	struct binder_work *w;

	list_for_each_entry(w, target_list, entry) {
		struct binder_transaction *t_queued;

		if (w->type != BINDER_WORK_TRANSACTION)
			continue;
		t_queued = container_of(w, struct binder_transaction, work);
		if (binder_can_update_transaction(t_queued, t))
			return t_queued;
	}
	return NULL;
}

/**
 * binder_proc_transaction() - sends a transaction to a process and wakes it up
 * @t:		transaction to send
 * @proc:	process to send the transaction to
 * @thread:	thread in @proc to send the transaction to (may be NULL)
 *
 * This function queues a transaction to the specified process. It will try
 * to find a thread in the target process to handle the transaction and
 * wake it up. If no thread is found, the work is queued to the proc
 * waitqueue.
 *
 * If the @thread parameter is not NULL, the transaction is always queued
 * to the waitlist of that specific thread.
 *
 * Return:	0 if the transaction was successfully queued
 *		BR_DEAD_REPLY if the target process or thread is dead
 *		BR_FROZEN_REPLY if the target process or thread is frozen
 */
static int binder_proc_transaction(struct binder_transaction *t,
				    struct binder_proc *proc,
				    struct binder_thread *thread)
{
	struct binder_node *node = t->buffer->target_node;
	struct binder_priority node_prio;
	bool oneway = !!(t->flags & TF_ONE_WAY);
	bool pending_async = false;
	struct binder_transaction *t_outdated = NULL;
	bool enqueue_task = true;

	BUG_ON(!node);
	binder_node_lock(node);
	node_prio.prio = node->min_priority;
	node_prio.sched_policy = node->sched_policy;

	if (oneway) {
		BUG_ON(thread);
		if (node->has_async_transaction)
			pending_async = true;
		else
			node->has_async_transaction = true;
	}

	binder_inner_proc_lock(proc);
	if (proc->is_frozen) {
		proc->sync_recv |= !oneway;
		proc->async_recv |= oneway;
	}

	if ((proc->is_frozen && !oneway) || proc->is_dead ||
	    (thread && thread->is_dead)) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		return proc->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
	}

	if (!thread && !pending_async)
		thread = binder_select_thread_ilocked(proc);

	trace_android_vh_binder_proc_transaction(current, proc->tsk,
		thread ? thread->task : NULL, node->debug_id, t->code,
		pending_async);

	if (thread) {
		binder_transaction_priority(thread->task, t, node_prio,
					    node->inherit_rt);
		binder_enqueue_thread_work_ilocked(thread, &t->work);
	} else if (!pending_async) {
		trace_android_vh_binder_special_task(t, proc, thread,
			&t->work, &proc->todo, !oneway, &enqueue_task);
		if (enqueue_task)
			binder_enqueue_work_ilocked(&t->work, &proc->todo);
	} else {
		if ((t->flags & TF_UPDATE_TXN) && proc->is_frozen) {
			t_outdated = binder_find_outdated_transaction_ilocked(t,
									      &node->async_todo);
			if (t_outdated) {
				binder_debug(BINDER_DEBUG_TRANSACTION,
					     "txn %d supersedes %d\n",
					     t->debug_id, t_outdated->debug_id);
				list_del_init(&t_outdated->work.entry);
				proc->outstanding_txns--;
			}
		}
		trace_android_vh_binder_special_task(t, proc, thread,
			&t->work, &node->async_todo, !oneway, &enqueue_task);
		if (enqueue_task)
			binder_enqueue_work_ilocked(&t->work, &node->async_todo);
	}

	trace_android_vh_binder_proc_transaction_end(current, proc->tsk,
		thread ? thread->task : NULL, t->code, pending_async, !oneway);
	trace_android_vh_binder_proc_transaction_finish(proc, t,
		thread ? thread->task : NULL, pending_async, !oneway);

	if (!pending_async)
		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);

	proc->outstanding_txns++;
	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);

	/*
	 * To reduce potential contention, free the outdated transaction and
	 * buffer after releasing the locks.
	 */
	if (t_outdated) {
		struct binder_buffer *buffer = t_outdated->buffer;

		t_outdated->buffer = NULL;
		buffer->transaction = NULL;
		trace_binder_transaction_update_buffer_release(buffer);
		binder_release_entire_buffer(proc, NULL, buffer, false);
		binder_alloc_free_buf(&proc->alloc, buffer);
		kfree(t_outdated);
		binder_stats_deleted(BINDER_STAT_TRANSACTION);
	}

	return 0;
}

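/*
 * Example (illustrative, not driver code): for a oneway transaction t to
 * node n, the queueing above roughly reduces to:
 *
 *	!n->has_async_transaction -> select a waiting thread if any,
 *				     else queue to proc->todo
 *	n->has_async_transaction  -> queue to n->async_todo (pending_async)
 *	frozen + TF_UPDATE_TXN    -> first try to supersede a matching
 *				     entry already in n->async_todo
 *
 * Synchronous senders instead get BR_FROZEN_REPLY or BR_DEAD_REPLY right
 * away when the target cannot consume the work.
 */
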
/**
 * binder_get_node_refs_for_txn() - Get required refs on node for txn
 * @node:	struct binder_node for which to get refs
 * @procp:	returns @node->proc if valid
 * @error:	set to BR_DEAD_REPLY if @node->proc is NULL
 *
 * User-space normally keeps the node alive when creating a transaction
 * since it has a reference to the target. The local strong ref keeps it
 * alive if the sending process dies before the target process processes
 * the transaction. If the source process is malicious or has a reference
 * counting bug, relying on the local strong ref can fail.
 *
 * Since user-space can cause the local strong ref to go away, we also take
 * a tmpref on the node to ensure it survives while we are constructing
 * the transaction. We also need a tmpref on the proc while we are
 * constructing the transaction, so we take that here as well.
 *
 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
 * target proc has died, @error is set to BR_DEAD_REPLY.
 */
static struct binder_node *binder_get_node_refs_for_txn(
		struct binder_node *node,
		struct binder_proc **procp,
		uint32_t *error)
{
	struct binder_node *target_node = NULL;

	binder_node_inner_lock(node);
	if (node->proc) {
		target_node = node;
		binder_inc_node_nilocked(node, 1, 0, NULL);
		binder_inc_node_tmpref_ilocked(node);
		node->proc->tmp_ref++;
		*procp = node->proc;
	} else
		*error = BR_DEAD_REPLY;
	binder_node_inner_unlock(node);

	return target_node;
}

static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{
	int ret;
	struct binder_transaction *t;
	struct binder_work *w;
	struct binder_work *tcomplete;
	binder_size_t buffer_offset = 0;
	binder_size_t off_start_offset, off_end_offset;
	binder_size_t off_min;
	binder_size_t sg_buf_offset, sg_buf_end_offset;
	binder_size_t user_offset = 0;
	struct binder_proc *target_proc = NULL;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error = 0;
	uint32_t return_error_param = 0;
	uint32_t return_error_line = 0;
	binder_size_t last_fixup_obj_off = 0;
	binder_size_t last_fixup_min_off = 0;
	struct binder_context *context = proc->context;
	int t_debug_id = atomic_inc_return(&binder_last_id);
	char *secctx = NULL;
	u32 secctx_sz = 0;
	struct list_head sgc_head;
	struct list_head pf_head;
	const void __user *user_buffer = (const void __user *)
				(uintptr_t)tr->data.ptr.buffer;
	INIT_LIST_HEAD(&sgc_head);
	INIT_LIST_HEAD(&pf_head);

	e = binder_transaction_log_add(&binder_transaction_log);
	e->debug_id = t_debug_id;
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;
	strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);

	if (reply) {
		binder_inner_proc_lock(proc);
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_inner_proc_unlock(proc);
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_empty_call_stack;
		}
		if (in_reply_to->to_thread != thread) {
			spin_lock(&in_reply_to->lock);
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			spin_unlock(&in_reply_to->lock);
			binder_inner_proc_unlock(proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		binder_inner_proc_unlock(proc);
		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
		if (target_thread == NULL) {
			/* annotation for sparse */
			__release(&target_thread->proc->inner_lock);
			return_error = BR_DEAD_REPLY;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
				proc->pid, thread->pid,
				target_thread->transaction_stack ?
				target_thread->transaction_stack->debug_id : 0,
				in_reply_to->debug_id);
			binder_inner_proc_unlock(target_thread->proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
		target_proc->tmp_ref++;
		binder_inner_proc_unlock(target_thread->proc);
		trace_android_vh_binder_reply(target_proc, proc, thread, tr);
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			/*
			 * There must already be a strong ref
			 * on this node. If so, do a strong
			 * increment on the node to ensure it
			 * stays alive until the transaction is
			 * done.
			 */
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, tr->target.handle,
						     true);
			if (ref) {
				target_node = binder_get_node_refs_for_txn(
						ref->node, &target_proc,
						&return_error);
			} else {
				binder_user_error("%d:%d got transaction to invalid handle, %u\n",
						  proc->pid, thread->pid, tr->target.handle);
				return_error = BR_FAILED_REPLY;
			}
			binder_proc_unlock(proc);
		} else {
			mutex_lock(&context->context_mgr_node_lock);
			target_node = context->binder_context_mgr_node;
			if (target_node)
				target_node = binder_get_node_refs_for_txn(
						target_node, &target_proc,
						&return_error);
			else
				return_error = BR_DEAD_REPLY;
			mutex_unlock(&context->context_mgr_node_lock);
			if (target_node && target_proc->pid == proc->pid) {
				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_invalid_target_handle;
			}
		}
		if (!target_node) {
			/*
			 * return_error is set above
			 */
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		e->to_node = target_node->debug_id;
		trace_android_vh_binder_trans(target_proc, proc, thread, tr);
		if (security_binder_transaction(binder_get_cred(proc),
						binder_get_cred(target_proc)) < 0) {
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPERM;
			return_error_line = __LINE__;
			goto err_invalid_target_handle;
		}
		binder_inner_proc_lock(proc);

		w = list_first_entry_or_null(&thread->todo,
					     struct binder_work, entry);
		if (!(tr->flags & TF_ONE_WAY) && w &&
		    w->type == BINDER_WORK_TRANSACTION) {
			/*
			 * Do not allow new outgoing transaction from a
			 * thread that has a transaction at the head of
			 * its todo list. Only need to check the head
			 * because binder_select_thread_ilocked picks a
			 * thread from proc->waiting_threads to enqueue
			 * the transaction, and nothing is queued to the
			 * todo list while the thread is on waiting_threads.
			 */
			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
					  proc->pid, thread->pid);
			binder_inner_proc_unlock(proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_bad_todo_list;
		}

		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				spin_lock(&tmp->lock);
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				spin_unlock(&tmp->lock);
				binder_inner_proc_unlock(proc);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EPROTO;
				return_error_line = __LINE__;
				goto err_bad_call_stack;
			}
			while (tmp) {
				struct binder_thread *from;

				spin_lock(&tmp->lock);
				from = tmp->from;
				if (from && from->proc == target_proc) {
					atomic_inc(&from->tmp_ref);
					target_thread = from;
					spin_unlock(&tmp->lock);
					break;
				}
				spin_unlock(&tmp->lock);
				tmp = tmp->from_parent;
			}
		}
		binder_inner_proc_unlock(proc);
	}
	if (target_thread)
		e->to_thread = target_thread->pid;
	e->to_proc = target_proc->pid;
	trace_android_rvh_binder_transaction(target_proc, proc, thread, tr);

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_t_failed;
	}
	INIT_LIST_HEAD(&t->fd_fixups);
	binder_stats_created(BINDER_STAT_TRANSACTION);
	spin_lock_init(&t->lock);
	trace_android_vh_binder_transaction_init(t);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = t_debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	if (!(t->flags & TF_ONE_WAY) &&
	    binder_supported_policy(current->policy)) {
		/* Inherit supported policies for synchronous transactions */
		t->priority.sched_policy = current->policy;
		t->priority.prio = current->normal_prio;
	} else {
		/* Otherwise, fall back to the default priority */
		t->priority = target_proc->default_priority;
	}

	if (target_node && target_node->txn_security_ctx) {
		u32 secid;
		size_t added_size;
		int max_retries = 100;

		security_cred_getsecid(binder_get_cred(proc), &secid);
retry_alloc:
		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
		if (ret == -ENOMEM && max_retries-- > 0) {
			struct page *dummy_page;

			/*
			 * security_secid_to_secctx() can fail because of a
			 * GFP_ATOMIC allocation in which case -ENOMEM is
			 * returned. This needs to be retried, but there is
			 * currently no way to tell userspace to retry so we
			 * do it here. We make sure there is still available
			 * memory first and then retry.
			 */
			dummy_page = alloc_page(GFP_KERNEL);
			if (dummy_page) {
				__free_page(dummy_page);
				goto retry_alloc;
			}
		}
		if (ret) {
			return_error = BR_FAILED_REPLY;
			return_error_param = ret;
			return_error_line = __LINE__;
			goto err_get_secctx_failed;
		}
		added_size = ALIGN(secctx_sz, sizeof(u64));
		extra_buffers_size += added_size;
		if (extra_buffers_size < added_size) {
			/* integer overflow of extra_buffers_size */
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_extra_size;
		}
	}

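	/*
	 * Example (illustrative): the overflow guard above catches unsigned
	 * wraparound, e.g. extra_buffers_size = SIZE_MAX - 7 plus
	 * added_size = 16 wraps to 8, which is < 16, so the transaction is
	 * rejected instead of under-allocating the secctx slot.
	 */
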
	trace_binder_transaction(reply, t, target_node);

	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
		tr->offsets_size, extra_buffers_size,
		!reply && (t->flags & TF_ONE_WAY), current->tgid);
	if (IS_ERR(t->buffer)) {
		/*
		 * -ESRCH indicates VMA cleared. The target is dying.
		 */
		return_error_param = PTR_ERR(t->buffer);
		return_error = return_error_param == -ESRCH ?
			BR_DEAD_REPLY : BR_FAILED_REPLY;
		return_error_line = __LINE__;
		t->buffer = NULL;
		goto err_binder_alloc_buf_failed;
	}
	if (secctx) {
		int err;
		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
				    ALIGN(tr->offsets_size, sizeof(void *)) +
				    ALIGN(extra_buffers_size, sizeof(void *)) -
				    ALIGN(secctx_sz, sizeof(u64));

		t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
		err = binder_alloc_copy_to_buffer(&target_proc->alloc,
						  t->buffer, buf_offset,
						  secctx, secctx_sz);
		if (err) {
			t->security_ctx = 0;
			WARN_ON(1);
		}
		security_release_secctx(secctx, secctx_sz);
		secctx = NULL;
	}
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
	trace_binder_transaction_alloc_buf(t->buffer);
	trace_android_vh_alloc_oem_binder_struct(tr, t, target_proc);

	if (binder_alloc_copy_user_to_buffer(
				&target_proc->alloc,
				t->buffer,
				ALIGN(tr->data_size, sizeof(void *)),
				(const void __user *)
					(uintptr_t)tr->data.ptr.offsets,
				tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				  proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				  proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
				  proc->pid, thread->pid,
				  (u64)extra_buffers_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	off_start_offset = ALIGN(tr->data_size, sizeof(void *));
	buffer_offset = off_start_offset;
	off_end_offset = off_start_offset + tr->offsets_size;
	sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
	sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
		ALIGN(secctx_sz, sizeof(u64));
	off_min = 0;
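	/*
	 * Layout example (illustrative): for data_size = 24, offsets_size = 8,
	 * extra_buffers_size = 72 (including the secctx slot) and
	 * secctx_sz = 33 on 64-bit, the regions computed above are:
	 *
	 *	[0,24)    data        off_start_offset = 24
	 *	[24,32)   offsets     off_end_offset = 32
	 *	[32,64)   sg buffers  sg_buf_end_offset = 32 + 72 - 40 = 64
	 *	[64,104)  secctx      ALIGN(33, 8) = 40 bytes at the tail
	 */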
	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
	     buffer_offset += sizeof(binder_size_t)) {
		struct binder_object_header *hdr;
		size_t object_size;
		struct binder_object object;
		binder_size_t object_offset;
		binder_size_t copy_size;

		if (binder_alloc_copy_from_buffer(&target_proc->alloc,
						  &object_offset,
						  t->buffer,
						  buffer_offset,
						  sizeof(object_offset))) {
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}

		/*
		 * Copy the source user buffer up to the next object
		 * that will be processed.
		 */
		copy_size = object_offset - user_offset;
		if (copy_size && (user_offset > object_offset ||
				object_offset > tr->data_size ||
				binder_alloc_copy_user_to_buffer(
					&target_proc->alloc,
					t->buffer, user_offset,
					user_buffer + user_offset,
					copy_size))) {
			binder_user_error("%d:%d got transaction with invalid data ptr\n",
					proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EFAULT;
			return_error_line = __LINE__;
			goto err_copy_data_failed;
		}
		object_size = binder_get_object(target_proc, user_buffer,
				t->buffer, object_offset, &object);
		if (object_size == 0 || object_offset < off_min) {
			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
					  proc->pid, thread->pid,
					  (u64)object_offset,
					  (u64)off_min,
					  (u64)t->buffer->data_size);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}
		/*
		 * Set offset to the next buffer fragment to be
		 * copied
		 */
		user_offset = object_offset + object_size;

		hdr = &object.hdr;
		off_min = object_offset + object_size;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_binder(fp, t, thread);

			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							fp, sizeof(*fp))) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_handle(fp, t, thread);
			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							fp, sizeof(*fp))) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);
			binder_size_t fd_offset = object_offset +
				(uintptr_t)&fp->fd - (uintptr_t)fp;
			int ret = binder_translate_fd(fp->fd, fd_offset, t,
						      thread, in_reply_to);

			fp->pad_binder = 0;
			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							fp, sizeof(*fp))) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_FDA: {
			struct binder_object ptr_object;
			binder_size_t parent_offset;
			struct binder_object user_object;
			size_t user_parent_size;
			struct binder_fd_array_object *fda =
				to_binder_fd_array_object(hdr);
			size_t num_valid = (buffer_offset - off_start_offset) /
				sizeof(binder_size_t);
			struct binder_buffer_object *parent =
				binder_validate_ptr(target_proc, t->buffer,
						    &ptr_object, fda->parent,
						    off_start_offset,
						    &parent_offset,
						    num_valid);
			if (!parent) {
				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			if (!binder_validate_fixup(target_proc, t->buffer,
						   off_start_offset,
						   parent_offset,
						   fda->parent_offset,
						   last_fixup_obj_off,
						   last_fixup_min_off)) {
				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			/*
			 * We need to read the user version of the parent
			 * object to get the original user offset
			 */
			user_parent_size =
				binder_get_object(proc, user_buffer, t->buffer,
						  parent_offset, &user_object);
			if (user_parent_size != sizeof(user_object.bbo)) {
				binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
						  proc->pid, thread->pid,
						  user_parent_size,
						  sizeof(user_object.bbo));
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			ret = binder_translate_fd_array(&pf_head, fda,
							user_buffer, parent,
							&user_object.bbo, t,
							thread, in_reply_to);
			if (!ret)
				ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
								  t->buffer,
								  object_offset,
								  fda, sizeof(*fda));
			if (ret) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret > 0 ? -EINVAL : ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj_off = parent_offset;
			last_fixup_min_off =
				fda->parent_offset + sizeof(u32) * fda->num_fds;
		} break;
		case BINDER_TYPE_PTR: {
			struct binder_buffer_object *bp =
				to_binder_buffer_object(hdr);
			size_t buf_left = sg_buf_end_offset - sg_buf_offset;
			size_t num_valid;

			if (bp->length > buf_left) {
				binder_user_error("%d:%d got transaction with too large buffer\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_offset;
			}
			ret = binder_defer_copy(&sgc_head, sg_buf_offset,
				(const void __user *)(uintptr_t)bp->buffer,
				bp->length);
			if (ret) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			/* Fixup buffer pointer to target proc address space */
			bp->buffer = (uintptr_t)
				t->buffer->user_data + sg_buf_offset;
			sg_buf_offset += ALIGN(bp->length, sizeof(u64));

			num_valid = (buffer_offset - off_start_offset) /
					sizeof(binder_size_t);
			ret = binder_fixup_parent(&pf_head, t,
						  thread, bp,
						  off_start_offset,
						  num_valid,
						  last_fixup_obj_off,
						  last_fixup_min_off);
			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							bp, sizeof(*bp))) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj_off = object_offset;
			last_fixup_min_off = 0;
		} break;
		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
					  proc->pid, thread->pid, hdr->type);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_object_type;
		}
	}
	/* Done processing objects, copy the rest of the buffer */
	if (binder_alloc_copy_user_to_buffer(
				&target_proc->alloc,
				t->buffer, user_offset,
				user_buffer + user_offset,
				tr->data_size - user_offset)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}

	ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
					    &sgc_head, &pf_head);
	if (ret) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				  proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = ret;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (t->buffer->oneway_spam_suspect)
		tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
	else
		tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	t->work.type = BINDER_WORK_TRANSACTION;

	if (reply) {
		binder_enqueue_thread_work(thread, tcomplete);
		binder_inner_proc_lock(target_proc);
		if (target_thread->is_dead) {
			return_error = BR_DEAD_REPLY;
			binder_inner_proc_unlock(target_proc);
			goto err_dead_proc_or_thread;
		}
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction_ilocked(target_thread, in_reply_to);
		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
		target_proc->outstanding_txns++;
		binder_inner_proc_unlock(target_proc);
		wake_up_interruptible_sync(&target_thread->wait);
		trace_android_vh_binder_restore_priority(in_reply_to, current);
		binder_restore_priority(current, in_reply_to->saved_priority);
		binder_free_transaction(in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_inner_proc_lock(proc);
		/*
		 * Defer the TRANSACTION_COMPLETE, so we don't return to
		 * userspace immediately; this allows the target process to
		 * immediately start processing this transaction, reducing
		 * latency. We will then return the TRANSACTION_COMPLETE when
		 * the target replies (or there is an error).
		 */
		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
		binder_inner_proc_unlock(proc);
		return_error = binder_proc_transaction(t,
				target_proc, target_thread);
		if (return_error) {
			binder_inner_proc_lock(proc);
			binder_pop_transaction_ilocked(thread, t);
			binder_inner_proc_unlock(proc);
			goto err_dead_proc_or_thread;
		}
	} else {
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		binder_enqueue_thread_work(thread, tcomplete);
		return_error = binder_proc_transaction(t, target_proc, NULL);
		if (return_error)
			goto err_dead_proc_or_thread;
	}
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	binder_proc_dec_tmpref(target_proc);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	/*
	 * write barrier to synchronize with initialization
	 * of log entry
	 */
	smp_wmb();
	WRITE_ONCE(e->debug_id_done, t_debug_id);
	return;

err_dead_proc_or_thread:
	return_error_line = __LINE__;
	binder_dequeue_work(proc, tcomplete);
err_translate_failed:
err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
	binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
	binder_free_txn_fixups(t);
	trace_binder_transaction_failed_buffer_release(t->buffer);
	binder_transaction_buffer_release(target_proc, NULL, t->buffer,
					  buffer_offset, true);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	target_node = NULL;
	t->buffer->transaction = NULL;
	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
err_bad_extra_size:
	if (secctx)
		security_release_secctx(secctx, secctx_sz);
err_get_secctx_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_todo_list:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	if (target_proc)
		binder_proc_dec_tmpref(target_proc);
	if (target_node) {
		binder_dec_node(target_node, 1, 0);
		binder_dec_node_tmpref(target_node);
	}

	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
		     proc->pid, thread->pid, return_error, return_error_param,
		     (u64)tr->data_size, (u64)tr->offsets_size,
		     return_error_line);

	{
		struct binder_transaction_log_entry *fe;

		e->return_error = return_error;
		e->return_error_param = return_error_param;
		e->return_error_line = return_error_line;
		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
		/*
		 * write barrier to synchronize with initialization
		 * of log entry
		 */
		smp_wmb();
		WRITE_ONCE(e->debug_id_done, t_debug_id);
		WRITE_ONCE(fe->debug_id_done, t_debug_id);
	}

	BUG_ON(thread->return_error.cmd != BR_OK);
	if (in_reply_to) {
		trace_android_vh_binder_restore_priority(in_reply_to, current);
		binder_restore_priority(current, in_reply_to->saved_priority);
		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
		binder_send_failed_reply(in_reply_to, return_error);
	} else {
		thread->return_error.cmd = return_error;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
	}
}

/**
|
|
* binder_free_buf() - free the specified buffer
|
|
* @proc: binder proc that owns buffer
|
|
* @buffer: buffer to be freed
|
|
* @is_failure: failed to send transaction
|
|
*
|
|
* If buffer for an async transaction, enqueue the next async
|
|
* transaction from the node.
|
|
*
|
|
* Cleanup buffer and free it.
|
|
*/
|
|
static void
binder_free_buf(struct binder_proc *proc,
		struct binder_thread *thread,
		struct binder_buffer *buffer, bool is_failure)
{
	bool enqueue_task = true;
	bool has_transaction = false;

	trace_android_vh_binder_free_buf(proc, thread, buffer);
	binder_inner_proc_lock(proc);
	if (buffer->transaction) {
		buffer->transaction->buffer = NULL;
		buffer->transaction = NULL;
		has_transaction = true;
	}
	binder_inner_proc_unlock(proc);
	if (buffer->async_transaction && buffer->target_node) {
		struct binder_node *buf_node;
		struct binder_work *w;

		buf_node = buffer->target_node;
		binder_node_inner_lock(buf_node);
		BUG_ON(!buf_node->has_async_transaction);
		BUG_ON(buf_node->proc != proc);
		w = binder_dequeue_work_head_ilocked(
				&buf_node->async_todo);
		if (!w) {
			buf_node->has_async_transaction = false;
		} else {
			trace_android_vh_binder_special_task(NULL, proc, thread, w,
					&proc->todo, false, &enqueue_task);
			if (enqueue_task)
				binder_enqueue_work_ilocked(w, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
		binder_node_inner_unlock(buf_node);
	}
	trace_android_vh_binder_buffer_release(proc, thread, buffer,
					       has_transaction);
	trace_binder_transaction_buffer_release(buffer);
	binder_release_entire_buffer(proc, thread, buffer, is_failure);
	binder_alloc_free_buf(&proc->alloc, buffer);
}

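/*
 * Process the BC_* commands that userspace queued in the write buffer of
 * a BINDER_WRITE_READ ioctl. On return, *consumed reflects how many bytes
 * were processed; processing stops early once a return error is pending
 * for the thread.
 */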
static int binder_thread_write(struct binder_proc *proc,
			struct binder_thread *thread,
			binder_uintptr_t binder_buffer, size_t size,
			binder_size_t *consumed)
{
	uint32_t cmd;
	struct binder_context *context = proc->context;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error.cmd == BR_OK) {
		int ret;

		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
		}
		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			uint32_t target;
			const char *debug_string;
			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
			struct binder_ref_data rdata;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(uint32_t);
			ret = -1;
			if (increment && !target) {
				struct binder_node *ctx_mgr_node;
				mutex_lock(&context->context_mgr_node_lock);
				ctx_mgr_node = context->binder_context_mgr_node;
				if (ctx_mgr_node)
					ret = binder_inc_ref_for_node(
							proc, ctx_mgr_node,
							strong, NULL, &rdata);
				mutex_unlock(&context->context_mgr_node_lock);
			}
			if (ret)
				ret = binder_update_ref_for_handle(
						proc, target, increment, strong,
						&rdata);
			if (!ret && rdata.desc != target) {
				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
					proc->pid, thread->pid,
					target, rdata.desc);
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				break;
			case BC_RELEASE:
				debug_string = "Release";
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				break;
			}
			if (ret) {
				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
					proc->pid, thread->pid, debug_string,
					strong, target, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s ref %d desc %d s %d w %d\n",
				     proc->pid, thread->pid, debug_string,
				     rdata.debug_id, rdata.desc, rdata.strong,
				     rdata.weak);
			break;
		}
		case BC_INCREFS_DONE:
		case BC_ACQUIRE_DONE: {
			binder_uintptr_t node_ptr;
			binder_uintptr_t cookie;
			struct binder_node *node;
			bool free_node;

			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			node = binder_get_node(proc, node_ptr);
			if (node == NULL) {
				binder_user_error("%d:%d %s u%016llx no match\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" :
					"BC_ACQUIRE_DONE",
					(u64)node_ptr);
				break;
			}
			if (cookie != node->cookie) {
				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
					(u64)node_ptr, node->debug_id,
					(u64)cookie, (u64)node->cookie);
				binder_put_node(node);
				break;
			}
			binder_node_inner_lock(node);
			if (cmd == BC_ACQUIRE_DONE) {
				if (node->pending_strong_ref == 0) {
					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_strong_ref = 0;
			} else {
				if (node->pending_weak_ref == 0) {
					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_weak_ref = 0;
			}
			free_node = binder_dec_node_nilocked(node,
					cmd == BC_ACQUIRE_DONE, 0);
			WARN_ON(free_node);
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s node %d ls %d lw %d tr %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
				     node->debug_id, node->local_strong_refs,
				     node->local_weak_refs, node->tmp_refs);
			binder_node_inner_unlock(node);
			binder_put_node(node);
			break;
		}
		case BC_ATTEMPT_ACQUIRE:
			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
			return -EINVAL;
		case BC_ACQUIRE_RESULT:
			pr_err("BC_ACQUIRE_RESULT not supported\n");
			return -EINVAL;

		case BC_FREE_BUFFER: {
			binder_uintptr_t data_ptr;
			struct binder_buffer *buffer;

			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);

			buffer = binder_alloc_prepare_to_free(&proc->alloc,
							      data_ptr);
			if (IS_ERR_OR_NULL(buffer)) {
				if (PTR_ERR(buffer) == -EPERM) {
					binder_user_error(
						"%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
						proc->pid, thread->pid,
						(u64)data_ptr);
				} else {
					binder_user_error(
						"%d:%d BC_FREE_BUFFER u%016llx no match\n",
						proc->pid, thread->pid,
						(u64)data_ptr);
				}
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
				     proc->pid, thread->pid, (u64)data_ptr,
				     buffer->debug_id,
				     buffer->transaction ? "active" : "finished");
			binder_free_buf(proc, thread, buffer, false);
			break;
		}

		case BC_TRANSACTION_SG:
		case BC_REPLY_SG: {
			struct binder_transaction_data_sg tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr.transaction_data,
					   cmd == BC_REPLY_SG, tr.buffers_size);
			break;
		}
		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr,
					   cmd == BC_REPLY, 0);
			break;
		}

		case BC_REGISTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_REGISTER_LOOPER\n",
				     proc->pid, thread->pid);
			binder_inner_proc_lock(proc);
			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
					proc->pid, thread->pid);
			} else if (proc->requested_threads == 0) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
					proc->pid, thread->pid);
			} else {
				proc->requested_threads--;
				proc->requested_threads_started++;
			}
			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
			binder_inner_proc_unlock(proc);
			trace_android_vh_binder_looper_state_registered(thread, proc);
			break;
		case BC_ENTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_ENTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
					proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_EXIT_LOOPER\n",
				     proc->pid, thread->pid);
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;

		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			uint32_t target;
			binder_uintptr_t cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death = NULL;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				/*
				 * Allocate memory for death notification
				 * before taking lock
				 */
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					WARN_ON(thread->return_error.cmd !=
						BR_OK);
					thread->return_error.cmd = BR_ERROR;
					binder_enqueue_thread_work(
						thread,
						&thread->return_error.work);
					binder_debug(
						BINDER_DEBUG_FAILED_TRANSACTION,
						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
						proc->pid, thread->pid);
					break;
				}
			}
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, target, false);
			if (ref == NULL) {
				binder_user_error("%d:%d %s invalid ref %d\n",
					proc->pid, thread->pid,
					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
					"BC_REQUEST_DEATH_NOTIFICATION" :
					"BC_CLEAR_DEATH_NOTIFICATION",
					target);
				binder_proc_unlock(proc);
				kfree(death);
				break;
			}

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     (u64)cookie, ref->data.debug_id,
				     ref->data.desc, ref->data.strong,
				     ref->data.weak, ref->node->debug_id);

			binder_node_lock(ref->node);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				if (ref->death) {
					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					kfree(death);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				if (ref->node->proc == NULL) {
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;

					binder_inner_proc_lock(proc);
					binder_enqueue_work_ilocked(
						&ref->death->work, &proc->todo);
					binder_wakeup_proc_ilocked(proc);
					binder_inner_proc_unlock(proc);
				}
			} else {
				if (ref->death == NULL) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
						proc->pid, thread->pid,
						(u64)death->cookie,
						(u64)cookie);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				ref->death = NULL;
				binder_inner_proc_lock(proc);
				if (list_empty(&death->work.entry)) {
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper &
					    (BINDER_LOOPER_STATE_REGISTERED |
					     BINDER_LOOPER_STATE_ENTERED))
						binder_enqueue_thread_work_ilocked(
								thread,
								&death->work);
					else {
						binder_enqueue_work_ilocked(
								&death->work,
								&proc->todo);
						binder_wakeup_proc_ilocked(
								proc);
					}
				} else {
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
				binder_inner_proc_unlock(proc);
			}
			binder_node_unlock(ref->node);
			binder_proc_unlock(proc);
		} break;
		case BC_DEAD_BINDER_DONE: {
			struct binder_work *w;
			binder_uintptr_t cookie;
			struct binder_ref_death *death = NULL;

			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(cookie);
			binder_inner_proc_lock(proc);
			list_for_each_entry(w, &proc->delivered_death,
					    entry) {
				struct binder_ref_death *tmp_death =
					container_of(w,
						     struct binder_ref_death,
						     work);

				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
				     proc->pid, thread->pid, (u64)cookie,
				     death);
			if (death == NULL) {
				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
					proc->pid, thread->pid, (u64)cookie);
				binder_inner_proc_unlock(proc);
				break;
			}
			binder_dequeue_work_ilocked(&death->work);
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper &
				    (BINDER_LOOPER_STATE_REGISTERED |
				     BINDER_LOOPER_STATE_ENTERED))
					binder_enqueue_thread_work_ilocked(
						thread, &death->work);
				else {
					binder_enqueue_work_ilocked(
							&death->work,
							&proc->todo);
					binder_wakeup_proc_ilocked(proc);
				}
			}
			binder_inner_proc_unlock(proc);
		} break;

		default:
			pr_err("%d:%d unknown command %d\n",
			       proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		*consumed = ptr - buffer;
	}
	return 0;
}

static void binder_stat_br(struct binder_proc *proc,
			   struct binder_thread *thread, uint32_t cmd)
{
	trace_binder_return(cmd);
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
	}
}

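/*
 * Helper for BINDER_WORK_NODE handling in binder_thread_read(): writes a
 * single BR_* ref-count command plus the node pointer and cookie to the
 * userspace read buffer and advances *ptrp past what was written.
 */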
static int binder_put_node_cmd(struct binder_proc *proc,
			       struct binder_thread *thread,
			       void __user **ptrp,
			       binder_uintptr_t node_ptr,
			       binder_uintptr_t node_cookie,
			       int node_debug_id,
			       uint32_t cmd, const char *cmd_name)
{
	void __user *ptr = *ptrp;

	if (put_user(cmd, (uint32_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(uint32_t);

	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	binder_stat_br(proc, thread, cmd);
	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
		     proc->pid, thread->pid, cmd_name, node_debug_id,
		     (u64)node_ptr, (u64)node_cookie);

	*ptrp = ptr;
	return 0;
}

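/*
 * Block until there is work for this thread (or for the process, when
 * do_proc_work is true). The interruptible wait is bracketed by
 * freezer_do_not_count()/freezer_count() so the freezer need not wait
 * for a binder thread that is merely idle here.
 */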
static int binder_wait_for_work(struct binder_thread *thread,
				bool do_proc_work)
{
	DEFINE_WAIT(wait);
	struct binder_proc *proc = thread->proc;
	int ret = 0;

	freezer_do_not_count();
	binder_inner_proc_lock(proc);
	for (;;) {
		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
		if (binder_has_work_ilocked(thread, do_proc_work))
			break;
		if (do_proc_work)
			list_add(&thread->waiting_thread_node,
				 &proc->waiting_threads);
		trace_android_vh_binder_wait_for_work(do_proc_work, thread, proc);
		binder_inner_proc_unlock(proc);
		schedule();
		binder_inner_proc_lock(proc);
		list_del_init(&thread->waiting_thread_node);
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
	}
	finish_wait(&thread->wait, &wait);
	binder_inner_proc_unlock(proc);
	freezer_count();

	return ret;
}

/**
 * binder_apply_fd_fixups() - finish fd translation
 * @proc:	binder_proc associated with @t->buffer
 * @t:	binder transaction with list of fd fixups
 *
 * Now that we are in the context of the transaction target
 * process, we can allocate and install fds. Process the
 * list of fds to translate and fixup the buffer with the
 * new fds.
 *
 * If we fail to allocate an fd, then free the resources by
 * fput'ing files that have not been processed and ksys_close'ing
 * any fds that have already been allocated.
 */
static int binder_apply_fd_fixups(struct binder_proc *proc,
				  struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;
	int ret = 0;

	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
		int fd = get_unused_fd_flags(O_CLOEXEC);

		if (fd < 0) {
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "failed fd fixup txn %d fd %d\n",
				     t->debug_id, fd);
			ret = -ENOMEM;
			break;
		}
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "fd fixup txn %d fd %d\n",
			     t->debug_id, fd);
		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
		fd_install(fd, fixup->file);
		fixup->file = NULL;
		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
						fixup->offset, &fd,
						sizeof(u32))) {
			ret = -EINVAL;
			break;
		}
	}
	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		if (fixup->file) {
			fput(fixup->file);
		} else if (ret) {
			u32 fd;
			int err;

			err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
							    t->buffer,
							    fixup->offset,
							    sizeof(fd));
			WARN_ON(err);
			if (!err)
				binder_deferred_fd_close(fd);
		}
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}

	return ret;
}

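/*
 * Drain work items into the userspace read buffer of a BINDER_WRITE_READ
 * ioctl, translating each binder_work entry into the matching BR_* return
 * command. May block unless non_block is set, and asks userspace to spawn
 * another looper thread (BR_SPAWN_LOOPER) when the thread pool runs dry.
 */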
static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	binder_inner_proc_lock(proc);
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
	binder_inner_proc_unlock(proc);

	thread->looper |= BINDER_LOOPER_STATE_WAITING;

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !binder_worklist_empty(proc, &thread->todo));
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		trace_android_vh_binder_restore_priority(NULL, current);
		binder_restore_priority(current, proc->default_priority);
	}

	if (non_block) {
		if (!binder_has_work(thread, wait_for_proc_work))
			ret = -EAGAIN;
	} else {
		ret = binder_wait_for_work(thread, wait_for_proc_work);
	}

	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	while (1) {
		uint32_t cmd;
		struct binder_transaction_data_secctx tr;
		struct binder_transaction_data *trd = &tr.transaction_data;
		struct binder_work *w = NULL;
		struct list_head *list = NULL;
		struct binder_transaction *t = NULL;
		struct binder_thread *t_from;
		size_t trsize = sizeof(*trd);

		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&thread->todo))
			list = &thread->todo;
		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
			   wait_for_proc_work)
			list = &proc->todo;
		else {
			binder_inner_proc_unlock(proc);

			/* no data added */
			if (ptr - buffer == 4 && !thread->looper_need_return)
				goto retry;
			break;
		}

		if (end - ptr < sizeof(tr) + 4) {
			binder_inner_proc_unlock(proc);
			break;
		}
		trace_android_vh_binder_thread_read(&list, proc, thread);
		w = binder_dequeue_work_head_ilocked(list);
		if (binder_worklist_empty_ilocked(&thread->todo))
			thread->process_todo = false;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			binder_inner_proc_unlock(proc);
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			WARN_ON(e->cmd == BR_OK);
			binder_inner_proc_unlock(proc);
			if (put_user(e->cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			cmd = e->cmd;
			e->cmd = BR_OK;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE:
		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
			if (proc->oneway_spam_detection_enabled &&
			    w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
				cmd = BR_ONEWAY_SPAM_SUSPECT;
			else
				cmd = BR_TRANSACTION_COMPLETE;
			binder_inner_proc_unlock(proc);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);
		} break;
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			int strong, weak;
			binder_uintptr_t node_ptr = node->ptr;
			binder_uintptr_t node_cookie = node->cookie;
			int node_debug_id = node->debug_id;
			int has_weak_ref;
			int has_strong_ref;
			void __user *orig_ptr = ptr;

			BUG_ON(proc != node->proc);
			strong = node->internal_strong_refs ||
					node->local_strong_refs;
			weak = !hlist_empty(&node->refs) ||
					node->local_weak_refs ||
					node->tmp_refs || strong;
			has_strong_ref = node->has_strong_ref;
			has_weak_ref = node->has_weak_ref;

			if (weak && !has_weak_ref) {
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			}
			if (strong && !has_strong_ref) {
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			}
			if (!strong && has_strong_ref)
				node->has_strong_ref = 0;
			if (!weak && has_weak_ref)
				node->has_weak_ref = 0;
			if (!weak && !strong) {
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx deleted\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_inner_proc_unlock(proc);
				binder_node_lock(node);
				/*
				 * Acquire the node lock before freeing the
				 * node to serialize with other threads that
				 * may have been holding the node lock while
				 * decrementing this node (avoids race where
				 * this thread frees while the other thread
				 * is unlocking the node after the final
				 * decrement)
				 */
				binder_node_unlock(node);
				binder_free_node(node);
			} else
				binder_inner_proc_unlock(proc);

			if (weak && !has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_INCREFS, "BR_INCREFS");
			if (!ret && strong && !has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_ACQUIRE, "BR_ACQUIRE");
			if (!ret && !strong && has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_RELEASE, "BR_RELEASE");
			if (!ret && !weak && has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_DECREFS, "BR_DECREFS");
			if (orig_ptr == ptr)
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
			if (ret)
				return ret;
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;
			binder_uintptr_t cookie;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			cookie = death->cookie;

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				     proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
				     "BR_DEAD_BINDER" :
				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				     (u64)cookie);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				binder_inner_proc_unlock(proc);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else {
				binder_enqueue_work_ilocked(
						w, &proc->delivered_death);
				binder_inner_proc_unlock(proc);
			}
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		default:
			binder_inner_proc_unlock(proc);
			pr_err("%d:%d: bad work type %d\n",
			       proc->pid, thread->pid, w->type);
			break;
		}

		if (!t)
			continue;

		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;
			struct binder_priority node_prio;

			trd->target.ptr = target_node->ptr;
			trd->cookie = target_node->cookie;
			node_prio.sched_policy = target_node->sched_policy;
			node_prio.prio = target_node->min_priority;
			binder_transaction_priority(current, t, node_prio,
						    target_node->inherit_rt);
			cmd = BR_TRANSACTION;
		} else {
			trd->target.ptr = 0;
			trd->cookie = 0;
			cmd = BR_REPLY;
		}
		trd->code = t->code;
		trd->flags = t->flags;
		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		t_from = binder_get_txn_from(t);
		if (t_from) {
			struct task_struct *sender = t_from->proc->tsk;

			trd->sender_pid =
				task_tgid_nr_ns(sender,
						task_active_pid_ns(current));
			trace_android_vh_sync_txn_recvd(thread->task, t_from->task);
		} else {
			trd->sender_pid = 0;
		}

		ret = binder_apply_fd_fixups(proc, t);
		if (ret) {
			struct binder_buffer *buffer = t->buffer;
			bool oneway = !!(t->flags & TF_ONE_WAY);
			int tid = t->debug_id;

			if (t_from)
				binder_thread_dec_tmpref(t_from);
			buffer->transaction = NULL;
			binder_cleanup_transaction(t, "fd fixups failed",
						   BR_FAILED_REPLY);
			binder_free_buf(proc, thread, buffer, true);
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
				     proc->pid, thread->pid,
				     oneway ? "async " :
					(cmd == BR_REPLY ? "reply " : ""),
				     tid, BR_FAILED_REPLY, ret, __LINE__);
			if (cmd == BR_REPLY) {
				cmd = BR_FAILED_REPLY;
				if (put_user(cmd, (uint32_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(uint32_t);
				binder_stat_br(proc, thread, cmd);
				break;
			}
			continue;
		}
		trd->data_size = t->buffer->data_size;
		trd->offsets_size = t->buffer->offsets_size;
		trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
		trd->data.ptr.offsets = trd->data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					      sizeof(void *));

		tr.secctx = t->security_ctx;
		if (t->security_ctx) {
			cmd = BR_TRANSACTION_SEC_CTX;
			trsize = sizeof(tr);
		}
		if (put_user(cmd, (uint32_t __user *)ptr)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "put_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, trsize)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "copy_to_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += trsize;

		trace_binder_transaction_received(t);
		trace_android_vh_binder_transaction_received(t, proc, thread, cmd);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
				(cmd == BR_TRANSACTION_SEC_CTX) ?
				     "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
			     t->debug_id, t_from ? t_from->proc->pid : 0,
			     t_from ? t_from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)trd->data.ptr.buffer,
			     (u64)trd->data.ptr.offsets);

		if (t_from)
			binder_thread_dec_tmpref(t_from);
		t->buffer->allow_user_free = 1;
		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
			binder_inner_proc_lock(thread->proc);
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
			binder_inner_proc_unlock(thread->proc);
		} else {
			binder_free_transaction(t);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	binder_inner_proc_lock(proc);
	if (proc->requested_threads == 0 &&
	    list_empty(&thread->proc->waiting_threads) &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED))
	     /* the user-space code fails to spawn a new thread
	      * if we leave this out
	      */) {
		proc->requested_threads++;
		binder_inner_proc_unlock(proc);
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	} else
		binder_inner_proc_unlock(proc);
	return 0;
}

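/*
 * Flush a work list when its owner goes away, releasing or failing each
 * undelivered item according to its type.
 */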
static void binder_release_work(struct binder_proc *proc,
				struct list_head *list)
{
	struct binder_work *w;
	enum binder_work_type wtype;

	while (1) {
		binder_inner_proc_lock(proc);
		w = binder_dequeue_work_head_ilocked(list);
		wtype = w ? w->type : 0;
		binder_inner_proc_unlock(proc);
		if (!w)
			return;

		switch (wtype) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);

			binder_cleanup_transaction(t, "process died.",
						   BR_DEAD_REPLY);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_ERROR: %u\n",
				e->cmd);
		} break;
		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered death notification, %016llx\n",
				(u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		case BINDER_WORK_NODE:
			break;
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       wtype);
			break;
		}
	}
}

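/*
 * Look up the binder_thread for current in the proc's rb-tree, keyed by
 * PID. If it is missing and new_thread is non-NULL, initialize and insert
 * new_thread instead; the caller must hold the inner proc lock.
 */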
static struct binder_thread *binder_get_thread_ilocked(
		struct binder_proc *proc, struct binder_thread *new_thread)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			return thread;
	}
	if (!new_thread)
		return NULL;
	thread = new_thread;
	binder_stats_created(BINDER_STAT_THREAD);
	thread->proc = proc;
	thread->pid = current->pid;
	get_task_struct(current);
	thread->task = current;
	atomic_set(&thread->tmp_ref, 0);
	init_waitqueue_head(&thread->wait);
	INIT_LIST_HEAD(&thread->todo);
	rb_link_node(&thread->rb_node, parent, p);
	rb_insert_color(&thread->rb_node, &proc->threads);
	thread->looper_need_return = true;
	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->return_error.cmd = BR_OK;
	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->reply_error.cmd = BR_OK;
	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
	return thread;
}

static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread;
	struct binder_thread *new_thread;

	binder_inner_proc_lock(proc);
	thread = binder_get_thread_ilocked(proc, NULL);
	binder_inner_proc_unlock(proc);
	if (!thread) {
		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (new_thread == NULL)
			return NULL;
		binder_inner_proc_lock(proc);
		thread = binder_get_thread_ilocked(proc, new_thread);
		binder_inner_proc_unlock(proc);
		if (thread != new_thread)
			kfree(new_thread);
	}
	return thread;
}

static void binder_free_proc(struct binder_proc *proc)
{
	struct binder_device *device;
	struct binder_proc_ext *eproc =
		container_of(proc, struct binder_proc_ext, proc);

	BUG_ON(!list_empty(&proc->todo));
	BUG_ON(!list_empty(&proc->delivered_death));
	if (proc->outstanding_txns)
		pr_warn("%s: Unexpected outstanding_txns %d\n",
			__func__, proc->outstanding_txns);
	device = container_of(proc->context, struct binder_device, context);
	if (refcount_dec_and_test(&device->ref)) {
		kfree(proc->context->name);
		kfree(device);
	}
	binder_alloc_deferred_release(&proc->alloc);
	put_task_struct(proc->tsk);
	put_cred(eproc->cred);
	binder_stats_deleted(BINDER_STAT_PROC);
	trace_android_vh_binder_free_proc(proc);
	kfree(eproc);
}

static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	put_task_struct(thread->task);
	kfree(thread);
}

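/*
 * Tear down a thread on BINDER_THREAD_EXIT or process release: unhook it
 * from proc->threads, unwind its transaction stack, send BR_DEAD_REPLY
 * for a reply the other side is still waiting on, and drop the temporary
 * reference that keeps the struct alive until binder_free_thread().
 */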
static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;
	struct binder_transaction *last_t = NULL;

	binder_inner_proc_lock(thread->proc);
	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * free the thread in binder_free_thread()
	 */
	proc->tmp_ref++;
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
	atomic_inc(&thread->tmp_ref);
	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t) {
		spin_lock(&t->lock);
		if (t->to_thread == thread)
			send_reply = t;
	} else {
		__acquire(&t->lock);
	}
	thread->is_dead = true;

	while (t) {
		last_t = t;
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			     proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			thread->proc->outstanding_txns--;
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
		spin_unlock(&last_t->lock);
		if (t)
			spin_lock(&t->lock);
		else
			__acquire(&t->lock);
	}
	/* annotation for sparse, lock not acquired in last iteration above */
	__release(&t->lock);

	/*
	 * If this thread used poll, make sure we remove the waitqueue from any
	 * poll data structures holding it.
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL)
		wake_up_pollfree(&thread->wait);

	binder_inner_proc_unlock(thread->proc);

	/*
	 * This is needed to avoid races between wake_up_pollfree() above and
	 * someone else removing the last entry from the queue for other reasons
	 * (e.g. ep_remove_wait_queue() being called due to an epoll file
	 * descriptor being closed). Such other users hold an RCU read lock, so
	 * we can be sure they're done after we call synchronize_rcu().
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL)
		synchronize_rcu();

	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(proc, &thread->todo);
	trace_android_vh_binder_thread_release(proc, thread);
	binder_thread_dec_tmpref(thread);
	return active_transactions;
}

static __poll_t binder_poll(struct file *filp,
			    struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	bool wait_for_proc_work;

	thread = binder_get_thread(proc);
	if (!thread)
		return EPOLLERR;

	binder_inner_proc_lock(thread->proc);
	thread->looper |= BINDER_LOOPER_STATE_POLL;
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);

	binder_inner_proc_unlock(thread->proc);

	poll_wait(filp, &thread->wait, wait);

	if (binder_has_work(thread, wait_for_proc_work))
		return EPOLLIN;

	return 0;
}

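/*
 * Handler for BINDER_WRITE_READ: copy in the binder_write_read block,
 * consume the write buffer first, then fill the read buffer, and copy
 * the updated consumed counts back to userspace even on partial failure.
 */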
static int binder_ioctl_write_read(struct file *filp,
				unsigned int cmd, unsigned long arg,
				struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&proc->todo))
			binder_wakeup_proc_ilocked(proc);
		binder_inner_proc_unlock(proc);
		trace_android_vh_binder_read_done(proc, thread);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}

static int binder_ioctl_set_ctx_mgr(struct file *filp,
				    struct flat_binder_object *fbo)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;
	struct binder_node *new_node;
	kuid_t curr_euid = current_euid();

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(binder_get_cred(proc));
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	new_node = binder_new_node(proc, fbo);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
	binder_node_lock(new_node);
	new_node->local_weak_refs++;
	new_node->local_strong_refs++;
	new_node->has_strong_ref = 1;
	new_node->has_weak_ref = 1;
	context->binder_context_mgr_node = new_node;
	binder_node_unlock(new_node);
	binder_put_node(new_node);
out:
	mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}

static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
		struct binder_node_info_for_ref *info)
{
	struct binder_node *node;
	struct binder_context *context = proc->context;
	__u32 handle = info->handle;

	if (info->strong_count || info->weak_count || info->reserved1 ||
	    info->reserved2 || info->reserved3) {
		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
				  proc->pid);
		return -EINVAL;
	}

	/* This ioctl may only be used by the context manager */
	mutex_lock(&context->context_mgr_node_lock);
	if (!context->binder_context_mgr_node ||
	    context->binder_context_mgr_node->proc != proc) {
		mutex_unlock(&context->context_mgr_node_lock);
		return -EPERM;
	}
	mutex_unlock(&context->context_mgr_node_lock);

	node = binder_get_node_from_ref(proc, handle, true, NULL);
	if (!node)
		return -EINVAL;

	info->strong_count = node->local_strong_refs +
		node->internal_strong_refs;
	info->weak_count = node->local_weak_refs;

	binder_put_node(node);

	return 0;
}

static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
				struct binder_node_debug_info *info)
{
	struct rb_node *n;
	binder_uintptr_t ptr = info->ptr;

	memset(info, 0, sizeof(*info));

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (node->ptr > ptr) {
			info->ptr = node->ptr;
			info->cookie = node->cookie;
			info->has_strong_ref = node->has_strong_ref;
			info->has_weak_ref = node->has_weak_ref;
			break;
		}
	}
	binder_inner_proc_unlock(proc);

	return 0;
}

static bool binder_txns_pending_ilocked(struct binder_proc *proc)
{
	struct rb_node *n;
	struct binder_thread *thread;

	if (proc->outstanding_txns > 0)
		return true;

	for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->transaction_stack)
			return true;
	}
	return false;
}

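/*
 * Freeze or unfreeze one binder_proc for BINDER_FREEZE. When freezing,
 * mark the proc frozen first so no new transactions are accepted, then
 * optionally wait up to timeout_ms for outstanding transactions to drain;
 * -EAGAIN means transactions were still pending and the freeze was rolled
 * back.
 */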
static int binder_ioctl_freeze(struct binder_freeze_info *info,
			       struct binder_proc *target_proc)
{
	int ret = 0;

	if (!info->enable) {
		binder_inner_proc_lock(target_proc);
		target_proc->sync_recv = false;
		target_proc->async_recv = false;
		target_proc->is_frozen = false;
		binder_inner_proc_unlock(target_proc);
		return 0;
	}

	/*
	 * Freezing the target. Prevent new transactions by
	 * setting frozen state. If timeout specified, wait
	 * for transactions to drain.
	 */
	binder_inner_proc_lock(target_proc);
	target_proc->sync_recv = false;
	target_proc->async_recv = false;
	target_proc->is_frozen = true;
	binder_inner_proc_unlock(target_proc);

	if (info->timeout_ms > 0)
		ret = wait_event_interruptible_timeout(
			target_proc->freeze_wait,
			(!target_proc->outstanding_txns),
			msecs_to_jiffies(info->timeout_ms));

	/* Check pending transactions that wait for reply */
	if (ret >= 0) {
		binder_inner_proc_lock(target_proc);
		if (binder_txns_pending_ilocked(target_proc))
			ret = -EAGAIN;
		binder_inner_proc_unlock(target_proc);
	}

	if (ret < 0) {
		binder_inner_proc_lock(target_proc);
		target_proc->is_frozen = false;
		binder_inner_proc_unlock(target_proc);
	}

	return ret;
}

static int binder_ioctl_get_freezer_info(
				struct binder_frozen_status_info *info)
{
	struct binder_proc *target_proc;
	bool found = false;
	__u32 txns_pending;

	info->sync_recv = 0;
	info->async_recv = 0;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
		if (target_proc->pid == info->pid) {
			found = true;
			binder_inner_proc_lock(target_proc);
			txns_pending = binder_txns_pending_ilocked(target_proc);
			info->sync_recv |= target_proc->sync_recv |
					(txns_pending << 1);
			info->async_recv |= target_proc->async_recv;
			binder_inner_proc_unlock(target_proc);
		}
	}
	mutex_unlock(&binder_procs_lock);

	if (!found)
		return -EINVAL;

	return 0;
}

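/* Top-level ioctl dispatcher for /dev/binder and binderfs devices. */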
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	binder_selftest_alloc(&proc->alloc);

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS: {
		u32 max_threads;

		if (copy_from_user(&max_threads, ubuf,
				   sizeof(max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->max_threads = max_threads;
		binder_inner_proc_unlock(proc);
		break;
	}
	case BINDER_SET_CONTEXT_MGR_EXT: {
		struct flat_binder_object fbo;

		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
			ret = -EINVAL;
			goto err;
		}
		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
		if (ret)
			goto err;
		break;
	}
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_thread_release(proc, thread);
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	case BINDER_GET_NODE_INFO_FOR_REF: {
		struct binder_node_info_for_ref info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		break;
	}
	case BINDER_GET_NODE_DEBUG_INFO: {
		struct binder_node_debug_info info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_debug_info(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	case BINDER_FREEZE: {
		struct binder_freeze_info info;
		struct binder_proc **target_procs = NULL, *target_proc;
		int target_procs_count = 0, i = 0;

		ret = 0;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		mutex_lock(&binder_procs_lock);
		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
			if (target_proc->pid == info.pid)
				target_procs_count++;
		}

		if (target_procs_count == 0) {
			mutex_unlock(&binder_procs_lock);
			ret = -EINVAL;
			goto err;
		}

		target_procs = kcalloc(target_procs_count,
				       sizeof(struct binder_proc *),
				       GFP_KERNEL);

		if (!target_procs) {
			mutex_unlock(&binder_procs_lock);
			ret = -ENOMEM;
			goto err;
		}

		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
			if (target_proc->pid != info.pid)
				continue;

			binder_inner_proc_lock(target_proc);
			target_proc->tmp_ref++;
			binder_inner_proc_unlock(target_proc);

			target_procs[i++] = target_proc;
		}
		mutex_unlock(&binder_procs_lock);

		for (i = 0; i < target_procs_count; i++) {
			if (ret >= 0)
				ret = binder_ioctl_freeze(&info,
							  target_procs[i]);

			binder_proc_dec_tmpref(target_procs[i]);
		}

		kfree(target_procs);

		if (ret < 0)
			goto err;
		break;
	}
	case BINDER_GET_FROZEN_INFO: {
		struct binder_frozen_status_info info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_freezer_info(&info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
		uint32_t enable;

		if (copy_from_user(&enable, ubuf, sizeof(enable))) {
			ret = -EFAULT;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->oneway_spam_detection_enabled = (bool)enable;
		binder_inner_proc_unlock(proc);
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper_need_return = false;
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -EINTR)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}

static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}

static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	binder_alloc_vma_close(&proc->alloc);
}

static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};

static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct binder_proc *proc = filp->private_data;

	if (proc->tsk != current->group_leader)
		return -EINVAL;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     __func__, proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
		       proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
		return -EPERM;
	}
	vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
	vma->vm_flags &= ~VM_MAYWRITE;

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	return binder_alloc_mmap_handler(&proc->alloc, vma);
}

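/*
 * open() handler: allocate and initialize the per-process binder_proc
 * (wrapped in binder_proc_ext), inherit the opener's scheduling policy
 * when binder supports it, and register the proc in binder_procs along
 * with its debugfs/binderfs log entries.
 */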
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc, *itr;
	struct binder_proc_ext *eproc;
	struct binder_device *binder_dev;
	struct binderfs_info *info;
	struct dentry *binder_binderfs_dir_entry_proc = NULL;
	bool existing_pid = false;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
		     current->group_leader->pid, current->pid);

	eproc = kzalloc(sizeof(*eproc), GFP_KERNEL);
	proc = &eproc->proc;
	if (proc == NULL)
		return -ENOMEM;
	spin_lock_init(&proc->inner_lock);
	spin_lock_init(&proc->outer_lock);
	get_task_struct(current->group_leader);
	proc->tsk = current->group_leader;
	eproc->cred = get_cred(filp->f_cred);
	INIT_LIST_HEAD(&proc->todo);
	init_waitqueue_head(&proc->freeze_wait);
	if (binder_supported_policy(current->policy)) {
		proc->default_priority.sched_policy = current->policy;
		proc->default_priority.prio = current->normal_prio;
	} else {
		proc->default_priority.sched_policy = SCHED_NORMAL;
		proc->default_priority.prio = NICE_TO_PRIO(0);
	}

	/* binderfs stashes devices in i_private */
	if (is_binderfs_device(nodp)) {
		binder_dev = nodp->i_private;
		info = nodp->i_sb->s_fs_info;
		binder_binderfs_dir_entry_proc = info->proc_log_dir;
	} else {
		binder_dev = container_of(filp->private_data,
					  struct binder_device, miscdev);
	}
	refcount_inc(&binder_dev->ref);
	proc->context = &binder_dev->context;
	binder_alloc_init(&proc->alloc);

	binder_stats_created(BINDER_STAT_PROC);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	INIT_LIST_HEAD(&proc->waiting_threads);
	filp->private_data = proc;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == proc->pid) {
			existing_pid = true;
			break;
		}
	}
	hlist_add_head(&proc->proc_node, &binder_procs);
	mutex_unlock(&binder_procs_lock);
	trace_android_vh_binder_preset(&binder_procs, &binder_procs_lock);
	if (binder_debugfs_dir_entry_proc && !existing_pid) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts.
		 * Only create for the first PID to avoid debugfs log spamming
		 * The printing code will anyway print all contexts for a given
		 * PID so this is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&proc_fops);
	}

	if (binder_binderfs_dir_entry_proc && !existing_pid) {
		char strbuf[11];
		struct dentry *binderfs_entry;

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * Similar to debugfs, the process specific log file is shared
		 * between contexts. Only create for the first PID.
		 * This is ok since same as debugfs, the log file will contain
		 * information on all contexts of a given PID.
		 */
		binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
			strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
		if (!IS_ERR(binderfs_entry)) {
			proc->binderfs_entry = binderfs_entry;
		} else {
			int error;

			error = PTR_ERR(binderfs_entry);
			pr_warn("Unable to create file %s in binderfs (error %d)\n",
				strbuf, error);
		}
	}

	return 0;
}

static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}

static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper_need_return = true;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	binder_inner_proc_unlock(proc);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}

static int binder_release(struct inode *nodp, struct file *filp)
|
|
{
|
|
struct binder_proc *proc = filp->private_data;
|
|
|
|
debugfs_remove(proc->debugfs_entry);
|
|
|
|
if (proc->binderfs_entry) {
|
|
binderfs_remove_file(proc->binderfs_entry);
|
|
proc->binderfs_entry = NULL;
|
|
}
|
|
|
|
binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
|
|
|
|
return 0;
|
|
}
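
/*
 * Release one node of a dying process: flush its async work, then
 * either free it outright (no remaining refs) or move it to the
 * global dead-nodes list and queue BINDER_WORK_DEAD_BINDER for every
 * ref that requested a death notification.
 */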
static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
	struct binder_proc *proc = node->proc;

	binder_release_work(proc, &node->async_todo);

	binder_node_lock(node);
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(&node->work);
	/*
	 * The caller must have taken a temporary ref on the node.
	 */
	BUG_ON(!node->tmp_refs);
	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		binder_free_node(node);

		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	binder_inner_proc_unlock(proc);

	spin_lock(&binder_dead_nodes_lock);
	hlist_add_head(&node->dead_node, &binder_dead_nodes);
	spin_unlock(&binder_dead_nodes_lock);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;
		/*
		 * Need the node lock to synchronize
		 * with new notification requests and the
		 * inner lock to synchronize with queued
		 * death notifications.
		 */
		binder_inner_proc_lock(ref->proc);
		if (!ref->death) {
			binder_inner_proc_unlock(ref->proc);
			continue;
		}

		death++;

		BUG_ON(!list_empty(&ref->death->work.entry));
		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
		binder_enqueue_work_ilocked(&ref->death->work,
					    &ref->proc->todo);
		binder_wakeup_proc_ilocked(ref->proc);
		binder_inner_proc_unlock(ref->proc);
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);
	binder_node_unlock(node);
	binder_put_node(node);

	return refs;
}
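
/*
 * Tear down a binder_proc once its last fd reference is gone: unlink
 * it from binder_procs, drop a context-manager claim if it held one,
 * release every thread, node and ref, and flush any work still queued
 * on the process. A temporary ref keeps the proc alive until the
 * final binder_proc_dec_tmpref().
 */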
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

	mutex_lock(&binder_procs_lock);
	hlist_del(&proc->proc_node);
	mutex_unlock(&binder_procs_lock);

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}
	mutex_unlock(&context->context_mgr_node_lock);
	binder_inner_proc_lock(proc);
	/*
	 * Make sure proc stays alive after we
	 * remove all the threads
	 */
	proc->tmp_ref++;

	proc->is_dead = true;
	proc->is_frozen = false;
	proc->sync_recv = false;
	proc->async_recv = false;
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		binder_inner_proc_unlock(proc);
		threads++;
		active_transactions += binder_thread_release(proc, thread);
		binder_inner_proc_lock(proc);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		/*
		 * take a temporary ref on the node before
		 * calling binder_node_release() which will either
		 * kfree() the node or call binder_put_node()
		 */
		binder_inc_node_tmpref_ilocked(node);
		rb_erase(&node->rb_node, &proc->nodes);
		binder_inner_proc_unlock(proc);
		incoming_refs = binder_node_release(node, incoming_refs);
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);

	outgoing_refs = 0;
	binder_proc_lock(proc);
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_cleanup_ref_olocked(ref);
		binder_proc_unlock(proc);
		binder_free_ref(ref);
		binder_proc_lock(proc);
	}
	binder_proc_unlock(proc);

	binder_release_work(proc, &proc->todo);
	binder_release_work(proc, &proc->delivered_death);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions);

	binder_proc_dec_tmpref(proc);
}
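
/*
 * All deferred work (flush/release) funnels through a single
 * system-workqueue item: binder_defer_work() sets a bit in
 * proc->deferred_work and queues the proc; this function drains
 * the list one proc at a time until it is empty.
 */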
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;

	int defer;

	do {
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					   struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */
	} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
			       &binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}

static void print_binder_transaction_ilocked(struct seq_file *m,
					     struct binder_proc *proc,
					     const char *prefix,
					     struct binder_transaction *t)
{
	struct binder_proc *to_proc;
	struct binder_buffer *buffer = t->buffer;

	spin_lock(&t->lock);
	trace_android_vh_binder_print_transaction_info(m, proc, prefix, t);
	to_proc = t->to_proc;
	seq_printf(m,
		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   to_proc ? to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority.sched_policy,
		   t->priority.prio, t->need_reply);
	spin_unlock(&t->lock);

	if (proc != to_proc) {
		/*
		 * Can only safely deref buffer if we are holding the
		 * correct proc inner lock for this node
		 */
		seq_puts(m, "\n");
		return;
	}

	if (buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (buffer->target_node)
		seq_printf(m, " node %d", buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %pK\n",
		   buffer->data_size, buffer->offsets_size,
		   buffer->user_data);
}

static void print_binder_work_ilocked(struct seq_file *m,
				      struct binder_proc *proc,
				      const char *prefix,
				      const char *transaction_prefix,
				      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction_ilocked(
				m, proc, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}
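
/*
 * Print one thread and everything queued on it. If the thread turned
 * out to be idle (nothing printed past the header) and print_always
 * is not set, rewind m->count so the header is dropped again.
 */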
static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
		   thread->pid, thread->looper,
		   thread->looper_need_return,
		   atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, "    ",
					  "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}
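
/*
 * "nilocked" in the name means the caller holds both the node lock
 * and the node's proc inner lock; dead nodes have no proc, hence the
 * node->proc check before walking async_todo.
 */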
static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->sched_policy, node->min_priority,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, "    ",
						  "    pending async transaction", w);
	}
}

static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
	binder_node_lock(ref->node);
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}

static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (!print_all && !node->has_async_transaction)
			continue;

		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
		binder_inc_node_tmpref_ilocked(node);
		/* Need to drop inner lock to take node lock */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							     struct binder_ref,
							     rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, "  ",
					  "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}
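
/*
 * String tables for the stats files: indexed by the BC_ and BR_
 * protocol opcodes and by the BINDER_STAT_ object types. The
 * BUILD_BUG_ON()s in print_binder_stats() keep them in step with
 * the corresponding stats arrays.
 */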
static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY",
	"BR_FROZEN_REPLY",
	"BR_ONEWAY_SPAM_SUSPECT",
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};

static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}

static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	binder_alloc_print_pages(m, &proc->alloc);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}

static int state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}
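
/*
 * The transaction log is a fixed-size ring: log->cur counts entries
 * ever written, so once it wraps (log->full) we start printing at the
 * oldest slot, (cur + 1) % ARRAY_SIZE(log->entry), and emit every slot.
 */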
static int transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}

const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

DEFINE_SHOW_ATTRIBUTE(state);
DEFINE_SHOW_ATTRIBUTE(stats);
DEFINE_SHOW_ATTRIBUTE(transactions);
DEFINE_SHOW_ATTRIBUTE(transaction_log);

const struct binder_debugfs_entry binder_debugfs_entries[] = {
	{
		.name = "state",
		.mode = 0444,
		.fops = &state_fops,
		.data = NULL,
	},
	{
		.name = "stats",
		.mode = 0444,
		.fops = &stats_fops,
		.data = NULL,
	},
	{
		.name = "transactions",
		.mode = 0444,
		.fops = &transactions_fops,
		.data = NULL,
	},
	{
		.name = "transaction_log",
		.mode = 0444,
		.fops = &transaction_log_fops,
		.data = &binder_transaction_log,
	},
	{
		.name = "failed_transaction_log",
		.mode = 0444,
		.fops = &transaction_log_fops,
		.data = &binder_transaction_log_failed,
	},
	{} /* terminator */
};
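
/*
 * Register one binder device named in binder_devices_param as a
 * dynamic misc character device and add it to the global
 * binder_devices list.
 */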
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	refcount_set(&binder_device->ref, 1);
	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}

static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;
	char *device_names = NULL;

	ret = binder_alloc_shrinker_init();
	if (ret)
		return ret;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root) {
		const struct binder_debugfs_entry *db_entry;

		binder_for_each_debugfs_entry(db_entry)
			debugfs_create_file(db_entry->name,
					    db_entry->mode,
					    binder_debugfs_dir_entry_root,
					    db_entry->data,
					    db_entry->fops);

		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						binder_debugfs_dir_entry_root);
	}

	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
	    strcmp(binder_devices_param, "") != 0) {
		/*
		 * Copy the module_parameter string, because we don't want to
		 * tokenize it in-place.
		 */
		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
		if (!device_names) {
			ret = -ENOMEM;
			goto err_alloc_device_names_failed;
		}

		device_tmp = device_names;
		while ((device_name = strsep(&device_tmp, ","))) {
			ret = init_binder_device(device_name);
			if (ret)
				goto err_init_binder_device_failed;
		}
	}

	ret = init_binderfs();
	if (ret)
		goto err_init_binder_device_failed;

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
	binder_alloc_shrinker_exit();

	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"
EXPORT_TRACEPOINT_SYMBOL_GPL(binder_transaction_received);

MODULE_LICENSE("GPL v2");