
support susfs and disable kperfmon

j7b3y 3 weeks ago
parent
commit
9e449c3f83

+ 1 - 1
KernelSU-Next

@@ -1 +1 @@
-Subproject commit 5ca5e2b0275a0e1dc7bffec10a7a22ead8fa2da4
+Subproject commit 81dff86d0298da936024c9702f4db01fde1f7200

+ 20 - 0
arch/arm64/configs/e1q_gki_defconfig

@@ -7534,3 +7534,23 @@ CONFIG_ARCH_USE_MEMTEST=y
 #
 # end of Rust hacking
 # end of Kernel hacking
+
+# KernelSU-Next
+CONFIG_KSU=y
+# SuSFS
+CONFIG_KSU_SUSFS=y
+CONFIG_KSU_SUSFS_HAS_MAGIC_MOUNT=y
+CONFIG_KSU_SUSFS_SUS_PATH=n
+CONFIG_KSU_SUSFS_SUS_MOUNT=y
+CONFIG_KSU_SUSFS_AUTO_ADD_SUS_KSU_DEFAULT_MOUNT=y
+CONFIG_KSU_SUSFS_AUTO_ADD_SUS_BIND_MOUNT=y
+CONFIG_KSU_SUSFS_SUS_KSTAT=y
+CONFIG_KSU_SUSFS_SUS_OVERLAYFS=y
+CONFIG_KSU_SUSFS_TRY_UMOUNT=y
+CONFIG_KSU_SUSFS_AUTO_ADD_TRY_UMOUNT_FOR_BIND_MOUNT=y
+CONFIG_KSU_SUSFS_SPOOF_UNAME=y
+CONFIG_KSU_SUSFS_ENABLE_LOG=y
+CONFIG_KSU_SUSFS_HIDE_KSU_SUSFS_SYMBOLS=y
+CONFIG_KSU_SUSFS_SPOOF_CMDLINE_OR_BOOTCONFIG=y
+CONFIG_KSU_SUSFS_OPEN_REDIRECT=y
+CONFIG_KSU_SUSFS_SUS_SU=y

+ 0 - 1
drivers/Kconfig

@@ -257,7 +257,6 @@ source "drivers/input/sec_input/Kconfig" # ADDED BY LEGO AUTOMATICALLY: DO NOT S
 source "drivers/net/dropdump/Kconfig" # ADDED BY LEGO AUTOMATICALLY: DO NOT SUBMIT
 source "drivers/vibrator/common/vib_info/Kconfig" # ADDED BY LEGO AUTOMATICALLY: DO NOT SUBMIT
 source "drivers/sti/common/Kconfig" # ADDED BY LEGO AUTOMATICALLY: DO NOT SUBMIT
-source "drivers/kperfmon/Kconfig" # ADDED BY LEGO AUTOMATICALLY: DO NOT SUBMIT
 source "drivers/vibrator/common/inputff/Kconfig" # ADDED BY LEGO AUTOMATICALLY: DO NOT SUBMIT
 source "drivers/usb/notify/Kconfig" # ADDED BY LEGO AUTOMATICALLY: DO NOT SUBMIT
 source "drivers/usb/vendor_notify/Kconfig" # ADDED BY LEGO AUTOMATICALLY: DO NOT SUBMIT

+ 0 - 1
drivers/Makefile

@@ -205,7 +205,6 @@ obj-y += usb/common/vbus_notifier/ # ADDED BY LEGO AUTOMATICALLY: DO NOT SUBMIT
 obj-y += input/sec_input/ # ADDED BY LEGO AUTOMATICALLY: DO NOT SUBMIT
 obj-y += net/dropdump/ # ADDED BY LEGO AUTOMATICALLY: DO NOT SUBMIT
 obj-y += vibrator/common/vib_info/ # ADDED BY LEGO AUTOMATICALLY: DO NOT SUBMIT
-obj-y += kperfmon/ # ADDED BY LEGO AUTOMATICALLY: DO NOT SUBMIT
 obj-y += vibrator/common/inputff/ # ADDED BY LEGO AUTOMATICALLY: DO NOT SUBMIT
 obj-y += usb/notify/ # ADDED BY LEGO AUTOMATICALLY: DO NOT SUBMIT
 obj-y += usb/vendor_notify/ # ADDED BY LEGO AUTOMATICALLY: DO NOT SUBMIT

+ 0 - 26
drivers/kperfmon/Kconfig

@@ -1,26 +0,0 @@
-#
-# Samsung Performance Logging system
-#
-
-menu "samsung Performace manager"
-
-config KPERFMON
-	bool "Enable performance log"
-	default y
-	help
-		Samsung performance log(OLOG).
-		Say Y here if enable performance olog driver to do logging system resources.
-		When some delay occurs in the kernel, native or user user space,
-		the logging information should be restored into the system.
-
-config KPERFMON_BUILD
-	tristate "Building tyoe of performance log"
-	default y
-	help
-		Samsung performance log(OLOG).
-		This is to set a build type for module or build-in.
-		Say m here if you want a module of performance olog driver.
-		Say y here if you want build-in object of the performance olog driver.
-
-endmenu
-

+ 0 - 51
drivers/kperfmon/Makefile

@@ -1,51 +0,0 @@
-#
-# Makefile for the Linux kernel device drivers.
-#
-# Sep 2018, Binse Park <[email protected]>
-# Rewritten to use lists instead of if-statements.
-#
-
-FLAG=1
-
-ifneq ($(CONFIG_KPERFMON), y)
-FLAG=0
-$(info kperfmon_DUMMY="CONFIG_KPERFMON is off.")
-endif
-
-ifneq ($(shell [ -e $(srctree)/include/linux/olog.pb.h ] && echo exist), exist)
-$(info kperfmon_DUMMY="olog.pb.h file is missing... retrying")
-
-$(info kperfmon_DUMMY="$(shell cp -f $(srctree)/../../frameworks/base/proto/src/olog.proto  $(srctree)/drivers/kperfmon/)")
-$(info kperfmon_DUMMY="$(shell cp -f $(srctree)/../../vendor/samsung/system/libperflog/aprotoc  $(srctree)/drivers/kperfmon/)")
-$(info kperfmon_DUMMY="$(shell cp -f $(srctree)/../../system/logging/libperflog/aprotoc  $(srctree)/drivers/kperfmon/)")
-$(info kperfmon_DUMMY="$(shell cp -f $(srctree)/../frameworks/base/proto/src/olog.proto  $(srctree)/drivers/kperfmon/)")
-$(info kperfmon_DUMMY="$(shell cp -f $(srctree)/../vendor/samsung/system/libperflog/aprotoc  $(srctree)/drivers/kperfmon/)")
-$(info kperfmon_DUMMY="$(shell chmod 777 $(srctree)/drivers/kperfmon/aprotoc)")
-$(info kperfmon_DUMMY="$(shell $(srctree)/drivers/kperfmon/aprotoc --perflog_out=$(srctree)/drivers/kperfmon/ --proto_path=$(srctree)/drivers/kperfmon/ $(srctree)/drivers/kperfmon/olog.proto)")
-$(info kperfmon_DUMMY="$(shell cp -f $(srctree)/drivers/kperfmon/olog.pb.h  $(srctree)/include/linux/)")
-#$(info kperfmon_DUMMY="$(shell ls $(srctree)/drivers/kperfmon/*)")
-#$(info kperfmon_DUMMY="$(shell ls $(srctree)/include/linux/olog*)")
-
-ifneq ($(shell [ -e $(srctree)/include/linux/olog.pb.h ] && echo exist), exist)
-$(info kperfmon_DUMMY="olog.pb.h file is missing... again")
-FLAG=0
-endif
-endif
-
-$(info kperfmon_DUMMY="$(shell cp -f $(srctree)/../../system/core/liblog/include/log/perflog.h  $(srctree)/include/linux/)")
-$(info kperfmon_DUMMY="$(shell cp -f $(srctree)/../../system/logging/liblog/include/log/perflog.h  $(srctree)/include/linux/)")
-$(info kperfmon_DUMMY="$(shell cp -f $(srctree)/../system/core/liblog/include/log/perflog.h  $(srctree)/include/linux/)")
-
-ifneq ($(shell [ -e $(srctree)/drivers/kperfmon/perflog.h ] && echo exist), exist)
-FLAG=0
-$(info kperfmon_DUMMY="perflog.h file is missing.")
-endif
-
-ifeq ($(FLAG), 1)
-$(info kperfmon_DUMMY="$(shell cp -f $(srctree)/drivers/kperfmon/ologk.h $(srctree)/include/linux/)")
-ifeq ($(CONFIG_KPERFMON_BUILD), y)
-	obj-y		+= kperfmon.o
-else
-	obj-m		+= kperfmon.o
-endif
-endif

+ 0 - 919
drivers/kperfmon/kperfmon.c

@@ -1,919 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-//
-// Samsung's performance logging
-//
-// Copyright (c) 2017 Samsung Electronics Co., Ltd
-//              http://www.samsung.com
-//
-// Binse Park <[email protected]>
-
-#define KPERFMON_KERNEL
-#include <linux/ologk.h>
-#undef KPERFMON_KERNEL
-
-#include <linux/version.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/proc_fs.h>
-#include <linux/time.h>
-#include <linux/rtc.h>
-#include <linux/mutex.h>
-#include <linux/uaccess.h>
-#include <linux/workqueue.h>
-#include <linux/slab.h>
-#include <linux/sec_debug.h>
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
-#define KPERFMON_KERNEL
-#include "perflog.h"
-#undef KPERFMON_KERNEL
-#else
-#include "perflog.h"
-#endif
-#if !defined(KPERFMON_KMALLOC)
-#include <linux/vmalloc.h>
-#endif
-#include <linux/mm.h>
-#include <linux/sched/cputime.h>
-#include <linux/sched/signal.h>
-#include <asm/uaccess.h>
-#include <asm/stacktrace.h>
-#include <linux/uidgid.h>
-
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
-#define rtc_time_to_tm(a, b) rtc_time64_to_tm(a, b)
-
-struct timeval {
-	time64_t    tv_sec;
-	long        tv_usec;
-};
-#endif
-
-
-#include "kperfmon.h"
-
-#define	PROC_NAME			"kperfmon"
-#if defined(KPERFMON_KMALLOC)
-#define BUFFER_SIZE			(5 * 1024)
-#else
-#define MEM_SZ_4G			0x100000000
-#define BUFFER_SIZE_2M			(2 * 1024 * 1024)
-#define BUFFER_SIZE_5M			(5 * 1024 * 1024)
-#define IS_MEMORY_UNDER_4GB(x)		(x <= (MEM_SZ_4G >> PAGE_SHIFT))
-#endif
-
-#define HEADER_SIZE			PERFLOG_HEADER_SIZE
-#define DEBUGGER_SIZE			32
-#define STREAM_SIZE			(PERFLOG_BUFF_STR_MAX_SIZE + PERFLOG_HEADER_SIZE)
-#define READ_BUFFER_SIZE		(STREAM_SIZE + 100)
-
-#define MAX_DEPTH_OF_CALLSTACK		20
-#define MAX_MUTEX_RAWDATA		20
-
-#define SIGNAL_35			35
-#define SIGNAL_OLOG			5209
-
-#if defined(USE_MONITOR)
-#define MAX_MUTEX_RAWDATA_DIGIT		2
-#define DIGIT_UNIT			100000000
-#endif
-
-#define KPERFMON_VERSION_LENGTH		100
-
-struct tRingBuffer buffer = {0, };
-//const struct file_operations;
-
-struct t_before_print *before_list_cur_pos;
-static LIST_HEAD(before_print_list);
-
-void CreateBuffer(struct tRingBuffer *buffer,
-			unsigned long length)
-{
-	if (buffer->data != 0)
-		return;
-
-#if defined(KPERFMON_KMALLOC)
-	buffer->data = kmalloc(length + 1, GFP_KERNEL);
-#else
-	buffer->data = vmalloc(length + 1);
-#endif
-	if (buffer->data == 0) {
-		pr_info("kperfmon error [%s] buffer->data is null!!!\n",
-			__func__);
-		return;
-	}
-
-	buffer->length = length;
-	buffer->start = -1;
-	buffer->end = 0;
-	buffer->status = FLAG_NOTHING;
-	buffer->debugger = 0;
-
-	memset(buffer->data, 0, length + 1);
-
-	mutex_init(&buffer->mutex);
-}
-
-void DestroyBuffer(struct tRingBuffer *buffer)
-{
-	if (buffer->data != 0) {
-#if defined(KPERFMON_KMALLOC)
-		kfree(buffer->data);
-#else
-		vfree(buffer->data);
-#endif
-		buffer->data = 0;
-	}
-}
-
-void WriteBuffer(struct tRingBuffer *buffer,
-			byte *data,
-			unsigned long length)
-{
-	long RemainSize = 0;
-
-	if (length < 0)
-		return;
-
-	if (buffer->length < buffer->end + length) {
-		long FirstSize = buffer->length - buffer->end;
-
-		WriteBuffer(buffer, data, FirstSize);
-		WriteBuffer(buffer, data + FirstSize, length - FirstSize);
-		return;
-	}
-
-	RemainSize = (buffer->start < buffer->end) ?
-			(buffer->length - buffer->end) :
-			(buffer->start - buffer->end);
-
-	while (RemainSize < length) {
-		int bstart = (buffer->start + HEADER_SIZE - 1);
-		int cur_length = *(buffer->data + bstart % buffer->length);
-
-		buffer->start += HEADER_SIZE + cur_length;
-		buffer->start %= buffer->length;
-
-		RemainSize = (buffer->start < buffer->end) ?
-			(buffer->length - buffer->end) :
-			(buffer->start - buffer->end);
-	}
-
-	memcpy(buffer->data + buffer->end, data, length);
-	//copy_from_user(buffer->data + buffer->end, data, length);
-
-	buffer->end += length;
-
-	if (buffer->start < 0)
-		buffer->start = 0;
-
-	if (buffer->end >= buffer->length)
-		buffer->end = 0;
-
-	if (buffer->status != FLAG_READING)
-		buffer->position = buffer->start;
-}
-
-void ReadBuffer(struct tRingBuffer *buffer,
-			byte *data,
-			unsigned long *length)
-{
-	if (buffer->start < buffer->end) {
-		*length = buffer->end - buffer->start;
-		memcpy(data, buffer->data + buffer->start, *length);
-		//copy_to_user(data, (buffer->data + buffer->start), *length);
-	} else {
-		*length = buffer->length - buffer->start;
-		memcpy(data, buffer->data + buffer->start, *length);
-		memcpy(data + *length, buffer->data, buffer->end);
-		//copy_to_user(data, (buffer->data + buffer->start), *length);
-		//copy_to_user(data + *length, (buffer->data), buffer->end);
-	}
-}
-
-void ReadBufferByPosition(struct tRingBuffer *buffer,
-					byte *data,
-					unsigned long *length,
-					unsigned long start,
-					unsigned long end)
-{
-	if (start < end) {
-		*length = end - start;
-
-		if (*length >= PERFLOG_PACKET_SIZE) {
-			*length = 0;
-			return;
-		}
-
-		memcpy(data, buffer->data + start, *length);
-	} else if (buffer->length > start) {
-		*length = buffer->length - start;
-
-		if ((*length + end) >= PERFLOG_PACKET_SIZE) {
-			*length = 0;
-			return;
-		}
-
-		memcpy(data, buffer->data + start, *length);
-		memcpy(data + *length, buffer->data, end);
-	} else {
-		*length = 0;
-	}
-}
-
-void GetNext(struct tRingBuffer *buffer)
-{
-	int bstart = (buffer->position + HEADER_SIZE - 1);
-	int cur_length = *(buffer->data + bstart % buffer->length);
-
-	buffer->position += HEADER_SIZE + cur_length;
-	buffer->position %= buffer->length;
-}
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
-static const struct proc_ops kperfmon_fops = {
-	.proc_open = kperfmon_open,
-	.proc_read = kperfmon_read,
-	.proc_write = kperfmon_write,
-};
-#else
-static const struct file_operations kperfmon_fops = {
-	.read = kperfmon_read,
-	.write = kperfmon_write,
-};
-#endif
-
-void set_kperfmon_debugger_function(char *writebuffer)
-{
-	buffer.debugger = !buffer.debugger;
-
-	pr_info("%s() - buffer.debugger : %d\n",
-		__func__,
-		(int)buffer.debugger);
-}
-
-void process_version_function(char *writebuffer)
-{
-	struct t_before_print *pprinter = NULL;
-	int length = 0;
-
-	if (writebuffer == NULL)
-		return;
-
-	pprinter = kmalloc(sizeof(struct t_before_print), GFP_ATOMIC);
-
-	if (pprinter == NULL)
-		return;
-
-	length = strlen(writebuffer) + 1;
-	pprinter->pdata = kmalloc(length, GFP_ATOMIC);
-
-	if (pprinter->pdata == NULL) {
-		kfree(pprinter);
-		return;
-	}
-
-	strlcpy(pprinter->pdata, writebuffer, length);
-
-	list_add_tail(&pprinter->list, &before_print_list);
-}
-
-int ops_write_buffer(struct tRingBuffer *buffer,
-			byte *writebuffer, unsigned long length)
-{
-	unsigned long DataLength;
-
-	if (buffer == NULL)
-		return length;
-
-	if (writebuffer[HEADER_SIZE - 1] > PERFLOG_BUFF_STR_MAX_SIZE)
-		writebuffer[HEADER_SIZE - 1] = PERFLOG_BUFF_STR_MAX_SIZE;
-
-	DataLength = writebuffer[HEADER_SIZE - 1] + HEADER_SIZE;
-
-	mutex_lock(&buffer->mutex);
-	WriteBuffer(buffer, writebuffer, DataLength);
-	mutex_unlock(&buffer->mutex);
-#if defined(KPERFMON_DEBUG)
-	{
-		int i;
-
-		for (i = 0 ; i < 110 ; i++) {
-			pr_info("%s(buffer.data[%d] : %c\n",
-				__func__,
-				i,
-				buffer.data[i]);
-		}
-	}
-#endif
-	return length;
-}
-
-int ops_process_command(struct tRingBuffer *buffer,
-			byte *writebuffer, unsigned long length)
-{
-	int idx;
-	int max_commands = (int)(sizeof(commands) / sizeof(struct t_command));
-
-	for (idx = 0 ; idx < max_commands ; idx++) {
-		int cmd_length = strlen(commands[idx].command);
-
-		if (cmd_length >= HEADER_SIZE + PERFLOG_BUFF_STR_MAX_SIZE)
-			continue;
-
-		if (!strncmp(writebuffer, commands[idx].command, cmd_length)
-		    && commands[idx].func != NULL
-		    && strlen(writebuffer) > cmd_length) {
-			commands[idx].func(writebuffer);
-			return length;
-		}
-	}
-
-	return length;
-}
-
-int kperfmon_open(struct inode *finode, struct file *filp)
-{
-	return 0;
-}
-
-ssize_t kperfmon_write(struct file *filp,
-				const char __user *data,
-				size_t length,
-				loff_t *loff_data)
-{
-	byte writebuffer[HEADER_SIZE + PERFLOG_BUFF_STR_MAX_SIZE + SH_IDX_PACKET + 1] = {0, };
-	unsigned long DataLength = length;
-	int max_write_ops = (int)(sizeof(write_opts) / sizeof(void *));
-	int type_of_data;
-
-	if (!buffer.data) {
-		pr_info("%s() - Error buffer allocation is failed!!!\n",
-			__func__);
-		return length;
-	}
-
-	if (length <= 0) {
-		pr_info("%s() - Error length : %d", __func__, (int) length);
-		return length;
-	}
-
-	if (DataLength > (HEADER_SIZE + PERFLOG_BUFF_STR_MAX_SIZE + SH_IDX_PACKET))
-		DataLength = HEADER_SIZE + PERFLOG_BUFF_STR_MAX_SIZE + SH_IDX_PACKET;
-
-	if (copy_from_user(writebuffer, data, DataLength))
-		return length;
-
-	// [[[ This will be replaced with below code
-	type_of_data = writebuffer[SH_TYPE];
-
-	if (type_of_data < max_write_ops && type_of_data >= 0)
-		return write_opts[type_of_data](&buffer,
-			writebuffer + SH_IDX_PACKET,
-			DataLength - SH_IDX_PACKET);
-	// This will be replaced with below code ]]]
-
-	type_of_data -= (int)'0';
-
-	if (type_of_data < max_write_ops && type_of_data >= 0)
-		return write_opts[type_of_data](&buffer,
-			writebuffer + SH_IDX_PACKET,
-			DataLength - SH_IDX_PACKET);
-
-	return length;
-}
-
-ssize_t kperfmon_read(struct file *filp,
-				char __user *data,
-				size_t count,
-				loff_t *loff_data)
-{
-	unsigned long length;
-	byte readbuffer[READ_BUFFER_SIZE] = {0, };
-	union _uPLogPacket readlogpacket;
-	char timestamp[32] = {0, };
-
-	unsigned long start = 0;
-	unsigned long end = 0;
-
-	if (!buffer.data) {
-		pr_info("%s() Error buffer allocation is failed!\n", __func__);
-		return 0;
-	}
-#if defined(USE_MONITOR)
-	if (buffer.position == buffer.start) {
-		char mutex_log[PERFLOG_BUFF_STR_MAX_SIZE + 1] = {0, };
-		int i, idx_mutex_log = 0;
-
-		idx_mutex_log += snprintf((mutex_log + idx_mutex_log),
-					PERFLOG_BUFF_STR_MAX_SIZE - idx_mutex_log,
-					"mutex test ");
-
-		for (i = 0;
-		    i <= MAX_MUTEX_RAWDATA &&
-		    idx_mutex_log < (PERFLOG_BUFF_STR_MAX_SIZE - 20);
-		    i++) {
-
-			int digit, flag = 0;
-
-			mutex_log[idx_mutex_log++] = '[';
-			idx_mutex_log += snprintf((mutex_log + idx_mutex_log),
-						PERFLOG_BUFF_STR_MAX_SIZE - idx_mutex_log,
-						"%d",
-						i);
-			mutex_log[idx_mutex_log++] = ']';
-			mutex_log[idx_mutex_log++] = ':';
-			//idx_mutex_log += snprintf((mutex_log + idx_mutex_log),
-			//			PERFLOG_BUFF_STR_MAX_SIZE - idx_mutex_log,
-			//			"%d",
-			//			mutex_rawdata[i]);
-			//mutex_rawdata[i][1] = 99999999;
-			for (digit = (MAX_MUTEX_RAWDATA_DIGIT-1) ; digit >= 0 ; digit--) {
-				if (flag) {
-					idx_mutex_log += snprintf((mutex_log + idx_mutex_log),
-								PERFLOG_BUFF_STR_MAX_SIZE - idx_mutex_log,
-								"%08u",
-								mutex_rawdata[i][digit]);
-				} else {
-					if (mutex_rawdata[i][digit] > 0) {
-						idx_mutex_log += snprintf((mutex_log + idx_mutex_log),
-									PERFLOG_BUFF_STR_MAX_SIZE - idx_mutex_log,
-									"%u",
-									mutex_rawdata[i][digit]);
-						flag = 1;
-					}
-				}
-			}
-
-			if (!flag)
-				mutex_log[idx_mutex_log++] = '0';
-
-			mutex_log[idx_mutex_log++] = ' ';
-		}
-
-		_perflog(PERFLOG_EVT, PERFLOG_MUTEX, mutex_log);
-	}
-#endif
-	buffer.status = FLAG_READING;
-
-	mutex_lock(&buffer.mutex);
-
-	if (buffer.position == buffer.start) {
-
-		if (before_list_cur_pos !=
-		   list_last_entry(&before_print_list, typeof(*before_list_cur_pos), list)) {
-			before_list_cur_pos = list_next_entry(before_list_cur_pos, list);
-
-			if (before_list_cur_pos != 0 && before_list_cur_pos->pdata != 0) {
-				int length = snprintf(readbuffer,
-							READ_BUFFER_SIZE,
-							"%s\n",
-							(char *) before_list_cur_pos->pdata);
-
-				if (length <= 0 || copy_to_user(data, readbuffer, length)) {
-					pr_info("%s(copy_to_user(4) returned > 0)\n", __func__);
-					mutex_unlock(&buffer.mutex);
-					buffer.status = FLAG_NOTHING;
-					return 0;
-				}
-
-				mutex_unlock(&buffer.mutex);
-				return length;
-			}
-		}
-	}
-
-	if (buffer.position == buffer.end || buffer.start < 0) {
-		buffer.position = buffer.start;
-		mutex_unlock(&buffer.mutex);
-		buffer.status = FLAG_NOTHING;
-		before_list_cur_pos
-			= list_first_entry(&before_print_list, typeof(*before_list_cur_pos), list);
-		return 0;
-	}
-
-	start = buffer.position;
-	GetNext(&buffer);
-	end = buffer.position;
-
-	//printk("kperfmon_read(start : %d, end : %d)\n", (int)start, (int)end);
-
-	if (start == end) {
-		buffer.position = buffer.start;
-		mutex_unlock(&buffer.mutex);
-		buffer.status = FLAG_NOTHING;
-		return 0;
-	}
-
-	//ReadPacket.raw = &rawpacket;
-	ReadBufferByPosition(&buffer, readlogpacket.stream, &length, start, end);
-	mutex_unlock(&buffer.mutex);
-	//printk(KERN_INFO "kperfmon_read(length : %d)\n", (int)length);
-	//readlogpacket.stream[length++] = '\n';
-
-	if (length >= PERFLOG_PACKET_SIZE) {
-		length = PERFLOG_PACKET_SIZE - 1;
-	} else if (length == 0) {
-		return 0;
-	}
-
-	readlogpacket.stream[length] = 0;
-
-#if NOT_USED
-	change2localtime(timestamp, readlogpacket.itemes.timestemp_sec);
-#else
-	snprintf(timestamp, 32, "%02d-%02d %02d:%02d:%02d.%03d",
-			readlogpacket.itemes.timestamp.month,
-			readlogpacket.itemes.timestamp.day,
-			readlogpacket.itemes.timestamp.hour,
-			readlogpacket.itemes.timestamp.minute,
-			readlogpacket.itemes.timestamp.second,
-			readlogpacket.itemes.timestamp.msecond);
-
-	if (readlogpacket.itemes.type >= OlogTestEnum_Type_maxnum
-	    || readlogpacket.itemes.type < 0) {
-		readlogpacket.itemes.type = PERFLOG_LOG;
-	}
-
-	if (readlogpacket.itemes.id >= OlogTestEnum_ID_maxnum
-	    || readlogpacket.itemes.id < 0) {
-		readlogpacket.itemes.id = PERFLOG_UNKNOWN;
-	}
-
-	length = snprintf(readbuffer, READ_BUFFER_SIZE,
-				"[%s %d %5d %5d (%3d)][%s][%s] %s\n",
-				timestamp,
-				readlogpacket.itemes.type,
-				readlogpacket.itemes.pid,
-				readlogpacket.itemes.tid,
-				readlogpacket.itemes.context_length,
-				OlogTestEnum_Type_strings[readlogpacket.itemes.type],
-				OlogTestEnum_ID_strings[readlogpacket.itemes.id],
-				readlogpacket.itemes.context_buffer);
-
-
-	if (length > count)
-		length = count;
-
-	if (buffer.debugger && count > DEBUGGER_SIZE) {
-		char debugger[DEBUGGER_SIZE] = "______________________________";
-
-		snprintf(debugger, DEBUGGER_SIZE, "S:%010lu_E:%010lu_____", start, end);
-
-		if (length + DEBUGGER_SIZE > count)
-			length = count - DEBUGGER_SIZE;
-
-		if (copy_to_user(data, debugger, strnlen(debugger, DEBUGGER_SIZE))) {
-			pr_info("%s(copy_to_user(1) returned > 0)\n", __func__);
-			return 0;
-		}
-
-		if (copy_to_user(data + DEBUGGER_SIZE, readbuffer, length)) {
-			pr_info("%s(copy_to_user(2) returned > 0)\n", __func__);
-			return 0;
-		}
-
-		length += DEBUGGER_SIZE;
-	} else {
-		if (length <= 0 || copy_to_user(data, readbuffer, length)) {
-			pr_info("%s(copy_to_user(3) returned > 0)\n", __func__);
-			return 0;
-		}
-	}
-
-	//printk(KERN_INFO "kperfmon_read(count : %d)\n", count);
-
-
-	return length;
-#endif
-}
-
-static int __init kperfmon_init(void)
-{
-	struct proc_dir_entry *entry;
-	// char kperfmon_version[KPERFMON_VERSION_LENGTH] = {0, };
-
-#if defined(KPERFMON_KMALLOC)
-	CreateBuffer(&buffer, BUFFER_SIZE);
-#else
-	char *context_buffer_size;
-	struct sysinfo si;
-
-	/* getting the usable main memory size from sysinfo */
-	si_meminfo(&si);
-
-	if (IS_MEMORY_UNDER_4GB(si.totalram)) {
-		CreateBuffer(&buffer, BUFFER_SIZE_2M);
-		context_buffer_size = "kperfmon buffer size [2M]";
-	} else {
-		CreateBuffer(&buffer, BUFFER_SIZE_5M);
-		context_buffer_size = "kperfmon buffer size [5M]";
-	}
-#endif
-
-	if (!buffer.data) {
-		pr_info("%s() - Error buffer allocation is failed!!!\n", __func__);
-		return -ENOMEM;
-	}
-
-	entry = proc_create(PROC_NAME, 0660, NULL, &kperfmon_fops);
-
-	if (!entry) {
-		pr_info("%s() - Error creating entry in proc failed!!!\n", __func__);
-		DestroyBuffer(&buffer);
-		return -EBUSY;
-	}
-
-	/* Set file user (owner) to shell user UID (2000), to allow file access to both root group and shell as well */
-	proc_set_user(entry, KUIDT_INIT(2000), KGIDT_INIT(0));
-
-	/*dbg_level_is_low = (sec_debug_level() == ANDROID_DEBUG_LEVEL_LOW);*/
-
-	INIT_LIST_HEAD(&before_print_list);
-	before_list_cur_pos =
-		list_first_entry(&before_print_list, typeof(*before_list_cur_pos), list);
-	process_version_function("  ");
-	// snprintf(kperfmon_version, KPERFMON_VERSION_LENGTH, "kperfmon_version [1.0.1]   kperfmon_read : 0x%x,  kperfmon_write : 0x%x", kperfmon_read, kperfmon_write);
-	// process_version_function(kperfmon_version);
-#if !defined(KPERFMON_KMALLOC)
-	process_version_function(context_buffer_size);
-#endif
-
-	pr_info("%s()\n", __func__);
-
-	return 0;
-}
-
-static void __exit kperfmon_exit(void)
-{
-	DestroyBuffer(&buffer);
-	pr_info("%s()\n", __func__);
-}
-
-#if defined(USE_WORKQUEUE)
-static void ologk_workqueue_func(struct work_struct *work)
-{
-	struct t_ologk_work *workqueue = (struct t_ologk_work *)work;
-
-	if (work) {
-		mutex_lock(&buffer.mutex);
-		WriteBuffer(&buffer,
-			workqueue->writelogpacket.stream,
-			PERFLOG_HEADER_SIZE + workqueue->writelogpacket.itemes.context_length);
-		mutex_unlock(&buffer.mutex);
-
-		kfree((void *)work);
-	}
-}
-#endif
-
-//#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
-static inline void do_gettimeofday2(struct timeval *tv)
-{
-	struct timespec64 now;
-
-	ktime_get_real_ts64(&now);
-	tv->tv_sec = now.tv_sec;
-	tv->tv_usec = now.tv_nsec/1000;
-}
-//#endif /* LINUX_VER >= 5.0 */
-
-void _perflog(int type, int logid, const char *fmt, ...)
-{
-#if !defined(USE_WORKQUEUE)
-	union _uPLogPacket writelogpacket;
-#endif
-	struct rtc_time tm;
-	struct timeval time;
-	unsigned long local_time;
-#if defined(USE_WORKQUEUE)
-	struct t_ologk_work *workqueue = 0;
-#endif
-	va_list args;
-
-	va_start(args, fmt);
-
-	if (buffer.data == 0) {
-		va_end(args);
-		return;
-	}
-
-#if defined(USE_WORKQUEUE)
-	workqueue = kmalloc(sizeof(struct t_ologk_work), GFP_ATOMIC);
-
-	if (workqueue) {
-		struct _PLogPacket *pitemes = &workqueue->writelogpacket.itemes;
-
-		INIT_WORK((struct work_struct *)workqueue, ologk_workqueue_func);
-
-		do_gettimeofday2(&time);
-		local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
-		rtc_time_to_tm(local_time, &tm);
-
-		//printk(" @ (%04d-%02d-%02d %02d:%02d:%02d)\n",
-		//	tm.tm_year + 1900,
-		//	tm.tm_mon + 1,
-		//	tm.tm_mday,
-		//	tm.tm_hour,
-		//	tm.tm_min,
-		//	tm.tm_sec);
-
-		pitemes->timestamp.month = tm.tm_mon + 1;
-		pitemes->timestamp.day = tm.tm_mday;
-		pitemes->timestamp.hour = tm.tm_hour;
-		pitemes->timestamp.minute = tm.tm_min;
-		pitemes->timestamp.second = tm.tm_sec;
-		pitemes->timestamp.msecond = time.tv_usec / 1000;
-		pitemes->type = PERFLOG_LOG;
-		pitemes->id = logid;
-		pitemes->pid = current->pid;//getpid();
-		pitemes->tid = 0;//gettid();
-		pitemes->context_length = vscnprintf(
-						pitemes->context_buffer,
-						PERFLOG_BUFF_STR_MAX_SIZE,
-						fmt,
-						args);
-
-		if (pitemes->context_length > PERFLOG_BUFF_STR_MAX_SIZE)
-			pitemes->context_length = PERFLOG_BUFF_STR_MAX_SIZE;
-
-		schedule_work((struct work_struct *)workqueue);
-
-		//{
-		//	struct timeval end_time;
-		//	do_gettimeofday2(&end_time);
-		//	printk("ologk() execution time with workqueue : %ld us ( %ld - %ld )\n",
-		//			end_time.tv_usec - time.tv_usec,
-		//			end_time.tv_usec,
-		//			time.tv_usec);
-		//}
-	} else {
-		pr_info("%s : workqueue is not working\n", __func__);
-	}
-
-#else
-	do_gettimeofday2(&time);
-	local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
-	rtc_time_to_tm(local_time, &tm);
-
-	//printk(" @ (%04d-%02d-%02d %02d:%02d:%02d)\n",
-	//	tm.tm_year + 1900,
-	//	tm.tm_mon + 1,
-	//	tm.tm_mday,
-	//	tm.tm_hour,
-	//	tm.tm_min,
-	//	tm.tm_sec);
-
-	writelogpacket.itemes.timestamp.month = tm.tm_mon + 1;
-	writelogpacket.itemes.timestamp.day = tm.tm_mday;
-	writelogpacket.itemes.timestamp.hour = tm.tm_hour;
-	writelogpacket.itemes.timestamp.minute = tm.tm_min;
-	writelogpacket.itemes.timestamp.second = tm.tm_sec;
-	writelogpacket.itemes.timestamp.msecond = time.tv_usec / 1000;
-	writelogpacket.itemes.type = type;
-	writelogpacket.itemes.pid = current->pid;//getpid();
-	writelogpacket.itemes.tid = 0;//gettid();
-	writelogpacket.itemes.context_length
-		= vscnprintf(writelogpacket.itemes.context_buffer,
-							PERFLOG_BUFF_STR_MAX_SIZE,
-							fmt,
-							args);
-
-	if (writelogpacket.itemes.context_length > PERFLOG_BUFF_STR_MAX_SIZE)
-		writelogpacket.itemes.context_length = PERFLOG_BUFF_STR_MAX_SIZE;
-
-	mutex_lock(&buffer.mutex);
-	WriteBuffer(&buffer,
-			writelogpacket.stream,
-			PERFLOG_HEADER_SIZE + writelogpacket.itemes.context_length);
-	mutex_unlock(&buffer.mutex);
-
-	//{
-	//	struct timeval end_time;
-	//	do_gettimeofday2(&end_time);
-	//	printk(KERN_INFO "ologk() execution time : %ld us ( %ld - %ld )\n",
-	//			end_time.tv_usec - time.tv_usec,
-	//			end_time.tv_usec, time.tv_usec);
-	//}
-#endif
-
-	va_end(args);
-}
-
-// void get_callstack(char *buffer, int max_size, int max_count)
-// {
-// 	struct stackframe frame;
-// 	struct task_struct *tsk = current;
-// 	//int len;
-
-// 	if (!try_get_task_stack(tsk))
-// 		return;
-
-// 	frame.fp = (unsigned long)__builtin_frame_address(0);
-// 	frame.pc = (unsigned long)get_callstack;
-
-// #if defined(CONFIG_FUNCTION_GRAPH_TRACER)
-// 	frame.graph = tsk->curr_ret_stack;
-// #endif
-// #if NOT_USED // temporary for GKI
-// 	if (max_size > 0) {
-// 		int count = 0;
-
-// 		max_count += 3;
-
-// 		do {
-// 			if (count > 2) {
-// 				int len = snprintf(buffer, max_size, " %pS", (void *)frame.pc);
-
-// 				max_size -= len;
-// 				buffer += len;
-// 			}
-// 			count++;
-// 		} while (!unwind_frame(tsk, &frame) &&
-// 				max_size > 0 &&
-// 				max_count > count);
-
-// 		put_task_stack(tsk);
-// 	}
-// #endif
-// }
-
-void send_signal(void)
-{
-#if NOT_USED // temporary for GKI
-	siginfo_t info;
-
-	info.si_signo = SIGNAL_35;
-	info.si_errno = SIGNAL_OLOG;
-	info.si_code = SIGNAL_OLOG;
-	send_sig_info(SIGNAL_35, &info, current);
-#endif
-}
-
-void perflog_evt(int logid, int arg1)
-{
-#if defined(USE_MONITOR)
-	struct timeval start_time;
-	struct timeval end_time;
-
-	int digit = 0;
-
-	do_gettimeofday2(&start_time);
-#endif
-	if (arg1 < 0 || buffer.status != FLAG_NOTHING)
-		return;
-
-	if (arg1 > MAX_MUTEX_RAWDATA) {
-		char log_buffer[PERFLOG_BUFF_STR_MAX_SIZE];
-		int len;
-		u64 utime, stime;
-
-		task_cputime(current, &utime, &stime);
-
-		if (utime > 0) {
-			len = snprintf(log_buffer,
-					PERFLOG_BUFF_STR_MAX_SIZE,
-					"%d jiffies",
-					arg1);
-			// Make some stuck problems to be needed to check
-			// how many the mutex logging are occurred.
-			// Refer to P200523-00343, P200523-01815.
-			/*send_signal();*/
-
-			// get_callstack(log_buffer + len,
-			// 		PERFLOG_BUFF_STR_MAX_SIZE - len,
-			// 		/*(dbg_level_is_low ? 1 : 3)*/MAX_DEPTH_OF_CALLSTACK);
-			_perflog(PERFLOG_EVT, PERFLOG_MUTEX, log_buffer);
-			arg1 = MAX_MUTEX_RAWDATA;
-
-			//do_gettimeofday2(&end_time);
-			//_perflog(PERFLOG_EVT,
-			//		PERFLOG_MUTEX,
-			//		"[MUTEX] processing time : %d",
-			//		end_time.tv_usec - start_time.tv_usec);
-		}
-	}
-#if defined(USE_MONITOR)
-	for (digit = 0 ; digit < MAX_MUTEX_RAWDATA_DIGIT ; digit++) {
-		mutex_rawdata[arg1][digit]++;
-		if (mutex_rawdata[arg1][digit] >= DIGIT_UNIT)
-			mutex_rawdata[arg1][digit] = 0;
-		else
-			break;
-	}
-#endif
-}
-
-//EXPORT_SYMBOL(ologk);
-//EXPORT_SYMBOL(_perflog);
-//EXPORT_SYMBOL(perflog_evt);
-
-module_init(kperfmon_init);
-module_exit(kperfmon_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Binse Park <[email protected]>");
-MODULE_DESCRIPTION("Performance Log(OLOG)");
-

+ 0 - 92
drivers/kperfmon/kperfmon.h

@@ -1,92 +0,0 @@
-#define FLAG_NOTHING			0
-#define FLAG_READING			1
-#define USE_WORKQUEUE			1
-#define NOT_USED			0
-
-#define byte				unsigned char
-
-struct tRingBuffer {
-	byte *data;
-	long length;
-	long start;
-	long end;
-	long position;
-
-	struct mutex mutex;
-	long debugger;
-	bool status;
-};
-
-#if defined(USE_WORKQUEUE)
-struct t_ologk_work {
-	struct work_struct ologk_work;
-	union _uPLogPacket writelogpacket;
-};
-#endif
-
-struct t_command {
-	char *command;
-	void (*func)(char *writebuffer);
-};
-
-#if defined(USE_MONITOR)
-unsigned long mutex_rawdata[MAX_MUTEX_RAWDATA + 1][MAX_MUTEX_RAWDATA_DIGIT] = {{0, },};
-#endif
-
-int ops_write_buffer(struct tRingBuffer *buffer,
-			byte *data, unsigned long length);
-int ops_process_command(struct tRingBuffer *buffer,
-			byte *data, unsigned long length);
-
-enum {
-	SH_TYPE_PACKET,
-	SH_TYPE_COMMAND,
-};
-
-enum {
-	SH_TYPE,
-	SH_IDX_PACKET
-};
-
-int (*write_opts[])(struct tRingBuffer *buffer,
-			byte *data, unsigned long length)
-			= {
-				ops_write_buffer,
-				ops_process_command,
-			};
-
-void set_kperfmon_debugger_function(char *writebuffer);
-void process_version_function(char *writebuffer);
-
-struct t_command commands[] = {
-	{"kperfmon_debugger", set_kperfmon_debugger_function},
-	{"java_version", process_version_function},
-	{"nativelib_version", process_version_function},
-	{"perfmond_version", process_version_function},
-};
-
-struct t_before_print {
-	void *pdata;
-	int (*func)(char *read_buffer);
-	struct list_head list;
-};
-
-void CreateBuffer(struct tRingBuffer *buffer,
-			unsigned long length);
-void DestroyBuffer(struct tRingBuffer *buffer);
-void WriteBuffer(struct tRingBuffer *buffer,
-			byte *data, unsigned long length);
-void GetNext(struct tRingBuffer *buffer);
-void ReadBuffer(struct tRingBuffer *buffer,
-			byte *data,
-			unsigned long *length);
-int kperfmon_open(struct inode *, struct file *);
-ssize_t kperfmon_write(struct file *filp,
-				const char __user *data,
-				size_t length,
-				loff_t *loff_data);
-ssize_t kperfmon_read(struct file *filp,
-				char __user *data,
-				size_t count,
-				loff_t *loff_data);
-

+ 0 - 88
drivers/kperfmon/olog.pb.h

@@ -1,88 +0,0 @@
-// Generated by the protocol buffer compiler for perflog!!  DO NOT EDIT!
-#ifndef _OLOG_PROTOCOL_BUFFER_H_
-#define _OLOG_PROTOCOL_BUFFER_H_
-
-//EnumGenerator::GenerateDefinition in perflog_enum.cc
-enum OlogTestEnum_Type {
-  PERFLOG_DEF = 0,
-  PERFLOG_LOG = 1,
-  PERFLOG_EVT = 2,
-  PERFLOG_WRN = 3,
-  PERFLOG_CRI = 4
-};
-#if defined(KPERFMON_KERNEL)
-int OlogTestEnum_Type_maxnum = 5;
-char * OlogTestEnum_Type_strings[5] = {
-  "DEF",
-  "LOG",
-  "EVT",
-  "WRN",
-  "CRI"
-};
-#endif //KPERFMON_KERNEL
-//EnumGenerator::GenerateDefinition in perflog_enum.cc
-enum OlogTestEnum_ID {
-  PERFLOG_UNKNOWN = 0,
-  PERFLOG_LCDV = 2,
-  PERFLOG_ARGOS = 3,
-  PERFLOG_APPLAUNCH = 4,
-  PERFLOG_LOADAPK = 5,
-  PERFLOG_MAINLOOPER = 6,
-  PERFLOG_EXCESSIVECPUUSAGE = 7,
-  PERFLOG_ACTIVITYSLOW = 8,
-  PERFLOG_BROADCAST = 9,
-  PERFLOG_STORE = 10,
-  PERFLOG_CPUTOP = 11,
-  PERFLOG_LCD = 12,
-  PERFLOG_CPU = 13,
-  PERFLOG_LOCKCONTENTION = 14,
-  PERFLOG_CPUFREQ = 15,
-  PERFLOG_MEMPRESSURE = 16,
-  PERFLOG_INPUTD = 17,
-  PERFLOG_AMPSS = 18,
-  PERFLOG_SERVICEMANAGERSLOW = 19,
-  PERFLOG_IPCSTARVE = 20,
-  PERFLOG_SCREENSHOT = 21,
-  PERFLOG_MUTEX = 22,
-  PERFLOG_SYSTEMSERVER = 23,
-  PERFLOG_PERFETTOLOGGINGENABLED = 24,
-  PERFLOG_BIGDATA = 25,
-  PERFLOG_PSI = 26,
-  PERFLOG_JANK = 27
-};
-#if defined(KPERFMON_KERNEL)
-int OlogTestEnum_ID_maxnum = 28;
-char * OlogTestEnum_ID_strings[28] = {
-  "UNKNOWN",
-  " ",
-  "LCDV",
-  "ARGOS",
-  "APPLAUNCH",
-  "LOADAPK",
-  "MAINLOOPER",
-  "EXCESSIVECPUUSAGE",
-  "ACTIVITYSLOW",
-  "BROADCAST",
-  "STORE",
-  "CPUTOP",
-  "LCD",
-  "CPU",
-  "LOCKCONTENTION",
-  "CPUFREQ",
-  "MEMPRESSURE",
-  "INPUTD",
-  "AMPSS",
-  "SERVICEMANAGERSLOW",
-  "IPCSTARVE",
-  "SCREENSHOT",
-  "MUTEX",
-  "SYSTEMSERVER",
-  "PERFETTOLOGGINGENABLED",
-  "BIGDATA",
-  "PSI",
-  "JANK"
-};
-#endif //KPERFMON_KERNEL
-
-#endif //_OLOG_PROTOCOL_BUFFER_H_
-

+ 0 - 10
drivers/kperfmon/ologk.c

@@ -1,10 +0,0 @@
-#include <linux/ologk.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-
-//void _perflog(int type, int logid, const char *fmt, ...) {
-//}
-
-//void perflog_evt(int logid, int arg1) {
-//}
-

+ 0 - 15
drivers/kperfmon/ologk.h

@@ -1,15 +0,0 @@
-#ifndef _OLOG_KERNEL_H_
-#define _OLOG_KERNEL_H_
-
-#include <linux/unistd.h>
-#include "olog.pb.h"
-
-#define OLOG_CPU_FREQ_FILTER   1500000
-#define PERFLOG_MUTEX_THRESHOLD   20
-
-#define ologk(...) _perflog(PERFLOG_LOG, PERFLOG_UNKNOWN, __VA_ARGS__)
-#define perflog(...) _perflog(PERFLOG_LOG, __VA_ARGS__)
-extern void _perflog(int type, int logid, const char *fmt, ...);
-extern void perflog_evt(int logid, int arg1);
-
-#endif

+ 0 - 120
drivers/kperfmon/perflog.h

@@ -1,120 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-//
-// Samsung's performance logging
-//
-// Copyright (c) 2014 Samsung Electronics Co., Ltd
-//              http://www.samsung.com
-
-#ifndef PERFLOG_H_
-#define PERFLOG_H_
-
-#define PERFLOG_LOC __FILE__, __LINE__
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "olog.pb.h"
-
-typedef unsigned char uint8;
-typedef unsigned short uint16;
-typedef unsigned int  uint32;
-
-// extern FILE* perflog_fout;
-#define PERFLOG_PACKET_SIZE			256
-#define PERFLOG_HEADER_SIZE 			24
-#define PERFLOG_BUFF_STR_MAX_SIZE 		(PERFLOG_PACKET_SIZE - PERFLOG_HEADER_SIZE)
-#define PERFLOG_BUFF_STR_MAX_SIZE_FOR_MULTILINE	4096
-#define PERFLOG_BUFF_STR_MAX_SIZE_FOR_EVTI	PERFLOG_BUFF_STR_MAX_SIZE - PERFLOG_UINT16_SIZE
-#define PERFLOG_UINT16_SIZE			2
-#define PERFLOG_INT_SIZE			4
-
-/* PerfLog Phase 2 :: header format modification 
-   should be changed to protobuff type 
-*/
-typedef enum PerfLogAffectTag {
-    AFFECT_K,
-    AFFECT_F,
-    AFFECT_A
-}PerfLogAffect;
-
-typedef enum EvtNamingTag {
-    NAMING_LockC,
-    NAMING_AppLaunch,    
-}EvtNamingTag;
-
-typedef enum PerfLevelTag{
-    LOW,
-    MID,
-    HIGH,
-    CRITICAL
-}PerfLevelTag;
-
-// ###############################################################################
-#pragma pack(push, 1)
-
-struct Payload {
-	int param1;
-	int param2;
-	char logbuffer[PERFLOG_BUFF_STR_MAX_SIZE + 1];
-};  
-
-struct LogPacket {
-#if defined(KPERFMON_KERNEL)
-	struct timespec64 logtime;
-#else
-	struct timespec logtime;
-#endif
-        uint16 logtype;
-        uint16 logid;
-        uint16 pid;
-        uint16 tid;
-	struct Payload payload;
-};
-
-struct _Timestamp {
-	uint8  month;
-	uint8  day;
-	uint8  hour;
-	uint8  minute;
-	uint8  second;
-	uint16 msecond;
-};
-
-struct _PLogPacket {
-	struct _Timestamp timestamp;
-        uint16 pid;
-        uint16 tid;
-
-	uint8  type;
-	uint8  id;
-
-	char   pname[10];
-	uint8  context_length;
-	char   context_buffer[PERFLOG_BUFF_STR_MAX_SIZE + 1];
-};
-
-union _uPLogPacket {
-	struct _PLogPacket itemes;
-	char stream[PERFLOG_HEADER_SIZE + PERFLOG_BUFF_STR_MAX_SIZE];
-};
-
-#pragma pack(pop) 
-
-// Start API
-int perflog_sending_log_via_socket(uint16 type, uint16 logid, int param1, int param2, char const *str);
-
-int perflog_write(char const * fmt, ...);
-
-int perflog_write_log(uint16 type, uint16 logid, char const * fmt, ...);
-
-int perflog_write_evt(uint16 maintype, uint16 logid, uint16 param1, char const * fmt, ...);
-
-// int perflog_getlog(char **buff);
-// End API
-
-#ifdef __cplusplus
-}
-#endif /* #ifdef __cplusplus */
-
-#endif

+ 3 - 0
fs/Makefile

@@ -17,6 +17,9 @@ obj-y :=	open.o read_write.o file_table.o super.o \
 		fs_types.o fs_context.o fs_parser.o fsopen.o init.o \
 		kernel_read_file.o remap_range.o
 
+obj-$(CONFIG_KSU_SUSFS) += susfs.o
+obj-$(CONFIG_KSU_SUSFS_SUS_SU) += sus_su.o
+
 ifeq ($(CONFIG_BLOCK),y)
 obj-y +=	buffer.o direct-io.o mpage.o
 else

+ 18 - 0
fs/dcache.c

@@ -32,6 +32,9 @@
 #include <linux/bit_spinlock.h>
 #include <linux/rculist_bl.h>
 #include <linux/list_lru.h>
+#ifdef CONFIG_KSU_SUSFS_SUS_PATH
+#include <linux/susfs_def.h>
+#endif
 #include "internal.h"
 #include "mount.h"
 
@@ -2297,6 +2300,11 @@ seqretry:
 			continue;
 		if (dentry->d_name.hash != hashlen_hash(hashlen))
 			continue;
+#ifdef CONFIG_KSU_SUSFS_SUS_PATH
+		if (dentry->d_inode && unlikely(dentry->d_inode->i_state & INODE_STATE_SUS_PATH) && likely(current->susfs_task_state & TASK_STRUCT_NON_ROOT_USER_APP_PROC)) {
+			continue;
+		}
+#endif
 		tlen = dentry->d_name.len;
 		tname = dentry->d_name.name;
 		/* we want a consistent (name,len) pair */
@@ -2403,6 +2411,11 @@ struct dentry *__d_lookup_rcu(const struct dentry *parent,
 			continue;
 		if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
 			continue;
+#ifdef CONFIG_KSU_SUSFS_SUS_PATH
+		if (dentry->d_inode && unlikely(dentry->d_inode->i_state & INODE_STATE_SUS_PATH) && likely(current->susfs_task_state & TASK_STRUCT_NON_ROOT_USER_APP_PROC)) {
+			continue;
+		}
+#endif
 		*seqp = seq;
 		return dentry;
 	}
@@ -2484,6 +2497,11 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
 
 		if (dentry->d_name.hash != hash)
 			continue;
+#ifdef CONFIG_KSU_SUSFS_SUS_PATH
+		if (dentry->d_inode && unlikely(dentry->d_inode->i_state & INODE_STATE_SUS_PATH) && likely(current->susfs_task_state & TASK_STRUCT_NON_ROOT_USER_APP_PROC)) {
+			continue;
+		}
+#endif
 
 		spin_lock(&dentry->d_lock);
 		if (dentry->d_parent != parent)
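
The three dcache lookup hunks above repeat one guard expression verbatim. A minimal sketch of that predicate as a helper, shown here only to make the condition easier to read — the helper itself is not part of the patch; INODE_STATE_SUS_PATH, TASK_STRUCT_NON_ROOT_USER_APP_PROC and current->susfs_task_state come from the susfs_def.h header included above:

#ifdef CONFIG_KSU_SUSFS_SUS_PATH
/* Sketch only, not in the patch: the condition each lookup hunk tests
 * before skipping a dentry. A dentry is hidden when its inode carries
 * INODE_STATE_SUS_PATH and the calling task is marked as a non-root
 * user app process. */
static inline bool susfs_dentry_is_sus_path(const struct dentry *dentry)
{
	return dentry->d_inode &&
	       unlikely(dentry->d_inode->i_state & INODE_STATE_SUS_PATH) &&
	       likely(current->susfs_task_state &
		      TASK_STRUCT_NON_ROOT_USER_APP_PROC);
}
#endif

Each "continue;" added in __d_lookup_rcu() and __d_lookup() above is then equivalent to "if (susfs_dentry_is_sus_path(dentry)) continue;".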

+ 10 - 0
fs/devpts/inode.c

@@ -596,6 +596,11 @@ struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv)
 	return dentry;
 }
 
+#if defined(CONFIG_KSU_SUSFS_SUS_SU)
+extern bool ksu_devpts_hook;
+extern int ksu_handle_devpts(struct inode*);
+#endif
+
 /**
  * devpts_get_priv -- get private data for a slave
  * @pts_inode: inode of the slave
@@ -604,6 +609,11 @@ struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv)
  */
 void *devpts_get_priv(struct dentry *dentry)
 {
+#if defined(CONFIG_KSU_SUSFS_SUS_SU)
+	if (likely(ksu_devpts_hook)) {
+		ksu_handle_devpts(dentry->d_inode);
+	}
+#endif
 	if (dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC)
 		return NULL;
 	return dentry->d_fsdata;
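
devpts_get_priv() only calls into the hook when ksu_devpts_hook is set, so the fast path costs a single likely() branch. Both symbols are merely declared extern here; their definitions live in the KernelSU-Next submodule. A placeholder sketch of the expected shape (an assumption for illustration, not the real implementation):

/* Placeholder sketch only; the real definitions are provided by the
 * KernelSU-Next submodule and contain the actual su-grant logic. */
bool ksu_devpts_hook;	/* flipped to true once KernelSU enables the hook */

int ksu_handle_devpts(struct inode *inode)
{
	/* Inspect the pts slave inode and, for tasks granted su, adjust it
	 * so the root shell can use the terminal; no-op in this sketch. */
	return 0;
}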

+ 11 - 0
fs/exec.c

@@ -1873,6 +1873,12 @@ out_unmark:
 	return retval;
 }
 
+#ifdef CONFIG_KSU_SUSFS_SUS_SU
+extern bool susfs_is_sus_su_hooks_enabled __read_mostly;
+extern int ksu_handle_execveat_sucompat(int *fd, struct filename **filename_ptr, void *argv,
+				void *envp, int *flags);
+#endif
+
 static int do_execveat_common(int fd, struct filename *filename,
 			      struct user_arg_ptr argv,
 			      struct user_arg_ptr envp,
@@ -1884,6 +1890,11 @@ static int do_execveat_common(int fd, struct filename *filename,
 	if (IS_ERR(filename))
 		return PTR_ERR(filename);
 
+#ifdef CONFIG_KSU_SUSFS_SUS_SU
+	if (susfs_is_sus_su_hooks_enabled)
+		ksu_handle_execveat_sucompat(&fd, &filename, &argv, &envp, &flags);
+#endif
+
 	/*
 	 * We move the actual failure in case of RLIMIT_NPROC excess from
 	 * set*uid() to execve() because too many poorly written programs

+ 141 - 1
fs/namei.c

@@ -40,7 +40,9 @@
 #include <linux/bitops.h>
 #include <linux/init_task.h>
 #include <linux/uaccess.h>
-
+#if defined(CONFIG_KSU_SUSFS_SUS_PATH) || defined(CONFIG_KSU_SUSFS_OPEN_REDIRECT)
+#include <linux/susfs_def.h>
+#endif
 #include "internal.h"
 #include "mount.h"
 
@@ -1097,6 +1099,12 @@ static inline int may_follow_link(struct nameidata *nd, const struct inode *inod
 	struct user_namespace *mnt_userns;
 	kuid_t i_uid;
 
+#ifdef CONFIG_KSU_SUSFS_SUS_PATH
+	if (nd->inode && unlikely(nd->inode->i_state & INODE_STATE_SUS_PATH) && likely(current->susfs_task_state & TASK_STRUCT_NON_ROOT_USER_APP_PROC)) {
+		return -ENOENT;
+	}
+#endif
+
 	if (!sysctl_protected_symlinks)
 		return 0;
 
@@ -1182,6 +1190,12 @@ int may_linkat(struct user_namespace *mnt_userns, const struct path *link)
 {
 	struct inode *inode = link->dentry->d_inode;
 
+#ifdef CONFIG_KSU_SUSFS_SUS_PATH
+	if (inode && unlikely(inode->i_state & INODE_STATE_SUS_PATH) && likely(current->susfs_task_state & TASK_STRUCT_NON_ROOT_USER_APP_PROC)) {
+		return -ENOENT;
+	}
+#endif
+
 	/* Inode writeback is not safe when the uid or gid are invalid. */
 	if (!uid_valid(i_uid_into_mnt(mnt_userns, inode)) ||
 	    !gid_valid(i_gid_into_mnt(mnt_userns, inode)))
@@ -1234,6 +1248,12 @@ static int may_create_in_sticky(struct user_namespace *mnt_userns,
 	umode_t dir_mode = nd->dir_mode;
 	kuid_t dir_uid = nd->dir_uid;
 
+#ifdef CONFIG_KSU_SUSFS_SUS_PATH
+	if (unlikely(inode->i_state & INODE_STATE_SUS_PATH) && likely(current->susfs_task_state & TASK_STRUCT_NON_ROOT_USER_APP_PROC)) {
+		return -ENOENT;
+	}
+#endif
+
 	if ((!sysctl_protected_fifos && S_ISFIFO(inode->i_mode)) ||
 	    (!sysctl_protected_regular && S_ISREG(inode->i_mode)) ||
 	    likely(!(dir_mode & S_ISVTX)) ||
@@ -1612,6 +1632,10 @@ static struct dentry *__lookup_hash(const struct qstr *name,
 	struct dentry *old;
 	struct inode *dir = base->d_inode;
 
+#ifdef CONFIG_KSU_SUSFS_SUS_PATH
+	int error;
+#endif
+
 	if (dentry)
 		return dentry;
 
@@ -1628,6 +1652,19 @@ static struct dentry *__lookup_hash(const struct qstr *name,
 		dput(dentry);
 		dentry = old;
 	}
+#ifdef CONFIG_KSU_SUSFS_SUS_PATH
+	if (!IS_ERR(dentry) && dentry->d_inode && unlikely(dentry->d_inode->i_state & INODE_STATE_SUS_PATH) && likely(current->susfs_task_state & TASK_STRUCT_NON_ROOT_USER_APP_PROC)) {
+		if ((flags & (LOOKUP_CREATE | LOOKUP_EXCL))) {
+			error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
+			if (error) {
+				dput(dentry);
+				return ERR_PTR(error);
+			}
+		}
+		dput(dentry);
+		return ERR_PTR(-ENOENT);
+	}
+#endif
 	return dentry;
 }
 
@@ -1714,6 +1751,12 @@ again:
 			dentry = old;
 		}
 	}
+#ifdef CONFIG_KSU_SUSFS_SUS_PATH
+	if (!IS_ERR(dentry) && dentry->d_inode && unlikely(dentry->d_inode->i_state & INODE_STATE_SUS_PATH) && likely(current->susfs_task_state & TASK_STRUCT_NON_ROOT_USER_APP_PROC)) {
+		dput(dentry);
+		return ERR_PTR(-ENOENT);
+	}
+#endif
 	return dentry;
 }
 
@@ -2359,6 +2402,12 @@ OK:
 			}
 			return -ENOTDIR;
 		}
+#ifdef CONFIG_KSU_SUSFS_SUS_PATH
+		// we deal with sus sub path here
+		if (nd->inode && unlikely(nd->inode->i_state & INODE_STATE_SUS_PATH) && likely(current->susfs_task_state & TASK_STRUCT_NON_ROOT_USER_APP_PROC)) {
+			return 0;
+		}
+#endif
 	}
 }
 
@@ -2535,6 +2584,11 @@ int filename_lookup(int dfd, struct filename *name, unsigned flags,
 		audit_inode(name, path->dentry,
 			    flags & LOOKUP_MOUNTPOINT ? AUDIT_INODE_NOEVAL : 0);
 	restore_nameidata();
+#ifdef CONFIG_KSU_SUSFS_SUS_PATH
+	if (!retval && path->dentry->d_inode && unlikely(path->dentry->d_inode->i_state & INODE_STATE_SUS_PATH) && likely(current->susfs_task_state & TASK_STRUCT_NON_ROOT_USER_APP_PROC)) {
+		return -ENOENT;
+	}
+#endif
 	return retval;
 }
 
@@ -2963,6 +3017,12 @@ static int may_delete(struct user_namespace *mnt_userns, struct inode *dir,
 	if (IS_APPEND(dir))
 		return -EPERM;
 
+#ifdef CONFIG_KSU_SUSFS_SUS_PATH
+	if (unlikely(inode->i_state & INODE_STATE_SUS_PATH) && likely(current->susfs_task_state & TASK_STRUCT_NON_ROOT_USER_APP_PROC)) {
+		return -ENOENT;
+	}
+#endif
+
 	if (check_sticky(mnt_userns, dir, inode) || IS_APPEND(inode) ||
 	    IS_IMMUTABLE(inode) || IS_SWAPFILE(inode) ||
 	    HAS_UNMAPPED_ID(mnt_userns, inode))
@@ -2993,7 +3053,19 @@ static int may_delete(struct user_namespace *mnt_userns, struct inode *dir,
 static inline int may_create(struct user_namespace *mnt_userns,
 			     struct inode *dir, struct dentry *child)
 {
+#ifdef CONFIG_KSU_SUSFS_SUS_PATH
+	int error;
+#endif
 	audit_inode_child(dir, child, AUDIT_TYPE_CHILD_CREATE);
+#ifdef CONFIG_KSU_SUSFS_SUS_PATH
+	if (child->d_inode && unlikely(child->d_inode->i_state & INODE_STATE_SUS_PATH) && likely(current->susfs_task_state & TASK_STRUCT_NON_ROOT_USER_APP_PROC)) {
+		error = inode_permission(mnt_userns, dir, MAY_WRITE | MAY_EXEC);
+		if (error) {
+			return error;
+		}
+		return -ENOENT;
+	}
+#endif
 	if (child->d_inode)
 		return -EEXIST;
 	if (IS_DEADDIR(dir))
@@ -3181,6 +3253,12 @@ static int may_open(struct user_namespace *mnt_userns, const struct path *path,
 	if (!inode)
 		return -ENOENT;
 
+#ifdef CONFIG_KSU_SUSFS_SUS_PATH
+	if (unlikely(inode->i_state & INODE_STATE_SUS_PATH) && likely(current->susfs_task_state & TASK_STRUCT_NON_ROOT_USER_APP_PROC)) {
+		return -ENOENT;
+	}
+#endif
+
 	switch (inode->i_mode & S_IFMT) {
 	case S_IFLNK:
 		return -ELOOP;
@@ -3257,7 +3335,21 @@ static int may_o_create(struct user_namespace *mnt_userns,
 			const struct path *dir, struct dentry *dentry,
 			umode_t mode)
 {
+#ifdef CONFIG_KSU_SUSFS_SUS_PATH
+	int error;
+
+	if (dentry->d_inode && unlikely(dentry->d_inode->i_state & INODE_STATE_SUS_PATH) && likely(current->susfs_task_state & TASK_STRUCT_NON_ROOT_USER_APP_PROC)) {
+		error = inode_permission(mnt_userns, dir->dentry->d_inode,
+				 MAY_WRITE | MAY_EXEC);
+		if (error) {
+			return error;
+		}
+		return -ENOENT;
+	}
+	error = security_path_mknod(dir, dentry, mode, 0);
+#else
 	int error = security_path_mknod(dir, dentry, mode, 0);
+#endif
 	if (error)
 		return error;
 
@@ -3378,6 +3470,12 @@ static struct dentry *lookup_open(struct nameidata *nd, struct file *file,
 	}
 	if (dentry->d_inode) {
 		/* Cached positive dentry: will open in f_op->open */
+#ifdef CONFIG_KSU_SUSFS_SUS_PATH
+		if (unlikely(dentry->d_inode->i_state & INODE_STATE_SUS_PATH) && likely(current->susfs_task_state & TASK_STRUCT_NON_ROOT_USER_APP_PROC)) {
+			dput(dentry);
+			return ERR_PTR(-ENOENT);
+		}
+#endif
 		return dentry;
 	}
 
@@ -3409,6 +3507,16 @@ static struct dentry *lookup_open(struct nameidata *nd, struct file *file,
 		dentry = atomic_open(nd, dentry, file, open_flag, mode);
 		if (unlikely(create_error) && dentry == ERR_PTR(-ENOENT))
 			dentry = ERR_PTR(create_error);
+#ifdef CONFIG_KSU_SUSFS_SUS_PATH
+		if (!IS_ERR(dentry) && dentry->d_inode && unlikely(dentry->d_inode->i_state & INODE_STATE_SUS_PATH) && likely(current->susfs_task_state & TASK_STRUCT_NON_ROOT_USER_APP_PROC)) {
+			if (create_error) {
+				dput(dentry);
+				return ERR_PTR(create_error);
+			}
+			dput(dentry);
+			return ERR_PTR(-ENOENT);
+		}
+#endif
 		return dentry;
 	}
 
@@ -3423,6 +3531,12 @@ static struct dentry *lookup_open(struct nameidata *nd, struct file *file,
 			}
 			dput(dentry);
 			dentry = res;
+#ifdef CONFIG_KSU_SUSFS_SUS_PATH
+			if (dentry->d_inode && unlikely(dentry->d_inode->i_state & INODE_STATE_SUS_PATH) && likely(current->susfs_task_state & TASK_STRUCT_NON_ROOT_USER_APP_PROC)) {
+				dput(dentry);
+				return ERR_PTR(-ENOENT);
+			}
+#endif
 		}
 	}
 
@@ -3755,12 +3869,19 @@ static struct file *path_openat(struct nameidata *nd,
 	return ERR_PTR(error);
 }
 
+#ifdef CONFIG_KSU_SUSFS_OPEN_REDIRECT
+extern struct filename* susfs_get_redirected_path(unsigned long ino);
+#endif
+
 struct file *do_filp_open(int dfd, struct filename *pathname,
 		const struct open_flags *op)
 {
 	struct nameidata nd;
 	int flags = op->lookup_flags;
 	struct file *filp;
+#ifdef CONFIG_KSU_SUSFS_OPEN_REDIRECT
+	struct filename *fake_pathname;
+#endif
 
 	set_nameidata(&nd, dfd, pathname, NULL);
 	filp = path_openat(&nd, op, flags | LOOKUP_RCU);
@@ -3768,6 +3889,25 @@ struct file *do_filp_open(int dfd, struct filename *pathname,
 		filp = path_openat(&nd, op, flags);
 	if (unlikely(filp == ERR_PTR(-ESTALE)))
 		filp = path_openat(&nd, op, flags | LOOKUP_REVAL);
+#ifdef CONFIG_KSU_SUSFS_OPEN_REDIRECT
+	if (!IS_ERR(filp) && unlikely(filp->f_inode->i_state & INODE_STATE_OPEN_REDIRECT) && current_uid().val < 2000) {
+		fake_pathname = susfs_get_redirected_path(filp->f_inode->i_ino);
+		if (!IS_ERR(fake_pathname)) {
+			restore_nameidata();
+			filp_close(filp, NULL);
+			// no need to do `putname(pathname);` here as it will be done by calling process
+			set_nameidata(&nd, dfd, fake_pathname, NULL);
+			filp = path_openat(&nd, op, flags | LOOKUP_RCU);
+			if (unlikely(filp == ERR_PTR(-ECHILD)))
+				filp = path_openat(&nd, op, flags);
+			if (unlikely(filp == ERR_PTR(-ESTALE)))
+				filp = path_openat(&nd, op, flags | LOOKUP_REVAL);
+			restore_nameidata();
+			putname(fake_pathname);
+			return filp;
+		}
+	}
+#endif
 	restore_nameidata();
 	return filp;
 }
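
The do_filp_open() hunk above treats susfs_get_redirected_path() as handing back a struct filename that the caller owns: on success the open is retried against it and it is released with putname(). A contract-only sketch of that interface (assumed shape; the real lookup ships with the SuSFS patch, e.g. in fs/susfs.c):

/* Contract sketch only; the real implementation comes from the SuSFS patch. */
struct filename *susfs_get_redirected_path(unsigned long ino)
{
	/* If 'ino' was registered for redirection, return a freshly allocated
	 * filename (for example via getname_kernel()) for the caller to
	 * putname(); otherwise return an ERR_PTR() so the redirect is skipped. */
	return ERR_PTR(-ENOENT);
}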

+ 271 - 3
fs/namespace.c

@@ -32,6 +32,9 @@
 #include <linux/fs_context.h>
 #include <linux/shmem_fs.h>
 #include <linux/mnt_idmapping.h>
+#if defined(CONFIG_KSU_SUSFS_SUS_MOUNT) || defined(CONFIG_KSU_SUSFS_TRY_UMOUNT)
+#include <linux/susfs_def.h>
+#endif
 #ifdef CONFIG_KDP_NS
 #include <linux/kdp.h>
 #endif
@@ -39,6 +42,30 @@
 #include "pnode.h"
 #include "internal.h"
 
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+extern bool susfs_is_current_ksu_domain(void);
+extern bool susfs_is_current_zygote_domain(void);
+
+static DEFINE_IDA(susfs_mnt_id_ida);
+static DEFINE_IDA(susfs_mnt_group_ida);
+
+#define CL_ZYGOTE_COPY_MNT_NS BIT(24) /* used by copy_mnt_ns() */
+#define CL_COPY_MNT_NS BIT(25) /* used by copy_mnt_ns() */
+#endif
+
+#ifdef CONFIG_KSU_SUSFS_AUTO_ADD_SUS_KSU_DEFAULT_MOUNT
+extern void susfs_auto_add_sus_ksu_default_mount(const char __user *to_pathname);
+bool susfs_is_auto_add_sus_ksu_default_mount_enabled = true;
+#endif
+#ifdef CONFIG_KSU_SUSFS_AUTO_ADD_SUS_BIND_MOUNT
+extern int susfs_auto_add_sus_bind_mount(const char *pathname, struct path *path_target);
+bool susfs_is_auto_add_sus_bind_mount_enabled = true;
+#endif
+#ifdef CONFIG_KSU_SUSFS_AUTO_ADD_TRY_UMOUNT_FOR_BIND_MOUNT
+extern void susfs_auto_add_try_umount_for_bind_mount(struct path *path);
+bool susfs_is_auto_add_try_umount_for_bind_mount_enabled = true;
+#endif
+
 /* Maximum number of mounts in a mount namespace */
 static unsigned int sysctl_mount_max __read_mostly = 100000;
 
@@ -126,6 +153,19 @@ static inline struct hlist_head *mp_hash(struct dentry *dentry)
 	return &mountpoint_hashtable[tmp & mp_hash_mask];
 }
 
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+// Our own mnt_alloc_id() that assigns mnt_id starting from DEFAULT_SUS_MNT_ID
+static int susfs_mnt_alloc_id(struct mount *mnt)
+{
+	int res = ida_alloc_min(&susfs_mnt_id_ida, DEFAULT_SUS_MNT_ID, GFP_KERNEL);
+
+	if (res < 0)
+		return res;
+	mnt->mnt_id = res;
+	return 0;
+}
+#endif
+
 static int mnt_alloc_id(struct mount *mnt)
 {
 	int res = ida_alloc(&mnt_id_ida, GFP_KERNEL);
@@ -138,6 +178,26 @@ static int mnt_alloc_id(struct mount *mnt)
 
 static void mnt_free_id(struct mount *mnt)
 {
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+	// We should first check the 'mnt->mnt.susfs_mnt_id_backup', see if it is DEFAULT_SUS_MNT_ID_FOR_KSU_PROC_UNSHARE
+	// if so, these mnt_id were not assigned by mnt_alloc_id() so we don't need to free it.
+	if (unlikely(mnt->mnt.susfs_mnt_id_backup == DEFAULT_SUS_MNT_ID_FOR_KSU_PROC_UNSHARE)) {
+		return;
+	}
+	// Now we can check if its mnt_id is sus
+	if (unlikely(mnt->mnt_id >= DEFAULT_SUS_MNT_ID)) {
+		ida_free(&susfs_mnt_id_ida, mnt->mnt_id);
+		return;
+	}
+	// Lastly if 'mnt->mnt.susfs_mnt_id_backup' is not 0, then it contains a backup origin mnt_id
+	// so we free it in the original way
+	if (likely(mnt->mnt.susfs_mnt_id_backup)) {
+		// If mnt->mnt.susfs_mnt_id_backup is not zero, it means mnt->mnt_id is spoofed,
+		// so here we return the original mnt_id for being freed.
+		ida_free(&mnt_id_ida, mnt->mnt.susfs_mnt_id_backup);
+		return;
+	}
+#endif
 	ida_free(&mnt_id_ida, mnt->mnt_id);
 }
 
@@ -146,8 +206,20 @@ static void mnt_free_id(struct mount *mnt)
  */
 static int mnt_alloc_group_id(struct mount *mnt)
 {
-	int res = ida_alloc_min(&mnt_group_ida, 1, GFP_KERNEL);
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+	int res;
 
+	// Check if mnt has sus mnt_id
+	if (mnt->mnt_id >= DEFAULT_SUS_MNT_ID) {
+		// If so, assign a sus mnt_group id DEFAULT_SUS_MNT_GROUP_ID from susfs_mnt_group_ida
+		res = ida_alloc_min(&susfs_mnt_group_ida, DEFAULT_SUS_MNT_GROUP_ID, GFP_KERNEL);
+		goto bypass_orig_flow;
+	}
+	res = ida_alloc_min(&mnt_group_ida, 1, GFP_KERNEL);
+bypass_orig_flow:
+#else
+	int res = ida_alloc_min(&mnt_group_ida, 1, GFP_KERNEL);
+#endif
 	if (res < 0)
 		return res;
 	mnt->mnt_group_id = res;
@@ -159,6 +231,15 @@ static int mnt_alloc_group_id(struct mount *mnt)
  */
 void mnt_release_group_id(struct mount *mnt)
 {
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+	// If mnt->mnt_group_id >= DEFAULT_SUS_MNT_GROUP_ID, it means 'mnt' is also sus mount,
+	// then we free the mnt->mnt_group_id from susfs_mnt_group_ida
+	if (mnt->mnt_group_id >= DEFAULT_SUS_MNT_GROUP_ID) {
+		ida_free(&susfs_mnt_group_ida, mnt->mnt_group_id);
+		mnt->mnt_group_id = 0;
+		return;
+	}
+#endif
 	ida_free(&mnt_group_ida, mnt->mnt_group_id);
 	mnt->mnt_group_id = 0;
 }
@@ -196,13 +277,31 @@ int mnt_get_count(struct mount *mnt)
 #endif
 }
 
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+static struct mount *alloc_vfsmnt(const char *name, bool should_spoof, int custom_mnt_id)
+#else
 static struct mount *alloc_vfsmnt(const char *name)
+#endif
 {
 	struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
 	if (mnt) {
 		int err;
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+		if (should_spoof) {
+			if (!custom_mnt_id) {
+				err = susfs_mnt_alloc_id(mnt);
+			} else {
+				mnt->mnt_id = custom_mnt_id;
+				err = 0;
+			}
+			goto bypass_orig_flow;
+		}
+#endif
+ 		err = mnt_alloc_id(mnt);
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+bypass_orig_flow:
+#endif
 
-		err = mnt_alloc_id(mnt);
 		if (err)
 			goto out_free_cache;
 #ifdef CONFIG_KDP_NS
@@ -1078,7 +1177,17 @@ struct vfsmount *vfs_create_mount(struct fs_context *fc)
 	if (!fc->root)
 		return ERR_PTR(-EINVAL);
 
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+	// For newly created mounts, the only caller process we care is KSU
+	if (unlikely(susfs_is_current_ksu_domain())) {
+		mnt = alloc_vfsmnt(fc->source ?: "none", true, 0);
+		goto bypass_orig_flow;
+	}
+	mnt = alloc_vfsmnt(fc->source ?: "none", false, 0);
+bypass_orig_flow:
+#else
 	mnt = alloc_vfsmnt(fc->source ?: "none");
+#endif
 	if (!mnt)
 		return ERR_PTR(-ENOMEM);
 
@@ -1110,6 +1219,13 @@ struct vfsmount *vfs_create_mount(struct fs_context *fc)
 	fs_userns = mnt->mnt.mnt_sb->s_user_ns;
 	if (!initial_idmapping(fs_userns))
 		mnt->mnt.mnt_userns = get_user_ns(fs_userns);
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+	// If caller process is zygote, then it is a normal mount, so we just reorder the mnt_id
+	if (susfs_is_current_zygote_domain()) {
+		mnt->mnt.susfs_mnt_id_backup = mnt->mnt_id;
+		mnt->mnt_id = current->susfs_last_fake_mnt_id++;
+	}
+#endif
 #endif
 
 	lock_mount_hash();
@@ -1195,8 +1311,52 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
 #endif
 	struct mount *mnt;
 	int err;
-
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+	bool is_current_ksu_domain = susfs_is_current_ksu_domain();
+	bool is_current_zygote_domain = susfs_is_current_zygote_domain();
+
+	/* - It is very important that we need to use CL_COPY_MNT_NS to identify whether 
+	 *   the clone is a copy_tree() or single mount like called by __do_loopback()
+	 * - if caller process is KSU, consider the following situation:
+	 *     1. it is NOT doing unshare => call alloc_vfsmnt() to assign a new sus mnt_id
+	 *     2. it is doing unshare => spoof the new mnt_id with the old mnt_id
+	 * - If caller process is zygote and old mnt_id is sus => call alloc_vfsmnt() to assign a new sus mnt_id
+	 * - For the rest of caller process that doing unshare => call alloc_vfsmnt() to assign a new sus mnt_id only for old sus mount
+	 */
+	// First, check if it is a KSU process
+	if (unlikely(is_current_ksu_domain)) {
+		// if it is doing a single clone
+		if (!(flag & CL_COPY_MNT_NS)) {
+			mnt = alloc_vfsmnt(old->mnt_devname, true, 0);
+			goto bypass_orig_flow;
+		}
+		// if it is doing unshare
+		mnt = alloc_vfsmnt(old->mnt_devname, true, old->mnt_id);
+		if (mnt) {
+			mnt->mnt.susfs_mnt_id_backup = DEFAULT_SUS_MNT_ID_FOR_KSU_PROC_UNSHARE;
+		}
+		goto bypass_orig_flow;
+	}
+	// Second, check if it is a zygote process, regardless of whether it is doing unshare
+	if (likely(is_current_zygote_domain) && (old->mnt_id >= DEFAULT_SUS_MNT_ID)) {
+		/* Important note:
+		 *  - Here we can't determine whether the unshare is called by zygisk or not,
+		 *    so for now we can only patch out the unshare code in the zygisk source
+		 *  - But at least we can deal with old sus mounts using alloc_vfsmnt()
+		 */
+		mnt = alloc_vfsmnt(old->mnt_devname, true, 0);
+		goto bypass_orig_flow;
+	}
+	// Lastly, for any other process doing an unshare operation, only deal with old sus mounts
+	if ((flag & CL_COPY_MNT_NS) && (old->mnt_id >= DEFAULT_SUS_MNT_ID)) {
+		mnt = alloc_vfsmnt(old->mnt_devname, true, 0);
+		goto bypass_orig_flow;
+	}
+	mnt = alloc_vfsmnt(old->mnt_devname, false, 0);
+bypass_orig_flow:
+#else
 	mnt = alloc_vfsmnt(old->mnt_devname);
+#endif
 	if (!mnt)
 		return ERR_PTR(-ENOMEM);
 
@@ -1239,6 +1399,13 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
 	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
 #endif
 	mnt->mnt_parent = mnt;
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+	// If the caller process is zygote and it is not doing unshare, just reorder the mnt_id
+	if (likely(is_current_zygote_domain) && !(flag & CL_ZYGOTE_COPY_MNT_NS)) {
+		mnt->mnt.susfs_mnt_id_backup = mnt->mnt_id;
+		mnt->mnt_id = current->susfs_last_fake_mnt_id++;
+	}
+#endif
 	lock_mount_hash();
 	list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
 	unlock_mount_hash();
@@ -2712,6 +2879,26 @@ static int do_loopback(struct path *path, const char *old_name,
 		umount_tree(mnt, UMOUNT_SYNC);
 		unlock_mount_hash();
 	}
+#if defined(CONFIG_KSU_SUSFS_AUTO_ADD_SUS_BIND_MOUNT) || defined(CONFIG_KSU_SUSFS_AUTO_ADD_TRY_UMOUNT_FOR_BIND_MOUNT)
+	// Check if the bind mounted path should be hidden and umounted automatically.
+	// We target only processes in the ksu domain.
+	if (susfs_is_current_ksu_domain()) {
+#if defined(CONFIG_KSU_SUSFS_AUTO_ADD_SUS_BIND_MOUNT)
+		if (susfs_is_auto_add_sus_bind_mount_enabled &&
+				susfs_auto_add_sus_bind_mount(old_name, &old_path)) {
+			goto orig_flow;
+		}
+#endif
+#if defined(CONFIG_KSU_SUSFS_AUTO_ADD_TRY_UMOUNT_FOR_BIND_MOUNT)
+		if (susfs_is_auto_add_try_umount_for_bind_mount_enabled) {
+			susfs_auto_add_try_umount_for_bind_mount(path);
+		}
+#endif
+	}
+#if defined(CONFIG_KSU_SUSFS_AUTO_ADD_SUS_BIND_MOUNT)
+orig_flow:
+#endif
+#endif // #if defined(CONFIG_KSU_SUSFS_AUTO_ADD_SUS_BIND_MOUNT) || defined(CONFIG_KSU_SUSFS_AUTO_ADD_TRY_UMOUNT_FOR_BIND_MOUNT)
 out2:
 	unlock_mount(mp);
 out:
@@ -3803,6 +3990,11 @@ struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
 	struct mount *new;
 	int copy_flags;
 
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+	bool is_zygote_pid = susfs_is_current_zygote_domain();
+	int last_entry_mnt_id = 0;
+#endif
+
 	BUG_ON(!ns);
 
 	if (likely(!(flags & CLONE_NEWNS))) {
@@ -3821,6 +4013,14 @@ struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
 	copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
 	if (user_ns != ns->user_ns)
 		copy_flags |= CL_SHARED_TO_SLAVE;
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+	// Always let clone_mnt() in copy_tree() know it is from copy_mnt_ns()
+	copy_flags |= CL_COPY_MNT_NS;
+	if (is_zygote_pid) {
+		// Let clone_mnt() in copy_tree() know copy_mnt_ns() is run by zygote process
+		copy_flags |= CL_ZYGOTE_COPY_MNT_NS;
+	}
+#endif
 #ifdef CONFIG_KDP_NS
 	new = copy_tree(old, ((struct kdp_mount *)old)->mnt->mnt_root, copy_flags);
 #else
@@ -3882,6 +4082,28 @@ struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
 #endif
 			p = next_mnt(p, old);
 	}
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+	// current->susfs_last_fake_mnt_id -> records the last valid fake mnt_id for the zygote pid
+	// q->mnt.susfs_mnt_id_backup -> original mnt_id
+	// q->mnt_id -> will be modified to the fake mnt_id
+
+	// Here we are only interested in processes whose original mnt namespace belongs to zygote
+	// Also we just reuse the existing 'q' mount pointer, no need to declare an extra one
+	if (is_zygote_pid) {
+		last_entry_mnt_id = list_first_entry(&new_ns->list, struct mount, mnt_list)->mnt_id;
+		list_for_each_entry(q, &new_ns->list, mnt_list) {
+			if (unlikely(q->mnt_id >= DEFAULT_SUS_MNT_ID)) {
+				continue;
+			}
+			q->mnt.susfs_mnt_id_backup = q->mnt_id;
+			q->mnt_id = last_entry_mnt_id++;
+		}
+	}
+	// Assign the 'last_entry_mnt_id' to 'current->susfs_last_fake_mnt_id' for later use.
+	// It should be fine here, assuming zygote forks/unshares apps in a single thread.
+	// Or should we put a lock here?
+	current->susfs_last_fake_mnt_id = last_entry_mnt_id;
+#endif
 	namespace_unlock();
 
 	if (rootmnt)
@@ -3954,6 +4176,12 @@ SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
 
 	ret = do_mount(kernel_dev, dir_name, kernel_type, flags, options);
 
+#if defined(CONFIG_KSU_SUSFS_AUTO_ADD_SUS_KSU_DEFAULT_MOUNT) && defined(CONFIG_KSU_SUSFS_HAS_MAGIC_MOUNT)
+	// Just for compatibility with Magic Mount KernelSU
+	if (!ret && susfs_is_auto_add_sus_ksu_default_mount_enabled && susfs_is_current_ksu_domain()) {
+		susfs_auto_add_sus_ksu_default_mount(dir_name);
+	}
+#endif
 	kfree(options);
 out_data:
 	kfree(kernel_dev);
@@ -4176,6 +4404,12 @@ out_to:
 	path_put(&to_path);
 out_from:
 	path_put(&from_path);
+#ifdef CONFIG_KSU_SUSFS_AUTO_ADD_SUS_KSU_DEFAULT_MOUNT
+	// For Legacy KSU mount scheme
+	if (!ret && susfs_is_auto_add_sus_ksu_default_mount_enabled && susfs_is_current_ksu_domain()) {
+		susfs_auto_add_sus_ksu_default_mount(to_pathname);
+	}
+#endif
 	return ret;
 }
 
@@ -5186,3 +5420,37 @@ static int __init init_fs_namespace_sysctls(void)
 fs_initcall(init_fs_namespace_sysctls);
 
 #endif /* CONFIG_SYSCTL */
+
+#ifdef CONFIG_KSU_SUSFS_TRY_UMOUNT
+extern void susfs_try_umount_all(uid_t uid);
+void susfs_run_try_umount_for_current_mnt_ns(void) {
+       struct mount *mnt;
+       struct mnt_namespace *mnt_ns;
+
+       mnt_ns = current->nsproxy->mnt_ns;
+       // Lock the namespace
+       namespace_lock();
+       list_for_each_entry(mnt, &mnt_ns->list, mnt_list) {
+               // Change the sus mount to be private
+               if (mnt->mnt_id >= DEFAULT_SUS_MNT_ID) {
+                       change_mnt_propagation(mnt, MS_PRIVATE);
+               }
+       }
+       // Unlock the namespace
+       namespace_unlock();
+       susfs_try_umount_all(current_uid().val);
+}
+#endif
+#ifdef CONFIG_KSU_SUSFS
+bool susfs_is_mnt_devname_ksu(struct path *path) {
+       struct mount *mnt;
+
+       if (path && path->mnt) {
+               mnt = real_mount(path->mnt);
+               if (mnt && mnt->mnt_devname && !strcmp(mnt->mnt_devname, "KSU")) {
+                       return true;
+               }
+       }
+       return false;
+}
+#endif
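
The fs/namespace.c hunks above reserve a high mnt_id range for sus mounts and, for zygote-spawned namespaces, renumber the remaining mounts so their ids stay consecutive while the original ids are parked in susfs_mnt_id_backup. Below is a minimal sketch of that renumbering (not part of the patch), reduced to plain integers; the value of DEFAULT_SUS_MNT_ID is an assumption here, the real constant presumably comes from linux/susfs_def.h, which other hunks in this patch include.

#define DEFAULT_SUS_MNT_ID 1000000	/* assumed placeholder; see linux/susfs_def.h */

/* Renumber non-sus ids into a consecutive sequence starting from the first
 * entry's id, skipping anything in the reserved sus range, and return the
 * next free id (what copy_mnt_ns() stores in current->susfs_last_fake_mnt_id). */
static int renumber_mnt_ids(int *ids, int count)
{
	int next = ids[0];
	int i;

	for (i = 0; i < count; i++) {
		if (ids[i] >= DEFAULT_SUS_MNT_ID)
			continue;	/* sus mounts keep their reserved id */
		ids[i] = next++;	/* the real id would be saved in susfs_mnt_id_backup */
	}
	return next;
}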

+ 48 - 0
fs/notify/fdinfo.c

@@ -12,6 +12,9 @@
 #include <linux/types.h>
 #include <linux/seq_file.h>
 #include <linux/exportfs.h>
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+#include <linux/susfs_def.h>
+#endif
 
 #include "inotify/inotify.h"
 #include "fanotify/fanotify.h"
@@ -22,16 +25,27 @@
 
 #if defined(CONFIG_INOTIFY_USER) || defined(CONFIG_FANOTIFY)
 
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+static void show_fdinfo(struct seq_file *m, struct file *f,
+			void (*show)(struct seq_file *m,
+				     struct fsnotify_mark *mark,
+					 struct file *file))
+#else
 static void show_fdinfo(struct seq_file *m, struct file *f,
 			void (*show)(struct seq_file *m,
 				     struct fsnotify_mark *mark))
+#endif
 {
 	struct fsnotify_group *group = f->private_data;
 	struct fsnotify_mark *mark;
 
 	fsnotify_group_lock(group);
 	list_for_each_entry(mark, &group->marks_list, g_list) {
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+		show(m, mark, f);
+#else
 		show(m, mark);
+#endif
 		if (seq_has_overflowed(m))
 			break;
 	}
@@ -73,7 +87,11 @@ static void show_mark_fhandle(struct seq_file *m, struct inode *inode)
 
 #ifdef CONFIG_INOTIFY_USER
 
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+static void inotify_fdinfo(struct seq_file *m, struct fsnotify_mark *mark, struct file *file)
+#else
 static void inotify_fdinfo(struct seq_file *m, struct fsnotify_mark *mark)
+#endif
 {
 	struct inotify_inode_mark *inode_mark;
 	struct inode *inode;
@@ -84,6 +102,36 @@ static void inotify_fdinfo(struct seq_file *m, struct fsnotify_mark *mark)
 	inode_mark = container_of(mark, struct inotify_inode_mark, fsn_mark);
 	inode = igrab(fsnotify_conn_inode(mark->connector));
 	if (inode) {
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+		if (likely(current->susfs_task_state & TASK_STRUCT_NON_ROOT_USER_APP_PROC) &&
+				unlikely(inode->i_state & INODE_STATE_SUS_KSTAT)) {
+			struct path path;
+			char *pathname = kmalloc(PAGE_SIZE, GFP_KERNEL);
+			char *dpath;
+			if (!pathname) {
+				goto out_seq_printf;
+			}
+			dpath = d_path(&file->f_path, pathname, PAGE_SIZE);
+			if (!dpath) {
+				goto out_free_pathname;
+			}
+			if (kern_path(dpath, 0, &path)) {
+				goto out_free_pathname;
+			}
+			seq_printf(m, "inotify wd:%x ino:%lx sdev:%x mask:%x ignored_mask:0 ",
+					inode_mark->wd, path.dentry->d_inode->i_ino, path.dentry->d_inode->i_sb->s_dev,
+					inotify_mark_user_mask(mark));
+			show_mark_fhandle(m, path.dentry->d_inode);
+			seq_putc(m, '\n');
+			iput(inode);
+			path_put(&path);
+			kfree(pathname);
+			return;
+out_free_pathname:
+			kfree(pathname);
+		}
+out_seq_printf:
+#endif
 		seq_printf(m, "inotify wd:%x ino:%lx sdev:%x mask:%x ignored_mask:0 ",
 			   inode_mark->wd, inode->i_ino, inode->i_sb->s_dev,
 			   inotify_mark_user_mask(mark));

+ 11 - 1
fs/open.c

@@ -418,6 +418,12 @@ static const struct cred *access_override_creds(void)
 	return old_cred;
 }
 
+#ifdef CONFIG_KSU_SUSFS_SUS_SU
+extern bool susfs_is_sus_su_hooks_enabled __read_mostly;
+extern int ksu_handle_faccessat(int *dfd, const char __user **filename_user, int *mode,
+			int *flags);
+#endif
+
 static long do_faccessat(int dfd, const char __user *filename, int mode, int flags)
 {
 	struct path path;
@@ -425,7 +431,11 @@ static long do_faccessat(int dfd, const char __user *filename, int mode, int fla
 	int res;
 	unsigned int lookup_flags = LOOKUP_FOLLOW;
 	const struct cred *old_cred = NULL;
-
+#ifdef CONFIG_KSU_SUSFS_SUS_SU
+	if (susfs_is_sus_su_hooks_enabled) {
+		ksu_handle_faccessat(&dfd, &filename, &mode, NULL);
+	}
+#endif
 	if (mode & ~S_IRWXO)	/* where's F_OK, X_OK, W_OK, R_OK? */
 		return -EINVAL;
 

+ 14 - 1
fs/overlayfs/inode.c

@@ -165,7 +165,20 @@ int ovl_getattr(struct user_namespace *mnt_userns, const struct path *path,
 	bool metacopy_blocks = false;
 
 	metacopy_blocks = ovl_is_metacopy_dentry(dentry);
-
+#ifdef CONFIG_KSU_SUSFS_SUS_OVERLAYFS
+	ovl_path_lowerdata(dentry, &realpath);
+	if (likely(realpath.mnt && realpath.dentry)) {
+		old_cred = ovl_override_creds(dentry->d_sb);
+		err = vfs_getattr(&realpath, stat, request_mask, flags);
+		if (err)
+			goto out;
+
+		if (realpath.dentry->d_inode) {
+			generic_fill_statx_attr(realpath.dentry->d_inode, stat);
+		}
+		goto out;
+	}
+#endif
 	type = ovl_path_real(dentry, &realpath);
 	old_cred = ovl_override_creds(dentry->d_sb);
 	err = vfs_getattr(&realpath, stat, request_mask, flags);

+ 11 - 1
fs/overlayfs/readdir.c

@@ -934,8 +934,18 @@ static int ovl_dir_open(struct inode *inode, struct file *file)
 	od = kzalloc(sizeof(struct ovl_dir_file), GFP_KERNEL);
 	if (!od)
 		return -ENOMEM;
-
+#ifdef CONFIG_KSU_SUSFS_SUS_OVERLAYFS
+	ovl_path_lowerdata(file->f_path.dentry, &realpath);
+	if (likely(realpath.mnt && realpath.dentry)) {
+		// We still use '__OVL_PATH_UPPER' here, which should be fine.
+		type = __OVL_PATH_UPPER;
+		goto bypass_orig_flow;
+	}
+#endif
 	type = ovl_path_real(file->f_path.dentry, &realpath);
+#ifdef CONFIG_KSU_SUSFS_SUS_OVERLAYFS
+bypass_orig_flow:
+#endif
 	realfile = ovl_dir_open_realfile(file, &realpath);
 	if (IS_ERR(realfile)) {
 		kfree(od);

+ 11 - 1
fs/overlayfs/super.c

@@ -327,7 +327,17 @@ static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf)
 	struct dentry *root_dentry = dentry->d_sb->s_root;
 	struct path path;
 	int err;
-
+#ifdef CONFIG_KSU_SUSFS_SUS_OVERLAYFS
+	ovl_path_lowerdata(root_dentry, &path);
+	if (likely(path.mnt && path.dentry)) {
+		err = vfs_statfs(&path, buf);
+		if (!err) {
+			buf->f_namelen = 255; // 255 for erofs, ext2/4, f2fs
+			buf->f_type = path.dentry->d_sb->s_magic;
+		}
+		return err;
+	}
+#endif
 	ovl_path_real(root_dentry, &path);
 
 	err = vfs_statfs(&path, buf);

+ 10 - 0
fs/proc/bootconfig.c

@@ -12,8 +12,18 @@
 
 static char *saved_boot_config;
 
+#ifdef CONFIG_KSU_SUSFS_SPOOF_CMDLINE_OR_BOOTCONFIG
+extern int susfs_spoof_cmdline_or_bootconfig(struct seq_file *m);
+#endif
 static int boot_config_proc_show(struct seq_file *m, void *v)
 {
+#ifdef CONFIG_KSU_SUSFS_SPOOF_CMDLINE_OR_BOOTCONFIG
+	if (saved_boot_config) {
+		if (!susfs_spoof_cmdline_or_bootconfig(m)) {
+			return 0;
+		}
+	}
+#endif
 	if (saved_boot_config)
 		seq_puts(m, saved_boot_config);
 	return 0;

+ 44 - 2
fs/proc/fd.c

@@ -13,6 +13,9 @@
 #include <linux/fs.h>
 
 #include <linux/proc_fs.h>
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+#include <linux/susfs_def.h>
+#endif
 
 #include "../mount.h"
 #include "internal.h"
@@ -24,6 +27,9 @@ static int seq_show(struct seq_file *m, void *v)
 	int f_flags = 0, ret = -ENOENT;
 	struct file *file = NULL;
 	struct task_struct *task;
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+	struct mount *mnt = NULL;
+#endif
 
 	task = get_proc_task(m->private);
 	if (!task)
@@ -53,12 +59,48 @@ static int seq_show(struct seq_file *m, void *v)
 
 	if (ret)
 		return ret;
-
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+	mnt = real_mount(file->f_path.mnt);
+	if (likely(current->susfs_task_state & TASK_STRUCT_NON_ROOT_USER_APP_PROC) &&
+			mnt->mnt_id >= DEFAULT_SUS_MNT_ID) {
+		struct path path;
+		char *pathname = kmalloc(PAGE_SIZE, GFP_KERNEL);
+		char *dpath;
+
+		for (; mnt->mnt_id >= DEFAULT_SUS_MNT_ID; mnt = mnt->mnt_parent) { }
+
+		if (!pathname) {
+			goto out_seq_printf;
+		}
+		dpath = d_path(&file->f_path, pathname, PAGE_SIZE);
+		if (!dpath) {
+			goto out_free_pathname;
+		}
+		if (kern_path(dpath, 0, &path)) {
+			goto out_free_pathname;
+		}
+		seq_printf(m, "pos:\t%lli\nflags:\t0%o\nmnt_id:\t%i\nino:\t%lu\n",
+				(long long)file->f_pos, f_flags,
+				mnt->mnt_id,
+				path.dentry->d_inode->i_ino);
+		path_put(&path);
+		kfree(pathname);
+		goto bypass_orig_flow;
+out_free_pathname:
+		kfree(pathname);
+	}
+out_seq_printf:
+	seq_printf(m, "pos:\t%lli\nflags:\t0%o\nmnt_id:\t%i\nino:\t%lu\n",
+		   (long long)file->f_pos, f_flags,
+		   mnt->mnt_id,
+		   file_inode(file)->i_ino);
+bypass_orig_flow:
+#else
 	seq_printf(m, "pos:\t%lli\nflags:\t0%o\nmnt_id:\t%i\nino:\t%lu\n",
 		   (long long)file->f_pos, f_flags,
 		   real_mount(file->f_path.mnt)->mnt_id,
 		   file_inode(file)->i_ino);
-
+#endif
 	/* show_fd_locks() never deferences files so a stale value is safe */
 	show_fd_locks(m, file, files);
 	if (seq_has_overflowed(m))

+ 16 - 0
fs/proc/task_mmu.c

@@ -20,6 +20,9 @@
 #include <linux/uaccess.h>
 #include <linux/pkeys.h>
 #include <trace/hooks/mm.h>
+#ifdef CONFIG_KSU_SUSFS_SUS_KSTAT
+#include <linux/susfs_def.h>
+#endif
 
 #include <asm/elf.h>
 #include <asm/tlb.h>
@@ -272,6 +275,10 @@ static void show_vma_header_prefix(struct seq_file *m,
 	seq_putc(m, ' ');
 }
 
+#ifdef CONFIG_KSU_SUSFS_SUS_KSTAT
+extern void susfs_sus_ino_for_show_map_vma(unsigned long ino, dev_t *out_dev, unsigned long *out_ino);
+#endif
+
 static void
 show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
 {
@@ -286,8 +293,17 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
 
 	if (file) {
 		struct inode *inode = file_inode(vma->vm_file);
+#ifdef CONFIG_KSU_SUSFS_SUS_KSTAT
+		if (unlikely(inode->i_state & INODE_STATE_SUS_KSTAT)) {
+			susfs_sus_ino_for_show_map_vma(inode->i_ino, &dev, &ino);
+			goto bypass_orig_flow;
+		}
+#endif
 		dev = inode->i_sb->s_dev;
 		ino = inode->i_ino;
+#ifdef CONFIG_KSU_SUSFS_SUS_KSTAT
+bypass_orig_flow:
+#endif
 		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
 	}
 

+ 22 - 0
fs/proc_namespace.c

@@ -12,12 +12,19 @@
 #include <linux/security.h>
 #include <linux/fs_struct.h>
 #include <linux/sched/task.h>
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+#include <linux/susfs_def.h>
+#endif
 
 #include "proc/internal.h" /* only for get_proc_task() in ->open() */
 
 #include "pnode.h"
 #include "internal.h"
 
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+extern bool susfs_is_current_ksu_domain(void);
+#endif
+
 static __poll_t mounts_poll(struct file *file, poll_table *wait)
 {
 	struct seq_file *m = file->private_data;
@@ -106,6 +113,11 @@ static int show_vfsmnt(struct seq_file *m, struct vfsmount *mnt)
 	struct super_block *sb = mnt_path.dentry->d_sb;
 	int err;
 
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+	if (unlikely((r->mnt_id >= DEFAULT_SUS_MNT_ID) && !susfs_is_current_ksu_domain()))
+		return 0;
+#endif
+
 	if (sb->s_op->show_devname) {
 		err = sb->s_op->show_devname(m, mnt_path.dentry);
 		if (err)
@@ -140,6 +152,11 @@ static int show_mountinfo(struct seq_file *m, struct vfsmount *mnt)
 	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
 	int err;
 
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+	if (unlikely((r->mnt_id >= DEFAULT_SUS_MNT_ID) && !susfs_is_current_ksu_domain()))
+		return 0;
+#endif
+
 	seq_printf(m, "%i %i %u:%u ", r->mnt_id, r->mnt_parent->mnt_id,
 		   MAJOR(sb->s_dev), MINOR(sb->s_dev));
 	if (sb->s_op->show_path) {
@@ -202,6 +219,11 @@ static int show_vfsstat(struct seq_file *m, struct vfsmount *mnt)
 	struct super_block *sb = mnt_path.dentry->d_sb;
 	int err;
 
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+	if (unlikely((r->mnt_id >= DEFAULT_SUS_MNT_ID) && !susfs_is_current_ksu_domain()))
+		return 0;
+#endif
+
 	/* device */
 	if (sb->s_op->show_devname) {
 		seq_puts(m, "device ");

+ 29 - 0
fs/readdir.c

@@ -21,6 +21,9 @@
 #include <linux/unistd.h>
 #include <linux/compat.h>
 #include <linux/uaccess.h>
+#ifdef CONFIG_KSU_SUSFS_SUS_PATH
+#include <linux/susfs_def.h>
+#endif
 
 #include <asm/unaligned.h>
 
@@ -36,6 +39,9 @@
 	unsafe_copy_to_user(dst, src, len, label);		\
 } while (0)
 
+#ifdef CONFIG_KSU_SUSFS_SUS_PATH
+extern int susfs_sus_ino_for_filldir64(unsigned long ino);
+#endif
 
 int iterate_dir(struct file *file, struct dir_context *ctx)
 {
@@ -230,6 +236,12 @@ static bool filldir(struct dir_context *ctx, const char *name, int namlen,
 		sizeof(long));
 	int prev_reclen;
 
+#ifdef CONFIG_KSU_SUSFS_SUS_PATH
+	if (likely(current->susfs_task_state & TASK_STRUCT_NON_ROOT_USER_APP_PROC) && susfs_sus_ino_for_filldir64(ino)) {
+		return true;
+	}
+#endif
+
 	buf->error = verify_dirent_name(name, namlen);
 	if (unlikely(buf->error))
 		return false;
@@ -317,6 +329,12 @@ static bool filldir64(struct dir_context *ctx, const char *name, int namlen,
 		sizeof(u64));
 	int prev_reclen;
 
+#ifdef CONFIG_KSU_SUSFS_SUS_PATH
+	if (likely(current->susfs_task_state & TASK_STRUCT_NON_ROOT_USER_APP_PROC) && susfs_sus_ino_for_filldir64(ino)) {
+		return true;
+	}
+#endif
+
 	buf->error = verify_dirent_name(name, namlen);
 	if (unlikely(buf->error))
 		return false;
@@ -408,6 +426,11 @@ static bool compat_fillonedir(struct dir_context *ctx, const char *name,
 
 	if (buf->result)
 		return false;
+#ifdef CONFIG_KSU_SUSFS_SUS_PATH
+	if (likely(current->susfs_task_state & TASK_STRUCT_NON_ROOT_USER_APP_PROC) && susfs_sus_ino_for_filldir64(ino)) {
+		return true;
+	}
+#endif
 	buf->result = verify_dirent_name(name, namlen);
 	if (buf->result)
 		return false;
@@ -482,6 +505,12 @@ static bool compat_filldir(struct dir_context *ctx, const char *name, int namlen
 		namlen + 2, sizeof(compat_long_t));
 	int prev_reclen;
 
+#ifdef CONFIG_KSU_SUSFS_SUS_PATH
+	if (likely(current->susfs_task_state & TASK_STRUCT_NON_ROOT_USER_APP_PROC) && susfs_sus_ino_for_filldir64(ino)) {
+		return true;
+	}
+#endif
+
 	buf->error = verify_dirent_name(name, namlen);
 	if (unlikely(buf->error))
 		return false;

+ 45 - 0
fs/stat.c

@@ -19,12 +19,20 @@
 #include <linux/pagemap.h>
 #include <linux/compat.h>
 
+#if defined(CONFIG_KSU_SUSFS_SUS_KSTAT) || defined(CONFIG_KSU_SUSFS_SUS_MOUNT)
+#include <linux/susfs_def.h>
+#endif
+
 #include <linux/uaccess.h>
 #include <asm/unistd.h>
 
 #include "internal.h"
 #include "mount.h"
 
+#ifdef CONFIG_KSU_SUSFS_SUS_KSTAT
+extern void susfs_sus_ino_for_generic_fillattr(unsigned long ino, struct kstat *stat);
+#endif
+
 /**
  * generic_fillattr - Fill in the basic attributes from the inode struct
  * @mnt_userns:	user namespace of the mount the inode was found from
@@ -44,6 +52,17 @@
 void generic_fillattr(struct user_namespace *mnt_userns, struct inode *inode,
 		      struct kstat *stat)
 {
+#ifdef CONFIG_KSU_SUSFS_SUS_KSTAT
+	if (likely(current->susfs_task_state & TASK_STRUCT_NON_ROOT_USER_APP_PROC) &&
+			unlikely(inode->i_state & INODE_STATE_SUS_KSTAT)) {
+		susfs_sus_ino_for_generic_fillattr(inode->i_ino, stat);
+		stat->mode = inode->i_mode;
+		stat->rdev = inode->i_rdev;
+		stat->uid = i_uid_into_mnt(mnt_userns, inode);
+		stat->gid = i_gid_into_mnt(mnt_userns, inode);
+		return;
+	}
+#endif
 	stat->dev = inode->i_sb->s_dev;
 	stat->ino = inode->i_ino;
 	stat->mode = inode->i_mode;
@@ -221,6 +240,10 @@ static int vfs_statx(int dfd, struct filename *filename, int flags,
 	unsigned int lookup_flags = getname_statx_lookup_flags(flags);
 	int error;
 
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+	struct mount *mnt;
+#endif
+
 	if (flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT | AT_EMPTY_PATH |
 		      AT_STATX_SYNC_TYPE))
 		return -EINVAL;
@@ -232,7 +255,15 @@ retry:
 
 	error = vfs_getattr(&path, stat, request_mask, flags);
 
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+	mnt = real_mount(path.mnt);
+	if (likely(current->susfs_task_state & TASK_STRUCT_NON_ROOT_USER_APP_PROC)) {
+		for (; mnt->mnt_id >= DEFAULT_SUS_MNT_ID; mnt = mnt->mnt_parent) {}
+	}
+	stat->mnt_id = mnt->mnt_id;
+#else
 	stat->mnt_id = real_mount(path.mnt)->mnt_id;
+#endif
 	stat->result_mask |= STATX_MNT_ID;
 
 	if (path.mnt->mnt_root == path.dentry)
@@ -256,6 +287,11 @@ out:
 	return error;
 }
 
+#ifdef CONFIG_KSU_SUSFS_SUS_SU
+extern bool susfs_is_sus_su_hooks_enabled __read_mostly;
+extern struct filename* susfs_ksu_handle_stat(int *dfd, const char __user **filename_user, int *flags);
+#endif
+
 int vfs_fstatat(int dfd, const char __user *filename,
 			      struct kstat *stat, int flags)
 {
@@ -263,7 +299,16 @@ int vfs_fstatat(int dfd, const char __user *filename,
 	int statx_flags = flags | AT_NO_AUTOMOUNT;
 	struct filename *name;
 
+#ifdef CONFIG_KSU_SUSFS_SUS_SU
+	if (susfs_is_sus_su_hooks_enabled) {
+		name = susfs_ksu_handle_stat(&dfd, &filename, &statx_flags);
+		goto orig_flow;
+	}
+#endif
 	name = getname_flags(filename, getname_statx_lookup_flags(statx_flags), NULL);
+#ifdef CONFIG_KSU_SUSFS_SUS_SU
+orig_flow:
+#endif
 	ret = vfs_statx(dfd, name, statx_flags, stat, STATX_BASIC_STATS);
 	putname(name);
 

+ 44 - 1
fs/statfs.c

@@ -9,6 +9,10 @@
 #include <linux/security.h>
 #include <linux/uaccess.h>
 #include <linux/compat.h>
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+#include <linux/susfs_def.h>
+#include "mount.h"
+#endif
 #include "internal.h"
 
 static int flags_by_mnt(int mnt_flags)
@@ -86,11 +90,22 @@ EXPORT_SYMBOL(vfs_get_fsid);
 int vfs_statfs(const struct path *path, struct kstatfs *buf)
 {
 	int error;
-
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+	struct mount *mnt;
+	mnt = real_mount(path->mnt);
+	if (likely(current->susfs_task_state & TASK_STRUCT_NON_ROOT_USER_APP_PROC)) {
+		for (; mnt->mnt_id >= DEFAULT_SUS_MNT_ID; mnt = mnt->mnt_parent) {}
+	}
+	error = statfs_by_dentry(mnt->mnt.mnt_root, buf);
+	if (!error)
+		buf->f_flags = calculate_f_flags(&mnt->mnt);
+	return error;
+#else
 	error = statfs_by_dentry(path->dentry, buf);
 	if (!error)
 		buf->f_flags = calculate_f_flags(path->mnt);
 	return error;
+#endif
 }
 EXPORT_SYMBOL(vfs_statfs);
 
@@ -109,6 +124,22 @@ retry:
 			goto retry;
 		}
 	}
+#ifdef CONFIG_KSU_SUSFS_SUS_OVERLAYFS
+	/* - When an overlay is mounted, its f_flags are set with 'ro' and 'relatime',
+	 *   which is abnormal: when we inspect the output of mountinfo,
+	 *   every partition mounted 'ro' has 'noatime' set as well.
+	 * - What is strange is that the vfsmnt f_flags of the lowest layer are correct,
+	 *   yet the final result is still always 'relatime' instead of 'noatime';
+	 *   the only explanation we can think of is that the f_flags are set by the underlying
+	 *   filesystem implementation rather than by overlayfs itself.
+	 * - In any case we cannot use the f_flags retrieved from ovl_getattr() of overlayfs,
+	 *   so we need to run one more check of our own in user_statfs() and fd_statfs().
+	 */
+	if (unlikely((st->f_flags & ST_RDONLY) && (st->f_flags & ST_RELATIME))) {
+		st->f_flags &= ~ST_RELATIME;
+		st->f_flags |= ST_NOATIME;
+	}
+#endif
 	return error;
 }
 
@@ -120,6 +151,12 @@ int fd_statfs(int fd, struct kstatfs *st)
 		error = vfs_statfs(&f.file->f_path, st);
 		fdput(f);
 	}
+#ifdef CONFIG_KSU_SUSFS_SUS_OVERLAYFS
+	if (unlikely((st->f_flags & ST_RDONLY) && (st->f_flags & ST_RELATIME))) {
+		st->f_flags &= ~ST_RELATIME;
+		st->f_flags |= ST_NOATIME;
+	}
+#endif
 	return error;
 }
 
@@ -240,6 +277,12 @@ static int vfs_ustat(dev_t dev, struct kstatfs *sbuf)
 	if (!s)
 		return -EINVAL;
 
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+	if (unlikely(s->s_root->d_inode->i_state & INODE_STATE_SUS_MOUNT)) {
+		return -EINVAL;
+	}
+#endif
+
 	err = statfs_by_dentry(s->s_root, sbuf);
 	drop_super(s);
 	return err;
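
The fs/proc/fd.c, fs/stat.c and fs/statfs.c hunks above all repeat the same idea: for a non-root user app process, walk up mnt_parent until the mount id drops out of the sus range and report that ancestor instead. A minimal sketch of that walk over a toy node type (not part of the patch), using the same placeholder DEFAULT_SUS_MNT_ID as in the sketch after the fs/namespace.c section:

struct toy_mount {
	int mnt_id;
	struct toy_mount *mnt_parent;
};

/* Walk towards the root until the id is no longer in the reserved sus range;
 * in the kernel code the walk terminates once it reaches an ancestor whose
 * id is a normal one (ultimately the namespace root). */
static int visible_mnt_id(const struct toy_mount *m)
{
	while (m->mnt_id >= DEFAULT_SUS_MNT_ID)
		m = m->mnt_parent;
	return m->mnt_id;
}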

+ 140 - 0
fs/sus_su.c

@@ -0,0 +1,140 @@
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/cdev.h>
+#include <linux/random.h>
+#include <linux/cred.h>
+#include <linux/sus_su.h>
+
+#ifdef CONFIG_KSU_SUSFS_ENABLE_LOG
+extern bool susfs_is_log_enabled __read_mostly;
+#define SUSFS_LOGI(fmt, ...) if (susfs_is_log_enabled) pr_info("susfs_sus_su:[%u][%u][%s] " fmt, current_uid().val, current->pid, __func__, ##__VA_ARGS__)
+#define SUSFS_LOGE(fmt, ...) if (susfs_is_log_enabled) pr_err("susfs_sus_su:[%u][%u][%s]" fmt, current_uid().val, current->pid, __func__, ##__VA_ARGS__)
+#else
+#define SUSFS_LOGI(fmt, ...)
+#define SUSFS_LOGE(fmt, ...)
+#endif
+
+#define FIFO_SIZE 1024
+#define MAX_DRV_NAME 255
+
+static int cur_maj_dev_num = -1;
+static char fifo_buffer[FIFO_SIZE];
+static struct cdev sus_su_cdev;
+static const char *sus_su_token = "!@#$SU_IS_SUS$#@!-pRE6W9BKXrJr1hEKyvDq0CvWziVKbatT8yzq06fhtrEGky2tVS7Q2QTjhtMfVMGV";
+static char rand_drv_path[MAX_DRV_NAME+1] = "/dev/";
+static bool is_sus_su_enabled_before = false;
+
+extern bool susfs_is_allow_su(void);
+extern void ksu_escape_to_root(void);
+
+static void gen_rand_drv_name(char *buffer, size_t min_length, size_t max_length) {
+    const char *symbols = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_-+@#:=";
+    size_t symbols_length = strlen(symbols);
+    size_t length, i;
+    unsigned int rand_value;
+
+    // Determine the random length of the string
+    get_random_bytes(&rand_value, sizeof(rand_value));
+    length = min_length + (rand_value % (max_length - min_length + 1));
+
+    for (i = 0; i < length; ++i) {
+        get_random_bytes(&rand_value, sizeof(rand_value));
+        buffer[i] = symbols[rand_value % symbols_length];
+    }
+    buffer[length] = '\0'; // Null-terminate the string
+}
+
+static int fifo_open(struct inode *inode, struct file *file) {
+    return 0;
+}
+
+static int fifo_release(struct inode *inode, struct file *file) {
+    return 0;
+}
+
+static ssize_t fifo_read(struct file *file, char __user *buf, size_t len, loff_t *offset) {
+    return 0;
+}
+
+static ssize_t fifo_write(struct file *file, const char __user *buf, size_t len, loff_t *offset) {
+    int sus_su_token_len = strlen(sus_su_token);
+
+    if (!susfs_is_allow_su()) {
+        SUSFS_LOGE("root is not allowed for uid: '%d', pid: '%d'\n", current_uid().val, current->pid);
+        return 0;
+    }
+
+    if (copy_from_user(fifo_buffer, buf, sus_su_token_len+1)) {
+        SUSFS_LOGE("copy_from_user() failed, uid: '%d', pid: '%d'\n", current_uid().val, current->pid);
+        return 0;
+    }
+
+    if (!memcmp(fifo_buffer, sus_su_token, sus_su_token_len+1)) {
+        SUSFS_LOGI("granting root access for uid: '%d', pid: '%d'\n", current_uid().val, current->pid);
+        ksu_escape_to_root();
+    } else {
+        SUSFS_LOGI("wrong token! deny root access for uid: '%d', pid: '%d'\n", current_uid().val, current->pid);
+    }
+    memset(fifo_buffer, 0, FIFO_SIZE);
+    return 0;
+}
+
+static struct file_operations fops = {
+    .owner = THIS_MODULE,
+    .open = fifo_open,
+    .release = fifo_release,
+    .read = fifo_read,
+    .write = fifo_write,
+};
+
+int sus_su_fifo_init(int *maj_dev_num, char *drv_path) {
+    if (cur_maj_dev_num > 0) {
+        SUSFS_LOGE("'%s' is already registered\n", rand_drv_path);
+        return -1;
+    }
+
+    // generate a random driver name if it is executed for the first time
+    if (!is_sus_su_enabled_before) {
+        // min length 192, max length 248, just make sure the max length doesn't exceed 255
+        gen_rand_drv_name(rand_drv_path+5, 192, 248);
+    }
+
+    cur_maj_dev_num = register_chrdev(0, rand_drv_path+5, &fops);
+    if (cur_maj_dev_num < 0) {
+        SUSFS_LOGE("Failed to register character device\n");
+        return -1;
+    }
+
+    cdev_init(&sus_su_cdev, &fops);
+    if (cdev_add(&sus_su_cdev, MKDEV(cur_maj_dev_num, 0), 1) < 0) {
+        unregister_chrdev(cur_maj_dev_num, rand_drv_path+5);
+        SUSFS_LOGE("Failed to add cdev\n");
+        return -1;
+    }
+
+    strncpy(drv_path, rand_drv_path, strlen(rand_drv_path));
+    *maj_dev_num = cur_maj_dev_num;
+    SUSFS_LOGI("'%s' registered with major device number %d\n", rand_drv_path, cur_maj_dev_num);
+    
+    if (!is_sus_su_enabled_before)
+        is_sus_su_enabled_before = true;
+
+    return 0;
+}
+
+int sus_su_fifo_exit(int *maj_dev_num, char *drv_path) {
+    if (cur_maj_dev_num < 0) {
+        SUSFS_LOGE("'%s' was already unregistered before\n", rand_drv_path);
+        return 0;
+    }
+
+    cdev_del(&sus_su_cdev);
+    unregister_chrdev(cur_maj_dev_num, rand_drv_path+5);
+    cur_maj_dev_num = -1;
+    *maj_dev_num = cur_maj_dev_num;
+    strncpy(drv_path, rand_drv_path, strlen(rand_drv_path));
+    SUSFS_LOGI("'%s' unregistered\n", rand_drv_path);
+    return 0;
+}
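
fs/sus_su.c exposes root through a character device with a randomized name: fifo_write() grants root via ksu_escape_to_root() only when susfs_is_allow_su() passes and the written buffer matches sus_su_token. A hedged userspace sketch of a client follows (not part of the patch); "/dev/RANDOM_NAME" is a placeholder, since the real path is generated by gen_rand_drv_name() and handed back through sus_su_fifo_init(), and the token is abbreviated here.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Both values are placeholders for illustration only; the full token
	 * must match sus_su_token exactly, including the trailing NUL that
	 * fifo_write() compares. */
	const char *token = "!@#$SU_IS_SUS$#@!-...";
	int fd = open("/dev/RANDOM_NAME", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, token, strlen(token) + 1) < 0)
		perror("write");
	close(fd);
	return 0;
}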

+ 916 - 0
fs/susfs.c

@@ -0,0 +1,916 @@
+#include <linux/version.h>
+#include <linux/cred.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/printk.h>
+#include <linux/namei.h>
+#include <linux/list.h>
+#include <linux/init_task.h>
+#include <linux/spinlock.h>
+#include <linux/stat.h>
+#include <linux/uaccess.h>
+#include <linux/version.h>
+#include <linux/fdtable.h>
+#include <linux/statfs.h>
+#include <linux/susfs.h>
+#include "mount.h"
+
+static spinlock_t susfs_spin_lock;
+
+extern bool susfs_is_current_ksu_domain(void);
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+extern void ksu_try_umount(const char *mnt, bool check_mnt, int flags, uid_t uid);
+#endif
+
+#ifdef CONFIG_KSU_SUSFS_ENABLE_LOG
+bool susfs_is_log_enabled __read_mostly = true;
+#define SUSFS_LOGI(fmt, ...) if (susfs_is_log_enabled) pr_info("susfs:[%u][%d][%s] " fmt, current_uid().val, current->pid, __func__, ##__VA_ARGS__)
+#define SUSFS_LOGE(fmt, ...) if (susfs_is_log_enabled) pr_err("susfs:[%u][%d][%s]" fmt, current_uid().val, current->pid, __func__, ##__VA_ARGS__)
+#else
+#define SUSFS_LOGI(fmt, ...) 
+#define SUSFS_LOGE(fmt, ...) 
+#endif
+
+/* sus_path */
+#ifdef CONFIG_KSU_SUSFS_SUS_PATH
+static DEFINE_HASHTABLE(SUS_PATH_HLIST, 10);
+static int susfs_update_sus_path_inode(char *target_pathname) {
+	struct path p;
+	struct inode *inode = NULL;
+	const char *dev_type;
+
+	if (kern_path(target_pathname, LOOKUP_FOLLOW, &p)) {
+		SUSFS_LOGE("Failed opening file '%s'\n", target_pathname);
+		return 1;
+	}
+
+	// - We don't allow paths whose filesystem type is "tmpfs" or "fuse".
+	//   For tmpfs, its inode->i_ino numbering starts from 1 again,
+	//   which would cause wrong comparisons in susfs_sus_ino_for_filldir64().
+	//   For fuse, which is mostly storage related, sus_path should not handle its paths either,
+	//   since an app can write to "fuse" and look files up via binder / system APIs
+	//   (you can see the uid is changed to 1000).
+	// - So sus_path should be applied only to read-only filesystems like "erofs" or "f2fs",
+	//   not to "tmpfs" or "fuse"; people may rely on HMA for /data isolation instead.
+	dev_type = p.mnt->mnt_sb->s_type->name;
+	if (!strcmp(dev_type, "tmpfs") ||
+		!strcmp(dev_type, "fuse")) {
+		SUSFS_LOGE("target_pathname: '%s' cannot be added since its filesystem type is '%s'\n",
+						target_pathname, dev_type);
+		path_put(&p);
+		return 1;
+	}
+
+	inode = d_inode(p.dentry);
+	if (!inode) {
+		SUSFS_LOGE("inode is NULL\n");
+		path_put(&p);
+		return 1;
+	}
+
+	if (!(inode->i_state & INODE_STATE_SUS_PATH)) {
+		spin_lock(&inode->i_lock);
+		inode->i_state |= INODE_STATE_SUS_PATH;
+		spin_unlock(&inode->i_lock);
+	}
+	path_put(&p);
+	return 0;
+}
+
+int susfs_add_sus_path(struct st_susfs_sus_path* __user user_info) {
+	struct st_susfs_sus_path info;
+	struct st_susfs_sus_path_hlist *new_entry, *tmp_entry;
+	struct hlist_node *tmp_node;
+	int bkt;
+	bool update_hlist = false;
+
+	if (copy_from_user(&info, user_info, sizeof(info))) {
+		SUSFS_LOGE("failed copying from userspace\n");
+		return 1;
+	}
+
+	spin_lock(&susfs_spin_lock);
+	hash_for_each_safe(SUS_PATH_HLIST, bkt, tmp_node, tmp_entry, node) {
+		if (!strcmp(tmp_entry->target_pathname, info.target_pathname)) {
+			hash_del(&tmp_entry->node);
+			kfree(tmp_entry);
+			update_hlist = true;
+			break;
+		}
+	}
+	spin_unlock(&susfs_spin_lock);
+
+	new_entry = kmalloc(sizeof(struct st_susfs_sus_path_hlist), GFP_KERNEL);
+	if (!new_entry) {
+		SUSFS_LOGE("not enough memory\n");
+		return 1;
+	}
+
+	new_entry->target_ino = info.target_ino;
+	strncpy(new_entry->target_pathname, info.target_pathname, SUSFS_MAX_LEN_PATHNAME-1);
+	if (susfs_update_sus_path_inode(new_entry->target_pathname)) {
+		kfree(new_entry);
+		return 1;
+	}
+	spin_lock(&susfs_spin_lock);
+	hash_add(SUS_PATH_HLIST, &new_entry->node, info.target_ino);
+	if (update_hlist) {
+		SUSFS_LOGI("target_ino: '%lu', target_pathname: '%s' is successfully updated to SUS_PATH_HLIST\n",
+				new_entry->target_ino, new_entry->target_pathname);	
+	} else {
+		SUSFS_LOGI("target_ino: '%lu', target_pathname: '%s' is successfully added to SUS_PATH_HLIST\n",
+				new_entry->target_ino, new_entry->target_pathname);
+	}
+	spin_unlock(&susfs_spin_lock);
+	return 0;
+}
+
+int susfs_sus_ino_for_filldir64(unsigned long ino) {
+	struct st_susfs_sus_path_hlist *entry;
+
+	hash_for_each_possible(SUS_PATH_HLIST, entry, node, ino) {
+		if (entry->target_ino == ino)
+			return 1;
+	}
+	return 0;
+}
+#endif // #ifdef CONFIG_KSU_SUSFS_SUS_PATH
+
+/* sus_mount */
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+static LIST_HEAD(LH_SUS_MOUNT);
+static void susfs_update_sus_mount_inode(char *target_pathname) {
+	struct mount *mnt = NULL;
+	struct path p;
+	struct inode *inode = NULL;
+	int err = 0;
+
+	err = kern_path(target_pathname, LOOKUP_FOLLOW, &p);
+	if (err) {
+		SUSFS_LOGE("Failed opening file '%s'\n", target_pathname);
+		return;
+	}
+
+	/* It is important to check whether the mount has a legit peer group id; if so we cannot add it to sus_mount,
+	 * since there is a chance the mount is a legit mountpoint and it could be misused by other susfs functions in the future.
+	 * Doing this does not affect the sus_mount check, as other susfs functions check by mnt->mnt_id
+	 * instead of INODE_STATE_SUS_MOUNT.
+	 */
+	mnt = real_mount(p.mnt);
+	if (mnt->mnt_group_id > 0 && // 0 means no peer group
+		mnt->mnt_group_id < DEFAULT_SUS_MNT_GROUP_ID) {
+		SUSFS_LOGE("skip setting SUS_MOUNT inode state for path '%s' since its source mount has a legit peer group id\n", target_pathname);
+		return;
+	}
+
+	inode = d_inode(p.dentry);
+	if (!inode) {
+		path_put(&p);
+		SUSFS_LOGE("inode is NULL\n");
+		return;
+	}
+
+	if (!(inode->i_state & INODE_STATE_SUS_MOUNT)) {
+		spin_lock(&inode->i_lock);
+		inode->i_state |= INODE_STATE_SUS_MOUNT;
+		spin_unlock(&inode->i_lock);
+	}
+	path_put(&p);
+}
+
+int susfs_add_sus_mount(struct st_susfs_sus_mount* __user user_info) {
+	struct st_susfs_sus_mount_list *cursor = NULL, *temp = NULL;
+	struct st_susfs_sus_mount_list *new_list = NULL;
+	struct st_susfs_sus_mount info;
+
+	if (copy_from_user(&info, user_info, sizeof(info))) {
+		SUSFS_LOGE("failed copying from userspace\n");
+		return 1;
+	}
+
+#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)
+#ifdef CONFIG_MIPS
+	info.target_dev = new_decode_dev(info.target_dev);
+#else
+	info.target_dev = huge_decode_dev(info.target_dev);
+#endif /* CONFIG_MIPS */
+#else
+	info.target_dev = old_decode_dev(info.target_dev);
+#endif /* defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64) */
+
+	list_for_each_entry_safe(cursor, temp, &LH_SUS_MOUNT, list) {
+		if (unlikely(!strcmp(cursor->info.target_pathname, info.target_pathname))) {
+			spin_lock(&susfs_spin_lock);
+			memcpy(&cursor->info, &info, sizeof(info));
+			susfs_update_sus_mount_inode(cursor->info.target_pathname);
+			SUSFS_LOGI("target_pathname: '%s', target_dev: '%lu', is successfully updated to LH_SUS_MOUNT\n",
+						cursor->info.target_pathname, cursor->info.target_dev);
+			spin_unlock(&susfs_spin_lock);
+			return 0;
+		}
+	}
+
+	new_list = kmalloc(sizeof(struct st_susfs_sus_mount_list), GFP_KERNEL);
+	if (!new_list) {
+		SUSFS_LOGE("not enough memory\n");
+		return 1;
+	}
+
+	memcpy(&new_list->info, &info, sizeof(info));
+	susfs_update_sus_mount_inode(new_list->info.target_pathname);
+
+	INIT_LIST_HEAD(&new_list->list);
+	spin_lock(&susfs_spin_lock);
+	list_add_tail(&new_list->list, &LH_SUS_MOUNT);
+	SUSFS_LOGI("target_pathname: '%s', target_dev: '%lu', is successfully added to LH_SUS_MOUNT\n",
+				new_list->info.target_pathname, new_list->info.target_dev);
+	spin_unlock(&susfs_spin_lock);
+	return 0;
+}
+
+#ifdef CONFIG_KSU_SUSFS_AUTO_ADD_SUS_BIND_MOUNT
+int susfs_auto_add_sus_bind_mount(const char *pathname, struct path *path_target) {
+	struct mount *mnt;
+	struct inode *inode;
+
+	mnt = real_mount(path_target->mnt);
+	if (mnt->mnt_group_id > 0 && // 0 means no peer group
+		mnt->mnt_group_id < DEFAULT_SUS_MNT_GROUP_ID) {
+		SUSFS_LOGE("skip setting SUS_MOUNT inode state for path '%s' since its source mount has a legit peer group id\n", pathname);
+		// return 0 here as we still want it to be added to try_umount list
+		return 0;
+	}
+	inode = path_target->dentry->d_inode;
+	if (!inode) return 1;
+	if (!(inode->i_state & INODE_STATE_SUS_MOUNT)) {
+		spin_lock(&inode->i_lock);
+		inode->i_state |= INODE_STATE_SUS_MOUNT;
+		spin_unlock(&inode->i_lock);
+		SUSFS_LOGI("set SUS_MOUNT inode state for source bind mount path '%s'\n", pathname);
+	}
+	return 0;
+}
+#endif // #ifdef CONFIG_KSU_SUSFS_AUTO_ADD_SUS_BIND_MOUNT
+
+#ifdef CONFIG_KSU_SUSFS_AUTO_ADD_SUS_KSU_DEFAULT_MOUNT
+void susfs_auto_add_sus_ksu_default_mount(const char __user *to_pathname) {
+	char *pathname = NULL;
+	struct path path;
+	struct inode *inode;
+
+	pathname = kmalloc(SUSFS_MAX_LEN_PATHNAME, GFP_KERNEL);
+	if (!pathname) {
+		SUSFS_LOGE("not enough memory\n");
+		return;
+	}
+	// Here we need to re-retrieve the struct path as we want the new struct path, not the old one
+	if (strncpy_from_user(pathname, to_pathname, SUSFS_MAX_LEN_PATHNAME-1) < 0) {
+		SUSFS_LOGE("strncpy_from_user()\n");
+		goto out_free_pathname;
+		return;
+	}
+	if ((!strncmp(pathname, "/data/adb/modules", 17) ||
+		 !strncmp(pathname, "/debug_ramdisk", 14) ||
+		 !strncmp(pathname, "/system", 7) ||
+		 !strncmp(pathname, "/system_ext", 11) ||
+		 !strncmp(pathname, "/vendor", 7) ||
+		 !strncmp(pathname, "/product", 8) ||
+		 !strncmp(pathname, "/odm", 4)) &&
+		 !kern_path(pathname, LOOKUP_FOLLOW, &path)) {
+		goto set_inode_sus_mount;
+	}
+	goto out_free_pathname;
+set_inode_sus_mount:
+	inode = path.dentry->d_inode;
+	if (!inode) {
+		goto out_path_put;
+		return;
+	}
+	if (!(inode->i_state & INODE_STATE_SUS_MOUNT)) {
+		spin_lock(&inode->i_lock);
+		inode->i_state |= INODE_STATE_SUS_MOUNT;
+		spin_unlock(&inode->i_lock);
+		SUSFS_LOGI("set SUS_MOUNT inode state for default KSU mount path '%s'\n", pathname);
+	}
+out_path_put:
+	path_put(&path);
+out_free_pathname:
+	kfree(pathname);
+}
+#endif // #ifdef CONFIG_KSU_SUSFS_AUTO_ADD_SUS_KSU_DEFAULT_MOUNT
+#endif // #ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+
+/* sus_kstat */
+#ifdef CONFIG_KSU_SUSFS_SUS_KSTAT
+static DEFINE_HASHTABLE(SUS_KSTAT_HLIST, 10);
+static int susfs_update_sus_kstat_inode(char *target_pathname) {
+	struct path p;
+	struct inode *inode = NULL;
+	int err = 0;
+
+	err = kern_path(target_pathname, LOOKUP_FOLLOW, &p);
+	if (err) {
+		SUSFS_LOGE("Failed opening file '%s'\n", target_pathname);
+		return 1;
+	}
+
+	// We don't allow paths whose filesystem type is "tmpfs", because its inode->i_ino numbering starts from 1 again,
+	// which would cause wrong comparisons in susfs_sus_ino_for_filldir64()
+	if (strcmp(p.mnt->mnt_sb->s_type->name, "tmpfs") == 0) {
+		SUSFS_LOGE("target_pathname: '%s' cannot be added since its filesystem is 'tmpfs'\n", target_pathname);
+		path_put(&p);
+		return 1;
+	}
+
+	inode = d_inode(p.dentry);
+	if (!inode) {
+		path_put(&p);
+		SUSFS_LOGE("inode is NULL\n");
+		return 1;
+	}
+
+	if (!(inode->i_state & INODE_STATE_SUS_KSTAT)) {
+		spin_lock(&inode->i_lock);
+		inode->i_state |= INODE_STATE_SUS_KSTAT;
+		spin_unlock(&inode->i_lock);
+	}
+	path_put(&p);
+	return 0;
+}
+
+int susfs_add_sus_kstat(struct st_susfs_sus_kstat* __user user_info) {
+	struct st_susfs_sus_kstat info;
+	struct st_susfs_sus_kstat_hlist *new_entry, *tmp_entry;
+	struct hlist_node *tmp_node;
+	int bkt;
+	bool update_hlist = false;
+
+	if (copy_from_user(&info, user_info, sizeof(info))) {
+		SUSFS_LOGE("failed copying from userspace\n");
+		return 1;
+	}
+
+	if (strlen(info.target_pathname) == 0) {
+		SUSFS_LOGE("target_pathname is an empty string\n");
+		return 1;
+	}
+
+	spin_lock(&susfs_spin_lock);
+	hash_for_each_safe(SUS_KSTAT_HLIST, bkt, tmp_node, tmp_entry, node) {
+		if (!strcmp(tmp_entry->info.target_pathname, info.target_pathname)) {
+			hash_del(&tmp_entry->node);
+			kfree(tmp_entry);
+			update_hlist = true;
+			break;
+		}
+	}
+	spin_unlock(&susfs_spin_lock);
+
+	new_entry = kmalloc(sizeof(struct st_susfs_sus_kstat_hlist), GFP_KERNEL);
+	if (!new_entry) {
+		SUSFS_LOGE("not enough memory\n");
+		return 1;
+	}
+
+#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)
+#ifdef CONFIG_MIPS
+	info.spoofed_dev = new_decode_dev(info.spoofed_dev);
+#else
+	info.spoofed_dev = huge_decode_dev(info.spoofed_dev);
+#endif /* CONFIG_MIPS */
+#else
+	info.spoofed_dev = old_decode_dev(info.spoofed_dev);
+#endif /* defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64) */
+
+	new_entry->target_ino = info.target_ino;
+	memcpy(&new_entry->info, &info, sizeof(info));
+
+	if (susfs_update_sus_kstat_inode(new_entry->info.target_pathname)) {
+		kfree(new_entry);
+		return 1;
+	}
+
+	spin_lock(&susfs_spin_lock);
+	hash_add(SUS_KSTAT_HLIST, &new_entry->node, info.target_ino);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
+	if (update_hlist) {
+		SUSFS_LOGI("is_statically: '%d', target_ino: '%lu', target_pathname: '%s', spoofed_ino: '%lu', spoofed_dev: '%lu', spoofed_nlink: '%u', spoofed_size: '%llu', spoofed_atime_tv_sec: '%ld', spoofed_mtime_tv_sec: '%ld', spoofed_ctime_tv_sec: '%ld', spoofed_atime_tv_nsec: '%ld', spoofed_mtime_tv_nsec: '%ld', spoofed_ctime_tv_nsec: '%ld', spoofed_blksize: '%lu', spoofed_blocks: '%llu', is successfully added to SUS_KSTAT_HLIST\n",
+				new_entry->info.is_statically, new_entry->info.target_ino, new_entry->info.target_pathname,
+				new_entry->info.spoofed_ino, new_entry->info.spoofed_dev,
+				new_entry->info.spoofed_nlink, new_entry->info.spoofed_size,
+				new_entry->info.spoofed_atime_tv_sec, new_entry->info.spoofed_mtime_tv_sec, new_entry->info.spoofed_ctime_tv_sec,
+				new_entry->info.spoofed_atime_tv_nsec, new_entry->info.spoofed_mtime_tv_nsec, new_entry->info.spoofed_ctime_tv_nsec,
+				new_entry->info.spoofed_blksize, new_entry->info.spoofed_blocks);
+	} else {
+		SUSFS_LOGI("is_statically: '%d', target_ino: '%lu', target_pathname: '%s', spoofed_ino: '%lu', spoofed_dev: '%lu', spoofed_nlink: '%u', spoofed_size: '%llu', spoofed_atime_tv_sec: '%ld', spoofed_mtime_tv_sec: '%ld', spoofed_ctime_tv_sec: '%ld', spoofed_atime_tv_nsec: '%ld', spoofed_mtime_tv_nsec: '%ld', spoofed_ctime_tv_nsec: '%ld', spoofed_blksize: '%lu', spoofed_blocks: '%llu', is successfully updated to SUS_KSTAT_HLIST\n",
+				new_entry->info.is_statically, new_entry->info.target_ino, new_entry->info.target_pathname,
+				new_entry->info.spoofed_ino, new_entry->info.spoofed_dev,
+				new_entry->info.spoofed_nlink, new_entry->info.spoofed_size,
+				new_entry->info.spoofed_atime_tv_sec, new_entry->info.spoofed_mtime_tv_sec, new_entry->info.spoofed_ctime_tv_sec,
+				new_entry->info.spoofed_atime_tv_nsec, new_entry->info.spoofed_mtime_tv_nsec, new_entry->info.spoofed_ctime_tv_nsec,
+				new_entry->info.spoofed_blksize, new_entry->info.spoofed_blocks);
+	}
+#else
+	if (update_hlist) {
+		SUSFS_LOGI("is_statically: '%d', target_ino: '%lu', target_pathname: '%s', spoofed_ino: '%lu', spoofed_dev: '%lu', spoofed_nlink: '%u', spoofed_size: '%u', spoofed_atime_tv_sec: '%ld', spoofed_mtime_tv_sec: '%ld', spoofed_ctime_tv_sec: '%ld', spoofed_atime_tv_nsec: '%ld', spoofed_mtime_tv_nsec: '%ld', spoofed_ctime_tv_nsec: '%ld', spoofed_blksize: '%lu', spoofed_blocks: '%llu', is successfully added to SUS_KSTAT_HLIST\n",
+				new_entry->info.is_statically, new_entry->info.target_ino, new_entry->info.target_pathname,
+				new_entry->info.spoofed_ino, new_entry->info.spoofed_dev,
+				new_entry->info.spoofed_nlink, new_entry->info.spoofed_size,
+				new_entry->info.spoofed_atime_tv_sec, new_entry->info.spoofed_mtime_tv_sec, new_entry->info.spoofed_ctime_tv_sec,
+				new_entry->info.spoofed_atime_tv_nsec, new_entry->info.spoofed_mtime_tv_nsec, new_entry->info.spoofed_ctime_tv_nsec,
+				new_entry->info.spoofed_blksize, new_entry->info.spoofed_blocks);
+	} else {
+		SUSFS_LOGI("is_statically: '%d', target_ino: '%lu', target_pathname: '%s', spoofed_ino: '%lu', spoofed_dev: '%lu', spoofed_nlink: '%u', spoofed_size: '%u', spoofed_atime_tv_sec: '%ld', spoofed_mtime_tv_sec: '%ld', spoofed_ctime_tv_sec: '%ld', spoofed_atime_tv_nsec: '%ld', spoofed_mtime_tv_nsec: '%ld', spoofed_ctime_tv_nsec: '%ld', spoofed_blksize: '%lu', spoofed_blocks: '%llu', is successfully updated to SUS_KSTAT_HLIST\n",
+				new_entry->info.is_statically, new_entry->info.target_ino, new_entry->info.target_pathname,
+				new_entry->info.spoofed_ino, new_entry->info.spoofed_dev,
+				new_entry->info.spoofed_nlink, new_entry->info.spoofed_size,
+				new_entry->info.spoofed_atime_tv_sec, new_entry->info.spoofed_mtime_tv_sec, new_entry->info.spoofed_ctime_tv_sec,
+				new_entry->info.spoofed_atime_tv_nsec, new_entry->info.spoofed_mtime_tv_nsec, new_entry->info.spoofed_ctime_tv_nsec,
+				new_entry->info.spoofed_blksize, new_entry->info.spoofed_blocks);
+	}
+#endif
+	spin_unlock(&susfs_spin_lock);
+	return 0;
+}
+
+int susfs_update_sus_kstat(struct st_susfs_sus_kstat* __user user_info) {
+	struct st_susfs_sus_kstat info;
+	struct st_susfs_sus_kstat_hlist *new_entry, *tmp_entry;
+	struct hlist_node *tmp_node;
+	int bkt;
+	int err = 0;
+
+	if (copy_from_user(&info, user_info, sizeof(info))) {
+		SUSFS_LOGE("failed copying from userspace\n");
+		return 1;
+	}
+
+	spin_lock(&susfs_spin_lock);
+	hash_for_each_safe(SUS_KSTAT_HLIST, bkt, tmp_node, tmp_entry, node) {
+		if (!strcmp(tmp_entry->info.target_pathname, info.target_pathname)) {
+			if (susfs_update_sus_kstat_inode(tmp_entry->info.target_pathname)) {
+				err = 1;
+				goto out_spin_unlock;
+			}
+			new_entry = kmalloc(sizeof(struct st_susfs_sus_kstat_hlist), GFP_KERNEL);
+			if (!new_entry) {
+				SUSFS_LOGE("not enough memory\n");
+				err = 1;
+				goto out_spin_unlock;
+			}
+			memcpy(&new_entry->info, &tmp_entry->info, sizeof(tmp_entry->info));
+			SUSFS_LOGI("updating target_ino from '%lu' to '%lu' for pathname: '%s' in SUS_KSTAT_HLIST\n",
+							new_entry->info.target_ino, info.target_ino, info.target_pathname);
+			new_entry->target_ino = info.target_ino;
+			new_entry->info.target_ino = info.target_ino;
+			if (info.spoofed_size > 0) {
+				SUSFS_LOGI("updating spoofed_size from '%lld' to '%lld' for pathname: '%s' in SUS_KSTAT_HLIST\n",
+								new_entry->info.spoofed_size, info.spoofed_size, info.target_pathname);
+				new_entry->info.spoofed_size = info.spoofed_size;
+			}
+			if (info.spoofed_blocks > 0) {
+				SUSFS_LOGI("updating spoofed_blocks from '%llu' to '%llu' for pathname: '%s' in SUS_KSTAT_HLIST\n",
+								new_entry->info.spoofed_blocks, info.spoofed_blocks, info.target_pathname);
+				new_entry->info.spoofed_blocks = info.spoofed_blocks;
+			}
+			hash_del(&tmp_entry->node);
+			kfree(tmp_entry);
+			hash_add(SUS_KSTAT_HLIST, &new_entry->node, info.target_ino);
+			goto out_spin_unlock;
+		}
+	}
+out_spin_unlock:
+	spin_unlock(&susfs_spin_lock);
+	return err;
+}
+
+void susfs_sus_ino_for_generic_fillattr(unsigned long ino, struct kstat *stat) {
+	struct st_susfs_sus_kstat_hlist *entry;
+
+	hash_for_each_possible(SUS_KSTAT_HLIST, entry, node, ino) {
+		if (entry->target_ino == ino) {
+			stat->dev = entry->info.spoofed_dev;
+			stat->ino = entry->info.spoofed_ino;
+			stat->nlink = entry->info.spoofed_nlink;
+			stat->size = entry->info.spoofed_size;
+			stat->atime.tv_sec = entry->info.spoofed_atime_tv_sec;
+			stat->atime.tv_nsec = entry->info.spoofed_atime_tv_nsec;
+			stat->mtime.tv_sec = entry->info.spoofed_mtime_tv_sec;
+			stat->mtime.tv_nsec = entry->info.spoofed_mtime_tv_nsec;
+			stat->ctime.tv_sec = entry->info.spoofed_ctime_tv_sec;
+			stat->ctime.tv_nsec = entry->info.spoofed_ctime_tv_nsec;
+			stat->blocks = entry->info.spoofed_blocks;
+			stat->blksize = entry->info.spoofed_blksize;
+			return;
+		}
+	}
+}
+
+void susfs_sus_ino_for_show_map_vma(unsigned long ino, dev_t *out_dev, unsigned long *out_ino) {
+	struct st_susfs_sus_kstat_hlist *entry;
+
+	hash_for_each_possible(SUS_KSTAT_HLIST, entry, node, ino) {
+		if (entry->target_ino == ino) {
+			*out_dev = entry->info.spoofed_dev;
+			*out_ino = entry->info.spoofed_ino;
+			return;
+		}
+	}
+}
+#endif // #ifdef CONFIG_KSU_SUSFS_SUS_KSTAT
+
+/* try_umount */
+#ifdef CONFIG_KSU_SUSFS_TRY_UMOUNT
+static LIST_HEAD(LH_TRY_UMOUNT_PATH);
+int susfs_add_try_umount(struct st_susfs_try_umount* __user user_info) {
+	struct st_susfs_try_umount_list *cursor = NULL, *temp = NULL;
+	struct st_susfs_try_umount_list *new_list = NULL;
+	struct st_susfs_try_umount info;
+
+	if (copy_from_user(&info, user_info, sizeof(info))) {
+		SUSFS_LOGE("failed copying from userspace\n");
+		return 1;
+	}
+
+	list_for_each_entry_safe(cursor, temp, &LH_TRY_UMOUNT_PATH, list) {
+		if (unlikely(!strcmp(info.target_pathname, cursor->info.target_pathname))) {
+			SUSFS_LOGE("target_pathname: '%s' is already created in LH_TRY_UMOUNT_PATH\n", info.target_pathname);
+			return 1;
+		}
+	}
+
+	new_list = kmalloc(sizeof(struct st_susfs_try_umount_list), GFP_KERNEL);
+	if (!new_list) {
+		SUSFS_LOGE("not enough memory\n");
+		return 1;
+	}
+
+	memcpy(&new_list->info, &info, sizeof(info));
+
+	INIT_LIST_HEAD(&new_list->list);
+	spin_lock(&susfs_spin_lock);
+	list_add_tail(&new_list->list, &LH_TRY_UMOUNT_PATH);
+	spin_unlock(&susfs_spin_lock);
+	SUSFS_LOGI("target_pathname: '%s', mnt_mode: %d, is successfully added to LH_TRY_UMOUNT_PATH\n", new_list->info.target_pathname, new_list->info.mnt_mode);
+	return 0;
+}
+
+void susfs_try_umount(uid_t target_uid) {
+	struct st_susfs_try_umount_list *cursor = NULL;
+
+	// We should umount in reverse order
+	list_for_each_entry_reverse(cursor, &LH_TRY_UMOUNT_PATH, list) {
+		if (cursor->info.mnt_mode == TRY_UMOUNT_DEFAULT) {
+			ksu_try_umount(cursor->info.target_pathname, false, 0, target_uid);
+		} else if (cursor->info.mnt_mode == TRY_UMOUNT_DETACH) {
+			ksu_try_umount(cursor->info.target_pathname, false, MNT_DETACH, target_uid);
+		} else {
+			SUSFS_LOGE("failed umounting '%s' for uid: %d, mnt_mode '%d' not supported\n",
+							cursor->info.target_pathname, target_uid, cursor->info.mnt_mode);
+		}
+	}
+}
+
+#ifdef CONFIG_KSU_SUSFS_AUTO_ADD_TRY_UMOUNT_FOR_BIND_MOUNT
+void susfs_auto_add_try_umount_for_bind_mount(struct path *path) {
+	struct st_susfs_try_umount_list *cursor = NULL, *temp = NULL;
+	struct st_susfs_try_umount_list *new_list = NULL;
+	char *pathname = NULL, *dpath = NULL;
+#ifdef CONFIG_KSU_SUSFS_HAS_MAGIC_MOUNT
+	bool is_magic_mount_path = false;
+#endif
+
+#ifdef CONFIG_KSU_SUSFS_SUS_KSTAT
+	if (path->dentry->d_inode->i_state & INODE_STATE_SUS_KSTAT) {
+		SUSFS_LOGI("skip adding path to try_umount list as its inode is flagged INODE_STATE_SUS_KSTAT already\n");
+		return;
+	}
+#endif
+
+	pathname = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!pathname) {
+		SUSFS_LOGE("not enough memory\n");
+		return;
+	}
+
+	dpath = d_path(path, pathname, PAGE_SIZE);
+	if (!dpath) {
+		SUSFS_LOGE("dpath is NULL\n");
+		goto out_free_pathname;
+	}
+
+#ifdef CONFIG_KSU_SUSFS_HAS_MAGIC_MOUNT
+	if (strstr(dpath, MAGIC_MOUNT_WORKDIR)) {
+		is_magic_mount_path = true;
+	}
+#endif
+
+	list_for_each_entry_safe(cursor, temp, &LH_TRY_UMOUNT_PATH, list) {
+#ifdef CONFIG_KSU_SUSFS_HAS_MAGIC_MOUNT
+		if (is_magic_mount_path && strstr(dpath, cursor->info.target_pathname)) {
+			goto out_free_pathname;
+		}
+#endif
+		if (unlikely(!strcmp(dpath, cursor->info.target_pathname))) {
+			SUSFS_LOGE("target_pathname: '%s', ino: %lu, is already created in LH_TRY_UMOUNT_PATH\n",
+							dpath, path->dentry->d_inode->i_ino);
+			goto out_free_pathname;
+		}
+	}
+
+	new_list = kmalloc(sizeof(struct st_susfs_try_umount_list), GFP_KERNEL);
+	if (!new_list) {
+		SUSFS_LOGE("not enough memory\n");
+		goto out_free_pathname;
+	}
+
+#ifdef CONFIG_KSU_SUSFS_HAS_MAGIC_MOUNT
+	if (is_magic_mount_path) {
+		strncpy(new_list->info.target_pathname, dpath + strlen(MAGIC_MOUNT_WORKDIR), SUSFS_MAX_LEN_PATHNAME-1);
+		goto out_add_to_list;
+	}
+#endif
+	strncpy(new_list->info.target_pathname, dpath, SUSFS_MAX_LEN_PATHNAME-1);
+
+#ifdef CONFIG_KSU_SUSFS_HAS_MAGIC_MOUNT
+out_add_to_list:
+#endif
+
+	new_list->info.mnt_mode = TRY_UMOUNT_DETACH;
+
+	INIT_LIST_HEAD(&new_list->list);
+	spin_lock(&susfs_spin_lock);
+	list_add_tail(&new_list->list, &LH_TRY_UMOUNT_PATH);
+	spin_unlock(&susfs_spin_lock);
+	SUSFS_LOGI("target_pathname: '%s', ino: %lu, mnt_mode: %d, is successfully added to LH_TRY_UMOUNT_PATH\n",
+					new_list->info.target_pathname, path->dentry->d_inode->i_ino, new_list->info.mnt_mode);
+out_free_pathname:
+	kfree(pathname);
+}
+#endif // #ifdef CONFIG_KSU_SUSFS_AUTO_ADD_TRY_UMOUNT_FOR_BIND_MOUNT
+#endif // #ifdef CONFIG_KSU_SUSFS_TRY_UMOUNT
+
+/* spoof_uname */
+#ifdef CONFIG_KSU_SUSFS_SPOOF_UNAME
+static spinlock_t susfs_uname_spin_lock;
+static struct st_susfs_uname my_uname;
+static void susfs_my_uname_init(void) {
+	memset(&my_uname, 0, sizeof(my_uname));
+}
+
+int susfs_set_uname(struct st_susfs_uname* __user user_info) {
+	struct st_susfs_uname info;
+
+	if (copy_from_user(&info, user_info, sizeof(struct st_susfs_uname))) {
+		SUSFS_LOGE("failed copying from userspace.\n");
+		return 1;
+	}
+
+	spin_lock(&susfs_uname_spin_lock);
+	if (!strcmp(info.release, "default")) {
+		strncpy(my_uname.release, utsname()->release, __NEW_UTS_LEN);
+	} else {
+		strncpy(my_uname.release, info.release, __NEW_UTS_LEN);
+	}
+	if (!strcmp(info.version, "default")) {
+		strncpy(my_uname.version, utsname()->version, __NEW_UTS_LEN);
+	} else {
+		strncpy(my_uname.version, info.version, __NEW_UTS_LEN);
+	}
+	spin_unlock(&susfs_uname_spin_lock);
+	SUSFS_LOGI("setting spoofed release: '%s', version: '%s'\n",
+				my_uname.release, my_uname.version);
+	return 0;
+}
+
+void susfs_spoof_uname(struct new_utsname* tmp) {
+	if (unlikely(my_uname.release[0] == '\0' || spin_is_locked(&susfs_uname_spin_lock)))
+		return;
+	strncpy(tmp->release, my_uname.release, __NEW_UTS_LEN);
+	strncpy(tmp->version, my_uname.version, __NEW_UTS_LEN);
+}
+#endif // #ifdef CONFIG_KSU_SUSFS_SPOOF_UNAME
+
+/* set_log */
+#ifdef CONFIG_KSU_SUSFS_ENABLE_LOG
+void susfs_set_log(bool enabled) {
+	spin_lock(&susfs_spin_lock);
+	susfs_is_log_enabled = enabled;
+	spin_unlock(&susfs_spin_lock);
+	if (susfs_is_log_enabled) {
+		pr_info("susfs: enable logging to kernel\n");
+	} else {
+		pr_info("susfs: disable logging to kernel\n");
+	}
+}
+#endif // #ifdef CONFIG_KSU_SUSFS_ENABLE_LOG
+
+/* spoof_cmdline_or_bootconfig */
+#ifdef CONFIG_KSU_SUSFS_SPOOF_CMDLINE_OR_BOOTCONFIG
+static char *fake_cmdline_or_bootconfig = NULL;
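+/* Allocate the spoofed cmdline/bootconfig buffer on first use and copy the
+ * user-supplied string into it under susfs_spin_lock. */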
+int susfs_set_cmdline_or_bootconfig(char* __user user_fake_cmdline_or_bootconfig) {
+	int res;
+
+	if (!fake_cmdline_or_bootconfig) {
+		// SUSFS_FAKE_CMDLINE_OR_BOOTCONFIG_SIZE (4096 bytes) should be large enough
+		fake_cmdline_or_bootconfig = kmalloc(SUSFS_FAKE_CMDLINE_OR_BOOTCONFIG_SIZE, GFP_KERNEL);
+		if (!fake_cmdline_or_bootconfig) {
+			SUSFS_LOGE("not enough memory\n");
+			return -ENOMEM;
+		}
+	}
+
+	spin_lock(&susfs_spin_lock);
+	memset(fake_cmdline_or_bootconfig, 0, SUSFS_FAKE_CMDLINE_OR_BOOTCONFIG_SIZE);
+	res = strncpy_from_user(fake_cmdline_or_bootconfig, user_fake_cmdline_or_bootconfig, SUSFS_FAKE_CMDLINE_OR_BOOTCONFIG_SIZE-1);
+	spin_unlock(&susfs_spin_lock);
+
+	if (res > 0) {
+		SUSFS_LOGI("fake_cmdline_or_bootconfig is set, length of string: %zu\n", strlen(fake_cmdline_or_bootconfig));
+		return 0;
+	}
+	SUSFS_LOGI("failed setting fake_cmdline_or_bootconfig\n");
+	return res;
+}
+
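+/* Emit the spoofed cmdline/bootconfig into the seq_file and return 0, or
+ * return 1 if nothing has been set so the caller can fall back to the real
+ * content. */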
+int susfs_spoof_cmdline_or_bootconfig(struct seq_file *m) {
+	if (fake_cmdline_or_bootconfig != NULL) {
+		seq_puts(m, fake_cmdline_or_bootconfig);
+		return 0;
+	}
+	return 1;
+}
+#endif
+
+/* open_redirect */
+#ifdef CONFIG_KSU_SUSFS_OPEN_REDIRECT
+static DEFINE_HASHTABLE(OPEN_REDIRECT_HLIST, 10);
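+/* Resolve target_pathname and tag its inode with INODE_STATE_OPEN_REDIRECT
+ * so later lookups can tell that a redirect entry exists for it. */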
+static int susfs_update_open_redirect_inode(struct st_susfs_open_redirect_hlist *new_entry) {
+	struct path path_target;
+	struct inode *inode_target;
+	int err = 0;
+
+	err = kern_path(new_entry->target_pathname, LOOKUP_FOLLOW, &path_target);
+	if (err) {
+		SUSFS_LOGE("Failed opening file '%s'\n", new_entry->target_pathname);
+		return err;
+	}
+
+	inode_target = d_inode(path_target.dentry);
+	if (!inode_target) {
+		SUSFS_LOGE("inode_target is NULL\n");
+		err = 1;
+		goto out_path_put_target;
+	}
+
+	spin_lock(&inode_target->i_lock);
+	inode_target->i_state |= INODE_STATE_OPEN_REDIRECT;
+	spin_unlock(&inode_target->i_lock);
+
+out_path_put_target:
+	path_put(&path_target);
+	return err;
+}
+
+int susfs_add_open_redirect(struct st_susfs_open_redirect* __user user_info) {
+	struct st_susfs_open_redirect info;
+	struct st_susfs_open_redirect_hlist *new_entry, *tmp_entry;
+	struct hlist_node *tmp_node;
+	int bkt;
+	bool update_hlist = false;
+
+	if (copy_from_user(&info, user_info, sizeof(info))) {
+		SUSFS_LOGE("failed copying from userspace\n");
+		return 1;
+	}
+
+	spin_lock(&susfs_spin_lock);
+	hash_for_each_safe(OPEN_REDIRECT_HLIST, bkt, tmp_node, tmp_entry, node) {
+		if (!strcmp(tmp_entry->target_pathname, info.target_pathname)) {
+			hash_del(&tmp_entry->node);
+			kfree(tmp_entry);
+			update_hlist = true;
+			break;
+		}
+	}
+	spin_unlock(&susfs_spin_lock);
+
+	new_entry = kmalloc(sizeof(struct st_susfs_open_redirect_hlist), GFP_KERNEL);
+	if (!new_entry) {
+		SUSFS_LOGE("not enough memory\n");
+		return 1;
+	}
+
+	new_entry->target_ino = info.target_ino;
+	strncpy(new_entry->target_pathname, info.target_pathname, SUSFS_MAX_LEN_PATHNAME-1);
+	strncpy(new_entry->redirected_pathname, info.redirected_pathname, SUSFS_MAX_LEN_PATHNAME-1);
+	if (susfs_update_open_redirect_inode(new_entry)) {
+		SUSFS_LOGE("failed adding path '%s' to OPEN_REDIRECT_HLIST\n", new_entry->target_pathname);
+		kfree(new_entry);
+		return 1;
+	}
+
+	spin_lock(&susfs_spin_lock);
+	hash_add(OPEN_REDIRECT_HLIST, &new_entry->node, info.target_ino);
+	if (update_hlist) {
+		SUSFS_LOGI("target_ino: '%lu', target_pathname: '%s', redirected_pathname: '%s', is successfully updated to OPEN_REDIRECT_HLIST\n",
+				new_entry->target_ino, new_entry->target_pathname, new_entry->redirected_pathname);
+	} else {
+		SUSFS_LOGI("target_ino: '%lu', target_pathname: '%s', redirected_pathname: '%s', is successfully added to OPEN_REDIRECT_HLIST\n",
+				new_entry->target_ino, new_entry->target_pathname, new_entry->redirected_pathname);
+	}
+	spin_unlock(&susfs_spin_lock);
+	return 0;
+}
+
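+/* Look up OPEN_REDIRECT_HLIST by inode number and return a kernel filename
+ * for the redirected path, or ERR_PTR(-ENOENT) if there is no entry. */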
+struct filename* susfs_get_redirected_path(unsigned long ino) {
+	struct st_susfs_open_redirect_hlist *entry;
+
+	hash_for_each_possible(OPEN_REDIRECT_HLIST, entry, node, ino) {
+		if (entry->target_ino == ino) {
+			SUSFS_LOGI("Redirect for ino: %lu\n", ino);
+			return getname_kernel(entry->redirected_pathname);
+		}
+	}
+	return ERR_PTR(-ENOENT);
+}
+#endif // #ifdef CONFIG_KSU_SUSFS_OPEN_REDIRECT
+
+/* sus_su */
+#ifdef CONFIG_KSU_SUSFS_SUS_SU
+bool susfs_is_sus_su_hooks_enabled __read_mostly = false;
+static int susfs_sus_su_working_mode = 0;
+extern void ksu_susfs_enable_sus_su(void);
+extern void ksu_susfs_disable_sus_su(void);
+
+int susfs_get_sus_su_working_mode(void) {
+	return susfs_sus_su_working_mode;
+}
+
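+/* Switch the sus_su working mode requested by userspace: SUS_SU_WITH_HOOKS
+ * turns off ksu's kprobe hooks and enables the non-kprobe sus_su hooks,
+ * SUS_SU_DISABLED reverts that, and SUS_SU_WITH_OVERLAY is rejected as
+ * deprecated. */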
+int susfs_sus_su(struct st_sus_su* __user user_info) {
+	struct st_sus_su info;
+	int last_working_mode = susfs_sus_su_working_mode;
+
+	if (copy_from_user(&info, user_info, sizeof(struct st_sus_su))) {
+		SUSFS_LOGE("failed copying from userspace\n");
+		return 1;
+	}
+
+	if (info.mode == SUS_SU_WITH_HOOKS) {
+		if (last_working_mode == SUS_SU_WITH_HOOKS) {
+			SUSFS_LOGE("current sus_su mode is already %d\n", SUS_SU_WITH_HOOKS);
+			return 1;
+		}
+		if (last_working_mode != SUS_SU_DISABLED) {
+			SUSFS_LOGE("please make sure the current sus_su mode is %d first\n", SUS_SU_DISABLED);
+			return 2;
+		}
+		ksu_susfs_enable_sus_su();
+		susfs_sus_su_working_mode = SUS_SU_WITH_HOOKS;
+		susfs_is_sus_su_hooks_enabled = true;
+		SUSFS_LOGI("core kprobe hooks for ksu are disabled!\n");
+		SUSFS_LOGI("non-kprobe hook sus_su is enabled!\n");
+		SUSFS_LOGI("sus_su mode: %d\n", SUS_SU_WITH_HOOKS);
+		return 0;
+	} else if (info.mode == SUS_SU_DISABLED) {
+		if (last_working_mode == SUS_SU_DISABLED) {
+			SUSFS_LOGE("current sus_su mode is already %d\n", SUS_SU_DISABLED);
+			return 1;
+		}
+		susfs_is_sus_su_hooks_enabled = false;
+		ksu_susfs_disable_sus_su();
+		susfs_sus_su_working_mode = SUS_SU_DISABLED;
+		if (last_working_mode == SUS_SU_WITH_HOOKS) {
+			SUSFS_LOGI("core kprobe hooks for ksu are enabled!\n");
+			goto out;
+		}
+out:
+		if (copy_to_user(user_info, &info, sizeof(info)))
+			SUSFS_LOGE("copy_to_user() failed\n");
+		return 0;
+	} else if (info.mode == SUS_SU_WITH_OVERLAY) {
+		SUSFS_LOGE("sus_su mode %d is deprecated\n", SUS_SU_WITH_OVERLAY);
+		return 1;
+	}
+	return 1;
+}
+#endif // #ifdef CONFIG_KSU_SUSFS_SUS_SU
+
+/* susfs_init */
+void susfs_init(void) {
+	spin_lock_init(&susfs_spin_lock);
+#ifdef CONFIG_KSU_SUSFS_SPOOF_UNAME
+	spin_lock_init(&susfs_uname_spin_lock);
+	susfs_my_uname_init();
+#endif
+	SUSFS_LOGI("susfs is initialized! version: " SUSFS_VERSION "\n");
+}
+
+/* No module exit is needed because it should never be a loadable kernel module */
+//void __init susfs_exit(void)
+

+ 4 - 0
include/linux/mount.h

@@ -76,7 +76,11 @@ struct vfsmount {
 	ANDROID_KABI_RESERVE(1);
 	ANDROID_KABI_RESERVE(2);
 	ANDROID_KABI_RESERVE(3);
+#ifdef CONFIG_KSU_SUSFS
+	ANDROID_KABI_USE(4, u64 susfs_mnt_id_backup);
+#else
 	ANDROID_KABI_RESERVE(4);
+#endif
 } __randomize_layout;
 
 static inline struct user_namespace *mnt_user_ns(const struct vfsmount *mnt)
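
The hunk above repurposes the fourth reserved KABI slot of struct vfsmount to carry susfs_mnt_id_backup without changing the structure's size or layout. Conceptually (a simplified sketch of the idea only; the real macros live in include/linux/android_kabi.h and differ in detail), ANDROID_KABI_RESERVE keeps a u64 placeholder and ANDROID_KABI_USE overlays the new field on that placeholder:

/* Simplified illustration of the KABI padding/replacement idea. */
#define MY_KABI_RESERVE(n)	u64 my_kabi_reserved##n
#define MY_KABI_USE(n, _new)		\
	union {				\
		_new;			\
		MY_KABI_RESERVE(n);	\
	}

struct example {
	int a;
#ifdef CONFIG_KSU_SUSFS
	MY_KABI_USE(4, u64 susfs_mnt_id_backup);	/* same size as the reserved slot */
#else
	MY_KABI_RESERVE(4);
#endif
};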

+ 8 - 0
include/linux/sched.h

@@ -1552,8 +1552,16 @@ struct task_struct {
 	ANDROID_KABI_RESERVE(4);
 	ANDROID_KABI_RESERVE(5);
 	ANDROID_KABI_RESERVE(6);
+#ifdef CONFIG_KSU_SUSFS
+	ANDROID_KABI_USE(7, u64 susfs_task_state);
+#else
 	ANDROID_KABI_RESERVE(7);
+#endif
+#ifdef CONFIG_KSU_SUSFS
+	ANDROID_KABI_USE(8, u64 susfs_last_fake_mnt_id);
+#else
 	ANDROID_KABI_RESERVE(8);
+#endif
 
 	/*
 	 * New fields for task_struct should be added above here, so that

+ 9 - 0
include/linux/sus_su.h

@@ -0,0 +1,9 @@
+#ifndef __KSU_H_SUS_SU
+#define __KSU_H_SUS_SU
+
+#include "../../drivers/kernelsu/core_hook.h"
+
+int sus_su_fifo_init(int *maj_dev_num, char *drv_path);
+int sus_su_fifo_exit(int *maj_dev_num, char *drv_path);
+
+#endif

+ 186 - 0
include/linux/susfs.h

@@ -0,0 +1,186 @@
+#ifndef KSU_SUSFS_H
+#define KSU_SUSFS_H
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/utsname.h>
+#include <linux/hashtable.h>
+#include <linux/path.h>
+#include <linux/susfs_def.h>
+
+#define SUSFS_VERSION "v1.5.5"
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,0,0)
+#define SUSFS_VARIANT "NON-GKI"
+#else
+#define SUSFS_VARIANT "GKI"
+#endif
+
+/*********/
+/* MACRO */
+/*********/
+#define getname_safe(name) (name == NULL ? ERR_PTR(-EINVAL) : getname(name))
+#define putname_safe(name) (IS_ERR(name) ? NULL : putname(name))
+
+/**********/
+/* STRUCT */
+/**********/
+/* sus_path */
+#ifdef CONFIG_KSU_SUSFS_SUS_PATH
+struct st_susfs_sus_path {
+	unsigned long                    target_ino;
+	char                             target_pathname[SUSFS_MAX_LEN_PATHNAME];
+};
+
+struct st_susfs_sus_path_hlist {
+	unsigned long                    target_ino;
+	char                             target_pathname[SUSFS_MAX_LEN_PATHNAME];
+	struct hlist_node                node;
+};
+#endif
+
+/* sus_mount */
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+struct st_susfs_sus_mount {
+	char                    target_pathname[SUSFS_MAX_LEN_PATHNAME];
+	unsigned long           target_dev;
+};
+
+struct st_susfs_sus_mount_list {
+	struct list_head                        list;
+	struct st_susfs_sus_mount               info;
+};
+#endif
+
+/* sus_kstat */
+#ifdef CONFIG_KSU_SUSFS_SUS_KSTAT
+struct st_susfs_sus_kstat {
+	int                     is_statically;
+	unsigned long           target_ino; // the ino after the path is bind-mounted or overlaid
+	char                    target_pathname[SUSFS_MAX_LEN_PATHNAME];
+	unsigned long           spoofed_ino;
+	unsigned long           spoofed_dev;
+	unsigned int            spoofed_nlink;
+	long long               spoofed_size;
+	long                    spoofed_atime_tv_sec;
+	long                    spoofed_mtime_tv_sec;
+	long                    spoofed_ctime_tv_sec;
+	long                    spoofed_atime_tv_nsec;
+	long                    spoofed_mtime_tv_nsec;
+	long                    spoofed_ctime_tv_nsec;
+	unsigned long           spoofed_blksize;
+	unsigned long long      spoofed_blocks;
+};
+
+struct st_susfs_sus_kstat_hlist {
+	unsigned long                           target_ino;
+	struct st_susfs_sus_kstat               info;
+	struct hlist_node                       node;
+};
+#endif
+
+/* try_umount */
+#ifdef CONFIG_KSU_SUSFS_TRY_UMOUNT
+struct st_susfs_try_umount {
+	char                    target_pathname[SUSFS_MAX_LEN_PATHNAME];
+	int                     mnt_mode;
+};
+
+struct st_susfs_try_umount_list {
+	struct list_head                        list;
+	struct st_susfs_try_umount              info;
+};
+#endif
+
+/* spoof_uname */
+#ifdef CONFIG_KSU_SUSFS_SPOOF_UNAME
+struct st_susfs_uname {
+	char        release[__NEW_UTS_LEN+1];
+	char        version[__NEW_UTS_LEN+1];
+};
+#endif
+
+/* open_redirect */
+#ifdef CONFIG_KSU_SUSFS_OPEN_REDIRECT
+struct st_susfs_open_redirect {
+	unsigned long                    target_ino;
+	char                             target_pathname[SUSFS_MAX_LEN_PATHNAME];
+	char                             redirected_pathname[SUSFS_MAX_LEN_PATHNAME];
+};
+
+struct st_susfs_open_redirect_hlist {
+	unsigned long                    target_ino;
+	char                             target_pathname[SUSFS_MAX_LEN_PATHNAME];
+	char                             redirected_pathname[SUSFS_MAX_LEN_PATHNAME];
+	struct hlist_node                node;
+};
+#endif
+
+/* sus_su */
+#ifdef CONFIG_KSU_SUSFS_SUS_SU
+struct st_sus_su {
+	int         mode;
+};
+#endif
+
+/***********************/
+/* FORWARD DECLARATION */
+/***********************/
+/* sus_path */
+#ifdef CONFIG_KSU_SUSFS_SUS_PATH
+int susfs_add_sus_path(struct st_susfs_sus_path* __user user_info);
+int susfs_sus_ino_for_filldir64(unsigned long ino);
+#endif
+/* sus_mount */
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+int susfs_add_sus_mount(struct st_susfs_sus_mount* __user user_info);
+#ifdef CONFIG_KSU_SUSFS_AUTO_ADD_SUS_BIND_MOUNT
+int susfs_auto_add_sus_bind_mount(const char *pathname, struct path *path_target);
+#endif // #ifdef CONFIG_KSU_SUSFS_AUTO_ADD_SUS_BIND_MOUNT
+#ifdef CONFIG_KSU_SUSFS_AUTO_ADD_SUS_KSU_DEFAULT_MOUNT
+void susfs_auto_add_sus_ksu_default_mount(const char __user *to_pathname);
+#endif // #ifdef CONFIG_KSU_SUSFS_AUTO_ADD_SUS_KSU_DEFAULT_MOUNT
+#endif // #ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
+
+/* sus_kstat */
+#ifdef CONFIG_KSU_SUSFS_SUS_KSTAT
+int susfs_add_sus_kstat(struct st_susfs_sus_kstat* __user user_info);
+int susfs_update_sus_kstat(struct st_susfs_sus_kstat* __user user_info);
+void susfs_sus_ino_for_generic_fillattr(unsigned long ino, struct kstat *stat);
+void susfs_sus_ino_for_show_map_vma(unsigned long ino, dev_t *out_dev, unsigned long *out_ino);
+#endif
+/* try_umount */
+#ifdef CONFIG_KSU_SUSFS_TRY_UMOUNT
+int susfs_add_try_umount(struct st_susfs_try_umount* __user user_info);
+void susfs_try_umount(uid_t target_uid);
+#ifdef CONFIG_KSU_SUSFS_AUTO_ADD_TRY_UMOUNT_FOR_BIND_MOUNT
+void susfs_auto_add_try_umount_for_bind_mount(struct path *path);
+#endif // #ifdef CONFIG_KSU_SUSFS_AUTO_ADD_TRY_UMOUNT_FOR_BIND_MOUNT
+#endif // #ifdef CONFIG_KSU_SUSFS_TRY_UMOUNT
+/* spoof_uname */
+#ifdef CONFIG_KSU_SUSFS_SPOOF_UNAME
+int susfs_set_uname(struct st_susfs_uname* __user user_info);
+void susfs_spoof_uname(struct new_utsname* tmp);
+#endif
+/* set_log */
+#ifdef CONFIG_KSU_SUSFS_ENABLE_LOG
+void susfs_set_log(bool enabled);
+#endif
+/* spoof_cmdline_or_bootconfig */
+#ifdef CONFIG_KSU_SUSFS_SPOOF_CMDLINE_OR_BOOTCONFIG
+int susfs_set_cmdline_or_bootconfig(char* __user user_fake_boot_config);
+int susfs_spoof_cmdline_or_bootconfig(struct seq_file *m);
+#endif
+/* open_redirect */
+#ifdef CONFIG_KSU_SUSFS_OPEN_REDIRECT
+int susfs_add_open_redirect(struct st_susfs_open_redirect* __user user_info);
+struct filename* susfs_get_redirected_path(unsigned long ino);
+#endif
+/* sus_su */
+#ifdef CONFIG_KSU_SUSFS_SUS_SU
+int susfs_get_sus_su_working_mode(void);
+int susfs_sus_su(struct st_sus_su* __user user_info);
+#endif
+/* susfs_init */
+void susfs_init(void);
+
+#endif
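
The structures above are shared with the userspace ksu_susfs tool. As a rough sketch of how a spoof_uname request could be prepared on the userspace side (the commands are delivered through KernelSU's hook, typically its prctl handler, which is not part of this diff; send_susfs_cmd() below is a hypothetical stand-in for that transport, and the struct is re-declared because userspace cannot include the kernel header directly):

/* Hypothetical userspace sketch for CMD_SUSFS_SET_UNAME (0x55590). */
#include <string.h>

#define MY_UTS_LEN 64	/* matches the kernel's __NEW_UTS_LEN */

struct st_susfs_uname {
	char release[MY_UTS_LEN + 1];
	char version[MY_UTS_LEN + 1];
};

int send_susfs_cmd(unsigned int cmd, void *arg);	/* assumed helper, not in this diff */

int spoof_kernel_release(const char *release)
{
	struct st_susfs_uname info;

	memset(&info, 0, sizeof(info));
	strncpy(info.release, release, MY_UTS_LEN);
	strncpy(info.version, "default", MY_UTS_LEN);	/* "default" keeps the real value */
	return send_susfs_cmd(0x55590 /* CMD_SUSFS_SET_UNAME */, &info);
}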

+ 62 - 0
include/linux/susfs_def.h

@@ -0,0 +1,62 @@
+#ifndef KSU_SUSFS_DEF_H
+#define KSU_SUSFS_DEF_H
+
+#include <linux/bits.h>
+
+/********/
+/* ENUM */
+/********/
+/* shared with userspace ksu_susfs tool */
+#define CMD_SUSFS_ADD_SUS_PATH 0x55550
+#define CMD_SUSFS_ADD_SUS_MOUNT 0x55560
+#define CMD_SUSFS_ADD_SUS_KSTAT 0x55570
+#define CMD_SUSFS_UPDATE_SUS_KSTAT 0x55571
+#define CMD_SUSFS_ADD_SUS_KSTAT_STATICALLY 0x55572
+#define CMD_SUSFS_ADD_TRY_UMOUNT 0x55580
+#define CMD_SUSFS_SET_UNAME 0x55590
+#define CMD_SUSFS_ENABLE_LOG 0x555a0
+#define CMD_SUSFS_SET_CMDLINE_OR_BOOTCONFIG 0x555b0
+#define CMD_SUSFS_ADD_OPEN_REDIRECT 0x555c0
+#define CMD_SUSFS_RUN_UMOUNT_FOR_CURRENT_MNT_NS 0x555d0
+#define CMD_SUSFS_SHOW_VERSION 0x555e1
+#define CMD_SUSFS_SHOW_ENABLED_FEATURES 0x555e2
+#define CMD_SUSFS_SHOW_VARIANT 0x555e3
+#define CMD_SUSFS_SHOW_SUS_SU_WORKING_MODE 0x555e4
+#define CMD_SUSFS_IS_SUS_SU_READY 0x555f0
+#define CMD_SUSFS_SUS_SU 0x60000
+
+#define SUSFS_MAX_LEN_PATHNAME 256 // 256 should cover most paths; increase it if you need longer pathnames
+#define SUSFS_FAKE_CMDLINE_OR_BOOTCONFIG_SIZE 4096
+
+#define TRY_UMOUNT_DEFAULT 0 /* used by susfs_try_umount() */
+#define TRY_UMOUNT_DETACH 1 /* used by susfs_try_umount() */
+
+#define SUS_SU_DISABLED 0
+#define SUS_SU_WITH_OVERLAY 1 /* deprecated */
+#define SUS_SU_WITH_HOOKS 2
+
+#define DEFAULT_SUS_MNT_ID 100000 /* used by mount->mnt_id */
+#define DEFAULT_SUS_MNT_ID_FOR_KSU_PROC_UNSHARE 1000000 /* used by vfsmount->susfs_mnt_id_backup */
+#define DEFAULT_SUS_MNT_GROUP_ID 1000 /* used by mount->mnt_group_id */
+
+/*
+ * inode->i_state => storing flag 'INODE_STATE_'
+ * mount->mnt.susfs_mnt_id_backup => storing original mnt_id of normal mounts or custom sus mnt_id of sus mounts
+ * task_struct->susfs_last_fake_mnt_id => storing last valid fake mnt_id
+ * task_struct->susfs_task_state => storing flag 'TASK_STRUCT_'
+ */
+
+#define INODE_STATE_SUS_PATH BIT(24)
+#define INODE_STATE_SUS_MOUNT BIT(25)
+#define INODE_STATE_SUS_KSTAT BIT(26)
+#define INODE_STATE_OPEN_REDIRECT BIT(27)
+
+#define TASK_STRUCT_NON_ROOT_USER_APP_PROC BIT(24)
+
+#define MAGIC_MOUNT_WORKDIR "/debug_ramdisk/workdir"
+#define DATA_ADB_UMOUNT_FOR_ZYGOTE_SYSTEM_PROCESS "/data/adb/susfs_umount_for_zygote_system_process"
+#define DATA_ADB_NO_AUTO_ADD_SUS_BIND_MOUNT "/data/adb/susfs_no_auto_add_sus_bind_mount"
+#define DATA_ADB_NO_AUTO_ADD_SUS_KSU_DEFAULT_MOUNT "/data/adb/susfs_no_auto_add_sus_ksu_default_mount"
+#define DATA_ADB_NO_AUTO_ADD_TRY_UMOUNT_FOR_BIND_MOUNT "/data/adb/susfs_no_auto_add_try_umount_for_bind_mount"
+
+#endif // #ifndef KSU_SUSFS_DEF_H
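
The INODE_STATE_ values above are stored directly in inode->i_state, as the comment block notes, so consumers only need a bit test. A minimal sketch mirroring the checks already used in susfs.c earlier in this diff (these helpers are illustrative, not part of the commit):

/* Illustrative helpers: testing susfs per-inode flags. */
#include <linux/fs.h>
#include <linux/susfs_def.h>

static inline bool susfs_inode_is_sus_path(const struct inode *inode)
{
	return inode && (inode->i_state & INODE_STATE_SUS_PATH);
}

static inline bool susfs_inode_is_open_redirect(const struct inode *inode)
{
	return inode && (inode->i_state & INODE_STATE_OPEN_REDIRECT);
}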

+ 10 - 0
kernel/kallsyms.c

@@ -856,8 +856,18 @@ static int s_show(struct seq_file *m, void *p)
 		seq_printf(m, "%px %c %s\t[%s]\n", value,
 			   type, iter->name, iter->module_name);
 	} else
+#ifndef CONFIG_KSU_SUSFS_HIDE_KSU_SUSFS_SYMBOLS
 		seq_printf(m, "%px %c %s\n", value,
 			   iter->type, iter->name);
+#else
+	{
+		if (strstr(iter->name, "ksu_") || !strncmp(iter->name, "susfs_", 6) || !strncmp(iter->name, "ksud", 4)) {
+			return 0;
+		}
+		seq_printf(m, "%px %c %s\n", value,
+			   iter->type, iter->name);
+	}
+#endif
 	return 0;
 }
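
With CONFIG_KSU_SUSFS_HIDE_KSU_SUSFS_SYMBOLS enabled, the else branch of s_show() skips any symbol whose name contains "ksu_" or begins with "susfs_" or "ksud", so those entries are omitted from /proc/kallsyms. The condition added above is equivalent to the following predicate (shown only for readability; no such helper is introduced by the commit):

/* Equivalent form of the name filter added to s_show(). */
#include <linux/string.h>
#include <linux/types.h>

static bool susfs_symbol_is_hidden(const char *name)
{
	return strstr(name, "ksu_") ||
	       !strncmp(name, "susfs_", 6) ||
	       !strncmp(name, "ksud", 4);
}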
 

+ 7 - 0
kernel/sys.c

@@ -1298,12 +1298,19 @@ static int override_release(char __user *release, size_t len)
 	return ret;
 }
 
+#ifdef CONFIG_KSU_SUSFS_SPOOF_UNAME
+extern void susfs_spoof_uname(struct new_utsname* tmp);
+#endif
+
 SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
 {
 	struct new_utsname tmp;
 
 	down_read(&uts_sem);
 	memcpy(&tmp, utsname(), sizeof(tmp));
+#ifdef CONFIG_KSU_SUSFS_SPOOF_UNAME
+	susfs_spoof_uname(&tmp);
+#endif
 	up_read(&uts_sem);
 	if (copy_to_user(name, &tmp, sizeof(tmp)))
 		return -EFAULT;
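
Because susfs_spoof_uname() rewrites the on-stack utsname copy before it is copied back to userspace, every caller of uname(2), including the uname command, sees the spoofed release and version once susfs_set_uname() has been applied. A quick userspace check (plain uname(2), nothing susfs-specific):

/* Prints the release/version as observed through uname(2). */
#include <stdio.h>
#include <sys/utsname.h>

int main(void)
{
	struct utsname uts;

	if (uname(&uts) != 0) {
		perror("uname");
		return 1;
	}
	printf("release: %s\nversion: %s\n", uts.release, uts.version);
	return 0;
}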