
ANDROID: Sched: Export scheduler symbols needed by vendor modules

Export internal scheduler symbols so that vendor modules can implement
scheduler-based value-adds.

Bug: 173725277
Change-Id: I021f09097dfc1480abcc998cc8e05e75b2ee828b
Signed-off-by: Shaleen Agrawal <[email protected]>
Signed-off-by: Pavankumar Kondeti <[email protected]>
Shaleen Agrawal, 4 years ago
commit 5a920a6503
8 changed files with 10 additions and 1 deletion:
  1. drivers/base/arch_topology.c  (+1, -0)
  2. kernel/cgroup/cgroup.c        (+1, -0)
  3. kernel/fork.c                 (+1, -0)
  4. kernel/irq_work.c             (+1, -1)
  5. kernel/kthread.c              (+1, -0)
  6. kernel/sched/core.c           (+3, -0)
  7. kernel/sched/cputime.c        (+1, -0)
  8. kernel/softirq.c              (+1, -0)

drivers/base/arch_topology.c (+1, -0)

@@ -33,6 +33,7 @@ __weak bool arch_freq_counters_available(const struct cpumask *cpus)
 	return false;
 }
 DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
+EXPORT_PER_CPU_SYMBOL_GPL(freq_scale);
 
 void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
 			     unsigned long max_freq)
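
For illustration, a minimal sketch of how a vendor module might consume the
newly exported per-CPU freq_scale, which reads SCHED_CAPACITY_SCALE (1024)
when the CPU runs at its maximum frequency (the vendor_* name is
hypothetical):

#include <linux/arch_topology.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/sched/topology.h>

static void vendor_log_freq_scale(int cpu)
{
        unsigned long scale = per_cpu(freq_scale, cpu);

        pr_info("cpu%d freq_scale=%lu/%lu\n", cpu, scale,
                (unsigned long)SCHED_CAPACITY_SCALE);
}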

kernel/cgroup/cgroup.c (+1, -0)

@@ -4166,6 +4166,7 @@ struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
 		return next;
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(css_next_child);
 
 /**
  * css_next_descendant_pre - find the next descendant for pre-order walk
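
For illustration, the exported css_next_child() can back a module-side RCU
walk of a cgroup's live children, mirroring the in-kernel
css_for_each_child() iterator (the vendor_* name is hypothetical):

#include <linux/cgroup.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>

static void vendor_walk_children(struct cgroup_subsys_state *parent)
{
        struct cgroup_subsys_state *child = NULL;

        rcu_read_lock();
        while ((child = css_next_child(child, parent)))
                pr_info("child css id=%d\n", child->id);
        rcu_read_unlock();
}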

kernel/fork.c (+1, -0)

@@ -139,6 +139,7 @@ static const char * const resident_page_types[] = {
 DEFINE_PER_CPU(unsigned long, process_counts) = 0;
 
 __cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
+EXPORT_SYMBOL_GPL(tasklist_lock);
 
 #ifdef CONFIG_PROVE_RCU
 int lockdep_tasklist_lock_is_held(void)
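
For illustration, exporting tasklist_lock lets a module take the reader side
of the lock while walking every task in the system, a common pattern for
scheduler value-adds (the vendor_* name is hypothetical):

#include <linux/printk.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>

static void vendor_dump_tasks(void)
{
        struct task_struct *g, *p;

        read_lock(&tasklist_lock);
        for_each_process_thread(g, p)
                pr_info("pid=%d comm=%s\n", p->pid, p->comm);
        read_unlock(&tasklist_lock);
}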

kernel/irq_work.c (+1, -1)

@@ -111,7 +111,7 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
 	return true;
 #endif /* CONFIG_SMP */
 }
-
+EXPORT_SYMBOL_GPL(irq_work_queue_on);
 
 bool irq_work_needs_cpu(void)
 {
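
For illustration, with irq_work_queue_on() exported a module can raise a
hard-IRQ-context callback on a chosen remote CPU (the vendor_* names are
hypothetical):

#include <linux/irq_work.h>
#include <linux/printk.h>
#include <linux/smp.h>

static void vendor_ipi_fn(struct irq_work *work)
{
        /* Runs in hard-IRQ context on the target CPU. */
        pr_info("irq_work ran on cpu%d\n", smp_processor_id());
}

static struct irq_work vendor_work;

static void vendor_kick_cpu(int cpu)
{
        /* In real code, init once (e.g. at module load), not per call. */
        init_irq_work(&vendor_work, vendor_ipi_fn);
        irq_work_queue_on(&vendor_work, cpu);
}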

kernel/kthread.c (+1, -0)

@@ -455,6 +455,7 @@ void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
 {
 	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
 }
+EXPORT_SYMBOL_GPL(kthread_bind_mask);
 
 /**
  * kthread_bind - bind a just-created kthread to a cpu.
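
For illustration, kthread_bind_mask() lets a module pin its own kthread to a
CPU mask before the thread first runs (the vendor_* names are hypothetical):

#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>

static int vendor_thread_fn(void *data)
{
        while (!kthread_should_stop())
                schedule_timeout_interruptible(HZ);
        return 0;
}

static struct task_struct *vendor_start_bound_thread(const struct cpumask *mask)
{
        struct task_struct *t;

        t = kthread_create(vendor_thread_fn, NULL, "vendor_worker");
        if (!IS_ERR(t)) {
                kthread_bind_mask(t, mask);     /* before the first wakeup */
                wake_up_process(t);
        }
        return t;
}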

kernel/sched/core.c (+3, -0)

@@ -201,6 +201,7 @@ struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 			cpu_relax();
 	}
 }
+EXPORT_SYMBOL_GPL(__task_rq_lock);
 
 /*
  * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
@@ -7086,7 +7087,9 @@ int in_sched_functions(unsigned long addr)
  * Every task in system belongs to this group at bootup.
  */
 struct task_group root_task_group;
+EXPORT_SYMBOL_GPL(root_task_group);
 LIST_HEAD(task_groups);
+EXPORT_SYMBOL_GPL(task_groups);
 
 /* Cacheline aligned slab cache for task_group */
 static struct kmem_cache *task_group_cache __read_mostly;
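
For illustration, with task_groups and root_task_group exported a module can
walk every task group under RCU. This sketch assumes the module builds
against the private scheduler header kernel/sched/sched.h, which defines
struct task_group (the vendor_* name is hypothetical):

#include "sched.h"      /* private kernel/sched/sched.h (assumed available) */

static void vendor_walk_task_groups(void)
{
        struct task_group *tg;

        rcu_read_lock();
        list_for_each_entry_rcu(tg, &task_groups, list)
                pr_info("tg=%p root=%d\n", tg, tg == &root_task_group);
        rcu_read_unlock();
}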

kernel/sched/cputime.c (+1, -0)

@@ -19,6 +19,7 @@
  * compromise in place of having locks on each irq in account_system_time.
  */
 DEFINE_PER_CPU(struct irqtime, cpu_irqtime);
+EXPORT_PER_CPU_SYMBOL_GPL(cpu_irqtime);
 
 static int sched_clock_irqtime;
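
For illustration, a module built against the private kernel/sched/sched.h can
read a CPU's accumulated IRQ time via the irq_time_read() helper, which does
a u64_stats-protected read of the exported cpu_irqtime; this requires
CONFIG_IRQ_TIME_ACCOUNTING=y (the vendor_* name is hypothetical):

#include "sched.h"      /* private kernel/sched/sched.h (assumed available) */

static u64 vendor_cpu_irq_time_ns(int cpu)
{
        return irq_time_read(cpu);
}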
 

kernel/softirq.c (+1, -0)

@@ -55,6 +55,7 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
 static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
 
 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
+EXPORT_PER_CPU_SYMBOL_GPL(ksoftirqd);
 
 /*
  * active_softirqs -- per cpu, a mask of softirqs that are being handled,
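
For illustration, exporting the ksoftirqd per-CPU pointer lets a module tell
whether it is running in a CPU's softirq daemon, mirroring the kernel's own
internal __this_cpu_read(ksoftirqd) == current check (the vendor_* name is
hypothetical):

#include <linux/interrupt.h>
#include <linux/sched.h>

static bool vendor_in_ksoftirqd(void)
{
        return this_cpu_read(ksoftirqd) == current;
}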