@@ -2192,7 +2192,7 @@ static inline void post_schedule(struct rq *rq)
  * schedule_tail - first thing a freshly forked thread must call.
  * @prev: the thread we just switched away from.
  */
-asmlinkage void schedule_tail(struct task_struct *prev)
+asmlinkage __visible void schedule_tail(struct task_struct *prev)
 	__releases(rq->lock)
 {
 	struct rq *rq = this_rq();
@@ -2741,7 +2741,7 @@ static inline void sched_submit_work(struct task_struct *tsk)
 		blk_schedule_flush_plug(tsk);
 }
 
-asmlinkage void __sched schedule(void)
+asmlinkage __visible void __sched schedule(void)
 {
 	struct task_struct *tsk = current;
 
@@ -2751,7 +2751,7 @@ asmlinkage void __sched schedule(void)
 EXPORT_SYMBOL(schedule);
 
 #ifdef CONFIG_CONTEXT_TRACKING
-asmlinkage void __sched schedule_user(void)
+asmlinkage __visible void __sched schedule_user(void)
 {
 	/*
 	 * If we come here after a random call to set_need_resched(),
@@ -2783,7 +2783,7 @@ void __sched schedule_preempt_disabled(void)
  * off of preempt_enable. Kernel preemptions off return from interrupt
  * occur there and call schedule directly.
  */
-asmlinkage void __sched notrace preempt_schedule(void)
+asmlinkage __visible void __sched notrace preempt_schedule(void)
 {
 	/*
 	 * If there is a non-zero preempt_count or interrupts are disabled,
@@ -2813,7 +2813,7 @@ EXPORT_SYMBOL(preempt_schedule);
 * Note, that this is called and return with irqs disabled. This will
 * protect us against recursive calling from irq.
 */
-asmlinkage void __sched preempt_schedule_irq(void)
+asmlinkage __visible void __sched preempt_schedule_irq(void)
 {
 	enum ctx_state prev_state;
 
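For context on the one-word change above: `__visible` maps to GCC's `externally_visible` attribute (defined in include/linux/compiler-gcc4.h for gcc >= 4.6). It tells the optimizer that a symbol is referenced from outside what it can see, here the assembly entry code that calls these scheduler functions, so link-time optimization must not internalize or discard them. A minimal sketch of the failure mode, assuming an x86-64 GNU toolchain; `my_entry_point` is a hypothetical stand-in, not a kernel symbol:

	/* Roughly what the kernel's annotation expands to on gcc >= 4.6. */
	#define __visible __attribute__((externally_visible))

	/*
	 * Like schedule() or schedule_tail(), this function's only caller
	 * is assembly code, which LTO's call-graph analysis cannot see.
	 * Without __visible, building with -flto may drop or internalize
	 * the symbol, and the asm call then fails to resolve at link time.
	 */
	__visible void my_entry_point(void)
	{
		/* work done on behalf of the assembly caller */
	}

	int main(void)
	{
		/* The only reference is from inline asm, invisible to LTO. */
		asm volatile("call my_entry_point");
		return 0;
	}

This is why the patch touches exactly the asmlinkage functions: they share the property of being entered from assembly rather than from C code the compiler can analyze.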