x86/percpu: Move irq_stack variables next to current_task
Further extend struct pcpu_hot with the hard and soft irq stack pointers.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220915111145.599170752@infradead.org
committed by Peter Zijlstra
parent c063a217bc
commit d7b6d709a7
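For orientation before the hunks: after this patch, struct pcpu_hot should read roughly as the sketch below. It is reconstructed from the first hunk; the current_task member and anything outside the hunk's context lines are assumptions based on the subject line and the parent commit.

    /* Sketch, not a verbatim tree excerpt: fields outside the hunk
     * context (notably current_task) are assumed from the commit
     * subject and parent commit c063a217bc.
     */
    struct pcpu_hot {
            union {
                    struct {
                            struct task_struct      *current_task;
                            int                     preempt_count;
                            int                     cpu_number;
                            unsigned long           top_of_stack;
                            void                    *hardirq_stack_ptr;
    #ifdef CONFIG_X86_64
                            bool                    hardirq_stack_inuse;
    #else
                            void                    *softirq_stack_ptr;
    #endif
                    };
                    u8      pad[64];
            };
    };

The 64-bit/32-bit split mirrors the old standalone variables: x86-64 only needs to track whether the hardirq stack is in use, while 32-bit keeps a separate softirq stack pointer.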
--- a/arch/x86/include/asm/current.h
+++ b/arch/x86/include/asm/current.h
@@ -18,6 +18,12 @@ struct pcpu_hot {
 			int			preempt_count;
 			int			cpu_number;
 			unsigned long		top_of_stack;
+			void			*hardirq_stack_ptr;
+#ifdef CONFIG_X86_64
+			bool			hardirq_stack_inuse;
+#else
+			void			*softirq_stack_ptr;
+#endif
 		};
 		u8	pad[64];
 	};
--- a/arch/x86/include/asm/irq_stack.h
+++ b/arch/x86/include/asm/irq_stack.h
@@ -116,7 +116,7 @@
 	ASM_CALL_ARG2
 
 #define call_on_irqstack(func, asm_call, argconstr...)			\
-	call_on_stack(__this_cpu_read(hardirq_stack_ptr),		\
+	call_on_stack(__this_cpu_read(pcpu_hot.hardirq_stack_ptr),	\
 		      func, asm_call, argconstr)
 
 /* Macros to assert type correctness for run_*_on_irqstack macros */
@@ -135,7 +135,7 @@
 	 * User mode entry and interrupt on the irq stack do not	\
 	 * switch stacks. If from user mode the task stack is empty.	\
 	 */								\
-	if (user_mode(regs) || __this_cpu_read(hardirq_stack_inuse)) {	\
+	if (user_mode(regs) || __this_cpu_read(pcpu_hot.hardirq_stack_inuse)) { \
 		irq_enter_rcu();					\
 		func(c_args);						\
 		irq_exit_rcu();						\
@@ -146,9 +146,9 @@
 	 * places. Invoke the stack switch macro with the call		\
 	 * sequence which matches the above direct invocation.		\
 	 */								\
-	__this_cpu_write(hardirq_stack_inuse, true);			\
+	__this_cpu_write(pcpu_hot.hardirq_stack_inuse, true);		\
 	call_on_irqstack(func, asm_call, constr);			\
-	__this_cpu_write(hardirq_stack_inuse, false);			\
+	__this_cpu_write(pcpu_hot.hardirq_stack_inuse, false);		\
 	}								\
 }
 
@@ -212,9 +212,9 @@
  */
 #define do_softirq_own_stack()						\
 {									\
-	__this_cpu_write(hardirq_stack_inuse, true);			\
+	__this_cpu_write(pcpu_hot.hardirq_stack_inuse, true);		\
 	call_on_irqstack(__do_softirq, ASM_CALL_ARG0);			\
-	__this_cpu_write(hardirq_stack_inuse, false);			\
+	__this_cpu_write(pcpu_hot.hardirq_stack_inuse, false);		\
 }
 
 #endif
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -448,8 +448,6 @@ static inline unsigned long cpu_kernelmode_gs_base(int cpu)
 	return (unsigned long)per_cpu(fixed_percpu_data.gs_base, cpu);
 }
 
-DECLARE_PER_CPU(void *, hardirq_stack_ptr);
-DECLARE_PER_CPU(bool, hardirq_stack_inuse);
 extern asmlinkage void ignore_sysret(void);
 
 /* Save actual FS/GS selectors and bases to current->thread */
@@ -458,8 +456,6 @@ void current_save_fsgs(void);
 #ifdef CONFIG_STACKPROTECTOR
 DECLARE_PER_CPU(unsigned long, __stack_chk_guard);
 #endif
-DECLARE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
-DECLARE_PER_CPU(struct irq_stack *, softirq_stack_ptr);
 #endif	/* !X86_64 */
 
 struct perf_event;
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -2024,9 +2024,6 @@ DEFINE_PER_CPU_FIRST(struct fixed_percpu_data,
 		     fixed_percpu_data) __aligned(PAGE_SIZE) __visible;
 EXPORT_PER_CPU_SYMBOL_GPL(fixed_percpu_data);
 
-DEFINE_PER_CPU(void *, hardirq_stack_ptr);
-DEFINE_PER_CPU(bool, hardirq_stack_inuse);
-
 static void wrmsrl_cstar(unsigned long val)
 {
 	/*
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -37,7 +37,7 @@ const char *stack_type_name(enum stack_type type)
 
 static bool in_hardirq_stack(unsigned long *stack, struct stack_info *info)
 {
-	unsigned long *begin = (unsigned long *)this_cpu_read(hardirq_stack_ptr);
+	unsigned long *begin = (unsigned long *)this_cpu_read(pcpu_hot.hardirq_stack_ptr);
 	unsigned long *end = begin + (THREAD_SIZE / sizeof(long));
 
 	/*
@@ -62,7 +62,7 @@ static bool in_hardirq_stack(unsigned long *stack, struct stack_info *info)
 
 static bool in_softirq_stack(unsigned long *stack, struct stack_info *info)
 {
-	unsigned long *begin = (unsigned long *)this_cpu_read(softirq_stack_ptr);
+	unsigned long *begin = (unsigned long *)this_cpu_read(pcpu_hot.softirq_stack_ptr);
 	unsigned long *end = begin + (THREAD_SIZE / sizeof(long));
 
 	/*
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -134,7 +134,7 @@ static __always_inline bool in_exception_stack(unsigned long *stack, struct stac
 
 static __always_inline bool in_irq_stack(unsigned long *stack, struct stack_info *info)
 {
-	unsigned long *end = (unsigned long *)this_cpu_read(hardirq_stack_ptr);
+	unsigned long *end = (unsigned long *)this_cpu_read(pcpu_hot.hardirq_stack_ptr);
 	unsigned long *begin;
 
 	/*
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -52,9 +52,6 @@ static inline int check_stack_overflow(void) { return 0; }
 static inline void print_stack_overflow(void) { }
 #endif
 
-DEFINE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
-DEFINE_PER_CPU(struct irq_stack *, softirq_stack_ptr);
-
 static void call_on_stack(void *func, void *stack)
 {
 	asm volatile("xchgl	%%ebx,%%esp	\n"
@@ -77,7 +74,7 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
 	u32 *isp, *prev_esp, arg1;
 
 	curstk = (struct irq_stack *) current_stack();
-	irqstk = __this_cpu_read(hardirq_stack_ptr);
+	irqstk = __this_cpu_read(pcpu_hot.hardirq_stack_ptr);
 
 	/*
	 * this is where we switch to the IRQ stack. However, if we are
@@ -115,7 +112,7 @@ int irq_init_percpu_irqstack(unsigned int cpu)
 	int node = cpu_to_node(cpu);
 	struct page *ph, *ps;
 
-	if (per_cpu(hardirq_stack_ptr, cpu))
+	if (per_cpu(pcpu_hot.hardirq_stack_ptr, cpu))
 		return 0;
 
 	ph = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER);
@@ -127,8 +124,8 @@ int irq_init_percpu_irqstack(unsigned int cpu)
 		return -ENOMEM;
 	}
 
-	per_cpu(hardirq_stack_ptr, cpu) = page_address(ph);
-	per_cpu(softirq_stack_ptr, cpu) = page_address(ps);
+	per_cpu(pcpu_hot.hardirq_stack_ptr, cpu) = page_address(ph);
+	per_cpu(pcpu_hot.softirq_stack_ptr, cpu) = page_address(ps);
 	return 0;
 }
 
@@ -138,7 +135,7 @@ void do_softirq_own_stack(void)
 	struct irq_stack *irqstk;
 	u32 *isp, *prev_esp;
 
-	irqstk = __this_cpu_read(softirq_stack_ptr);
+	irqstk = __this_cpu_read(pcpu_hot.softirq_stack_ptr);
 
 	/* build the stack frame on the softirq stack */
 	isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -50,7 +50,7 @@ static int map_irq_stack(unsigned int cpu)
 		return -ENOMEM;
 
 	/* Store actual TOS to avoid adjustment in the hotpath */
-	per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;
+	per_cpu(pcpu_hot.hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;
 	return 0;
 }
 #else
@@ -63,14 +63,14 @@ static int map_irq_stack(unsigned int cpu)
 	void *va = per_cpu_ptr(&irq_stack_backing_store, cpu);
 
 	/* Store actual TOS to avoid adjustment in the hotpath */
-	per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;
+	per_cpu(pcpu_hot.hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;
 	return 0;
 }
 #endif
 
 int irq_init_percpu_irqstack(unsigned int cpu)
 {
-	if (per_cpu(hardirq_stack_ptr, cpu))
+	if (per_cpu(pcpu_hot.hardirq_stack_ptr, cpu))
 		return 0;
 	return map_irq_stack(cpu);
 }
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -563,7 +563,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	int cpu = smp_processor_id();
 
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
-		     this_cpu_read(hardirq_stack_inuse));
+		     this_cpu_read(pcpu_hot.hardirq_stack_inuse));
 
 	if (!test_thread_flag(TIF_NEED_FPU_LOAD))
 		switch_fpu_prepare(prev_fpu, cpu);
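A note on the design this series relies on: struct pcpu_hot packs the kernel's hottest per-CPU variables into a single 64-byte, cache-line-sized unit, so moving the irq stack pointers into it keeps them on the same cache line as current_task. A minimal userspace analogue of the sizing trick (hypothetical names, C11 with a GCC-style alignment attribute, not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    /* Userspace analogue of the pcpu_hot layout: the union with a
     * 64-byte pad caps the struct at one cache line, and the static
     * assert catches anyone growing it past that.
     */
    struct hot_data {
            union {
                    struct {
                            void            *current_task;
                            int             preempt_count;
                            int             cpu_number;
                            unsigned long   top_of_stack;
                            void            *hardirq_stack_ptr;
                            bool            hardirq_stack_inuse;
                    };
                    unsigned char   pad[64];
            };
    } __attribute__((__aligned__(64)));

    _Static_assert(sizeof(struct hot_data) == 64,
                   "hot_data must stay within one cache line");

    int main(void)
    {
            printf("sizeof(struct hot_data) = %zu\n",
                   sizeof(struct hot_data));
            return 0;
    }

Compiled with a C11 compiler this prints 64; if a new member ever pushed the anonymous struct past the pad, the union would grow and the assert would fire at build time.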