mirror of
https://github.com/torvalds/linux.git
synced 2025-12-07 20:06:24 +00:00
sched: Move MM CID related functions to sched.h
There is nothing mm-specific in these functions, and including mm.h can cause header-recursion hell. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Signed-off-by: Ingo Molnar <mingo@kernel.org> Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> Link: https://patch.msgid.link/20251027084306.778457951@linutronix.de
This commit is contained in:
committed by
Ingo Molnar
parent
7702a9c285
commit
4fc9225d19
@@ -2401,31 +2401,6 @@ struct zap_details {
|
||||
/* Set in unmap_vmas() to indicate a final unmap call. Only used by hugetlb */
|
||||
#define ZAP_FLAG_UNMAP ((__force zap_flags_t) BIT(1))
|
||||
|
||||
#ifdef CONFIG_SCHED_MM_CID
|
||||
void sched_mm_cid_before_execve(struct task_struct *t);
|
||||
void sched_mm_cid_after_execve(struct task_struct *t);
|
||||
void sched_mm_cid_fork(struct task_struct *t);
|
||||
void sched_mm_cid_exit_signals(struct task_struct *t);
|
||||
/* Return the mm_cid value stored on @t (valid when CONFIG_SCHED_MM_CID=y). */
static inline int task_mm_cid(struct task_struct *t)
|
||||
{
|
||||
return t->mm_cid;
|
||||
}
|
||||
#else
|
||||
static inline void sched_mm_cid_before_execve(struct task_struct *t) { }
|
||||
static inline void sched_mm_cid_after_execve(struct task_struct *t) { }
|
||||
static inline void sched_mm_cid_fork(struct task_struct *t) { }
|
||||
static inline void sched_mm_cid_exit_signals(struct task_struct *t) { }
|
||||
/* !CONFIG_SCHED_MM_CID stub: no per-mm cid is maintained. */
static inline int task_mm_cid(struct task_struct *t)
|
||||
{
|
||||
/*
|
||||
 * Use the processor id as a fall-back when the mm cid feature is
|
||||
 * disabled. This provides functional per-cpu data structure accesses
|
||||
 * in user-space, although it won't provide the memory usage benefits.
|
||||
 */
|
||||
return raw_smp_processor_id();
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
extern bool can_do_mlock(void);
|
||||
#else
|
||||
|
||||
@@ -2310,6 +2310,32 @@ static __always_inline void alloc_tag_restore(struct alloc_tag *tag, struct allo
|
||||
#define alloc_tag_restore(_tag, _old) do {} while (0)
|
||||
#endif
|
||||
|
||||
/* Avoids recursive inclusion hell */
|
||||
#ifdef CONFIG_SCHED_MM_CID
|
||||
void sched_mm_cid_before_execve(struct task_struct *t);
|
||||
void sched_mm_cid_after_execve(struct task_struct *t);
|
||||
void sched_mm_cid_fork(struct task_struct *t);
|
||||
void sched_mm_cid_exit_signals(struct task_struct *t);
|
||||
/* Return the mm_cid value stored on @t (valid when CONFIG_SCHED_MM_CID=y). */
static inline int task_mm_cid(struct task_struct *t)
|
||||
{
|
||||
return t->mm_cid;
|
||||
}
|
||||
#else
|
||||
static inline void sched_mm_cid_before_execve(struct task_struct *t) { }
|
||||
static inline void sched_mm_cid_after_execve(struct task_struct *t) { }
|
||||
static inline void sched_mm_cid_fork(struct task_struct *t) { }
|
||||
static inline void sched_mm_cid_exit_signals(struct task_struct *t) { }
|
||||
/* !CONFIG_SCHED_MM_CID stub: no per-mm cid is maintained. */
static inline int task_mm_cid(struct task_struct *t)
|
||||
{
|
||||
/*
|
||||
 * Use the processor id as a fall-back when the mm cid feature is
|
||||
 * disabled. This provides functional per-cpu data structure accesses
|
||||
 * in user-space, although it won't provide the memory usage benefits.
|
||||
 */
|
||||
return task_cpu(t);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef MODULE
|
||||
#ifndef COMPILE_OFFSETS
|
||||
|
||||
|
||||
Reference in New Issue
Block a user