mirror of
https://github.com/torvalds/linux.git
synced 2025-12-07 20:06:24 +00:00
Merge tag 'locking-core-2020-08-03' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
- LKMM updates: mostly documentation changes, but also some new litmus
tests for atomic ops.
- KCSAN updates: the most important change is that GCC 11 now has all
fixes in place to support KCSAN, so GCC support can be enabled again.
Also more annotations.
- futex updates: minor cleanups and simplifications
- seqlock updates: merge preparatory changes/cleanups for the
'associated locks' facilities.
- lockdep updates:
- simplify IRQ trace event handling
- add various new debug checks
- simplify header dependencies, split out <linux/lockdep_types.h>,
decouple lockdep from other low level headers some more
- fix NMI handling
- misc cleanups and smaller fixes
* tag 'locking-core-2020-08-03' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (60 commits)
kcsan: Improve IRQ state trace reporting
lockdep: Refactor IRQ trace events fields into struct
seqlock: lockdep assert non-preemptibility on seqcount_t write
lockdep: Add preemption enabled/disabled assertion APIs
seqlock: Implement raw_seqcount_begin() in terms of raw_read_seqcount()
seqlock: Add kernel-doc for seqcount_t and seqlock_t APIs
seqlock: Reorder seqcount_t and seqlock_t API definitions
seqlock: seqcount_t latch: End read sections with read_seqcount_retry()
seqlock: Properly format kernel-doc code samples
Documentation: locking: Describe seqlock design and usage
locking/qspinlock: Do not include atomic.h from qspinlock_types.h
locking/atomic: Move ATOMIC_INIT into linux/types.h
lockdep: Move list.h inclusion into lockdep.h
locking/lockdep: Fix TRACE_IRQFLAGS vs. NMIs
futex: Remove unused or redundant includes
futex: Consistently use fshared as boolean
futex: Remove needless goto's
futex: Remove put_futex_key()
rwsem: fix commas in initialisation
docs: locking: Replace HTTP links with HTTPS ones
...
This commit is contained in:
@@ -395,7 +395,7 @@ void lockdep_init_task(struct task_struct *task)
 
 static __always_inline void lockdep_recursion_finish(void)
 {
-	if (WARN_ON_ONCE(--current->lockdep_recursion))
+	if (WARN_ON_ONCE((--current->lockdep_recursion) & LOCKDEP_RECURSION_MASK))
 		current->lockdep_recursion = 0;
 }
 
@@ -2062,9 +2062,9 @@ print_bad_irq_dependency(struct task_struct *curr,
 	pr_warn("-----------------------------------------------------\n");
 	pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
 		curr->comm, task_pid_nr(curr),
-		curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
+		lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT,
 		curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
-		curr->hardirqs_enabled,
+		lockdep_hardirqs_enabled(),
 		curr->softirqs_enabled);
 	print_lock(next);
 
@@ -3331,9 +3331,9 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
 
 	pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
 		curr->comm, task_pid_nr(curr),
-		lockdep_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
+		lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT,
 		lockdep_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
-		lockdep_hardirqs_enabled(curr),
+		lockdep_hardirqs_enabled(),
 		lockdep_softirqs_enabled(curr));
 	print_lock(this);
 
@@ -3484,19 +3484,21 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
 
 void print_irqtrace_events(struct task_struct *curr)
 {
-	printk("irq event stamp: %u\n", curr->irq_events);
+	const struct irqtrace_events *trace = &curr->irqtrace;
+
+	printk("irq event stamp: %u\n", trace->irq_events);
 	printk("hardirqs last enabled at (%u): [<%px>] %pS\n",
-		curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip,
-		(void *)curr->hardirq_enable_ip);
+		trace->hardirq_enable_event, (void *)trace->hardirq_enable_ip,
+		(void *)trace->hardirq_enable_ip);
 	printk("hardirqs last disabled at (%u): [<%px>] %pS\n",
-		curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip,
-		(void *)curr->hardirq_disable_ip);
+		trace->hardirq_disable_event, (void *)trace->hardirq_disable_ip,
+		(void *)trace->hardirq_disable_ip);
 	printk("softirqs last enabled at (%u): [<%px>] %pS\n",
-		curr->softirq_enable_event, (void *)curr->softirq_enable_ip,
-		(void *)curr->softirq_enable_ip);
+		trace->softirq_enable_event, (void *)trace->softirq_enable_ip,
+		(void *)trace->softirq_enable_ip);
 	printk("softirqs last disabled at (%u): [<%px>] %pS\n",
-		curr->softirq_disable_event, (void *)curr->softirq_disable_ip,
-		(void *)curr->softirq_disable_ip);
+		trace->softirq_disable_event, (void *)trace->softirq_disable_ip,
+		(void *)trace->softirq_disable_ip);
 }
 
 static int HARDIRQ_verbose(struct lock_class *class)
@@ -3646,10 +3648,19 @@ static void __trace_hardirqs_on_caller(void)
  */
 void lockdep_hardirqs_on_prepare(unsigned long ip)
 {
-	if (unlikely(!debug_locks || current->lockdep_recursion))
+	if (unlikely(!debug_locks))
 		return;
 
-	if (unlikely(current->hardirqs_enabled)) {
+	/*
+	 * NMIs do not (and cannot) track lock dependencies, nothing to do.
+	 */
+	if (unlikely(in_nmi()))
+		return;
+
+	if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
+		return;
+
+	if (unlikely(lockdep_hardirqs_enabled())) {
 		/*
 		 * Neither irq nor preemption are disabled here
 		 * so this is racy by nature but losing one hit
@@ -3677,7 +3688,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip)
	 * Can't allow enabling interrupts while in an interrupt handler,
	 * that's general bad form and such. Recursion, limited stack etc..
	 */
-	if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
+	if (DEBUG_LOCKS_WARN_ON(lockdep_hardirq_context()))
 		return;
 
 	current->hardirq_chain_key = current->curr_chain_key;
@@ -3690,12 +3701,35 @@ EXPORT_SYMBOL_GPL(lockdep_hardirqs_on_prepare);
 
 void noinstr lockdep_hardirqs_on(unsigned long ip)
 {
-	struct task_struct *curr = current;
+	struct irqtrace_events *trace = &current->irqtrace;
 
-	if (unlikely(!debug_locks || curr->lockdep_recursion))
+	if (unlikely(!debug_locks))
 		return;
 
-	if (curr->hardirqs_enabled) {
+	/*
+	 * NMIs can happen in the middle of local_irq_{en,dis}able() where the
+	 * tracking state and hardware state are out of sync.
+	 *
+	 * NMIs must save lockdep_hardirqs_enabled() to restore IRQ state from,
+	 * and not rely on hardware state like normal interrupts.
+	 */
+	if (unlikely(in_nmi())) {
+		if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI))
+			return;
+
+		/*
+		 * Skip:
+		 *  - recursion check, because NMI can hit lockdep;
+		 *  - hardware state check, because above;
+		 *  - chain_key check, see lockdep_hardirqs_on_prepare().
+		 */
+		goto skip_checks;
+	}
+
+	if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
+		return;
+
+	if (lockdep_hardirqs_enabled()) {
 		/*
 		 * Neither irq nor preemption are disabled here
 		 * so this is racy by nature but losing one hit
@@ -3720,10 +3754,11 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
 	DEBUG_LOCKS_WARN_ON(current->hardirq_chain_key !=
 			    current->curr_chain_key);
 
+skip_checks:
 	/* we'll do an OFF -> ON transition: */
-	curr->hardirqs_enabled = 1;
-	curr->hardirq_enable_ip = ip;
-	curr->hardirq_enable_event = ++curr->irq_events;
+	this_cpu_write(hardirqs_enabled, 1);
+	trace->hardirq_enable_ip = ip;
+	trace->hardirq_enable_event = ++trace->irq_events;
 	debug_atomic_inc(hardirqs_on_events);
 }
 EXPORT_SYMBOL_GPL(lockdep_hardirqs_on);
@@ -3733,9 +3768,18 @@ EXPORT_SYMBOL_GPL(lockdep_hardirqs_on);
  */
 void noinstr lockdep_hardirqs_off(unsigned long ip)
 {
-	struct task_struct *curr = current;
-
-	if (unlikely(!debug_locks || curr->lockdep_recursion))
+	if (unlikely(!debug_locks))
 		return;
 
+	/*
+	 * Matching lockdep_hardirqs_on(), allow NMIs in the middle of lockdep;
+	 * they will restore the software state. This ensures the software
+	 * state is consistent inside NMIs as well.
+	 */
+	if (in_nmi()) {
+		if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI))
+			return;
+	} else if (current->lockdep_recursion & LOCKDEP_RECURSION_MASK)
+		return;
+
 	/*
@@ -3745,13 +3789,15 @@ void noinstr lockdep_hardirqs_off(unsigned long ip)
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return;
 
-	if (curr->hardirqs_enabled) {
+	if (lockdep_hardirqs_enabled()) {
+		struct irqtrace_events *trace = &current->irqtrace;
+
 		/*
 		 * We have done an ON -> OFF transition:
 		 */
-		curr->hardirqs_enabled = 0;
-		curr->hardirq_disable_ip = ip;
-		curr->hardirq_disable_event = ++curr->irq_events;
+		this_cpu_write(hardirqs_enabled, 0);
+		trace->hardirq_disable_ip = ip;
+		trace->hardirq_disable_event = ++trace->irq_events;
 		debug_atomic_inc(hardirqs_off_events);
 	} else {
 		debug_atomic_inc(redundant_hardirqs_off);
@@ -3764,7 +3810,7 @@ EXPORT_SYMBOL_GPL(lockdep_hardirqs_off);
  */
 void lockdep_softirqs_on(unsigned long ip)
 {
-	struct task_struct *curr = current;
+	struct irqtrace_events *trace = &current->irqtrace;
 
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
@@ -3776,7 +3822,7 @@ void lockdep_softirqs_on(unsigned long ip)
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return;
 
-	if (curr->softirqs_enabled) {
+	if (current->softirqs_enabled) {
 		debug_atomic_inc(redundant_softirqs_on);
 		return;
 	}
@@ -3785,17 +3831,17 @@ void lockdep_softirqs_on(unsigned long ip)
 	/*
 	 * We'll do an OFF -> ON transition:
 	 */
-	curr->softirqs_enabled = 1;
-	curr->softirq_enable_ip = ip;
-	curr->softirq_enable_event = ++curr->irq_events;
+	current->softirqs_enabled = 1;
+	trace->softirq_enable_ip = ip;
+	trace->softirq_enable_event = ++trace->irq_events;
 	debug_atomic_inc(softirqs_on_events);
 	/*
 	 * We are going to turn softirqs on, so set the
 	 * usage bit for all held locks, if hardirqs are
 	 * enabled too:
 	 */
-	if (curr->hardirqs_enabled)
-		mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
+	if (lockdep_hardirqs_enabled())
+		mark_held_locks(current, LOCK_ENABLED_SOFTIRQ);
 	lockdep_recursion_finish();
 }
 
@@ -3804,8 +3850,6 @@ void lockdep_softirqs_on(unsigned long ip)
  */
 void lockdep_softirqs_off(unsigned long ip)
 {
-	struct task_struct *curr = current;
-
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
 
@@ -3815,13 +3859,15 @@ void lockdep_softirqs_off(unsigned long ip)
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return;
 
-	if (curr->softirqs_enabled) {
+	if (current->softirqs_enabled) {
+		struct irqtrace_events *trace = &current->irqtrace;
+
 		/*
 		 * We have done an ON -> OFF transition:
 		 */
-		curr->softirqs_enabled = 0;
-		curr->softirq_disable_ip = ip;
-		curr->softirq_disable_event = ++curr->irq_events;
+		current->softirqs_enabled = 0;
+		trace->softirq_disable_ip = ip;
+		trace->softirq_disable_event = ++trace->irq_events;
 		debug_atomic_inc(softirqs_off_events);
 		/*
 		 * Whoops, we wanted softirqs off, so why aren't they?
@@ -3843,7 +3889,7 @@ mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
	 */
 	if (!hlock->trylock) {
 		if (hlock->read) {
-			if (curr->hardirq_context)
+			if (lockdep_hardirq_context())
 				if (!mark_lock(curr, hlock,
 						LOCK_USED_IN_HARDIRQ_READ))
 					return 0;
@@ -3852,7 +3898,7 @@ mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
 					LOCK_USED_IN_SOFTIRQ_READ))
 					return 0;
 		} else {
-			if (curr->hardirq_context)
+			if (lockdep_hardirq_context())
 				if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
 					return 0;
 			if (curr->softirq_context)
@@ -3890,7 +3936,7 @@ lock_used:
 
 static inline unsigned int task_irq_context(struct task_struct *task)
 {
-	return LOCK_CHAIN_HARDIRQ_CONTEXT * !!task->hardirq_context +
+	return LOCK_CHAIN_HARDIRQ_CONTEXT * !!lockdep_hardirq_context() +
 	       LOCK_CHAIN_SOFTIRQ_CONTEXT * !!task->softirq_context;
 }
 
@@ -3983,7 +4029,7 @@ static inline short task_wait_context(struct task_struct *curr)
	 * Set appropriate wait type for the context; for IRQs we have to take
	 * into account force_irqthread as that is implied by PREEMPT_RT.
	 */
-	if (curr->hardirq_context) {
+	if (lockdep_hardirq_context()) {
 		/*
 		 * Check if force_irqthreads will run us threaded.
 		 */
@@ -4826,11 +4872,11 @@ static void check_flags(unsigned long flags)
 		return;
 
 	if (irqs_disabled_flags(flags)) {
-		if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
+		if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled())) {
 			printk("possible reason: unannotated irqs-off.\n");
 		}
 	} else {
-		if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
+		if (DEBUG_LOCKS_WARN_ON(!lockdep_hardirqs_enabled())) {
 			printk("possible reason: unannotated irqs-on.\n");
 		}
 	}
 
Reference in New Issue
Block a user