Merge tag 'printk-for-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/printk/linux
Pull printk updates from Petr Mladek:
- Allow creating nbcon console drivers with an unsafe write_atomic()
callback that can only be called by the final nbcon_atomic_flush_unsafe().
Otherwise, the driver relies on the kthread.
It is going to be used as a best-effort approach for an
experimental nbcon netconsole driver, see
https://lore.kernel.org/r/20251121-nbcon-v1-2-503d17b2b4af@debian.org
Note that a safe .write_atomic() callback is supposed to work in NMI
context. But some networking drivers are not safe even in IRQ
context:
https://lore.kernel.org/r/oc46gdpmmlly5o44obvmoatfqo5bhpgv7pabpvb6sjuqioymcg@gjsma3ghoz35
In an ideal world, all networking drivers would be fixed first and
the atomic flush would be blocked only in NMI context. But it raises
the question of how reliable networking drivers are when the system is
in a bad state. They might block the flushing of more reliable serial
consoles, which are more suitable for serious debugging anyway.
- Allow using the last 4 bytes of the printk ring buffer.
- Prevent queueing IRQ work and block printk kthreads when consoles are
suspended. Otherwise, they create unnecessary churn or even block
the suspend.
- Release console_lock() between each record in the kthread used for
legacy consoles on RT. It might significantly speed up the boot.
- Release the nbcon context between each record in the atomic flush. It
prevents stalls of the related printk kthread after it has lost
ownership in the middle of a record.
- Add support for NBCON consoles to KDB
- Add the %ptSp modifier for printing struct timespec64 and use it where
possible (a usage sketch follows after this list)
- Misc code clean up
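
Usage sketch only (not part of the pull request; the helper function and message
text below are made up for illustration). The new modifier takes a pointer to a
struct timespec64 and replaces the open-coded "%lld.%09ld" seconds/nanoseconds
pattern, as seen in the trace_hwlat hunk near the end of the diff below:

    #include <linux/printk.h>
    #include <linux/time64.h>

    static void report_event_time(const struct timespec64 *ts)
    {
        /* Before: pr_info("event at %lld.%09ld\n",
         *                 (long long)ts->tv_sec, ts->tv_nsec);
         */
        pr_info("event at %ptSp\n", ts);
    }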
* tag 'printk-for-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/printk/linux: (48 commits)
printk: Use console_is_usable on console_unblank
arch: um: kmsg_dump: Use console_is_usable
drivers: serial: kgdboc: Drop checks for CON_ENABLED and CON_BOOT
lib/vsprintf: Unify FORMAT_STATE_NUM handlers
printk: Avoid irq_work for printk_deferred() on suspend
printk: Avoid scheduling irq_work on suspend
printk: Allow printk_trigger_flush() to flush all types
tracing: Switch to use %ptSp
scsi: snic: Switch to use %ptSp
scsi: fnic: Switch to use %ptSp
s390/dasd: Switch to use %ptSp
ptp: ocp: Switch to use %ptSp
pps: Switch to use %ptSp
PCI: epf-test: Switch to use %ptSp
net: dsa: sja1105: Switch to use %ptSp
mmc: mmc_test: Switch to use %ptSp
media: av7110: Switch to use %ptSp
ipmi: Switch to use %ptSp
igb: Switch to use %ptSp
e1000e: Switch to use %ptSp
...
@@ -589,24 +589,41 @@ static void kdb_msg_write(const char *msg, int msg_len)
*/
cookie = console_srcu_read_lock();
for_each_console_srcu(c) {
if (!(console_srcu_read_flags(c) & CON_ENABLED))
short flags = console_srcu_read_flags(c);

if (!console_is_usable(c, flags, true))
continue;
if (c == dbg_io_ops->cons)
continue;
if (!c->write)
continue;
/*
* Set oops_in_progress to encourage the console drivers to
* disregard their internal spin locks: in the current calling
* context the risk of deadlock is a bigger problem than risks
* due to re-entering the console driver. We operate directly on
* oops_in_progress rather than using bust_spinlocks() because
* the calls bust_spinlocks() makes on exit are not appropriate
* for this calling context.
*/
++oops_in_progress;
c->write(c, msg, msg_len);
--oops_in_progress;

if (flags & CON_NBCON) {
struct nbcon_write_context wctxt = { };

/*
* Do not continue if the console is NBCON and the context
* can't be acquired.
*/
if (!nbcon_kdb_try_acquire(c, &wctxt))
continue;

nbcon_write_context_set_buf(&wctxt, (char *)msg, msg_len);

c->write_atomic(c, &wctxt);
nbcon_kdb_release(&wctxt);
} else {
/*
* Set oops_in_progress to encourage the console drivers to
* disregard their internal spin locks: in the current calling
* context the risk of deadlock is a bigger problem than risks
* due to re-entering the console driver. We operate directly on
* oops_in_progress rather than using bust_spinlocks() because
* the calls bust_spinlocks() makes on exit are not appropriate
* for this calling context.
*/
++oops_in_progress;
c->write(c, msg, msg_len);
--oops_in_progress;
}
touch_nmi_watchdog();
}
console_srcu_read_unlock(cookie);

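For readability, the nbcon branch added to kdb_msg_write() above boils down to the
following condensed sketch (not additional upstream code; @c, @msg and @msg_len are
the console and message already in scope in the function):

    struct nbcon_write_context wctxt = { };

    /* Take ownership of the nbcon console; bail out if that fails. */
    if (!nbcon_kdb_try_acquire(c, &wctxt))
        return;

    /* Point the write context at the kdb message buffer. */
    nbcon_write_context_set_buf(&wctxt, (char *)msg, msg_len);

    /* Emit the message through the driver callback. */
    c->write_atomic(c, &wctxt);

    /* Leave the unsafe section, release ownership, and flush any
     * printk() messages that queued up while kdb held the console. */
    nbcon_kdb_release(&wctxt);
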
@@ -3,7 +3,6 @@
* internal.h - printk internal definitions
*/
#include <linux/console.h>
#include <linux/percpu.h>
#include <linux/types.h>

#if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL)
@@ -112,47 +111,6 @@ bool nbcon_kthread_create(struct console *con);
void nbcon_kthread_stop(struct console *con);
void nbcon_kthreads_wake(void);

/*
* Check if the given console is currently capable and allowed to print
* records. Note that this function does not consider the current context,
* which can also play a role in deciding if @con can be used to print
* records.
*/
static inline bool console_is_usable(struct console *con, short flags, bool use_atomic)
{
if (!(flags & CON_ENABLED))
return false;

if ((flags & CON_SUSPENDED))
return false;

if (flags & CON_NBCON) {
/* The write_atomic() callback is optional. */
if (use_atomic && !con->write_atomic)
return false;

/*
* For the !use_atomic case, @printk_kthreads_running is not
* checked because the write_thread() callback is also used
* via the legacy loop when the printer threads are not
* available.
*/
} else {
if (!con->write)
return false;
}

/*
* Console drivers may assume that per-cpu resources have been
* allocated. So unless they're explicitly marked as being able to
* cope (CON_ANYTIME) don't call them until this CPU is officially up.
*/
if (!cpu_online(raw_smp_processor_id()) && !(flags & CON_ANYTIME))
return false;

return true;
}

/**
* nbcon_kthread_wake - Wake up a console printing thread
* @con: Console to operate on
@@ -204,9 +162,6 @@ static inline bool nbcon_legacy_emit_next_record(struct console *con, bool *hand
static inline void nbcon_kthread_wake(struct console *con) { }
static inline void nbcon_kthreads_wake(void) { }

static inline bool console_is_usable(struct console *con, short flags,
bool use_atomic) { return false; }

#endif /* CONFIG_PRINTK */

extern bool have_boot_console;
@@ -230,6 +185,8 @@ struct console_flush_type {
bool legacy_offload;
};

extern bool console_irqwork_blocked;

/*
* Identify which console flushing methods should be used in the context of
* the caller.
@@ -241,7 +198,7 @@ static inline void printk_get_console_flush_type(struct console_flush_type *ft)
switch (nbcon_get_default_prio()) {
case NBCON_PRIO_NORMAL:
if (have_nbcon_console && !have_boot_console) {
if (printk_kthreads_running)
if (printk_kthreads_running && !console_irqwork_blocked)
ft->nbcon_offload = true;
else
ft->nbcon_atomic = true;
@@ -251,7 +208,7 @@ static inline void printk_get_console_flush_type(struct console_flush_type *ft)
if (have_legacy_console || have_boot_console) {
if (!is_printk_legacy_deferred())
ft->legacy_direct = true;
else
else if (!console_irqwork_blocked)
ft->legacy_offload = true;
}
break;
@@ -264,7 +221,7 @@ static inline void printk_get_console_flush_type(struct console_flush_type *ft)
if (have_legacy_console || have_boot_console) {
if (!is_printk_legacy_deferred())
ft->legacy_direct = true;
else
else if (!console_irqwork_blocked)
ft->legacy_offload = true;
}
break;

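The console_is_usable() comment above only covers the console state; the caller is
still responsible for walking the console list under SRCU and for judging its own
context. A condensed sketch of the caller pattern used by this series (as in
kdb_msg_write() and console_flush_one_record(); not additional upstream code):

    int cookie;
    struct console *con;

    cookie = console_srcu_read_lock();
    for_each_console_srcu(con) {
        short flags = console_srcu_read_flags(con);

        /* Skip consoles that cannot print right now. */
        if (!console_is_usable(con, flags, true /* use_atomic */))
            continue;

        /* ... emit a record on @con ... */
    }
    console_srcu_read_unlock(cookie);
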
@@ -10,6 +10,7 @@
#include <linux/export.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/kdb.h>
#include <linux/kthread.h>
#include <linux/minmax.h>
#include <linux/panic.h>
@@ -118,6 +119,9 @@
* from scratch.
*/

/* Counter of active nbcon emergency contexts. */
static atomic_t nbcon_cpu_emergency_cnt = ATOMIC_INIT(0);

/**
* nbcon_state_set - Helper function to set the console state
* @con: Console to update
@@ -249,13 +253,16 @@ static int nbcon_context_try_acquire_direct(struct nbcon_context *ctxt,
* since all non-panic CPUs are stopped during panic(), it
* is safer to have them avoid gaining console ownership.
*
* If this acquire is a reacquire (and an unsafe takeover
* One exception is when kdb has locked for printing on this CPU.
*
* Second exception is a reacquire (and an unsafe takeover
* has not previously occurred) then it is allowed to attempt
* a direct acquire in panic. This gives console drivers an
* opportunity to perform any necessary cleanup if they were
* interrupted by the panic CPU while printing.
*/
if (panic_on_other_cpu() &&
!kdb_printf_on_this_cpu() &&
(!is_reacquire || cur->unsafe_takeover)) {
return -EPERM;
}
@@ -850,8 +857,8 @@ out:
return nbcon_context_can_proceed(ctxt, &cur);
}

static void nbcon_write_context_set_buf(struct nbcon_write_context *wctxt,
char *buf, unsigned int len)
void nbcon_write_context_set_buf(struct nbcon_write_context *wctxt,
char *buf, unsigned int len)
{
struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
struct console *con = ctxt->console;
@@ -1163,6 +1170,17 @@ static bool nbcon_kthread_should_wakeup(struct console *con, struct nbcon_contex
if (kthread_should_stop())
return true;

/*
* Block the kthread when the system is in an emergency or panic mode.
* It increases the chance that these contexts would be able to show
* the messages directly. And it reduces the risk of interrupted writes
* where the context with a higher priority takes over the nbcon console
* ownership in the middle of a message.
*/
if (unlikely(atomic_read(&nbcon_cpu_emergency_cnt)) ||
unlikely(panic_in_progress()))
return false;

cookie = console_srcu_read_lock();

flags = console_srcu_read_flags(con);
@@ -1214,6 +1232,14 @@ wait_for_event:
if (kthread_should_stop())
return 0;

/*
* Block the kthread when the system is in an emergency or panic
* mode. See nbcon_kthread_should_wakeup() for more details.
*/
if (unlikely(atomic_read(&nbcon_cpu_emergency_cnt)) ||
unlikely(panic_in_progress()))
goto wait_for_event;

backlog = false;

/*
@@ -1276,6 +1302,13 @@ void nbcon_kthreads_wake(void)
if (!printk_kthreads_running)
return;

/*
* It is not allowed to call this function when console irq_work
* is blocked.
*/
if (WARN_ON_ONCE(console_irqwork_blocked))
return;

cookie = console_srcu_read_lock();
for_each_console_srcu(con) {
if (!(console_srcu_read_flags(con) & CON_NBCON))
@@ -1404,6 +1437,26 @@ enum nbcon_prio nbcon_get_default_prio(void)
return NBCON_PRIO_NORMAL;
}

/*
* Track if it is allowed to perform unsafe hostile takeovers of console
* ownership. When true, console drivers might perform unsafe actions while
* printing. It is externally available via nbcon_allow_unsafe_takeover().
*/
static bool panic_nbcon_allow_unsafe_takeover;

/**
* nbcon_allow_unsafe_takeover - Check if unsafe console takeovers are allowed
*
* Return: True, when it is permitted to perform unsafe console printing
*
* This is also used by console_is_usable() to determine if it is allowed to
* call write_atomic() callbacks flagged as unsafe (CON_NBCON_ATOMIC_UNSAFE).
*/
bool nbcon_allow_unsafe_takeover(void)
{
return panic_on_this_cpu() && panic_nbcon_allow_unsafe_takeover;
}

/**
* nbcon_legacy_emit_next_record - Print one record for an nbcon console
* in legacy contexts
@@ -1474,7 +1527,6 @@ bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
* write_atomic() callback
* @con: The nbcon console to flush
* @stop_seq: Flush up until this record
* @allow_unsafe_takeover: True, to allow unsafe hostile takeovers
*
* Return: 0 if @con was flushed up to @stop_seq Otherwise, error code on
* failure.
@@ -1493,8 +1545,7 @@ bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
* returned, it cannot be expected that the unfinalized record will become
* available.
*/
static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
bool allow_unsafe_takeover)
static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
{
struct nbcon_write_context wctxt = { };
struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
@@ -1503,12 +1554,12 @@ static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
ctxt->console = con;
ctxt->spinwait_max_us = 2000;
ctxt->prio = nbcon_get_default_prio();
ctxt->allow_unsafe_takeover = allow_unsafe_takeover;

if (!nbcon_context_try_acquire(ctxt, false))
return -EPERM;
ctxt->allow_unsafe_takeover = nbcon_allow_unsafe_takeover();

while (nbcon_seq_read(con) < stop_seq) {
if (!nbcon_context_try_acquire(ctxt, false))
return -EPERM;

/*
* nbcon_emit_next_record() returns false when the console was
* handed over or taken over. In both cases the context is no
@@ -1517,6 +1568,8 @@ static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
if (!nbcon_emit_next_record(&wctxt, true))
return -EAGAIN;

nbcon_context_release(ctxt);

if (!ctxt->backlog) {
/* Are there reserved but not yet finalized records? */
if (nbcon_seq_read(con) < stop_seq)
@@ -1525,7 +1578,6 @@ static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
}
}

nbcon_context_release(ctxt);
return err;
}

@@ -1534,15 +1586,13 @@ static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
* write_atomic() callback
* @con: The nbcon console to flush
* @stop_seq: Flush up until this record
* @allow_unsafe_takeover: True, to allow unsafe hostile takeovers
*
* This will stop flushing before @stop_seq if another context has ownership.
* That context is then responsible for the flushing. Likewise, if new records
* are added while this context was flushing and there is no other context
* to handle the printing, this context must also flush those records.
*/
static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
bool allow_unsafe_takeover)
static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
{
struct console_flush_type ft;
unsigned long flags;
@@ -1557,7 +1607,7 @@ again:
*/
local_irq_save(flags);

err = __nbcon_atomic_flush_pending_con(con, stop_seq, allow_unsafe_takeover);
err = __nbcon_atomic_flush_pending_con(con, stop_seq);

local_irq_restore(flags);

@@ -1589,9 +1639,8 @@ again:
* __nbcon_atomic_flush_pending - Flush all nbcon consoles using their
* write_atomic() callback
* @stop_seq: Flush up until this record
* @allow_unsafe_takeover: True, to allow unsafe hostile takeovers
*/
static void __nbcon_atomic_flush_pending(u64 stop_seq, bool allow_unsafe_takeover)
static void __nbcon_atomic_flush_pending(u64 stop_seq)
{
struct console *con;
int cookie;
@@ -1609,7 +1658,7 @@ static void __nbcon_atomic_flush_pending(u64 stop_seq, bool allow_unsafe_takeove
if (nbcon_seq_read(con) >= stop_seq)
continue;

nbcon_atomic_flush_pending_con(con, stop_seq, allow_unsafe_takeover);
nbcon_atomic_flush_pending_con(con, stop_seq);
}
console_srcu_read_unlock(cookie);
}
@@ -1625,7 +1674,7 @@ static void __nbcon_atomic_flush_pending(u64 stop_seq, bool allow_unsafe_takeove
*/
void nbcon_atomic_flush_pending(void)
{
__nbcon_atomic_flush_pending(prb_next_reserve_seq(prb), false);
__nbcon_atomic_flush_pending(prb_next_reserve_seq(prb));
}

/**
@@ -1637,7 +1686,9 @@ void nbcon_atomic_flush_pending(void)
*/
void nbcon_atomic_flush_unsafe(void)
{
__nbcon_atomic_flush_pending(prb_next_reserve_seq(prb), true);
panic_nbcon_allow_unsafe_takeover = true;
__nbcon_atomic_flush_pending(prb_next_reserve_seq(prb));
panic_nbcon_allow_unsafe_takeover = false;
}

/**
@@ -1655,6 +1706,8 @@ void nbcon_cpu_emergency_enter(void)

preempt_disable();

atomic_inc(&nbcon_cpu_emergency_cnt);

cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
(*cpu_emergency_nesting)++;
}
@@ -1669,10 +1722,24 @@ void nbcon_cpu_emergency_exit(void)
unsigned int *cpu_emergency_nesting;

cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();

if (!WARN_ON_ONCE(*cpu_emergency_nesting == 0))
(*cpu_emergency_nesting)--;

/*
* Wake up kthreads because there might be some pending messages
* added by other CPUs with normal priority since the last flush
* in the emergency context.
*/
if (!WARN_ON_ONCE(atomic_read(&nbcon_cpu_emergency_cnt) == 0)) {
if (atomic_dec_return(&nbcon_cpu_emergency_cnt) == 0) {
struct console_flush_type ft;

printk_get_console_flush_type(&ft);
if (ft.nbcon_offload)
nbcon_kthreads_wake();
}
}

preempt_enable();
}

@@ -1844,14 +1911,75 @@ void nbcon_device_release(struct console *con)
* using the legacy loop.
*/
if (ft.nbcon_atomic) {
__nbcon_atomic_flush_pending_con(con, prb_next_reserve_seq(prb), false);
__nbcon_atomic_flush_pending_con(con, prb_next_reserve_seq(prb));
} else if (ft.legacy_direct) {
if (console_trylock())
console_unlock();
} else if (ft.legacy_offload) {
printk_trigger_flush();
defer_console_output();
}
}
console_srcu_read_unlock(cookie);
}
EXPORT_SYMBOL_GPL(nbcon_device_release);

/**
* nbcon_kdb_try_acquire - Try to acquire nbcon console and enter unsafe
* section
* @con: The nbcon console to acquire
* @wctxt: The nbcon write context to be used on success
*
* Context: Under console_srcu_read_lock() for emitting a single kdb message
* using the given con->write_atomic() callback. Can be called
* only when the console is usable at the moment.
*
* Return: True if the console was acquired. False otherwise.
*
* kdb emits messages on consoles registered for printk() without
* storing them into the ring buffer. It has to acquire the console
* ownerhip so that it could call con->write_atomic() callback a safe way.
*
* This function acquires the nbcon console using priority NBCON_PRIO_EMERGENCY
* and marks it unsafe for handover/takeover.
*/
bool nbcon_kdb_try_acquire(struct console *con,
struct nbcon_write_context *wctxt)
{
struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);

memset(ctxt, 0, sizeof(*ctxt));
ctxt->console = con;
ctxt->prio = NBCON_PRIO_EMERGENCY;

if (!nbcon_context_try_acquire(ctxt, false))
return false;

if (!nbcon_context_enter_unsafe(ctxt))
return false;

return true;
}

/**
* nbcon_kdb_release - Exit unsafe section and release the nbcon console
*
* @wctxt: The nbcon write context initialized by a successful
* nbcon_kdb_try_acquire()
*/
void nbcon_kdb_release(struct nbcon_write_context *wctxt)
{
struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);

if (!nbcon_context_exit_unsafe(ctxt))
return;

nbcon_context_release(ctxt);

/*
* Flush any new printk() messages added when the console was blocked.
* Only the console used by the given write context was blocked.
* The console was locked only when the write_atomic() callback
* was usable.
*/
__nbcon_atomic_flush_pending_con(ctxt->console, prb_next_reserve_seq(prb));
}

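The nbcon_cpu_emergency_cnt counter added above keeps the printer kthreads parked
while any CPU is inside an emergency section, so messages are flushed directly. A
minimal usage sketch of the existing enter/exit pair (the dump function and its
output are made up for illustration):

    static void dump_debug_state(void)
    {
        /* Messages emitted here are printed with emergency priority
         * instead of being handed off to the printer kthreads. */
        nbcon_cpu_emergency_enter();

        pr_emerg("example emergency dump\n");

        /* Dropping the last emergency reference wakes the kthreads so
         * any normal-priority backlog added meanwhile gets printed. */
        nbcon_cpu_emergency_exit();
    }
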
@@ -462,6 +462,9 @@ bool have_boot_console;
/* See printk_legacy_allow_panic_sync() for details. */
bool legacy_allow_panic_sync;

/* Avoid using irq_work when suspending. */
bool console_irqwork_blocked;

#ifdef CONFIG_PRINTK
DECLARE_WAIT_QUEUE_HEAD(log_wait);
static DECLARE_WAIT_QUEUE_HEAD(legacy_wait);
@@ -2390,7 +2393,7 @@ asmlinkage int vprintk_emit(int facility, int level,
/* If called from the scheduler, we can not call up(). */
if (level == LOGLEVEL_SCHED) {
level = LOGLEVEL_DEFAULT;
ft.legacy_offload |= ft.legacy_direct;
ft.legacy_offload |= ft.legacy_direct && !console_irqwork_blocked;
ft.legacy_direct = false;
}

@@ -2426,7 +2429,7 @@ asmlinkage int vprintk_emit(int facility, int level,

if (ft.legacy_offload)
defer_console_output();
else
else if (!console_irqwork_blocked)
wake_up_klogd();

return printed_len;
@@ -2730,10 +2733,20 @@ void console_suspend_all(void)
{
struct console *con;

if (console_suspend_enabled)
pr_info("Suspending console(s) (use no_console_suspend to debug)\n");

/*
* Flush any console backlog and then avoid queueing irq_work until
* console_resume_all(). Until then deferred printing is no longer
* triggered, NBCON consoles transition to atomic flushing, and
* any klogd waiters are not triggered.
*/
pr_flush(1000, true);
console_irqwork_blocked = true;

if (!console_suspend_enabled)
return;
pr_info("Suspending console(s) (use no_console_suspend to debug)\n");
pr_flush(1000, true);

console_list_lock();
for_each_console(con)
@@ -2754,26 +2767,34 @@ void console_resume_all(void)
struct console_flush_type ft;
struct console *con;

if (!console_suspend_enabled)
return;

console_list_lock();
for_each_console(con)
console_srcu_write_flags(con, con->flags & ~CON_SUSPENDED);
console_list_unlock();

/*
* Ensure that all SRCU list walks have completed. All printing
* contexts must be able to see they are no longer suspended so
* that they are guaranteed to wake up and resume printing.
* Allow queueing irq_work. After restoring console state, deferred
* printing and any klogd waiters need to be triggered in case there
* is now a console backlog.
*/
synchronize_srcu(&console_srcu);
console_irqwork_blocked = false;

if (console_suspend_enabled) {
console_list_lock();
for_each_console(con)
console_srcu_write_flags(con, con->flags & ~CON_SUSPENDED);
console_list_unlock();

/*
* Ensure that all SRCU list walks have completed. All printing
* contexts must be able to see they are no longer suspended so
* that they are guaranteed to wake up and resume printing.
*/
synchronize_srcu(&console_srcu);
}

printk_get_console_flush_type(&ft);
if (ft.nbcon_offload)
nbcon_kthreads_wake();
if (ft.legacy_offload)
defer_console_output();
else
wake_up_klogd();

pr_flush(1000, true);
}
@@ -3002,21 +3023,18 @@ out:
}

/*
* Legacy console printing from printk() caller context does not respect
* raw_spinlock/spinlock nesting. For !PREEMPT_RT the lockdep warning is a
* false positive. For PREEMPT_RT the false positive condition does not
* occur.
*
* This map is used to temporarily establish LD_WAIT_SLEEP context for the
* console write() callback when legacy printing to avoid false positive
* lockdep complaints, thus allowing lockdep to continue to function for
* real issues.
* The legacy console always acquires a spinlock_t from its printing
* callback. This violates lock nesting if the caller acquired an always
* spinning lock (raw_spinlock_t) while invoking printk(). This is not a
* problem on PREEMPT_RT because legacy consoles print always from a
* dedicated thread and never from within printk(). Therefore we tell
* lockdep that a sleeping spin lock (spinlock_t) is valid here.
*/
#ifdef CONFIG_PREEMPT_RT
static inline void printk_legacy_allow_spinlock_enter(void) { }
static inline void printk_legacy_allow_spinlock_exit(void) { }
#else
static DEFINE_WAIT_OVERRIDE_MAP(printk_legacy_map, LD_WAIT_SLEEP);
static DEFINE_WAIT_OVERRIDE_MAP(printk_legacy_map, LD_WAIT_CONFIG);

static inline void printk_legacy_allow_spinlock_enter(void)
{
@@ -3134,6 +3152,108 @@ static inline void printk_kthreads_check_locked(void) { }

#endif /* CONFIG_PRINTK */


/*
* Print out one record for each console.
*
* @do_cond_resched is set by the caller. It can be true only in schedulable
* context.
*
* @next_seq is set to the sequence number after the last available record.
* The value is valid only when all usable consoles were flushed. It is
* when the function returns true (can do the job) and @try_again parameter
* is set to false, see below.
*
* @handover will be set to true if a printk waiter has taken over the
* console_lock, in which case the caller is no longer holding the
* console_lock. Otherwise it is set to false.
*
* @try_again will be set to true when it still makes sense to call this
* function again. The function could do the job, see the return value.
* And some consoles still make progress.
*
* Returns true when the function could do the job. Some consoles are usable,
* and there was no takeover and no panic_on_other_cpu().
*
* Requires the console_lock.
*/
static bool console_flush_one_record(bool do_cond_resched, u64 *next_seq, bool *handover,
bool *try_again)
{
struct console_flush_type ft;
bool any_usable = false;
struct console *con;
int cookie;

*try_again = false;

printk_get_console_flush_type(&ft);

cookie = console_srcu_read_lock();
for_each_console_srcu(con) {
short flags = console_srcu_read_flags(con);
u64 printk_seq;
bool progress;

/*
* console_flush_one_record() is only responsible for
* nbcon consoles when the nbcon consoles cannot print via
* their atomic or threaded flushing.
*/
if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload))
continue;

if (!console_is_usable(con, flags, !do_cond_resched))
continue;
any_usable = true;

if (flags & CON_NBCON) {
progress = nbcon_legacy_emit_next_record(con, handover, cookie,
!do_cond_resched);
printk_seq = nbcon_seq_read(con);
} else {
progress = console_emit_next_record(con, handover, cookie);
printk_seq = con->seq;
}

/*
* If a handover has occurred, the SRCU read lock
* is already released.
*/
if (*handover)
goto fail;

/* Track the next of the highest seq flushed. */
if (printk_seq > *next_seq)
*next_seq = printk_seq;

if (!progress)
continue;

/*
* An usable console made a progress. There might still be
* pending messages.
*/
*try_again = true;

/* Allow panic_cpu to take over the consoles safely. */
if (panic_on_other_cpu())
goto fail_srcu;

if (do_cond_resched)
cond_resched();
}
console_srcu_read_unlock(cookie);

return any_usable;

fail_srcu:
console_srcu_read_unlock(cookie);
fail:
*try_again = false;
return false;
}

/*
* Print out all remaining records to all consoles.
*
@@ -3159,77 +3279,18 @@ static inline void printk_kthreads_check_locked(void) { }
*/
static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover)
{
struct console_flush_type ft;
bool any_usable = false;
struct console *con;
bool any_progress;
int cookie;
bool try_again;
bool ret;

*next_seq = 0;
*handover = false;

do {
any_progress = false;
ret = console_flush_one_record(do_cond_resched, next_seq,
handover, &try_again);
} while (try_again);

printk_get_console_flush_type(&ft);

cookie = console_srcu_read_lock();
for_each_console_srcu(con) {
short flags = console_srcu_read_flags(con);
u64 printk_seq;
bool progress;

/*
* console_flush_all() is only responsible for nbcon
* consoles when the nbcon consoles cannot print via
* their atomic or threaded flushing.
*/
if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload))
continue;

if (!console_is_usable(con, flags, !do_cond_resched))
continue;
any_usable = true;

if (flags & CON_NBCON) {
progress = nbcon_legacy_emit_next_record(con, handover, cookie,
!do_cond_resched);
printk_seq = nbcon_seq_read(con);
} else {
progress = console_emit_next_record(con, handover, cookie);
printk_seq = con->seq;
}

/*
* If a handover has occurred, the SRCU read lock
* is already released.
*/
if (*handover)
return false;

/* Track the next of the highest seq flushed. */
if (printk_seq > *next_seq)
*next_seq = printk_seq;

if (!progress)
continue;
any_progress = true;

/* Allow panic_cpu to take over the consoles safely. */
if (panic_on_other_cpu())
goto abandon;

if (do_cond_resched)
cond_resched();
}
console_srcu_read_unlock(cookie);
} while (any_progress);

return any_usable;

abandon:
console_srcu_read_unlock(cookie);
return false;
return ret;
}

static void __console_flush_and_unlock(void)
@@ -3331,12 +3392,10 @@ void console_unblank(void)
*/
cookie = console_srcu_read_lock();
for_each_console_srcu(c) {
short flags = console_srcu_read_flags(c);

if (flags & CON_SUSPENDED)
if (!console_is_usable(c, console_srcu_read_flags(c), true))
continue;

if ((flags & CON_ENABLED) && c->unblank) {
if (c->unblank) {
found_unblank = true;
break;
}
@@ -3373,12 +3432,10 @@ void console_unblank(void)

cookie = console_srcu_read_lock();
for_each_console_srcu(c) {
short flags = console_srcu_read_flags(c);

if (flags & CON_SUSPENDED)
if (!console_is_usable(c, console_srcu_read_flags(c), true))
continue;

if ((flags & CON_ENABLED) && c->unblank)
if (c->unblank)
c->unblank();
}
console_srcu_read_unlock(cookie);
@@ -3601,17 +3658,26 @@ static bool legacy_kthread_should_wakeup(void)

static int legacy_kthread_func(void *unused)
{
for (;;) {
wait_event_interruptible(legacy_wait, legacy_kthread_should_wakeup());
bool try_again;

wait_for_event:
wait_event_interruptible(legacy_wait, legacy_kthread_should_wakeup());

do {
bool handover = false;
u64 next_seq = 0;

if (kthread_should_stop())
break;
return 0;

console_lock();
__console_flush_and_unlock();
}
console_flush_one_record(true, &next_seq, &handover, &try_again);
if (!handover)
__console_unlock();

return 0;
} while (try_again);

goto wait_for_event;
}

static bool legacy_kthread_create(void)
@@ -4511,6 +4577,13 @@ static void __wake_up_klogd(int val)
if (!printk_percpu_data_ready())
return;

/*
* It is not allowed to call this function when console irq_work
* is blocked.
*/
if (WARN_ON_ONCE(console_irqwork_blocked))
return;

preempt_disable();
/*
* Guarantee any new records can be seen by tasks preparing to wait
@@ -4567,9 +4640,30 @@ void defer_console_output(void)
__wake_up_klogd(PRINTK_PENDING_WAKEUP | PRINTK_PENDING_OUTPUT);
}

/**
* printk_trigger_flush - Attempt to flush printk buffer to consoles.
*
* If possible, flush the printk buffer to all consoles in the caller's
* context. If offloading is available, trigger deferred printing.
*
* This is best effort. Depending on the system state, console states,
* and caller context, no actual flushing may result from this call.
*/
void printk_trigger_flush(void)
{
defer_console_output();
struct console_flush_type ft;

printk_get_console_flush_type(&ft);
if (ft.nbcon_atomic)
nbcon_atomic_flush_pending();
if (ft.nbcon_offload)
nbcon_kthreads_wake();
if (ft.legacy_direct) {
if (console_trylock())
console_unlock();
}
if (ft.legacy_offload)
defer_console_output();
}

int vprintk_deferred(const char *fmt, va_list args)

@@ -411,6 +411,23 @@ static bool data_check_size(struct prb_data_ring *data_ring, unsigned int size)
return to_blk_size(size) <= DATA_SIZE(data_ring) / 2;
}

/*
* Compare the current and requested logical position and decide
* whether more space is needed.
*
* Return false when @lpos_current is already at or beyond @lpos_target.
*
* Also return false when the difference between the positions is bigger
* than the size of the data buffer. It might happen only when the caller
* raced with another CPU(s) which already made and used the space.
*/
static bool need_more_space(struct prb_data_ring *data_ring,
unsigned long lpos_current,
unsigned long lpos_target)
{
return lpos_target - lpos_current - 1 < DATA_SIZE(data_ring);
}

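The single unsigned comparison in need_more_space() above handles three cases at
once: the target not yet reached, the target already reached, and a lost race where
@lpos_current has already moved past @lpos_target. A standalone illustration of the
arithmetic (plain userspace C with a made-up DATA_SIZE of 16; not kernel code):

    #include <stdio.h>

    #define DATA_SIZE 16UL  /* stand-in for DATA_SIZE(data_ring) */

    static int need_more_space(unsigned long lpos_current, unsigned long lpos_target)
    {
        /* Same expression as the kernel helper: the "- 1" makes the
         * lpos_current == lpos_target case wrap around to ULONG_MAX,
         * which fails the comparison, so no more space is requested. */
        return lpos_target - lpos_current - 1 < DATA_SIZE;
    }

    int main(void)
    {
        printf("%d\n", need_more_space(100, 101)); /* 1: one byte short of the target */
        printf("%d\n", need_more_space(100, 100)); /* 0: already at the target */
        printf("%d\n", need_more_space(120, 101)); /* 0: raced past the target */
        return 0;
    }
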
/* Query the state of a descriptor. */
static enum desc_state get_desc_state(unsigned long id,
unsigned long state_val)
@@ -577,7 +594,7 @@ static bool data_make_reusable(struct printk_ringbuffer *rb,
unsigned long id;

/* Loop until @lpos_begin has advanced to or beyond @lpos_end. */
while ((lpos_end - lpos_begin) - 1 < DATA_SIZE(data_ring)) {
while (need_more_space(data_ring, lpos_begin, lpos_end)) {
blk = to_block(data_ring, lpos_begin);

/*
@@ -668,7 +685,7 @@ static bool data_push_tail(struct printk_ringbuffer *rb, unsigned long lpos)
* sees the new tail lpos, any descriptor states that transitioned to
* the reusable state must already be visible.
*/
while ((lpos - tail_lpos) - 1 < DATA_SIZE(data_ring)) {
while (need_more_space(data_ring, tail_lpos, lpos)) {
/*
* Make all descriptors reusable that are associated with
* data blocks before @lpos.
@@ -999,6 +1016,17 @@ static bool desc_reserve(struct printk_ringbuffer *rb, unsigned long *id_out)
return true;
}

static bool is_blk_wrapped(struct prb_data_ring *data_ring,
unsigned long begin_lpos, unsigned long next_lpos)
{
/*
* Subtract one from next_lpos since it's not actually part of this data
* block. This allows perfectly fitting records to not wrap.
*/
return DATA_WRAPS(data_ring, begin_lpos) !=
DATA_WRAPS(data_ring, next_lpos - 1);
}

/* Determine the end of a data block. */
static unsigned long get_next_lpos(struct prb_data_ring *data_ring,
unsigned long lpos, unsigned int size)
@@ -1010,7 +1038,7 @@ static unsigned long get_next_lpos(struct prb_data_ring *data_ring,
next_lpos = lpos + size;

/* First check if the data block does not wrap. */
if (DATA_WRAPS(data_ring, begin_lpos) == DATA_WRAPS(data_ring, next_lpos))
if (!is_blk_wrapped(data_ring, begin_lpos, next_lpos))
return next_lpos;

/* Wrapping data blocks store their data at the beginning. */
@@ -1087,7 +1115,7 @@ static char *data_alloc(struct printk_ringbuffer *rb, unsigned int size,
blk = to_block(data_ring, begin_lpos);
blk->id = id; /* LMM(data_alloc:B) */

if (DATA_WRAPS(data_ring, begin_lpos) != DATA_WRAPS(data_ring, next_lpos)) {
if (is_blk_wrapped(data_ring, begin_lpos, next_lpos)) {
/* Wrapping data blocks store their data at the beginning. */
blk = to_block(data_ring, 0);

@@ -1131,14 +1159,21 @@ static char *data_realloc(struct printk_ringbuffer *rb, unsigned int size,
return NULL;

/* Keep track if @blk_lpos was a wrapping data block. */
wrapped = (DATA_WRAPS(data_ring, blk_lpos->begin) != DATA_WRAPS(data_ring, blk_lpos->next));
wrapped = is_blk_wrapped(data_ring, blk_lpos->begin, blk_lpos->next);

size = to_blk_size(size);

next_lpos = get_next_lpos(data_ring, blk_lpos->begin, size);

/* If the data block does not increase, there is nothing to do. */
if (head_lpos - next_lpos < DATA_SIZE(data_ring)) {
/*
* Use the current data block when the size does not increase, i.e.
* when @head_lpos is already able to accommodate the new @next_lpos.
*
* Note that need_more_space() could never return false here because
* the difference between the positions was bigger than the data
* buffer size. The data block is reopened and can't get reused.
*/
if (!need_more_space(data_ring, head_lpos, next_lpos)) {
if (wrapped)
blk = to_block(data_ring, 0);
else
@@ -1167,7 +1202,7 @@ static char *data_realloc(struct printk_ringbuffer *rb, unsigned int size,

blk = to_block(data_ring, blk_lpos->begin);

if (DATA_WRAPS(data_ring, blk_lpos->begin) != DATA_WRAPS(data_ring, next_lpos)) {
if (is_blk_wrapped(data_ring, blk_lpos->begin, next_lpos)) {
struct prb_data_block *old_blk = blk;

/* Wrapping data blocks store their data at the beginning. */
@@ -1203,7 +1238,7 @@ static unsigned int space_used(struct prb_data_ring *data_ring,
if (BLK_DATALESS(blk_lpos))
return 0;

if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next)) {
if (!is_blk_wrapped(data_ring, blk_lpos->begin, blk_lpos->next)) {
/* Data block does not wrap. */
return (DATA_INDEX(data_ring, blk_lpos->next) -
DATA_INDEX(data_ring, blk_lpos->begin));
@@ -1249,15 +1284,15 @@ static const char *get_data(struct prb_data_ring *data_ring,
return NULL;
}

/* Regular data block: @begin less than @next and in same wrap. */
if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next) &&
blk_lpos->begin < blk_lpos->next) {
/* Regular data block: @begin and @next in the same wrap. */
if (!is_blk_wrapped(data_ring, blk_lpos->begin, blk_lpos->next)) {
db = to_block(data_ring, blk_lpos->begin);
*data_size = blk_lpos->next - blk_lpos->begin;

/* Wrapping data block: @begin is one wrap behind @next. */
} else if (DATA_WRAPS(data_ring, blk_lpos->begin + DATA_SIZE(data_ring)) ==
DATA_WRAPS(data_ring, blk_lpos->next)) {
} else if (!is_blk_wrapped(data_ring,
blk_lpos->begin + DATA_SIZE(data_ring),
blk_lpos->next)) {
db = to_block(data_ring, 0);
*data_size = DATA_INDEX(data_ring, blk_lpos->next);

@@ -1267,6 +1302,10 @@ static const char *get_data(struct prb_data_ring *data_ring,
return NULL;
}

/* Sanity check. Data-less blocks were handled earlier. */
if (WARN_ON_ONCE(!data_check_size(data_ring, *data_size) || !*data_size))
return NULL;

/* A valid data block will always be aligned to the ID size. */
if (WARN_ON_ONCE(blk_lpos->begin != ALIGN(blk_lpos->begin, sizeof(db->id))) ||
WARN_ON_ONCE(blk_lpos->next != ALIGN(blk_lpos->next, sizeof(db->id)))) {

@@ -1467,12 +1467,12 @@ trace_hwlat_print(struct trace_iterator *iter, int flags,

trace_assign_type(field, entry);

trace_seq_printf(s, "#%-5u inner/outer(us): %4llu/%-5llu ts:%lld.%09ld count:%d",
trace_seq_printf(s, "#%-5u inner/outer(us): %4llu/%-5llu ts:%ptSp count:%d",
field->seqnum,
field->duration,
field->outer_duration,
(long long)field->timestamp.tv_sec,
field->timestamp.tv_nsec, field->count);
&field->timestamp,
field->count);

if (field->nmi_count) {
/*