net/sched: sch_cake: Fix incorrect qlen reduction in cake_drop

In cake_drop(), qdisc_tree_reduce_backlog() is used to update the qlen
and backlog of the qdisc hierarchy. Its caller, cake_enqueue(), assumes
that the parent qdisc will enqueue the current packet. However, this
assumption breaks when cake_enqueue() returns NET_XMIT_CN: the parent
qdisc stops enqueuing the current packet, leaving the tree qlen/backlog
accounting inconsistent. This mismatch can lead to a NULL dereference
(e.g., when the parent qdisc is qfq).
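
For context, a parent qdisc only accounts for a packet when the child's
enqueue returns NET_XMIT_SUCCESS. A minimal sketch of that pattern,
modeled loosely on qfq_enqueue() (not the exact code):

	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		/* NET_XMIT_CN lands here: the parent never counts the
		 * packet, so any qdisc_tree_reduce_backlog() the child
		 * already issued for it over-reduces the hierarchy.
		 */
		if (net_xmit_drop_count(err))
			qdisc_qstats_drop(sch);
		return err;
	}
	...
	sch->q.qlen++;	/* only reached on NET_XMIT_SUCCESS */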

This patch computes the qlen/backlog delta in a more robust way by
observing the difference before and after the series of cake_drop()
calls, and then compensates the qdisc tree accounting if cake_enqueue()
returns NET_XMIT_CN.
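
Consolidated from the diff below, the overlimit path becomes:

	prev_qlen = sch->q.qlen;
	prev_backlog = sch->qstats.backlog;

	while (q->buffer_used > q->buffer_limit) {
		drop_id = cake_drop(sch, to_free);
		...
	}

	prev_qlen -= sch->q.qlen;		/* packets dropped */
	prev_backlog -= sch->qstats.backlog;	/* bytes dropped */

	if (same_flow) {
		/* NET_XMIT_CN: the parent treats the packet as never
		 * enqueued, so its own enqueue and drop must cancel out
		 * of the tree-wide reduction.
		 */
		qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
					  prev_backlog - len);
		return NET_XMIT_CN;
	}
	qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
	return NET_XMIT_SUCCESS;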

To ensure correct compensation when ACK thinning is enabled, a new
variable (ack_pkt_len) is introduced to keep `len` unchanged.
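
Previously the dropped ACK's length was subtracted from len itself,
which would skew the prev_backlog - len compensation above; the diff
instead tracks it separately (consolidated from the hunks below):

	int ack_pkt_len = 0;

	if (ack) {
		/* a queued ACK is replaced, so sch->q.qlen is unchanged */
		ack_pkt_len = qdisc_pkt_len(ack);
		qdisc_tree_reduce_backlog(sch, 1, ack_pkt_len);
		consume_skb(ack);
	} else {
		sch->q.qlen++;
	}
	/* stats take the net byte change; len keeps the full length */
	sch->qstats.backlog += len - ack_pkt_len;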

Fixes: 15de71d06a ("net/sched: Make cake_enqueue return NET_XMIT_CN when past buffer_limit")
Signed-off-by: Xiang Mei <xmei5@asu.edu>
Reviewed-by: Toke Høiland-Jørgensen <toke@toke.dk>
Link: https://patch.msgid.link/20251128001415.377823-1-xmei5@asu.edu
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

@@ -1597,7 +1597,6 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
 	qdisc_drop_reason(skb, sch, to_free, SKB_DROP_REASON_QDISC_OVERLIMIT);
 	sch->q.qlen--;
-	qdisc_tree_reduce_backlog(sch, 1, len);
 	cake_heapify(q, 0);
@@ -1743,14 +1742,14 @@ static void cake_reconfigure(struct Qdisc *sch);
 static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 			struct sk_buff **to_free)
 {
+	u32 idx, tin, prev_qlen, prev_backlog, drop_id;
 	struct cake_sched_data *q = qdisc_priv(sch);
-	int len = qdisc_pkt_len(skb);
-	int ret;
+	int len = qdisc_pkt_len(skb), ret;
 	struct sk_buff *ack = NULL;
 	ktime_t now = ktime_get();
 	struct cake_tin_data *b;
 	struct cake_flow *flow;
-	u32 idx, tin;
+	bool same_flow = false;
 
 	/* choose flow to insert into */
 	idx = cake_classify(sch, &b, skb, q->flow_mode, &ret);
@@ -1823,6 +1822,8 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		consume_skb(skb);
 	} else {
 		/* not splitting */
+		int ack_pkt_len = 0;
+
 		cobalt_set_enqueue_time(skb, now);
 		get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
 		flow_queue_add(flow, skb);
@@ -1833,13 +1834,13 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		if (ack) {
 			b->ack_drops++;
 			sch->qstats.drops++;
-			b->bytes += qdisc_pkt_len(ack);
-			len -= qdisc_pkt_len(ack);
+			ack_pkt_len = qdisc_pkt_len(ack);
+			b->bytes += ack_pkt_len;
 			q->buffer_used += skb->truesize - ack->truesize;
 			if (q->rate_flags & CAKE_FLAG_INGRESS)
 				cake_advance_shaper(q, b, ack, now, true);
 
-			qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
+			qdisc_tree_reduce_backlog(sch, 1, ack_pkt_len);
 			consume_skb(ack);
 		} else {
 			sch->q.qlen++;
@@ -1848,11 +1849,11 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 
 		/* stats */
 		b->packets++;
-		b->bytes += len;
-		b->backlogs[idx] += len;
-		b->tin_backlog += len;
-		sch->qstats.backlog += len;
-		q->avg_window_bytes += len;
+		b->bytes += len - ack_pkt_len;
+		b->backlogs[idx] += len - ack_pkt_len;
+		b->tin_backlog += len - ack_pkt_len;
+		sch->qstats.backlog += len - ack_pkt_len;
+		q->avg_window_bytes += len - ack_pkt_len;
 	}
 
 	if (q->overflow_timeout)
@@ -1927,24 +1928,29 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	if (q->buffer_used > q->buffer_max_used)
 		q->buffer_max_used = q->buffer_used;
 
-	if (q->buffer_used > q->buffer_limit) {
-		bool same_flow = false;
-		u32 dropped = 0;
-		u32 drop_id;
+	if (q->buffer_used <= q->buffer_limit)
+		return NET_XMIT_SUCCESS;
 
-		while (q->buffer_used > q->buffer_limit) {
-			dropped++;
-			drop_id = cake_drop(sch, to_free);
+	prev_qlen = sch->q.qlen;
+	prev_backlog = sch->qstats.backlog;
 
-			if ((drop_id >> 16) == tin &&
-			    (drop_id & 0xFFFF) == idx)
-				same_flow = true;
-		}
-		b->drop_overlimit += dropped;
-
-		if (same_flow)
-			return NET_XMIT_CN;
-	}
+	while (q->buffer_used > q->buffer_limit) {
+		drop_id = cake_drop(sch, to_free);
+		if ((drop_id >> 16) == tin &&
+		    (drop_id & 0xFFFF) == idx)
+			same_flow = true;
+	}
+
+	prev_qlen -= sch->q.qlen;
+	prev_backlog -= sch->qstats.backlog;
+	b->drop_overlimit += prev_qlen;
+
+	if (same_flow) {
+		qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
+					  prev_backlog - len);
+		return NET_XMIT_CN;
+	}
+
+	qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
 	return NET_XMIT_SUCCESS;
 }