mm: thp: introduce folio_split_queue_lock and its variants

In the future memcg removal, the binding between a folio and its memcg
may change, making the split queue lock within the memcg unstable when
held.

A new approach is required that reparents a memcg's split queue to the
parent memcg. This patch starts that work by introducing a unified way
to acquire the split queue lock.

It's a code-only refactoring with no functional changes.
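
To illustrate the conversion (a sketch only, not part of the diff; the
full context is in the mm/huge_memory.c hunks below), callers go from
open-coding the queue lookup and spinlock to a single helper that
returns the locked queue:

	/* Before: resolve the queue, then take its lock by hand. */
	struct deferred_split *ds_queue = get_deferred_split_queue(folio);
	unsigned long flags;

	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
	/* ... add or remove folio->_deferred_list ... */
	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);

	/* After: one helper resolves folio -> queue and locks it. */
	ds_queue = folio_split_queue_lock_irqsave(folio, &flags);
	/* ... add or remove folio->_deferred_list ... */
	split_queue_unlock_irqrestore(ds_queue, flags);

Because the lock helper returns the queue it locked, the caller always
unlocks exactly that queue, which is the property the future
reparenting work relies on.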

Link: https://lkml.kernel.org/r/a31a90bcac04dc754f775e87ae3205be3170b571.1762762324.git.zhengqi.arch@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Nico Pache <npache@redhat.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Wei Yang <richard.weiyang@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit ad7c7f4576 (parent fd603ae11e)
Author:    Muchun Song
Date:      2025-11-10 16:17:56 +08:00
Committer: Andrew Morton

 2 files changed, 94 insertions(+), 35 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h

@@ -1647,6 +1647,11 @@ int alloc_shrinker_info(struct mem_cgroup *memcg);
 void free_shrinker_info(struct mem_cgroup *memcg);
 void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
 void reparent_shrinker_deferred(struct mem_cgroup *memcg);
+
+static inline int shrinker_id(struct shrinker *shrinker)
+{
+	return shrinker->id;
+}
 #else
 #define mem_cgroup_sockets_enabled 0
@@ -1678,6 +1683,11 @@ static inline void set_shrinker_bit(struct mem_cgroup *memcg,
 					  int nid, int shrinker_id)
 {
 }
+
+static inline int shrinker_id(struct shrinker *shrinker)
+{
+	return -1;
+}
 #endif
 
 #ifdef CONFIG_MEMCG
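
The !CONFIG_MEMCG stub above can safely return -1: in that
configuration folio_split_queue_memcg() (added below) always returns
NULL, so a guarded caller never reaches the shrinker bit. A sketch of
the call site this enables, taken from the deferred_split_folio() hunk
further down:

	/* No #ifdef needed; with CONFIG_MEMCG=n, memcg is always NULL
	 * here and the branch is never taken. */
	if (memcg)
		set_shrinker_bit(memcg, folio_nid(folio),
				 shrinker_id(deferred_split_shrinker));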

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c

@@ -1077,28 +1077,86 @@ pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
 	return pmd;
 }
 
+static struct deferred_split *split_queue_node(int nid)
+{
+	struct pglist_data *pgdata = NODE_DATA(nid);
+
+	return &pgdata->deferred_split_queue;
+}
+
 #ifdef CONFIG_MEMCG
 static inline
-struct deferred_split *get_deferred_split_queue(struct folio *folio)
+struct mem_cgroup *folio_split_queue_memcg(struct folio *folio,
+					   struct deferred_split *queue)
 {
-	struct mem_cgroup *memcg = folio_memcg(folio);
-	struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
+	if (mem_cgroup_disabled())
+		return NULL;
+	if (split_queue_node(folio_nid(folio)) == queue)
+		return NULL;
+	return container_of(queue, struct mem_cgroup, deferred_split_queue);
+}
 
-	if (memcg)
-		return &memcg->deferred_split_queue;
-	else
-		return &pgdat->deferred_split_queue;
+static struct deferred_split *memcg_split_queue(int nid, struct mem_cgroup *memcg)
+{
+	return memcg ? &memcg->deferred_split_queue : split_queue_node(nid);
 }
 #else
 static inline
-struct deferred_split *get_deferred_split_queue(struct folio *folio)
+struct mem_cgroup *folio_split_queue_memcg(struct folio *folio,
+					   struct deferred_split *queue)
 {
-	struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
-
-	return &pgdat->deferred_split_queue;
+	return NULL;
+}
+
+static struct deferred_split *memcg_split_queue(int nid, struct mem_cgroup *memcg)
+{
+	return split_queue_node(nid);
 }
 #endif
 
+static struct deferred_split *split_queue_lock(int nid, struct mem_cgroup *memcg)
+{
+	struct deferred_split *queue;
+
+	queue = memcg_split_queue(nid, memcg);
+	spin_lock(&queue->split_queue_lock);
+
+	return queue;
+}
+
+static struct deferred_split *
+split_queue_lock_irqsave(int nid, struct mem_cgroup *memcg, unsigned long *flags)
+{
+	struct deferred_split *queue;
+
+	queue = memcg_split_queue(nid, memcg);
+	spin_lock_irqsave(&queue->split_queue_lock, *flags);
+
+	return queue;
+}
+
+static struct deferred_split *folio_split_queue_lock(struct folio *folio)
+{
+	return split_queue_lock(folio_nid(folio), folio_memcg(folio));
+}
+
+static struct deferred_split *
+folio_split_queue_lock_irqsave(struct folio *folio, unsigned long *flags)
+{
+	return split_queue_lock_irqsave(folio_nid(folio), folio_memcg(folio), flags);
+}
+
+static inline void split_queue_unlock(struct deferred_split *queue)
+{
+	spin_unlock(&queue->split_queue_lock);
+}
+
+static inline void split_queue_unlock_irqrestore(struct deferred_split *queue,
+						 unsigned long flags)
+{
+	spin_unlock_irqrestore(&queue->split_queue_lock, flags);
+}
+
 static inline bool is_transparent_hugepage(const struct folio *folio)
 {
 	if (!folio_test_large(folio))
@@ -3690,7 +3748,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 		struct page *split_at, struct page *lock_at,
 		struct list_head *list, enum split_type split_type, bool unmapped)
 {
-	struct deferred_split *ds_queue = get_deferred_split_queue(folio);
+	struct deferred_split *ds_queue;
 	XA_STATE(xas, &folio->mapping->i_pages, folio->index);
 	struct folio *end_folio = folio_next(folio);
 	bool is_anon = folio_test_anon(folio);
@@ -3824,7 +3882,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 	}
 
 	/* Prevent deferred_split_scan() touching ->_refcount */
-	spin_lock(&ds_queue->split_queue_lock);
+	ds_queue = folio_split_queue_lock(folio);
 	if (folio_ref_freeze(folio, 1 + extra_pins)) {
 		struct swap_cluster_info *ci = NULL;
 		struct lruvec *lruvec;
@@ -3846,7 +3904,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 			 */
 			list_del_init(&folio->_deferred_list);
 		}
-		spin_unlock(&ds_queue->split_queue_lock);
+		split_queue_unlock(ds_queue);
 
 		if (mapping) {
 			int nr = folio_nr_pages(folio);
@@ -3946,7 +4004,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 		if (ci)
 			swap_cluster_unlock(ci);
 	} else {
-		spin_unlock(&ds_queue->split_queue_lock);
+		split_queue_unlock(ds_queue);
 		ret = -EAGAIN;
 	}
 fail:
@@ -4129,8 +4187,7 @@ bool __folio_unqueue_deferred_split(struct folio *folio)
 	WARN_ON_ONCE(folio_ref_count(folio));
 	WARN_ON_ONCE(!mem_cgroup_disabled() && !folio_memcg_charged(folio));
 
-	ds_queue = get_deferred_split_queue(folio);
-	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
+	ds_queue = folio_split_queue_lock_irqsave(folio, &flags);
 	if (!list_empty(&folio->_deferred_list)) {
 		ds_queue->split_queue_len--;
 		if (folio_test_partially_mapped(folio)) {
@@ -4141,7 +4198,7 @@ bool __folio_unqueue_deferred_split(struct folio *folio)
 		list_del_init(&folio->_deferred_list);
 		unqueued = true;
 	}
-	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
+	split_queue_unlock_irqrestore(ds_queue, flags);
 
 	return unqueued;	/* useful for debug warnings */
 }
@@ -4149,10 +4206,7 @@ bool __folio_unqueue_deferred_split(struct folio *folio)
 /* partially_mapped=false won't clear PG_partially_mapped folio flag */
 void deferred_split_folio(struct folio *folio, bool partially_mapped)
 {
-	struct deferred_split *ds_queue = get_deferred_split_queue(folio);
-#ifdef CONFIG_MEMCG
-	struct mem_cgroup *memcg = folio_memcg(folio);
-#endif
+	struct deferred_split *ds_queue;
 	unsigned long flags;
 
 	/*
@@ -4175,7 +4229,7 @@ void deferred_split_folio(struct folio *folio, bool partially_mapped)
 	if (folio_test_swapcache(folio))
 		return;
 
-	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
+	ds_queue = folio_split_queue_lock_irqsave(folio, &flags);
 	if (partially_mapped) {
 		if (!folio_test_partially_mapped(folio)) {
 			folio_set_partially_mapped(folio);
@@ -4190,15 +4244,16 @@ void deferred_split_folio(struct folio *folio, bool partially_mapped)
 		VM_WARN_ON_FOLIO(folio_test_partially_mapped(folio), folio);
 	}
 	if (list_empty(&folio->_deferred_list)) {
+		struct mem_cgroup *memcg;
+
+		memcg = folio_split_queue_memcg(folio, ds_queue);
 		list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
 		ds_queue->split_queue_len++;
-#ifdef CONFIG_MEMCG
 		if (memcg)
 			set_shrinker_bit(memcg, folio_nid(folio),
-					 deferred_split_shrinker->id);
-#endif
+					 shrinker_id(deferred_split_shrinker));
 	}
-	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
+	split_queue_unlock_irqrestore(ds_queue, flags);
 }
 
 static unsigned long deferred_split_count(struct shrinker *shrink,
@@ -4244,19 +4299,13 @@ static bool thp_underused(struct folio *folio)
 static unsigned long deferred_split_scan(struct shrinker *shrink,
 					 struct shrink_control *sc)
 {
-	struct pglist_data *pgdata = NODE_DATA(sc->nid);
-	struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
+	struct deferred_split *ds_queue;
 	unsigned long flags;
 	LIST_HEAD(list);
 	struct folio *folio, *next, *prev = NULL;
 	int split = 0, removed = 0;
 
-#ifdef CONFIG_MEMCG
-	if (sc->memcg)
-		ds_queue = &sc->memcg->deferred_split_queue;
-#endif
-
-	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
+	ds_queue = split_queue_lock_irqsave(sc->nid, sc->memcg, &flags);
 	/* Take pin on all head pages to avoid freeing them under us */
 	list_for_each_entry_safe(folio, next, &ds_queue->split_queue,
 							_deferred_list) {
@@ -4275,7 +4324,7 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
 		if (!--sc->nr_to_scan)
 			break;
 	}
-	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
+	split_queue_unlock_irqrestore(ds_queue, flags);
 
 	list_for_each_entry_safe(folio, next, &list, _deferred_list) {
 		bool did_split = false;
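
For reference, a minimal sketch (not from the patch) of how the
shrinker side pairs the nid/memcg variants, mirroring
deferred_split_scan() above:

	struct deferred_split *ds_queue;
	unsigned long flags;

	/* sc->memcg is NULL for the node queue, non-NULL for a memcg
	 * queue; memcg_split_queue() picks the right one either way. */
	ds_queue = split_queue_lock_irqsave(sc->nid, sc->memcg, &flags);
	/* ... walk ds_queue->split_queue under the lock ... */
	split_queue_unlock_irqrestore(ds_queue, flags);

Each lock helper hands back the struct deferred_split it locked, and
each unlock helper takes that same pointer, keeping lock/unlock pairs
correct even once a folio's queue binding can change at runtime.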