rhashtable: use likely for rhashtable lookup

Sometimes the entry that rhashtable_lookup() looks for is expected to be
present, so likely() can be used in such cases.

The following new functions are introduced; they apply likely() during the
lookup (see the usage sketch after the list):

 rhashtable_lookup_likely
 rhltable_lookup_likely
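
As an illustration only (not part of this patch), a caller that expects the
key to be present could use the new helper roughly as below. The struct,
field and function names are hypothetical; as with rhashtable_lookup(), the
caller must hold rcu_read_lock():

	#include <linux/rhashtable.h>

	struct my_obj {
		u32 key;
		struct rhash_head node;
	};

	static const struct rhashtable_params my_params = {
		.key_len     = sizeof(u32),
		.key_offset  = offsetof(struct my_obj, key),
		.head_offset = offsetof(struct my_obj, node),
	};

	/* Returns the object, or NULL in the (unexpected) miss case. */
	static struct my_obj *my_find(struct rhashtable *ht, u32 key)
	{
		return rhashtable_lookup_likely(ht, &key, my_params);
	}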

A micro-benchmark was run for the new functions: an existing entry is looked
up repeatedly 100000000 times, and rhashtable_lookup_likely() shows a ~30%
speedup.
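
For reference, the kind of micro-benchmark described above could look roughly
like the following untested sketch (reusing the hypothetical my_params from
the sketch above; holding rcu_read_lock() across that many iterations would
trigger RCU stall warnings in a real kernel, so a real test would split the
loop into smaller chunks):

	static void bench_lookup(struct rhashtable *ht, const u32 *key)
	{
		unsigned long hits = 0;
		u64 start, delta;
		long i;

		rcu_read_lock();
		start = ktime_get_ns();
		for (i = 0; i < 100000000L; i++)
			hits += !!rhashtable_lookup_likely(ht, key, my_params);
		delta = ktime_get_ns() - start;
		rcu_read_unlock();

		pr_info("%lu hits in %llu ns\n", hits, delta);
	}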

Signed-off-by: Menglong Dong <dongml2@chinatelecom.cn>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
@@ -355,12 +355,25 @@ static inline void rht_unlock(struct bucket_table *tbl,
 	local_irq_restore(flags);
 }
 
-static inline struct rhash_head *__rht_ptr(
-	struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt)
+enum rht_lookup_freq {
+	RHT_LOOKUP_NORMAL,
+	RHT_LOOKUP_LIKELY,
+};
+
+static __always_inline struct rhash_head *__rht_ptr(
+	struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt,
+	const enum rht_lookup_freq freq)
 {
-	return (struct rhash_head *)
-		((unsigned long)p & ~BIT(0) ?:
-		 (unsigned long)RHT_NULLS_MARKER(bkt));
+	unsigned long p_val = (unsigned long)p & ~BIT(0);
+
+	BUILD_BUG_ON(!__builtin_constant_p(freq));
+
+	if (freq == RHT_LOOKUP_LIKELY)
+		return (struct rhash_head *)
+			(likely(p_val) ? p_val : (unsigned long)RHT_NULLS_MARKER(bkt));
+	else
+		return (struct rhash_head *)
+			(p_val ?: (unsigned long)RHT_NULLS_MARKER(bkt));
 }
 
 /*
@@ -370,10 +383,17 @@ static inline struct rhash_head *__rht_ptr(
  * rht_ptr_exclusive() dereferences in a context where exclusive
  * access is guaranteed, such as when destroying the table.
  */
+static __always_inline struct rhash_head *__rht_ptr_rcu(
+	struct rhash_lock_head __rcu *const *bkt,
+	const enum rht_lookup_freq freq)
+{
+	return __rht_ptr(rcu_dereference_all(*bkt), bkt, freq);
+}
+
 static inline struct rhash_head *rht_ptr_rcu(
 	struct rhash_lock_head __rcu *const *bkt)
 {
-	return __rht_ptr(rcu_dereference_all(*bkt), bkt);
+	return __rht_ptr_rcu(bkt, RHT_LOOKUP_NORMAL);
 }
 
 static inline struct rhash_head *rht_ptr(
@@ -381,13 +401,15 @@ static inline struct rhash_head *rht_ptr(
 	struct bucket_table *tbl,
 	unsigned int hash)
 {
-	return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt);
+	return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt,
+			 RHT_LOOKUP_NORMAL);
 }
 
 static inline struct rhash_head *rht_ptr_exclusive(
 	struct rhash_lock_head __rcu *const *bkt)
 {
-	return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt);
+	return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt,
+			 RHT_LOOKUP_NORMAL);
 }
 
 static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
@@ -588,7 +610,8 @@ static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
 /* Internal function, do not use. */
 static __always_inline struct rhash_head *__rhashtable_lookup(
 	struct rhashtable *ht, const void *key,
-	const struct rhashtable_params params)
+	const struct rhashtable_params params,
+	const enum rht_lookup_freq freq)
 {
 	struct rhashtable_compare_arg arg = {
 		.ht = ht,
@@ -599,12 +622,13 @@ static __always_inline struct rhash_head *__rhashtable_lookup(
 	struct rhash_head *he;
 	unsigned int hash;
 
+	BUILD_BUG_ON(!__builtin_constant_p(freq));
 	tbl = rht_dereference_rcu(ht->tbl, ht);
 restart:
 	hash = rht_key_hashfn(ht, tbl, key, params);
 	bkt = rht_bucket(tbl, hash);
 	do {
-		rht_for_each_rcu_from(he, rht_ptr_rcu(bkt), tbl, hash) {
+		rht_for_each_rcu_from(he, __rht_ptr_rcu(bkt, freq), tbl, hash) {
 			if (params.obj_cmpfn ?
 			    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
 			    rhashtable_compare(&arg, rht_obj(ht, he)))
@@ -643,11 +667,22 @@ static __always_inline void *rhashtable_lookup(
 	struct rhashtable *ht, const void *key,
 	const struct rhashtable_params params)
 {
-	struct rhash_head *he = __rhashtable_lookup(ht, key, params);
+	struct rhash_head *he = __rhashtable_lookup(ht, key, params,
+						    RHT_LOOKUP_NORMAL);
 
 	return he ? rht_obj(ht, he) : NULL;
 }
 
+static __always_inline void *rhashtable_lookup_likely(
+	struct rhashtable *ht, const void *key,
+	const struct rhashtable_params params)
+{
+	struct rhash_head *he = __rhashtable_lookup(ht, key, params,
+						    RHT_LOOKUP_LIKELY);
+
+	return likely(he) ? rht_obj(ht, he) : NULL;
+}
+
 /**
  * rhashtable_lookup_fast - search hash table, without RCU read lock
  * @ht: hash table
@@ -693,11 +728,22 @@ static __always_inline struct rhlist_head *rhltable_lookup(
 	struct rhltable *hlt, const void *key,
 	const struct rhashtable_params params)
 {
-	struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params);
+	struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params,
+						    RHT_LOOKUP_NORMAL);
 
 	return he ? container_of(he, struct rhlist_head, rhead) : NULL;
 }
 
+static __always_inline struct rhlist_head *rhltable_lookup_likely(
+	struct rhltable *hlt, const void *key,
+	const struct rhashtable_params params)
+{
+	struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params,
+						    RHT_LOOKUP_LIKELY);
+
+	return likely(he) ? container_of(he, struct rhlist_head, rhead) : NULL;
+}
+
 /* Internal function, please use rhashtable_insert_fast() instead. This
 * function returns the existing element already in hashes if there is a clash,
 * otherwise it returns an error via ERR_PTR().