Merge tag 'bitmap-for-6.19' of github.com:/norov/linux

Pull bitmap updates from Yury Norov:

 - Runtime field_{get,prep}() (Geert)

 - Rust ID pool updates (Alice)

 - min_t() simplification (David)

 - __sw_hweightN kernel-doc fixes (Andy)

 - cpumask.h headers cleanup (Andy)

* tag 'bitmap-for-6.19' of github.com:/norov/linux: (32 commits)
  rust_binder: use bitmap for allocation of handles
  rust: id_pool: do not immediately acquire new ids
  rust: id_pool: do not supply starting capacity
  rust: id_pool: rename IdPool::new() to with_capacity()
  rust: bitmap: add BitmapVec::new_inline()
  rust: bitmap: add MAX_LEN and MAX_INLINE_LEN constants
  cpumask: Don't use "proxy" headers
  soc: renesas: Use bitfield helpers
  clk: renesas: Use bitfield helpers
  ALSA: usb-audio: Convert to common field_{get,prep}() helpers
  soc: renesas: rz-sysc: Convert to common field_get() helper
  pinctrl: ma35: Convert to common field_{get,prep}() helpers
  iio: mlx90614: Convert to common field_{get,prep}() helpers
  iio: dac: Convert to common field_prep() helper
  gpio: aspeed: Convert to common field_{get,prep}() helpers
  EDAC/ie31200: Convert to common field_get() helper
  crypto: qat - convert to common field_get() helper
  clk: at91: Convert to common field_{get,prep}() helpers
  bitfield: Add non-constant field_{prep,get}() helpers
  bitfield: Add less-checking __FIELD_{GET,PREP}()
  ...
Author: Linus Torvalds
Date:   2025-12-06 09:01:27 -08:00

23 changed files with 303 additions and 142 deletions


@@ -4432,6 +4432,7 @@ F: arch/*/lib/bitops.c
 F: include/asm-generic/bitops
 F: include/asm-generic/bitops.h
 F: include/linux/bitops.h
+F: lib/hweight.c
 F: lib/test_bitops.c
 F: tools/*/bitops*


@@ -2,6 +2,8 @@
 #ifndef _ASM_X86_CPUMASK_H
 #define _ASM_X86_CPUMASK_H
 #ifndef __ASSEMBLER__
+#include <linux/compiler.h>
 #include <linux/cpumask.h>

 extern void setup_cpu_local_masks(void);


@@ -19,6 +19,7 @@ use kernel::{
     cred::Credential,
     error::Error,
     fs::file::{self, File},
+    id_pool::IdPool,
     list::{List, ListArc, ListArcField, ListLinks},
     mm,
     prelude::*,
@@ -394,6 +395,8 @@ kernel::list::impl_list_item! {
 struct ProcessNodeRefs {
     /// Used to look up nodes using the 32-bit id that this process knows it by.
     by_handle: RBTree<u32, ListArc<NodeRefInfo, { NodeRefInfo::LIST_PROC }>>,
+    /// Used to quickly find unused ids in `by_handle`.
+    handle_is_present: IdPool,
     /// Used to look up nodes without knowing their local 32-bit id. The usize is the address of
     /// the underlying `Node` struct as returned by `Node::global_id`.
     by_node: RBTree<usize, u32>,
@@ -408,6 +411,7 @@ impl ProcessNodeRefs {
     fn new() -> Self {
         Self {
             by_handle: RBTree::new(),
+            handle_is_present: IdPool::new(),
             by_node: RBTree::new(),
             freeze_listeners: RBTree::new(),
         }
@@ -802,7 +806,7 @@ impl Process {
     pub(crate) fn insert_or_update_handle(
         self: ArcBorrow<'_, Process>,
         node_ref: NodeRef,
-        is_mananger: bool,
+        is_manager: bool,
     ) -> Result<u32> {
         {
             let mut refs = self.node_refs.lock();
@@ -821,7 +825,33 @@ impl Process {
         let reserve2 = RBTreeNodeReservation::new(GFP_KERNEL)?;
         let info = UniqueArc::new_uninit(GFP_KERNEL)?;

-        let mut refs = self.node_refs.lock();
+        let mut refs_lock = self.node_refs.lock();
+        let mut refs = &mut *refs_lock;
+        let (unused_id, by_handle_slot) = loop {
+            // ID 0 may only be used by the manager.
+            let start = if is_manager { 0 } else { 1 };
+            if let Some(res) = refs.handle_is_present.find_unused_id(start) {
+                match refs.by_handle.entry(res.as_u32()) {
+                    rbtree::Entry::Vacant(entry) => break (res, entry),
+                    rbtree::Entry::Occupied(_) => {
+                        pr_err!("Detected mismatch between handle_is_present and by_handle");
+                        res.acquire();
+                        kernel::warn_on!(true);
+                        return Err(EINVAL);
+                    }
+                }
+            }
+
+            let grow_request = refs.handle_is_present.grow_request().ok_or(ENOMEM)?;
+            drop(refs_lock);
+            let resizer = grow_request.realloc(GFP_KERNEL)?;
+            refs_lock = self.node_refs.lock();
+            refs = &mut *refs_lock;
+            refs.handle_is_present.grow(resizer);
+        };
+        let handle = unused_id.as_u32();

         // Do a lookup again as node may have been inserted before the lock was reacquired.
         if let Some(handle_ref) = refs.by_node.get(&node_ref.node.global_id()) {
@@ -831,20 +861,9 @@ impl Process {
             return Ok(handle);
         }

-        // Find id.
-        let mut target: u32 = if is_mananger { 0 } else { 1 };
-        for handle in refs.by_handle.keys() {
-            if *handle > target {
-                break;
-            }
-            if *handle == target {
-                target = target.checked_add(1).ok_or(ENOMEM)?;
-            }
-        }
         let gid = node_ref.node.global_id();
         let (info_proc, info_node) = {
-            let info_init = NodeRefInfo::new(node_ref, target, self.into());
+            let info_init = NodeRefInfo::new(node_ref, handle, self.into());
             match info.pin_init_with(info_init) {
                 Ok(info) => ListArc::pair_from_pin_unique(info),
                 // error is infallible
@@ -865,9 +884,10 @@
         // `info_node` into the right node's `refs` list.
         unsafe { info_proc.node_ref2().node.insert_node_info(info_node) };

-        refs.by_node.insert(reserve1.into_node(gid, target));
-        refs.by_handle.insert(reserve2.into_node(target, info_proc));
-        Ok(target)
+        refs.by_node.insert(reserve1.into_node(gid, handle));
+        by_handle_slot.insert(info_proc, reserve2);
+        unused_id.acquire();
+        Ok(handle)
     }

     pub(crate) fn get_transaction_node(&self, handle: u32) -> BinderResult<NodeRef> {
@@ -932,6 +952,16 @@ impl Process {
             let id = info.node_ref().node.global_id();
             refs.by_handle.remove(&handle);
             refs.by_node.remove(&id);
+            refs.handle_is_present.release_id(handle as usize);
+
+            if let Some(shrink) = refs.handle_is_present.shrink_request() {
+                drop(refs);
+                // This intentionally ignores allocation failures.
+                if let Ok(new_bitmap) = shrink.realloc(GFP_KERNEL) {
+                    refs = self.node_refs.lock();
+                    refs.handle_is_present.shrink(new_bitmap);
+                }
+            }
         }
     } else {
         // All refs are cleared in process exit, so this warning is expected in that case.
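The loop above is the usual grow-outside-the-lock shape: look for a free ID under the lock; if the pool is full, drop the lock, allocate a bigger bitmap, retake the lock, recheck, and retry. A minimal C sketch of the same idea, with hypothetical types and names (not code from this series; assumes a non-zero initial capacity):

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/minmax.h>
#include <linux/spinlock.h>

struct id_table {
	spinlock_t lock;
	unsigned long *bits;
	unsigned int capacity;
};

static int id_table_acquire(struct id_table *t)
{
	unsigned long *new_bits;
	unsigned long id;
	unsigned int new_cap;

	spin_lock(&t->lock);
	for (;;) {
		id = find_first_zero_bit(t->bits, t->capacity);
		if (id < t->capacity)
			break;

		/* Pool is full: allocate a bigger bitmap without holding the lock. */
		new_cap = t->capacity * 2;
		spin_unlock(&t->lock);
		new_bits = bitmap_zalloc(new_cap, GFP_KERNEL);
		if (!new_bits)
			return -ENOMEM;

		spin_lock(&t->lock);
		/* Someone else may have grown the pool while the lock was dropped. */
		if (new_cap > t->capacity) {
			bitmap_copy(new_bits, t->bits, t->capacity);
			swap(t->bits, new_bits);
			t->capacity = new_cap;
		}
		bitmap_free(new_bits);	/* frees the old map, or the unused allocation */
	}
	set_bit(id, t->bits);
	spin_unlock(&t->lock);
	return id;
}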


@@ -3,6 +3,7 @@
  * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
  */

+#include <linux/bitfield.h>
 #include <linux/bitops.h>
 #include <linux/clk-provider.h>
 #include <linux/clkdev.h>


@@ -117,9 +117,6 @@ struct at91_clk_pms {
 	unsigned int parent;
 };

-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
 #define ndck(a, s) (a[s - 1].id + 1)
 #define nck(a) (a[ARRAY_SIZE(a) - 1].id + 1)


@@ -7,6 +7,7 @@
  * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
  */

+#include <linux/bitfield.h>
 #include <linux/clk-provider.h>
 #include <linux/init.h>
 #include <linux/io.h>
@@ -171,8 +172,7 @@ static u8 cpg_div6_clock_get_parent(struct clk_hw *hw)
 	if (clock->src_mask == 0)
 		return 0;

-	hw_index = (readl(clock->reg) & clock->src_mask) >>
-		   __ffs(clock->src_mask);
+	hw_index = field_get(clock->src_mask, readl(clock->reg));
 	for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
 		if (clock->parents[i] == hw_index)
 			return i;
@@ -191,7 +191,7 @@ static int cpg_div6_clock_set_parent(struct clk_hw *hw, u8 index)
 	if (index >= clk_hw_get_num_parents(hw))
 		return -EINVAL;

-	src = clock->parents[index] << __ffs(clock->src_mask);
+	src = field_prep(clock->src_mask, clock->parents[index]);
 	writel((readl(clock->reg) & ~clock->src_mask) | src, clock->reg);
 	return 0;
 }


@@ -54,10 +54,8 @@ static unsigned long cpg_pll_clk_recalc_rate(struct clk_hw *hw,
 {
 	struct cpg_pll_clk *pll_clk = to_pll_clk(hw);
 	unsigned int mult;
-	u32 val;

-	val = readl(pll_clk->pllcr_reg) & CPG_PLLnCR_STC_MASK;
-	mult = (val >> __ffs(CPG_PLLnCR_STC_MASK)) + 1;
+	mult = FIELD_GET(CPG_PLLnCR_STC_MASK, readl(pll_clk->pllcr_reg)) + 1;

 	return parent_rate * mult * pll_clk->fixed_mult;
 }
@@ -94,7 +92,7 @@ static int cpg_pll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
 	val = readl(pll_clk->pllcr_reg);
 	val &= ~CPG_PLLnCR_STC_MASK;
-	val |= (mult - 1) << __ffs(CPG_PLLnCR_STC_MASK);
+	val |= FIELD_PREP(CPG_PLLnCR_STC_MASK, mult - 1);
 	writel(val, pll_clk->pllcr_reg);

 	for (i = 1000; i; i--) {
@@ -176,11 +174,7 @@ static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
 					   unsigned long parent_rate)
 {
 	struct cpg_z_clk *zclk = to_z_clk(hw);
-	unsigned int mult;
-	u32 val;
-
-	val = readl(zclk->reg) & zclk->mask;
-	mult = 32 - (val >> __ffs(zclk->mask));
+	unsigned int mult = 32 - field_get(zclk->mask, readl(zclk->reg));

 	return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult,
 				     32 * zclk->fixed_div);
@@ -231,7 +225,8 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
 	if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
 		return -EBUSY;

-	cpg_reg_modify(zclk->reg, zclk->mask, (32 - mult) << __ffs(zclk->mask));
+	cpg_reg_modify(zclk->reg, zclk->mask,
+		       field_prep(zclk->mask, 32 - mult));

 	/*
 	 * Set KICK bit in FRQCRB to update hardware setting and wait for


@@ -279,11 +279,7 @@ static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
 					   unsigned long parent_rate)
 {
 	struct cpg_z_clk *zclk = to_z_clk(hw);
-	unsigned int mult;
-	u32 val;
-
-	val = readl(zclk->reg) & zclk->mask;
-	mult = 32 - (val >> __ffs(zclk->mask));
+	unsigned int mult = 32 - field_get(zclk->mask, readl(zclk->reg));

 	return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult,
 				     32 * zclk->fixed_div);
@@ -334,7 +330,8 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
 	if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
 		return -EBUSY;

-	cpg_reg_modify(zclk->reg, zclk->mask, (32 - mult) << __ffs(zclk->mask));
+	cpg_reg_modify(zclk->reg, zclk->mask,
+		       field_prep(zclk->mask, 32 - mult));

 	/*
 	 * Set KICK bit in FRQCRB to update hardware setting and wait for


@@ -1,18 +1,12 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright(c) 2025 Intel Corporation */

+#include <linux/bitfield.h>
 #include <linux/bitops.h>
 #include <linux/sprintf.h>
 #include <linux/string_helpers.h>

 #include "adf_pm_dbgfs_utils.h"

-/*
- * This is needed because a variable is used to index the mask at
- * pm_scnprint_table(), making it not compile time constant, so the compile
- * asserts from FIELD_GET() or u32_get_bits() won't be fulfilled.
- */
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
 #define PM_INFO_MAX_KEY_LEN 21

 static int pm_scnprint_table(char *buff, const struct pm_status_row *table,


@@ -44,6 +44,7 @@
  * but lo_hi_readq() ensures that we are safe across all e3-1200 processors.
  */

+#include <linux/bitfield.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/pci.h>
@@ -139,9 +140,6 @@
 #define IE31200_CAPID0_DDPCD BIT(6)
 #define IE31200_CAPID0_ECC BIT(1)

-/* Non-constant mask variant of FIELD_GET() */
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
 static int nr_channels;
 static struct pci_dev *mci_pdev;
 static int ie31200_registered = 1;


@@ -5,6 +5,7 @@
  * Joel Stanley <joel@jms.id.au>
  */

+#include <linux/bitfield.h>
 #include <linux/cleanup.h>
 #include <linux/clk.h>
 #include <linux/gpio/aspeed.h>
@@ -30,10 +31,6 @@
  */
 #include <linux/gpio/consumer.h>

-/* Non-constant mask variant of FIELD_GET() and FIELD_PREP() */
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
 #define GPIO_G7_IRQ_STS_BASE 0x100
 #define GPIO_G7_IRQ_STS_OFFSET(x) (GPIO_G7_IRQ_STS_BASE + (x) * 0x4)
 #define GPIO_G7_CTRL_REG_BASE 0x180


@@ -53,9 +53,6 @@
 #define AD3530R_MAX_CHANNELS 8
 #define AD3531R_MAX_CHANNELS 4

-/* Non-constant mask variant of FIELD_PREP() */
-#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
 enum ad3530r_mode {
 	AD3530R_NORMAL_OP,
 	AD3530R_POWERDOWN_1K,


@@ -22,6 +22,7 @@
  * the "wakeup" GPIO is not given, power management will be disabled.
  */

+#include <linux/bitfield.h>
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/gpio/consumer.h>
@@ -68,10 +69,6 @@
 #define MLX90614_CONST_SCALE 20 /* Scale in milliKelvin (0.02 * 1000) */
 #define MLX90614_CONST_FIR 0x7 /* Fixed value for FIR part of low pass filter */

-/* Non-constant mask variant of FIELD_GET() and FIELD_PREP() */
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
 struct mlx_chip_info {
 	/* EEPROM offsets with 16-bit data, MSB first */
 	/* emissivity correction coefficient */


@@ -81,10 +81,6 @@
 #define MVOLT_1800 0
 #define MVOLT_3300 1

-/* Non-constant mask variant of FIELD_GET() and FIELD_PREP() */
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
 static const char * const gpio_group_name[] = {
 	"gpioa", "gpiob", "gpioc", "gpiod", "gpioe", "gpiof", "gpiog",
 	"gpioh", "gpioi", "gpioj", "gpiok", "gpiol", "gpiom", "gpion",


@@ -5,6 +5,7 @@
  * Copyright (C) 2014-2016 Glider bvba
  */

+#include <linux/bitfield.h>
 #include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
@@ -524,8 +525,7 @@ static int __init renesas_soc_init(void)
 			 eshi, eslo);
 	}

-	if (soc->id &&
-	    ((product & id->mask) >> __ffs(id->mask)) != soc->id) {
+	if (soc->id && field_get(id->mask, product) != soc->id) {
 		pr_warn("SoC mismatch (product = 0x%x)\n", product);
 		ret = -ENODEV;
 		goto free_soc_dev_attr;


@@ -5,6 +5,7 @@
  * Copyright (C) 2024 Renesas Electronics Corp.
  */

+#include <linux/bitfield.h>
 #include <linux/cleanup.h>
 #include <linux/io.h>
 #include <linux/mfd/syscon.h>
@@ -16,8 +17,6 @@
 #include "rz-sysc.h"

-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-
 /**
  * struct rz_sysc - RZ SYSC private data structure
  * @base: SYSC base address


@@ -17,6 +17,7 @@
  * FIELD_{GET,PREP} macros take as first parameter shifted mask
  * from which they extract the base mask and shift amount.
  * Mask must be a compilation time constant.
+ * field_{get,prep} are variants that take a non-const mask.
  *
  * Example:
  *
@@ -60,7 +61,7 @@
 #define __bf_cast_unsigned(type, x)	((__unsigned_scalar_typeof(type))(x))

-#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx)			\
+#define __BF_FIELD_CHECK_MASK(_mask, _val, _pfx)			\
 	({								\
 		BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask),		\
 				 _pfx "mask is not constant");		\
@@ -69,13 +70,33 @@
 				 ~((_mask) >> __bf_shf(_mask)) &	\
 				 (0 + (_val)) : 0,			\
 				 _pfx "value too large for the field"); \
-		BUILD_BUG_ON_MSG(__bf_cast_unsigned(_mask, _mask) >	\
-				 __bf_cast_unsigned(_reg, ~0ull),	\
-				 _pfx "type of reg too small for mask"); \
 		__BUILD_BUG_ON_NOT_POWER_OF_2((_mask) +			\
 					      (1ULL << __bf_shf(_mask))); \
 	})

+#define __BF_FIELD_CHECK_REG(mask, reg, pfx)				\
+	BUILD_BUG_ON_MSG(__bf_cast_unsigned(mask, mask) >		\
+			 __bf_cast_unsigned(reg, ~0ull),		\
+			 pfx "type of reg too small for mask")
+
+#define __BF_FIELD_CHECK(mask, reg, val, pfx)				\
+	({								\
+		__BF_FIELD_CHECK_MASK(mask, val, pfx);			\
+		__BF_FIELD_CHECK_REG(mask, reg, pfx);			\
+	})
+
+#define __FIELD_PREP(mask, val, pfx)					\
+	({								\
+		__BF_FIELD_CHECK_MASK(mask, val, pfx);			\
+		((typeof(mask))(val) << __bf_shf(mask)) & (mask);	\
+	})
+
+#define __FIELD_GET(mask, reg, pfx)					\
+	({								\
+		__BF_FIELD_CHECK_MASK(mask, 0U, pfx);			\
+		(typeof(mask))(((reg) & (mask)) >> __bf_shf(mask));	\
+	})
+
 /**
  * FIELD_MAX() - produce the maximum value representable by a field
  * @_mask: shifted mask defining the field's length and position
@@ -112,8 +133,8 @@
  */
 #define FIELD_PREP(_mask, _val)						\
 	({								\
-		__BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: ");	\
-		((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask);	\
+		__BF_FIELD_CHECK_REG(_mask, 0ULL, "FIELD_PREP: ");	\
+		__FIELD_PREP(_mask, _val, "FIELD_PREP: ");		\
 	})

 #define __BF_CHECK_POW2(n)	BUILD_BUG_ON_ZERO(((n) & ((n) - 1)) != 0)
@@ -152,8 +173,8 @@
  */
 #define FIELD_GET(_mask, _reg)						\
 	({								\
-		__BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: ");	\
-		(typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
+		__BF_FIELD_CHECK_REG(_mask, _reg, "FIELD_GET: ");	\
+		__FIELD_GET(_mask, _reg, "FIELD_GET: ");		\
 	})

 /**
@@ -220,4 +241,62 @@ __MAKE_OP(64)
 #undef __MAKE_OP
 #undef ____MAKE_OP

+#define __field_prep(mask, val)						\
+	({								\
+		__auto_type __mask = (mask);				\
+		typeof(__mask) __val = (val);				\
+		unsigned int __shift = BITS_PER_TYPE(__mask) <= 32 ?	\
+			__ffs(__mask) : __ffs64(__mask);		\
+		(__val << __shift) & __mask;				\
+	})
+
+#define __field_get(mask, reg)						\
+	({								\
+		__auto_type __mask = (mask);				\
+		typeof(__mask) __reg = (reg);				\
+		unsigned int __shift = BITS_PER_TYPE(__mask) <= 32 ?	\
+			__ffs(__mask) : __ffs64(__mask);		\
+		(__reg & __mask) >> __shift;				\
+	})
+
+/**
+ * field_prep() - prepare a bitfield element
+ * @mask: shifted mask defining the field's length and position, must be
+ *	  non-zero
+ * @val: value to put in the field
+ *
+ * Return: field value masked and shifted to its final destination
+ *
+ * field_prep() masks and shifts up the value. The result should be
+ * combined with other fields of the bitfield using logical OR.
+ * Unlike FIELD_PREP(), @mask is not limited to a compile-time constant.
+ * Typical usage patterns are a value stored in a table, or calculated by
+ * shifting a constant by a variable number of bits.
+ * If you want to ensure that @mask is a compile-time constant, please use
+ * FIELD_PREP() directly instead.
+ */
+#define field_prep(mask, val)						\
+	(__builtin_constant_p(mask) ? __FIELD_PREP(mask, val, "field_prep: ") \
+				    : __field_prep(mask, val))
+
+/**
+ * field_get() - extract a bitfield element
+ * @mask: shifted mask defining the field's length and position, must be
+ *	  non-zero
+ * @reg: value of entire bitfield
+ *
+ * Return: extracted field value
+ *
+ * field_get() extracts the field specified by @mask from the
+ * bitfield passed in as @reg by masking and shifting it down.
+ * Unlike FIELD_GET(), @mask is not limited to a compile-time constant.
+ * Typical usage patterns are a value stored in a table, or calculated by
+ * shifting a constant by a variable number of bits.
+ * If you want to ensure that @mask is a compile-time constant, please use
+ * FIELD_GET() directly instead.
+ */
+#define field_get(mask, reg)						\
+	(__builtin_constant_p(mask) ? __FIELD_GET(mask, reg, "field_get: ") \
+				    : __field_get(mask, reg))
+
 #endif
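For context, a sketch of when the new helpers apply (illustrative only, hypothetical register layout): a mask fetched from a table is not a compile-time constant, so FIELD_GET()'s BUILD_BUG_ON(!__builtin_constant_p(mask)) would refuse to build, while field_get() falls back to the __ffs()-based runtime path.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Hypothetical layout: each entry describes where a divider field lives. */
static const u32 div_field[] = { GENMASK(7, 4), GENMASK(15, 12) };

static u32 read_div(u32 reg, unsigned int i)
{
	/*
	 * div_field[i] is not a compile-time constant: FIELD_GET() would
	 * fail to build here, so field_get() picks the runtime path.
	 */
	return field_get(div_field[i], reg);
}

static u32 write_div(u32 reg, unsigned int i, u32 div)
{
	return (reg & ~div_field[i]) | field_prep(div_field[i], div);
}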


@@ -7,14 +7,16 @@
  * set of CPUs in a system, one bit position per CPU number. In general,
  * only nr_cpu_ids (<= NR_CPUS) bits are valid.
  */
-#include <linux/cleanup.h>
-#include <linux/kernel.h>
-#include <linux/bitmap.h>
-#include <linux/cpumask_types.h>
 #include <linux/atomic.h>
-#include <linux/bug.h>
+#include <linux/bitmap.h>
+#include <linux/cleanup.h>
+#include <linux/cpumask_types.h>
 #include <linux/gfp_types.h>
 #include <linux/numa.h>
+#include <linux/threads.h>
+#include <linux/types.h>
+#include <asm/bug.h>

 /**
  * cpumask_pr_args - printf args to output a cpumask


@@ -245,18 +245,18 @@ static __always_inline int __nodes_weight(const nodemask_t *srcp, unsigned int n
 }

 /* FIXME: better would be to fix all architectures to never return
-          > MAX_NUMNODES, then the silly min_ts could be dropped. */
+          > MAX_NUMNODES, then the silly min()s could be dropped. */

 #define first_node(src) __first_node(&(src))
 static __always_inline unsigned int __first_node(const nodemask_t *srcp)
 {
-	return min_t(unsigned int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
+	return min(MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
 }

 #define next_node(n, src) __next_node((n), &(src))
 static __always_inline unsigned int __next_node(int n, const nodemask_t *srcp)
 {
-	return min_t(unsigned int, MAX_NUMNODES, find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
+	return min(MAX_NUMNODES, find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
 }

 /*
@@ -293,8 +293,7 @@ static __always_inline void init_nodemask_of_node(nodemask_t *mask, int node)
 #define first_unset_node(mask) __first_unset_node(&(mask))
 static __always_inline unsigned int __first_unset_node(const nodemask_t *maskp)
 {
-	return min_t(unsigned int, MAX_NUMNODES,
-			find_first_zero_bit(maskp->bits, MAX_NUMNODES));
+	return min(MAX_NUMNODES, find_first_zero_bit(maskp->bits, MAX_NUMNODES));
 }

 #define NODE_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(MAX_NUMNODES)
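Why plain min() is now enough (my reading; the diff above is itself the evidence that this compiles): the kernel's min() accepts mixed-type comparisons when one operand is a non-negative constant expression, and MAX_NUMNODES qualifies, so min_t()'s explicit unsigned int cast was redundant. A standalone sketch:

#include <linux/minmax.h>
#include <linux/numa.h>

static unsigned int clamp_node(unsigned long bit)
{
	/*
	 * MAX_NUMNODES is a non-negative integer constant expression, so
	 * min() accepts it against the unsigned long returned by the
	 * find_*_bit() helpers without an explicit min_t() cast.
	 */
	return min(MAX_NUMNODES, bit);
}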


@@ -4,8 +4,8 @@
 #include <asm/types.h>

 /**
- * hweightN - returns the hamming weight of a N-bit word
- * @x: the word to weigh
+ * DOC: __sw_hweightN - returns the hamming weight of a N-bit word
+ * @w: the word to weigh
  *
  * The Hamming Weight of a number is the total number of bits set in it.
  */
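A trivial illustration of the documented semantics (not part of the patch):

#include <linux/bitops.h>

static unsigned int hweight_example(void)
{
	/* 0xf1 = 0b11110001 has five set bits, so its Hamming weight is 5. */
	return hweight32(0xf1);
}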


@@ -12,8 +12,6 @@ use crate::bindings;
 use crate::pr_err;
 use core::ptr::NonNull;

-const BITS_PER_LONG: usize = bindings::BITS_PER_LONG as usize;
-
 /// Represents a C bitmap. Wraps underlying C bitmap API.
 ///
 /// # Invariants
@@ -149,14 +147,14 @@ macro_rules! bitmap_assert_return {
 ///
 /// # Invariants
 ///
-/// * `nbits` is `<= i32::MAX` and never changes.
-/// * if `nbits <= bindings::BITS_PER_LONG`, then `repr` is a `usize`.
+/// * `nbits` is `<= MAX_LEN`.
+/// * if `nbits <= MAX_INLINE_LEN`, then `repr` is a `usize`.
 /// * otherwise, `repr` holds a non-null pointer to an initialized
 ///   array of `unsigned long` that is large enough to hold `nbits` bits.
 pub struct BitmapVec {
     /// Representation of bitmap.
     repr: BitmapRepr,
-    /// Length of this bitmap. Must be `<= i32::MAX`.
+    /// Length of this bitmap. Must be `<= MAX_LEN`.
     nbits: usize,
 }
@@ -164,7 +162,7 @@ impl core::ops::Deref for BitmapVec {
     type Target = Bitmap;

     fn deref(&self) -> &Bitmap {
-        let ptr = if self.nbits <= BITS_PER_LONG {
+        let ptr = if self.nbits <= BitmapVec::MAX_INLINE_LEN {
             // SAFETY: Bitmap is represented inline.
             #[allow(unused_unsafe, reason = "Safe since Rust 1.92.0")]
             unsafe {
@@ -183,7 +181,7 @@ impl core::ops::DerefMut for BitmapVec {
     fn deref_mut(&mut self) -> &mut Bitmap {
-        let ptr = if self.nbits <= BITS_PER_LONG {
+        let ptr = if self.nbits <= BitmapVec::MAX_INLINE_LEN {
             // SAFETY: Bitmap is represented inline.
             #[allow(unused_unsafe, reason = "Safe since Rust 1.92.0")]
             unsafe {
@@ -213,7 +211,7 @@ unsafe impl Sync for BitmapVec {}
 impl Drop for BitmapVec {
     fn drop(&mut self) {
-        if self.nbits <= BITS_PER_LONG {
+        if self.nbits <= BitmapVec::MAX_INLINE_LEN {
             return;
         }
         // SAFETY: `self.ptr` was returned by the C `bitmap_zalloc`.
@@ -226,23 +224,39 @@ impl Drop for BitmapVec {
 }

 impl BitmapVec {
+    /// The maximum possible length of a `BitmapVec`.
+    pub const MAX_LEN: usize = i32::MAX as usize;
+
+    /// The maximum length that uses the inline representation.
+    pub const MAX_INLINE_LEN: usize = usize::BITS as usize;
+
+    /// Construct a longest possible inline [`BitmapVec`].
+    #[inline]
+    pub fn new_inline() -> Self {
+        // INVARIANT: `nbits <= MAX_INLINE_LEN`, so an inline bitmap is the right repr.
+        BitmapVec {
+            repr: BitmapRepr { bitmap: 0 },
+            nbits: BitmapVec::MAX_INLINE_LEN,
+        }
+    }
+
     /// Constructs a new [`BitmapVec`].
     ///
     /// Fails with [`AllocError`] when the [`BitmapVec`] could not be allocated. This
-    /// includes the case when `nbits` is greater than `i32::MAX`.
+    /// includes the case when `nbits` is greater than `MAX_LEN`.
     #[inline]
     pub fn new(nbits: usize, flags: Flags) -> Result<Self, AllocError> {
-        if nbits <= BITS_PER_LONG {
+        if nbits <= BitmapVec::MAX_INLINE_LEN {
             return Ok(BitmapVec {
                 repr: BitmapRepr { bitmap: 0 },
                 nbits,
             });
         }
-        if nbits > i32::MAX.try_into().unwrap() {
+        if nbits > Self::MAX_LEN {
             return Err(AllocError);
         }
         let nbits_u32 = u32::try_from(nbits).unwrap();
-        // SAFETY: `BITS_PER_LONG < nbits` and `nbits <= i32::MAX`.
+        // SAFETY: `MAX_INLINE_LEN < nbits` and `nbits <= MAX_LEN`.
         let ptr = unsafe { bindings::bitmap_zalloc(nbits_u32, flags.as_raw()) };
         let ptr = NonNull::new(ptr).ok_or(AllocError)?;
         // INVARIANT: `ptr` returned by C `bitmap_zalloc` and `nbits` checked.
@@ -495,9 +509,10 @@ mod tests {
     #[test]
     fn bitmap_borrow() {
         let fake_bitmap: [usize; 2] = [0, 0];
+        let fake_bitmap_len = 2 * usize::BITS as usize;
         // SAFETY: `fake_c_bitmap` is an array of expected length.
-        let b = unsafe { Bitmap::from_raw(fake_bitmap.as_ptr(), 2 * BITS_PER_LONG) };
-        assert_eq!(2 * BITS_PER_LONG, b.len());
+        let b = unsafe { Bitmap::from_raw(fake_bitmap.as_ptr(), fake_bitmap_len) };
+        assert_eq!(fake_bitmap_len, b.len());
         assert_eq!(None, b.next_bit(0));
     }


@@ -7,8 +7,6 @@
 use crate::alloc::{AllocError, Flags};
 use crate::bitmap::BitmapVec;

-const BITS_PER_LONG: usize = bindings::BITS_PER_LONG as usize;
-
 /// Represents a dynamic ID pool backed by a [`BitmapVec`].
 ///
 /// Clients acquire and release IDs from unset bits in a bitmap.
@@ -25,22 +23,22 @@ const BITS_PER_LONG: usize = bindings::BITS_PER_LONG as usize;
 /// Basic usage
 ///
 /// ```
-/// use kernel::alloc::{AllocError, flags::GFP_KERNEL};
-/// use kernel::id_pool::IdPool;
+/// use kernel::alloc::AllocError;
+/// use kernel::id_pool::{IdPool, UnusedId};
 ///
-/// let mut pool = IdPool::new(64, GFP_KERNEL)?;
+/// let mut pool = IdPool::with_capacity(64, GFP_KERNEL)?;
 /// for i in 0..64 {
-///     assert_eq!(i, pool.acquire_next_id(i).ok_or(ENOSPC)?);
+///     assert_eq!(i, pool.find_unused_id(i).ok_or(ENOSPC)?.acquire());
 /// }
 ///
 /// pool.release_id(23);
-/// assert_eq!(23, pool.acquire_next_id(0).ok_or(ENOSPC)?);
+/// assert_eq!(23, pool.find_unused_id(0).ok_or(ENOSPC)?.acquire());
 ///
-/// assert_eq!(None, pool.acquire_next_id(0)); // time to realloc.
+/// assert!(pool.find_unused_id(0).is_none()); // time to realloc.
 /// let resizer = pool.grow_request().ok_or(ENOSPC)?.realloc(GFP_KERNEL)?;
 /// pool.grow(resizer);
 ///
-/// assert_eq!(pool.acquire_next_id(0), Some(64));
+/// assert_eq!(pool.find_unused_id(0).ok_or(ENOSPC)?.acquire(), 64);
 /// # Ok::<(), Error>(())
 /// ```
 ///
@@ -54,8 +52,8 @@ const BITS_PER_LONG: usize = bindings::BITS_PER_LONG as usize;
 /// fn get_id_maybe_realloc(guarded_pool: &SpinLock<IdPool>) -> Result<usize, AllocError> {
 ///     let mut pool = guarded_pool.lock();
 ///     loop {
-///         match pool.acquire_next_id(0) {
-///             Some(index) => return Ok(index),
+///         match pool.find_unused_id(0) {
+///             Some(index) => return Ok(index.acquire()),
 ///             None => {
 ///                 let alloc_request = pool.grow_request();
 ///                 drop(pool);
@@ -97,13 +95,24 @@ impl ReallocRequest {
 impl IdPool {
     /// Constructs a new [`IdPool`].
     ///
-    /// A capacity below [`BITS_PER_LONG`] is adjusted to
-    /// [`BITS_PER_LONG`].
+    /// The pool will have a capacity of [`MAX_INLINE_LEN`].
     ///
-    /// [`BITS_PER_LONG`]: srctree/include/asm-generic/bitsperlong.h
+    /// [`MAX_INLINE_LEN`]: BitmapVec::MAX_INLINE_LEN
     #[inline]
-    pub fn new(num_ids: usize, flags: Flags) -> Result<Self, AllocError> {
-        let num_ids = core::cmp::max(num_ids, BITS_PER_LONG);
+    pub fn new() -> Self {
+        Self {
+            map: BitmapVec::new_inline(),
+        }
+    }
+
+    /// Constructs a new [`IdPool`] with space for a specific number of bits.
+    ///
+    /// A capacity below [`MAX_INLINE_LEN`] is adjusted to [`MAX_INLINE_LEN`].
+    ///
+    /// [`MAX_INLINE_LEN`]: BitmapVec::MAX_INLINE_LEN
+    #[inline]
+    pub fn with_capacity(num_ids: usize, flags: Flags) -> Result<Self, AllocError> {
+        let num_ids = usize::max(num_ids, BitmapVec::MAX_INLINE_LEN);
         let map = BitmapVec::new(num_ids, flags)?;
         Ok(Self { map })
     }
@@ -116,28 +125,34 @@ impl IdPool {
     /// Returns a [`ReallocRequest`] if the [`IdPool`] can be shrunk, [`None`] otherwise.
     ///
-    /// The capacity of an [`IdPool`] cannot be shrunk below [`BITS_PER_LONG`].
+    /// The capacity of an [`IdPool`] cannot be shrunk below [`MAX_INLINE_LEN`].
     ///
-    /// [`BITS_PER_LONG`]: srctree/include/asm-generic/bitsperlong.h
+    /// [`MAX_INLINE_LEN`]: BitmapVec::MAX_INLINE_LEN
     ///
     /// # Examples
     ///
     /// ```
-    /// use kernel::alloc::{AllocError, flags::GFP_KERNEL};
-    /// use kernel::id_pool::{ReallocRequest, IdPool};
+    /// use kernel::{
+    ///     alloc::AllocError,
+    ///     bitmap::BitmapVec,
+    ///     id_pool::{
+    ///         IdPool,
+    ///         ReallocRequest,
+    ///     },
+    /// };
     ///
-    /// let mut pool = IdPool::new(1024, GFP_KERNEL)?;
+    /// let mut pool = IdPool::with_capacity(1024, GFP_KERNEL)?;
    /// let alloc_request = pool.shrink_request().ok_or(AllocError)?;
     /// let resizer = alloc_request.realloc(GFP_KERNEL)?;
     /// pool.shrink(resizer);
-    /// assert_eq!(pool.capacity(), kernel::bindings::BITS_PER_LONG as usize);
+    /// assert_eq!(pool.capacity(), BitmapVec::MAX_INLINE_LEN);
     /// # Ok::<(), AllocError>(())
     /// ```
     #[inline]
     pub fn shrink_request(&self) -> Option<ReallocRequest> {
         let cap = self.capacity();
-        // Shrinking below [`BITS_PER_LONG`] is never possible.
-        if cap <= BITS_PER_LONG {
+        // Shrinking below `MAX_INLINE_LEN` is never possible.
+        if cap <= BitmapVec::MAX_INLINE_LEN {
             return None;
         }

         // Determine if the bitmap can shrink based on the position of
@@ -146,13 +161,13 @@ impl IdPool {
         // bitmap should shrink to half its current size.
         let Some(bit) = self.map.last_bit() else {
             return Some(ReallocRequest {
-                num_ids: BITS_PER_LONG,
+                num_ids: BitmapVec::MAX_INLINE_LEN,
             });
         };

         if bit >= (cap / 4) {
             return None;
         }

-        let num_ids = usize::max(BITS_PER_LONG, cap / 2);
+        let num_ids = usize::max(BitmapVec::MAX_INLINE_LEN, cap / 2);
         Some(ReallocRequest { num_ids })
     }
@@ -177,11 +192,13 @@ impl IdPool {
     /// Returns a [`ReallocRequest`] for growing this [`IdPool`], if possible.
     ///
-    /// The capacity of an [`IdPool`] cannot be grown above [`i32::MAX`].
+    /// The capacity of an [`IdPool`] cannot be grown above [`MAX_LEN`].
+    ///
+    /// [`MAX_LEN`]: BitmapVec::MAX_LEN
     #[inline]
     pub fn grow_request(&self) -> Option<ReallocRequest> {
         let num_ids = self.capacity() * 2;
-        if num_ids > i32::MAX.try_into().unwrap() {
+        if num_ids > BitmapVec::MAX_LEN {
             return None;
         }
         Some(ReallocRequest { num_ids })
@@ -204,18 +221,18 @@ impl IdPool {
         self.map = resizer.new;
     }

-    /// Acquires a new ID by finding and setting the next zero bit in the
-    /// bitmap.
+    /// Finds an unused ID in the bitmap.
     ///
     /// Upon success, returns its index. Otherwise, returns [`None`]
     /// to indicate that a [`Self::grow_request`] is needed.
     #[inline]
-    pub fn acquire_next_id(&mut self, offset: usize) -> Option<usize> {
-        let next_zero_bit = self.map.next_zero_bit(offset);
-        if let Some(nr) = next_zero_bit {
-            self.map.set_bit(nr);
-        }
-        next_zero_bit
+    #[must_use]
+    pub fn find_unused_id(&mut self, offset: usize) -> Option<UnusedId<'_>> {
+        // INVARIANT: `next_zero_bit()` returns None or an integer less than `map.len()`
+        Some(UnusedId {
+            id: self.map.next_zero_bit(offset)?,
+            pool: self,
+        })
     }

     /// Releases an ID.
@@ -224,3 +241,55 @@ impl IdPool {
         self.map.clear_bit(id);
     }
 }
+
+/// Represents an unused id in an [`IdPool`].
+///
+/// # Invariants
+///
+/// The value of `id` is less than `pool.map.len()`.
+pub struct UnusedId<'pool> {
+    id: usize,
+    pool: &'pool mut IdPool,
+}
+
+impl<'pool> UnusedId<'pool> {
+    /// Get the unused id as an usize.
+    ///
+    /// Be aware that the id has not yet been acquired in the pool. The
+    /// [`acquire`] method must be called to prevent others from taking the id.
+    ///
+    /// [`acquire`]: UnusedId::acquire()
+    #[inline]
+    #[must_use]
+    pub fn as_usize(&self) -> usize {
+        self.id
+    }
+
+    /// Get the unused id as an u32.
+    ///
+    /// Be aware that the id has not yet been acquired in the pool. The
+    /// [`acquire`] method must be called to prevent others from taking the id.
+    ///
+    /// [`acquire`]: UnusedId::acquire()
+    #[inline]
+    #[must_use]
+    pub fn as_u32(&self) -> u32 {
+        // CAST: By the type invariants:
+        // `self.id < pool.map.len() <= BitmapVec::MAX_LEN = i32::MAX`.
+        self.id as u32
+    }
+
+    /// Acquire the unused id.
+    #[inline]
+    pub fn acquire(self) -> usize {
+        self.pool.map.set_bit(self.id);
+        self.id
+    }
+}
+
+impl Default for IdPool {
+    #[inline]
+    fn default() -> Self {
+        Self::new()
+    }
+}


@@ -3416,10 +3416,6 @@ static int snd_bbfpro_controls_create(struct usb_mixer_interface *mixer)
 #define RME_DIGIFACE_REGISTER(reg, mask) (((reg) << 16) | (mask))
 #define RME_DIGIFACE_INVERT BIT(31)

-/* Nonconst helpers */
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
 static int snd_rme_digiface_write_reg(struct snd_kcontrol *kcontrol, int item, u16 mask, u16 val)
 {
 	struct usb_mixer_elem_list *list = snd_kcontrol_chip(kcontrol);