ublk: implement NUMA-aware memory allocation
Implement NUMA-friendly memory allocation for the ublk driver to improve performance on multi-socket systems. This commit includes the following changes:

1. Rename __queues to queues, dropping the __ prefix since the field is now accessed directly throughout the codebase rather than only through the ublk_get_queue() helper.

2. Remove the queue_size field from struct ublk_device, as it is no longer needed.

3. Move queue allocation and deallocation into ublk_init_queue() and ublk_deinit_queue() respectively, improving encapsulation. This simplifies ublk_init_queues() and ublk_deinit_queues() to just iterate and call the per-queue functions.

4. Add a ublk_get_queue_numa_node() helper that determines the appropriate NUMA node for a queue by finding the first CPU mapped to that queue via tag_set.map[HCTX_TYPE_DEFAULT].mq_map[] and converting it to a NUMA node with cpu_to_node(). It is called internally by ublk_init_queue() to pick the allocation node.

5. Allocate each queue structure on its local NUMA node using kvzalloc_node() in ublk_init_queue().

6. Allocate the I/O command buffer on the same NUMA node using alloc_pages_node().

This reduces memory access latency on multi-socket NUMA systems by ensuring each queue's data structures are local to the CPUs that access them.

Reviewed-by: Caleb Sander Mateos <csander@purestorage.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
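Point 4 above is the heart of the change. As a rough illustration of the lookup it describes, here is a minimal userspace sketch (not kernel code): the mq_map[] and cpu_node[] tables are hypothetical stand-ins for tag_set.map[HCTX_TYPE_DEFAULT].mq_map[] and cpu_to_node(), and the topology values are made up for the example.

/*
 * Userspace sketch of the lookup in ublk_get_queue_numa_node():
 * walk the CPU -> hw queue map and return the NUMA node of the
 * first CPU that maps to the given queue.
 */
#include <stdio.h>

#define NR_CPUS		8
#define NUMA_NO_NODE	(-1)

/* Hypothetical CPU -> queue map: CPUs 0-3 -> queue 0, CPUs 4-7 -> queue 1 */
static const int mq_map[NR_CPUS]   = { 0, 0, 0, 0, 1, 1, 1, 1 };
/* Hypothetical CPU -> NUMA node map: one socket per four CPUs */
static const int cpu_node[NR_CPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };

static int queue_numa_node(int q_id)
{
	int cpu;

	/* Find the first CPU mapped to this queue, as the driver does */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (mq_map[cpu] == q_id)
			return cpu_node[cpu];
	}
	return NUMA_NO_NODE;	/* no CPU maps here: let the allocator pick */
}

int main(void)
{
	int q;

	for (q = 0; q < 2; q++)
		printf("queue %d -> NUMA node %d\n", q, queue_numa_node(q));
	return 0;
}

With this made-up topology, queue 0 resolves to node 0 and queue 1 to node 1; in the driver, a NUMA_NO_NODE result simply tells kvzalloc_node()/alloc_pages_node() to allocate on any node.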
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -209,9 +209,6 @@ struct ublk_queue {
 struct ublk_device {
 	struct gendisk		*ub_disk;
 
-	char	*__queues;
-	unsigned int	queue_size;
-
 	struct ublksrv_ctrl_dev_info	dev_info;
 
 	struct blk_mq_tag_set	tag_set;
@@ -239,6 +236,8 @@ struct ublk_device {
 	bool			canceling;
 	pid_t			ublksrv_tgid;
 	struct delayed_work	exit_work;
+
+	struct ublk_queue	*queues[];
 };
 
 /* header of ublk_params */
@@ -781,7 +780,7 @@ static noinline void ublk_put_device(struct ublk_device *ub)
 static inline struct ublk_queue *ublk_get_queue(struct ublk_device *dev,
 		int qid)
 {
-	return (struct ublk_queue *)&(dev->__queues[qid * dev->queue_size]);
+	return dev->queues[qid];
 }
 
 static inline bool ublk_rq_has_data(const struct request *rq)
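Aside: the old offset arithmetic and the new pointer lookup are equivalent for indexing, but only the latter permits per-queue placement, since a single flat buffer necessarily lives on one NUMA node. A minimal userspace sketch contrasting the two schemes (hypothetical struct queue type; not the driver's code):

#include <stdlib.h>

struct queue { int q_id; };

/* Old scheme: one block, queue i found by offset arithmetic */
static struct queue *get_queue_flat(char *base, size_t queue_size, int i)
{
	return (struct queue *)&base[i * queue_size];
}

/* New scheme: per-queue pointers, each allocated separately */
static struct queue *get_queue_ptr(struct queue **queues, int i)
{
	return queues[i];
}

int main(void)
{
	enum { NR = 2 };
	char *flat = calloc(NR, sizeof(struct queue));
	struct queue *queues[NR];
	int i;

	if (!flat)
		return 1;
	for (i = 0; i < NR; i++) {
		/* each allocation could be placed node-locally */
		queues[i] = calloc(1, sizeof(struct queue));
		if (!queues[i])
			return 1;
	}

	get_queue_flat(flat, sizeof(struct queue), 1)->q_id = 1;
	get_queue_ptr(queues, 1)->q_id = 1;

	for (i = 0; i < NR; i++)
		free(queues[i]);
	free(flat);
	return 0;
}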
@@ -2662,9 +2661,13 @@ static const struct file_operations ublk_ch_fops = {
 
 static void ublk_deinit_queue(struct ublk_device *ub, int q_id)
 {
-	int size = ublk_queue_cmd_buf_size(ub);
-	struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
-	int i;
+	struct ublk_queue *ubq = ub->queues[q_id];
+	int size, i;
+
+	if (!ubq)
+		return;
+
+	size = ublk_queue_cmd_buf_size(ub);
 
 	for (i = 0; i < ubq->q_depth; i++) {
 		struct ublk_io *io = &ubq->ios[i];
@@ -2676,57 +2679,76 @@ static void ublk_deinit_queue(struct ublk_device *ub, int q_id)
 
 	if (ubq->io_cmd_buf)
 		free_pages((unsigned long)ubq->io_cmd_buf, get_order(size));
+
+	kvfree(ubq);
+	ub->queues[q_id] = NULL;
+}
+
+static int ublk_get_queue_numa_node(struct ublk_device *ub, int q_id)
+{
+	unsigned int cpu;
+
+	/* Find first CPU mapped to this queue */
+	for_each_possible_cpu(cpu) {
+		if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[cpu] == q_id)
+			return cpu_to_node(cpu);
+	}
+
+	return NUMA_NO_NODE;
 }
 
 static int ublk_init_queue(struct ublk_device *ub, int q_id)
 {
-	struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
+	int depth = ub->dev_info.queue_depth;
+	int ubq_size = sizeof(struct ublk_queue) + depth * sizeof(struct ublk_io);
 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
-	void *ptr;
+	struct ublk_queue *ubq;
+	struct page *page;
+	int numa_node;
 	int size;
 
+	/* Determine NUMA node based on queue's CPU affinity */
+	numa_node = ublk_get_queue_numa_node(ub, q_id);
+
+	/* Allocate queue structure on local NUMA node */
+	ubq = kvzalloc_node(ubq_size, GFP_KERNEL, numa_node);
+	if (!ubq)
+		return -ENOMEM;
+
 	spin_lock_init(&ubq->cancel_lock);
 	ubq->flags = ub->dev_info.flags;
 	ubq->q_id = q_id;
-	ubq->q_depth = ub->dev_info.queue_depth;
+	ubq->q_depth = depth;
 	size = ublk_queue_cmd_buf_size(ub);
 
-	ptr = (void *) __get_free_pages(gfp_flags, get_order(size));
-	if (!ptr)
+	/* Allocate I/O command buffer on local NUMA node */
+	page = alloc_pages_node(numa_node, gfp_flags, get_order(size));
+	if (!page) {
+		kvfree(ubq);
 		return -ENOMEM;
+	}
+	ubq->io_cmd_buf = page_address(page);
 
-	ubq->io_cmd_buf = ptr;
+	ub->queues[q_id] = ubq;
 	ubq->dev = ub;
 	return 0;
 }
 
 static void ublk_deinit_queues(struct ublk_device *ub)
 {
-	int nr_queues = ub->dev_info.nr_hw_queues;
 	int i;
 
-	if (!ub->__queues)
-		return;
-
-	for (i = 0; i < nr_queues; i++)
+	for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
 		ublk_deinit_queue(ub, i);
-	kvfree(ub->__queues);
 }
 
 static int ublk_init_queues(struct ublk_device *ub)
 {
-	int nr_queues = ub->dev_info.nr_hw_queues;
-	int depth = ub->dev_info.queue_depth;
-	int ubq_size = sizeof(struct ublk_queue) + depth * sizeof(struct ublk_io);
-	int i, ret = -ENOMEM;
+	int i, ret;
 
-	ub->queue_size = ubq_size;
-	ub->__queues = kvcalloc(nr_queues, ubq_size, GFP_KERNEL);
-	if (!ub->__queues)
-		return ret;
-
-	for (i = 0; i < nr_queues; i++) {
-		if (ublk_init_queue(ub, i))
+	for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+		ret = ublk_init_queue(ub, i);
+		if (ret)
 			goto fail;
 	}
 
@@ -3128,7 +3150,7 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
 		goto out_unlock;
 
 	ret = -ENOMEM;
-	ub = kzalloc(sizeof(*ub), GFP_KERNEL);
+	ub = kzalloc(struct_size(ub, queues, info.nr_hw_queues), GFP_KERNEL);
 	if (!ub)
 		goto out_unlock;
 	mutex_init(&ub->mutex);
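The struct_size() change pairs with the new flexible queues[] array from the first hunk: the device struct and its per-queue pointer table come from a single allocation sized at device-creation time. A minimal userspace sketch of the same sizing pattern (names hypothetical; the kernel's struct_size() additionally checks for arithmetic overflow, which this plain-C version omits):

/*
 * Userspace sketch of the flexible-array sizing pattern used above:
 * struct_size(ub, queues, n) computes sizeof(*ub) + n * sizeof(ub->queues[0]).
 */
#include <stdio.h>
#include <stdlib.h>

struct queue { int q_id; };

struct device {
	int nr_queues;
	struct queue *queues[];	/* flexible array member, sized at alloc time */
};

int main(void)
{
	int n = 4;
	/* One allocation covers the struct plus n queue pointers */
	struct device *dev = calloc(1, sizeof(*dev) + n * sizeof(dev->queues[0]));

	if (!dev)
		return 1;
	dev->nr_queues = n;
	printf("allocated %zu bytes for %d queue pointers\n",
	       sizeof(*dev) + n * sizeof(dev->queues[0]), n);
	free(dev);
	return 0;
}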