mirror of https://github.com/torvalds/linux.git, synced 2025-12-07 11:56:58 +00:00
Normal rings support 64b SQEs for posting submissions, while certain features require the ring to be configured with IORING_SETUP_SQE128, as they need to convey more information per submission. This, in turn, makes ALL the SQEs 128b in size. This is somewhat wasteful and inefficient, particularly when only certain SQEs need to be of the bigger variant.

This adds support for setting up a ring with mixed SQE sizes, using IORING_SETUP_SQE_MIXED. When set up in this mode, SQEs posted to the ring may be either 64b or 128b in size. If a SQE is 128b in size, then the opcode will be set to a variant to indicate that this is the case. Any other non-128b opcode will assume the SQ's default size.

SQEs on these types of mixed rings may also utilize NOP with skip success set. This can happen if the ring is one (small) SQE entry away from wrapping, and an attempt is made to get a 128b SQE. As SQEs must be contiguous in the SQ ring, a 128b SQE cannot wrap the ring. For this case, a single NOP SQE should be inserted with the SKIP_SUCCESS flag set. The kernel will process this as a normal NOP, without posting a CQE.

Signed-off-by: Keith Busch <kbusch@kernel.org>
[axboe: {} style fix and assign sqe before opcode read]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
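For illustration, a minimal userspace sketch of the wrap handling described above, assuming a ring created with IORING_SETUP_SQE_MIXED. The mixed_sq struct and the get_sqe64()/get_sqe128() helpers are hypothetical stand-ins, not liburing API; only struct io_uring_sqe, IORING_OP_NOP and IOSQE_CQE_SKIP_SUCCESS come from <linux/io_uring.h>.

#include <string.h>
#include <linux/io_uring.h>

struct mixed_sq {
	struct io_uring_sqe *sqes;	/* SQ slots, each 64b */
	unsigned entries;		/* number of 64b slots, power of 2 */
	unsigned tail;			/* local tail, not yet published */
};

/* Claim the next 64b slot (hypothetical helper, no free-space check). */
static struct io_uring_sqe *get_sqe64(struct mixed_sq *sq)
{
	return &sq->sqes[sq->tail++ & (sq->entries - 1)];
}

/*
 * Claim a 128b SQE: it needs two contiguous 64b slots, so if only the
 * last slot before the wrap is left, fill it with a NOP carrying
 * IOSQE_CQE_SKIP_SUCCESS, which the kernel executes without posting a CQE.
 */
static struct io_uring_sqe *get_sqe128(struct mixed_sq *sq)
{
	unsigned mask = sq->entries - 1;
	struct io_uring_sqe *sqe;

	if ((sq->tail & mask) == mask) {
		struct io_uring_sqe *pad = get_sqe64(sq);

		memset(pad, 0, sizeof(*pad));
		pad->opcode = IORING_OP_NOP;
		pad->flags = IOSQE_CQE_SKIP_SUCCESS;
	}

	sqe = get_sqe64(sq);	/* first half of the 128b SQE */
	get_sqe64(sq);		/* second half, contiguous by construction */
	memset(sqe, 0, 2 * sizeof(*sqe));
	return sqe;
}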
55 lines
1.4 KiB
C
// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_OP_DEF_H
#define IOU_OP_DEF_H

struct io_issue_def {
	/* needs req->file assigned */
	unsigned		needs_file : 1;
	/* should block plug */
	unsigned		plug : 1;
	/* supports ioprio */
	unsigned		ioprio : 1;
	/* supports iopoll */
	unsigned		iopoll : 1;
	/* op supports buffer selection */
	unsigned		buffer_select : 1;
	/* hash wq insertion if file is a regular file */
	unsigned		hash_reg_file : 1;
	/* unbound wq insertion if file is a non-regular file */
	unsigned		unbound_nonreg_file : 1;
	/* set if opcode supports polled "wait" */
	unsigned		pollin : 1;
	unsigned		pollout : 1;
	unsigned		poll_exclusive : 1;
	/* skip auditing */
	unsigned		audit_skip : 1;
	/* have to be put into the iopoll list */
	unsigned		iopoll_queue : 1;
	/* vectored opcode, set if 1) vectored, and 2) handler needs to know */
	unsigned		vectored : 1;
	/* set to 1 if this opcode uses 128b sqes in a mixed sq */
	unsigned		is_128 : 1;

	/* size of async data needed, if any */
	unsigned short		async_size;

	int (*issue)(struct io_kiocb *, unsigned int);
	int (*prep)(struct io_kiocb *, const struct io_uring_sqe *);
};

struct io_cold_def {
	const char		*name;

	void (*sqe_copy)(struct io_kiocb *);
	void (*cleanup)(struct io_kiocb *);
	void (*fail)(struct io_kiocb *);
};

extern const struct io_issue_def io_issue_defs[];
extern const struct io_cold_def io_cold_defs[];

bool io_uring_op_supported(u8 opcode);

void io_uring_optable_init(void);
#endif
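For context, each opcode declared in io_uring gets one entry in the io_issue_defs[] and io_cold_defs[] tables defined in io_uring/opdef.c. A rough sketch of what such entries look like, using NOP as the example; the field values here are illustrative rather than a verbatim copy of the real table, and a 128b-only opcode on a mixed SQ would additionally set .is_128 = 1.

const struct io_issue_def io_issue_defs[] = {
	[IORING_OP_NOP] = {
		.audit_skip	= 1,
		.iopoll		= 1,
		.prep		= io_nop_prep,	/* parses the SQE */
		.issue		= io_nop,	/* executes the request */
	},
	/* ... one entry per opcode ... */
};

const struct io_cold_def io_cold_defs[] = {
	[IORING_OP_NOP] = {
		.name		= "NOP",
	},
	/* ... */
};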