Mirror of https://github.com/torvalds/linux.git (synced 2025-12-07 20:06:24 +00:00)
spi-cadence: support transmission with different FIFO data widths
Merge series from Jun Guo <jun.guo@cixtech.com>: The Cadence SPI IP supports configurable FIFO data widths at integration time. On some SoCs the FIFO data width is fixed at 16 or 32 bits during chip design, but the current driver only supports communication with an 8-bit FIFO data width. This series therefore extends the driver to handle 16-bit and 32-bit FIFO data widths as well.
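For context, the sketch below shows one way a driver can pack transmit bytes according to the configured FIFO data width; the helper name, the register argument, and the little-endian packing order are illustrative assumptions, not the actual spi-cadence implementation. The receive path would mirror it by unpacking up to fifo_width_bytes bytes per FIFO read.

```c
/*
 * Illustrative sketch only -- not the actual spi-cadence patch. The helper
 * name, the txd register argument, and the little-endian packing order are
 * assumptions. With an 8-bit FIFO each write carries one byte; with a 16-
 * or 32-bit FIFO, several bytes are packed into each FIFO entry.
 */
#include <linux/io.h>
#include <linux/minmax.h>
#include <linux/types.h>

static void cdns_spi_fill_tx_fifo_sketch(void __iomem *txd_reg,
					 const u8 *tx_buf, unsigned int len,
					 unsigned int fifo_width_bytes)
{
	while (len) {
		unsigned int n = min(len, fifo_width_bytes);
		u32 word = 0;
		unsigned int i;

		/* Pack up to fifo_width_bytes bytes into one FIFO entry */
		for (i = 0; i < n; i++)
			word |= (u32)tx_buf[i] << (8 * i);

		writel(word, txd_reg);	/* one FIFO entry per register write */
		tx_buf += n;
		len -= n;
	}
}
```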
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/ti,twl4030-gpio.yaml#
+$id: http://devicetree.org/schemas/gpio/ti,twl4030-gpio.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#

 title: TI TWL4030 GPIO controller
@@ -21,6 +21,7 @@ properties:
       - enum:
           - xlnx,zynqmp-spi-r1p6
           - xlnx,versal-net-spi-r1p6
+          - cix,sky1-spi-r1p6
       - const: cdns,spi-r1p6

   reg:
@@ -37,8 +37,8 @@ which corresponds to the following ASL (in the scope of \_SB)::

     Name (_HID, ...)
     Name (_CRS, ResourceTemplate () {
         I2cSerialBus (0x50, ControllerInitiated, I2C_SPEED,
-                      AddressingMode7Bit, "\\_SB.SMB1.CH00", 0x00,
-                      ResourceConsumer,,)
+                      AddressingMode7Bit, "\\_SB.SMB1.MUX0.CH00",
+                      0x00, ResourceConsumer,,)
         }
     }
 }
@@ -52,8 +52,8 @@ which corresponds to the following ASL (in the scope of \_SB)::

     Name (_HID, ...)
     Name (_CRS, ResourceTemplate () {
         I2cSerialBus (0x50, ControllerInitiated, I2C_SPEED,
-                      AddressingMode7Bit, "\\_SB.SMB1.CH01", 0x00,
-                      ResourceConsumer,,)
+                      AddressingMode7Bit, "\\_SB.SMB1.MUX0.CH01",
+                      0x00, ResourceConsumer,,)
         }
     }
 }
@@ -4818,6 +4818,7 @@ F: drivers/net/dsa/b53/*
 F: drivers/net/dsa/bcm_sf2*
 F: include/linux/dsa/brcm.h
 F: include/linux/platform_data/b53.h
 F: net/dsa/tag_brcm.c

 BROADCOM BCM2711/BCM2835 ARM ARCHITECTURE
 M: Florian Fainelli <florian.fainelli@broadcom.com>
@@ -12521,6 +12522,7 @@ F: include/linux/avf/virtchnl.h
 F: include/linux/net/intel/*/

 INTEL ETHERNET PROTOCOL DRIVER FOR RDMA
 M: Krzysztof Czurylo <krzysztof.czurylo@intel.com>
 M: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
 L: linux-rdma@vger.kernel.org
 S: Supported
@@ -12861,7 +12863,8 @@ F: tools/testing/selftests/sgx/*
 K: \bSGX_

 INTEL SKYLAKE INT3472 ACPI DEVICE DRIVER
-M: Daniel Scally <djrscally@gmail.com>
+M: Daniel Scally <dan.scally@ideasonboard.com>
 M: Sakari Ailus <sakari.ailus@linux.intel.com>
 S: Maintained
 F: drivers/platform/x86/intel/int3472/
 F: include/linux/platform_data/x86/int3472.h
@@ -20160,6 +20163,7 @@ R: Alexander Shishkin <alexander.shishkin@linux.intel.com>
 R: Jiri Olsa <jolsa@kernel.org>
 R: Ian Rogers <irogers@google.com>
 R: Adrian Hunter <adrian.hunter@intel.com>
 R: James Clark <james.clark@linaro.org>
 L: linux-perf-users@vger.kernel.org
 L: linux-kernel@vger.kernel.org
 S: Supported
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 18
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Baby Opossum Posse

 # *DOCUMENTATION*
@@ -917,6 +917,13 @@ config ARCH_USES_CFI_TRAPS
 	  An architecture should select this option if it requires the
 	  .kcfi_traps section for KCFI trap handling.

+config ARCH_USES_CFI_GENERIC_LLVM_PASS
+	bool
+	help
+	  An architecture should select this option if it uses the generic
+	  KCFIPass in LLVM to expand kCFI bundles instead of architecture-specific
+	  lowering.
+
 config CFI
 	bool "Use Kernel Control Flow Integrity (kCFI)"
 	default CFI_CLANG
@@ -44,6 +44,8 @@ config ARM
 	select ARCH_USE_BUILTIN_BSWAP
 	select ARCH_USE_CMPXCHG_LOCKREF
 	select ARCH_USE_MEMTEST
+	# https://github.com/llvm/llvm-project/commit/d130f402642fba3d065aacb506cb061c899558de
+	select ARCH_USES_CFI_GENERIC_LLVM_PASS if CLANG_VERSION < 220000
 	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
 	select ARCH_WANT_GENERAL_HUGETLB
 	select ARCH_WANT_IPC_PARSE_VERSION
@@ -109,7 +109,7 @@ endif
 ifdef CONFIG_RUSTC_HAS_ANNOTATE_TABLEJUMP
 KBUILD_RUSTFLAGS += -Cllvm-args=--loongarch-annotate-tablejump
 else
-KBUILD_RUSTFLAGS += -Zno-jump-tables # keep compatibility with older compilers
+KBUILD_RUSTFLAGS += $(if $(call rustc-min-version,109300),-Cjump-tables=n,-Zno-jump-tables) # keep compatibility with older compilers
 endif
 ifdef CONFIG_LTO_CLANG
 # The annotate-tablejump option can not be passed to LLVM backend when LTO is enabled.
@@ -35,6 +35,8 @@

 #define KERNEL_START (KERNEL_BINARY_TEXT_START)

+#define ALIGNMENT_OK(ptr, type) (((ptr) & (sizeof(type) - 1)) == 0)
+
 extern struct unwind_table_entry __start___unwind[];
 extern struct unwind_table_entry __stop___unwind[];
@@ -257,12 +259,15 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int
 	if (pc_is_kernel_fn(pc, _switch_to) ||
 	    pc == (unsigned long)&_switch_to_ret) {
 		info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
-		info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
+		if (ALIGNMENT_OK(info->prev_sp, long))
+			info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
+		else
+			info->prev_ip = info->prev_sp = 0;
 		return 1;
 	}

 #ifdef CONFIG_IRQSTACKS
-	if (pc == (unsigned long)&_call_on_stack) {
+	if (pc == (unsigned long)&_call_on_stack && ALIGNMENT_OK(info->sp, long)) {
 		info->prev_sp = *(unsigned long *)(info->sp - FRAME_SIZE - REG_SZ);
 		info->prev_ip = *(unsigned long *)(info->sp - FRAME_SIZE - RP_OFFSET);
 		return 1;
@@ -370,8 +375,10 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
 		info->prev_sp = info->sp - frame_size;
 		if (e->Millicode)
 			info->rp = info->r31;
-		else if (rpoffset)
+		else if (rpoffset && ALIGNMENT_OK(info->prev_sp, long))
 			info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
+		else
+			info->rp = 0;
 		info->prev_ip = info->rp;
 		info->rp = 0;
 	}
@@ -12,6 +12,12 @@
 #define __ASM_STR(x)	#x
 #endif

+#ifdef CONFIG_AS_HAS_INSN
+#define ASM_INSN_I(__x)	".insn " __x
+#else
+#define ASM_INSN_I(__x)	".4byte " __x
+#endif
+
 #if __riscv_xlen == 64
 #define __REG_SEL(a, b)	__ASM_STR(a)
 #elif __riscv_xlen == 32
@@ -256,10 +256,10 @@
 	INSN_S(OPCODE_OP_IMM, FUNC3(6), __RS2(3), \
 	       SIMM12((offset) & 0xfe0), RS1(base))

-#define RISCV_PAUSE	".4byte 0x100000f"
-#define ZAWRS_WRS_NTO	".4byte 0x00d00073"
-#define ZAWRS_WRS_STO	".4byte 0x01d00073"
-#define RISCV_NOP4	".4byte 0x00000013"
+#define RISCV_PAUSE	ASM_INSN_I("0x100000f")
+#define ZAWRS_WRS_NTO	ASM_INSN_I("0x00d00073")
+#define ZAWRS_WRS_STO	ASM_INSN_I("0x01d00073")
+#define RISCV_NOP4	ASM_INSN_I("0x00000013")

 #define RISCV_INSN_NOP4	_AC(0x00000013, U)
@@ -30,8 +30,8 @@ extern struct riscv_isa_vendor_ext_data_list riscv_isa_vendor_ext_list_mips;
  * allowing any subsequent instructions to fetch.
  */

-#define MIPS_PAUSE	".4byte 0x00501013\n\t"
-#define MIPS_EHB	".4byte 0x00301013\n\t"
-#define MIPS_IHB	".4byte 0x00101013\n\t"
+#define MIPS_PAUSE	ASM_INSN_I("0x00501013\n\t")
+#define MIPS_EHB	ASM_INSN_I("0x00301013\n\t")
+#define MIPS_IHB	ASM_INSN_I("0x00101013\n\t")

 #endif // _ASM_RISCV_VENDOR_EXTENSIONS_MIPS_H
@@ -265,10 +265,10 @@ void kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer,
 {
 	if (!strncmp(remcom_in_buffer, gdb_xfer_read_target,
 		     sizeof(gdb_xfer_read_target)))
-		strcpy(remcom_out_buffer, riscv_gdb_stub_target_desc);
+		strscpy(remcom_out_buffer, riscv_gdb_stub_target_desc, BUFMAX);
 	else if (!strncmp(remcom_in_buffer, gdb_xfer_read_cpuxml,
 			  sizeof(gdb_xfer_read_cpuxml)))
-		strcpy(remcom_out_buffer, riscv_gdb_stub_cpuxml);
+		strscpy(remcom_out_buffer, riscv_gdb_stub_cpuxml, BUFMAX);
 }

 static inline void kgdb_arch_update_addr(struct pt_regs *regs,
@@ -119,6 +119,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 	unsigned int num_plts = 0;
 	unsigned int num_gots = 0;
 	Elf_Rela *scratch = NULL;
+	Elf_Rela *new_scratch;
 	size_t scratch_size = 0;
 	int i;
@@ -168,9 +169,12 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 		scratch_size_needed = (num_scratch_relas + num_relas) * sizeof(*scratch);
 		if (scratch_size_needed > scratch_size) {
 			scratch_size = scratch_size_needed;
-			scratch = kvrealloc(scratch, scratch_size, GFP_KERNEL);
-			if (!scratch)
+			new_scratch = kvrealloc(scratch, scratch_size, GFP_KERNEL);
+			if (!new_scratch) {
+				kvfree(scratch);
 				return -ENOMEM;
+			}
+			scratch = new_scratch;
 		}

 		for (size_t j = 0; j < num_relas; j++)
@@ -16,6 +16,22 @@

 #ifdef CONFIG_FRAME_POINTER

+/*
+ * This disables KASAN checking when reading a value from another task's stack,
+ * since the other task could be running on another CPU and could have poisoned
+ * the stack in the meantime.
+ */
+#define READ_ONCE_TASK_STACK(task, x) \
+({ \
+	unsigned long val; \
+	unsigned long addr = x; \
+	if ((task) == current) \
+		val = READ_ONCE(addr); \
+	else \
+		val = READ_ONCE_NOCHECK(addr); \
+	val; \
+})
+
 extern asmlinkage void handle_exception(void);
 extern unsigned long ret_from_exception_end;
@@ -69,8 +85,9 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
 			fp = frame->ra;
 			pc = regs->ra;
 		} else {
-			fp = frame->fp;
-			pc = ftrace_graph_ret_addr(current, &graph_idx, frame->ra,
+			fp = READ_ONCE_TASK_STACK(task, frame->fp);
+			pc = READ_ONCE_TASK_STACK(task, frame->ra);
+			pc = ftrace_graph_ret_addr(current, &graph_idx, pc,
 						   &frame->ra);
 			if (pc >= (unsigned long)handle_exception &&
 			    pc < (unsigned long)&ret_from_exception_end) {
@@ -31,7 +31,7 @@ config RISCV_MODULE_LINKING_KUNIT
 	  If unsure, say N.

 config RISCV_KPROBES_KUNIT
-	bool "KUnit test for riscv kprobes" if !KUNIT_ALL_TESTS
+	tristate "KUnit test for riscv kprobes" if !KUNIT_ALL_TESTS
 	depends on KUNIT
 	depends on KPROBES
 	default KUNIT_ALL_TESTS
@@ -1 +1,3 @@
-obj-y += test-kprobes.o test-kprobes-asm.o
+obj-$(CONFIG_RISCV_KPROBES_KUNIT) += kprobes_riscv_kunit.o
+
+kprobes_riscv_kunit-objs := test-kprobes.o test-kprobes-asm.o
@@ -49,8 +49,11 @@ static struct kunit_case kprobes_testcases[] = {
 };

 static struct kunit_suite kprobes_test_suite = {
-	.name = "kprobes_test_riscv",
+	.name = "kprobes_riscv",
 	.test_cases = kprobes_testcases,
 };

 kunit_test_suites(&kprobes_test_suite);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("KUnit test for riscv kprobes");
@@ -21,7 +21,7 @@
 #define pt_dump_seq_puts(m, fmt) \
 ({ \
 	if (m) \
-		seq_printf(m, fmt); \
+		seq_puts(m, fmt); \
 })

 /*
@@ -98,7 +98,7 @@ ifeq ($(CONFIG_X86_KERNEL_IBT),y)
 # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=104816
 #
 KBUILD_CFLAGS += $(call cc-option,-fcf-protection=branch -fno-jump-tables)
-KBUILD_RUSTFLAGS += -Zcf-protection=branch -Zno-jump-tables
+KBUILD_RUSTFLAGS += -Zcf-protection=branch $(if $(call rustc-min-version,109300),-Cjump-tables=n,-Zno-jump-tables)
 else
 KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
 endif
@@ -23,7 +23,6 @@
 #define AMD_NODE0_PCI_SLOT 0x18

 struct pci_dev *amd_node_get_func(u16 node, u8 func);
-struct pci_dev *amd_node_get_root(u16 node);

 static inline u16 amd_num_nodes(void)
 {
@@ -2,6 +2,10 @@
 #ifndef _ASM_RUNTIME_CONST_H
 #define _ASM_RUNTIME_CONST_H

+#ifdef MODULE
+#error "Cannot use runtime-const infrastructure from modules"
+#endif
+
 #ifdef __ASSEMBLY__

 .macro RUNTIME_CONST_PTR sym reg
@@ -12,12 +12,12 @@
 #include <asm/cpufeatures.h>
 #include <asm/page.h>
 #include <asm/percpu.h>
-#include <asm/runtime-const.h>

 /*
  * Virtual variable: there's no actual backing store for this,
  * it can purely be used as 'runtime_const_ptr(USER_PTR_MAX)'
  */
+#ifdef MODULE
+#define runtime_const_ptr(sym) (sym)
+#else
+#include <asm/runtime-const.h>
+#endif
 extern unsigned long USER_PTR_MAX;

 #ifdef CONFIG_ADDRESS_MASKING
@@ -34,62 +34,6 @@ struct pci_dev *amd_node_get_func(u16 node, u8 func)
 	return pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(AMD_NODE0_PCI_SLOT + node, func));
 }

-#define DF_BLK_INST_CNT		0x040
-#define DF_CFG_ADDR_CNTL_LEGACY	0x084
-#define DF_CFG_ADDR_CNTL_DF4	0xC04
-
-#define DF_MAJOR_REVISION	GENMASK(27, 24)
-
-static u16 get_cfg_addr_cntl_offset(struct pci_dev *df_f0)
-{
-	u32 reg;
-
-	/*
-	 * Revision fields added for DF4 and later.
-	 *
-	 * Major revision of '0' is found pre-DF4. Field is Read-as-Zero.
-	 */
-	if (pci_read_config_dword(df_f0, DF_BLK_INST_CNT, &reg))
-		return 0;
-
-	if (reg & DF_MAJOR_REVISION)
-		return DF_CFG_ADDR_CNTL_DF4;
-
-	return DF_CFG_ADDR_CNTL_LEGACY;
-}
-
-struct pci_dev *amd_node_get_root(u16 node)
-{
-	struct pci_dev *root;
-	u16 cntl_off;
-	u8 bus;
-
-	if (!cpu_feature_enabled(X86_FEATURE_ZEN))
-		return NULL;
-
-	/*
-	 * D18F0xXXX [Config Address Control] (DF::CfgAddressCntl)
-	 * Bits [7:0] (SecBusNum) holds the bus number of the root device for
-	 * this Data Fabric instance. The segment, device, and function will be 0.
-	 */
-	struct pci_dev *df_f0 __free(pci_dev_put) = amd_node_get_func(node, 0);
-	if (!df_f0)
-		return NULL;
-
-	cntl_off = get_cfg_addr_cntl_offset(df_f0);
-	if (!cntl_off)
-		return NULL;
-
-	if (pci_read_config_byte(df_f0, cntl_off, &bus))
-		return NULL;
-
-	/* Grab the pointer for the actual root device instance. */
-	root = pci_get_domain_bus_and_slot(0, bus, 0);
-
-	pci_dbg(root, "is root for AMD node %u\n", node);
-	return root;
-}
-
 static struct pci_dev **amd_roots;

 /* Protect the PCI config register pairs used for SMN. */
@@ -274,51 +218,21 @@ DEFINE_SHOW_STORE_ATTRIBUTE(smn_node);
 DEFINE_SHOW_STORE_ATTRIBUTE(smn_address);
 DEFINE_SHOW_STORE_ATTRIBUTE(smn_value);

-static int amd_cache_roots(void)
+static struct pci_dev *get_next_root(struct pci_dev *root)
 {
-	u16 node, num_nodes = amd_num_nodes();
-
-	amd_roots = kcalloc(num_nodes, sizeof(*amd_roots), GFP_KERNEL);
-	if (!amd_roots)
-		return -ENOMEM;
-
-	for (node = 0; node < num_nodes; node++)
-		amd_roots[node] = amd_node_get_root(node);
-
-	return 0;
-}
-
-static int reserve_root_config_spaces(void)
-{
-	struct pci_dev *root = NULL;
-	struct pci_bus *bus = NULL;
-
-	while ((bus = pci_find_next_bus(bus))) {
-		/* Root device is Device 0 Function 0 on each Primary Bus. */
-		root = pci_get_slot(bus, 0);
-		if (!root)
+	while ((root = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, root))) {
+		/* Root device is Device 0 Function 0. */
+		if (root->devfn)
 			continue;

 		if (root->vendor != PCI_VENDOR_ID_AMD &&
 		    root->vendor != PCI_VENDOR_ID_HYGON)
 			continue;

-		pci_dbg(root, "Reserving PCI config space\n");
-
-		/*
-		 * There are a few SMN index/data pairs and other registers
-		 * that shouldn't be accessed by user space.
-		 * So reserve the entire PCI config space for simplicity rather
-		 * than covering specific registers piecemeal.
-		 */
-		if (!pci_request_config_region_exclusive(root, 0, PCI_CFG_SPACE_SIZE, NULL)) {
-			pci_err(root, "Failed to reserve config space\n");
-			return -EEXIST;
-		}
+		break;
 	}

-	smn_exclusive = true;
-	return 0;
+	return root;
 }

 static bool enable_dfs;
@@ -332,7 +246,8 @@ __setup("amd_smn_debugfs_enable", amd_smn_enable_dfs);

 static int __init amd_smn_init(void)
 {
-	int err;
+	u16 count, num_roots, roots_per_node, node, num_nodes;
+	struct pci_dev *root;

 	if (!cpu_feature_enabled(X86_FEATURE_ZEN))
 		return 0;
@@ -342,13 +257,48 @@ static int __init amd_smn_init(void)
 	if (amd_roots)
 		return 0;

-	err = amd_cache_roots();
-	if (err)
-		return err;
-
-	err = reserve_root_config_spaces();
-	if (err)
-		return err;
+	num_roots = 0;
+	root = NULL;
+	while ((root = get_next_root(root))) {
+		pci_dbg(root, "Reserving PCI config space\n");
+
+		/*
+		 * There are a few SMN index/data pairs and other registers
+		 * that shouldn't be accessed by user space. So reserve the
+		 * entire PCI config space for simplicity rather than covering
+		 * specific registers piecemeal.
+		 */
+		if (!pci_request_config_region_exclusive(root, 0, PCI_CFG_SPACE_SIZE, NULL)) {
+			pci_err(root, "Failed to reserve config space\n");
+			return -EEXIST;
+		}
+
+		num_roots++;
+	}
+
+	pr_debug("Found %d AMD root devices\n", num_roots);
+
+	if (!num_roots)
+		return -ENODEV;
+
+	num_nodes = amd_num_nodes();
+	amd_roots = kcalloc(num_nodes, sizeof(*amd_roots), GFP_KERNEL);
+	if (!amd_roots)
+		return -ENOMEM;
+
+	roots_per_node = num_roots / num_nodes;
+
+	count = 0;
+	node = 0;
+	root = NULL;
+	while (node < num_nodes && (root = get_next_root(root))) {
+		/* Use one root for each node and skip the rest. */
+		if (count++ % roots_per_node)
+			continue;
+
+		pci_dbg(root, "is root for AMD node %u\n", node);
+		amd_roots[node++] = root;
+	}

 	if (enable_dfs) {
 		debugfs_dir = debugfs_create_dir("amd_smn", arch_debugfs_dir);
@@ -358,6 +308,8 @@ static int __init amd_smn_init(void)
 		debugfs_create_file("value", 0600, debugfs_dir, NULL, &smn_value_fops);
 	}

+	smn_exclusive = true;
+
 	return 0;
 }
@@ -1038,6 +1038,7 @@ static void init_amd_zen4(struct cpuinfo_x86 *c)
 static const struct x86_cpu_id zen5_rdseed_microcode[] = {
 	ZEN_MODEL_STEP_UCODE(0x1a, 0x02, 0x1, 0x0b00215a),
+	ZEN_MODEL_STEP_UCODE(0x1a, 0x11, 0x0, 0x0b101054),
 	{},
 };

 static void init_amd_zen5(struct cpuinfo_x86 *c)
@@ -78,6 +78,10 @@
 DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
 EXPORT_PER_CPU_SYMBOL(cpu_info);

+/* Used for modules: built-in code uses runtime constants */
+unsigned long USER_PTR_MAX;
+EXPORT_SYMBOL(USER_PTR_MAX);
+
 u32 elf_hwcap2 __read_mostly;

 /* Number of siblings per CPU package */
@@ -2579,7 +2583,7 @@ void __init arch_cpu_finalize_init(void)
 	alternative_instructions();

 	if (IS_ENABLED(CONFIG_X86_64)) {
-		unsigned long USER_PTR_MAX = TASK_SIZE_MAX;
+		USER_PTR_MAX = TASK_SIZE_MAX;

 		/*
 		 * Enable this when LAM is gated on LASS support
@@ -220,10 +220,12 @@ static bool need_sha_check(u32 cur_rev)
 	case 0xaa001: return cur_rev <= 0xaa00116; break;
 	case 0xaa002: return cur_rev <= 0xaa00218; break;
 	case 0xb0021: return cur_rev <= 0xb002146; break;
+	case 0xb0081: return cur_rev <= 0xb008111; break;
+	case 0xb1010: return cur_rev <= 0xb101046; break;
 	case 0xb2040: return cur_rev <= 0xb204031; break;
 	case 0xb4040: return cur_rev <= 0xb404031; break;
 	case 0xb6000: return cur_rev <= 0xb600031; break;
 	case 0xb6080: return cur_rev <= 0xb608031; break;
 	case 0xb7000: return cur_rev <= 0xb700031; break;
 	default: break;
 	}
@@ -750,7 +750,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
 	}

 	/*
-	 * Disregard _CPC if the number of entries in the return pachage is not
+	 * Disregard _CPC if the number of entries in the return package is not
 	 * as expected, but support future revisions being proper supersets of
 	 * the v3 and only causing more entries to be returned by _CPC.
 	 */
@@ -487,7 +487,7 @@ static int acpi_battery_read(struct acpi_battery *battery)
 	if (result)
 		return result;

-	battery->present = state & (1 << battery->id);
+	battery->present = !!(state & (1 << battery->id));
 	if (!battery->present)
 		return 0;
@@ -625,8 +625,10 @@ static int rtlbt_parse_firmware_v2(struct hci_dev *hdev,
 		len += entry->len;
 	}

-	if (!len)
+	if (!len) {
+		kvfree(ptr);
 		return -EPERM;
+	}

 	*_buf = ptr;
 	return len;
@@ -18,6 +18,7 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/slab.h>
+#include <linux/string.h>
 #include <linux/platform_device.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
@@ -303,8 +304,8 @@ static int sbi_cpuidle_init_cpu(struct device *dev, int cpu)
 	drv->states[0].exit_latency = 1;
 	drv->states[0].target_residency = 1;
 	drv->states[0].power_usage = UINT_MAX;
-	strcpy(drv->states[0].name, "WFI");
-	strcpy(drv->states[0].desc, "RISC-V WFI");
+	strscpy(drv->states[0].name, "WFI");
+	strscpy(drv->states[0].desc, "RISC-V WFI");

 	/*
 	 * If no DT idle states are detected (ret == 0) let the driver
@@ -433,7 +433,7 @@ static void handle_error(struct mc_priv *priv, struct ecc_status *stat,
 	phys_addr_t pfn;
 	int err;

-	if (WARN_ON_ONCE(ctl_num > NUM_CONTROLLERS))
+	if (WARN_ON_ONCE(ctl_num >= NUM_CONTROLLERS))
 		return;

 	mci = priv->mci[ctl_num];
@@ -723,6 +723,7 @@ struct gpiochip_fwd *devm_gpiochip_fwd_alloc(struct device *dev,
 	chip->get_multiple = gpio_fwd_get_multiple_locked;
 	chip->set = gpio_fwd_set;
 	chip->set_multiple = gpio_fwd_set_multiple_locked;
+	chip->set_config = gpio_fwd_set_config;
 	chip->to_irq = gpio_fwd_to_irq;
 	chip->base = -1;
 	chip->ngpio = ngpios;
@@ -50,25 +50,6 @@ static inline u32 tb10x_reg_read(struct tb10x_gpio *gpio, unsigned int offs)
 	return ioread32(gpio->base + offs);
 }

-static inline void tb10x_reg_write(struct tb10x_gpio *gpio, unsigned int offs,
-				   u32 val)
-{
-	iowrite32(val, gpio->base + offs);
-}
-
-static inline void tb10x_set_bits(struct tb10x_gpio *gpio, unsigned int offs,
-				  u32 mask, u32 val)
-{
-	u32 r;
-
-	guard(gpio_generic_lock_irqsave)(&gpio->chip);
-
-	r = tb10x_reg_read(gpio, offs);
-	r = (r & ~mask) | (val & mask);
-
-	tb10x_reg_write(gpio, offs, r);
-}
-
 static int tb10x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
 {
 	struct tb10x_gpio *tb10x_gpio = gpiochip_get_data(chip);
@@ -41,7 +41,7 @@ static struct gpio_device *swnode_get_gpio_device(struct fwnode_handle *fwnode)
 	    !strcmp(gdev_node->name, GPIOLIB_SWNODE_UNDEFINED_NAME))
 		return ERR_PTR(-ENOENT);

-	gdev = gpio_device_find_by_label(gdev_node->name);
+	gdev = gpio_device_find_by_fwnode(fwnode);

 	return gdev ?: ERR_PTR(-EPROBE_DEFER);
 }
@@ -5296,6 +5296,8 @@ static void *gpiolib_seq_start(struct seq_file *s, loff_t *pos)
 	struct gpio_device *gdev;
 	loff_t index = *pos;

+	s->private = NULL;
+
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return NULL;
@@ -5329,7 +5331,11 @@ static void *gpiolib_seq_next(struct seq_file *s, void *v, loff_t *pos)

 static void gpiolib_seq_stop(struct seq_file *s, void *v)
 {
-	struct gpiolib_seq_priv *priv = s->private;
+	struct gpiolib_seq_priv *priv;
+
+	priv = s->private;
+	if (!priv)
+		return;

 	srcu_read_unlock(&gpio_devices_srcu, priv->idx);
 	kfree(priv);
@@ -245,7 +245,7 @@ always-$(CONFIG_DRM_HEADER_TEST) += \
 quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
       cmd_hdrtest = \
 		$(CC) $(c_flags) -fsyntax-only -x c /dev/null -include $< -include $<; \
-		PYTHONDONTWRITEBYTECODE=1 $(KERNELDOC) -none $(if $(CONFIG_WERROR)$(CONFIG_DRM_WERROR),-Werror) $<; \
+		PYTHONDONTWRITEBYTECODE=1 $(PYTHON3) $(KERNELDOC) -none $(if $(CONFIG_WERROR)$(CONFIG_DRM_WERROR),-Werror) $<; \
 		touch $@

 $(obj)/%.hdrtest: $(src)/%.h FORCE
@@ -1267,6 +1267,10 @@ static int unmap_bo_from_gpuvm(struct kgd_mem *mem,
 	(void)amdgpu_vm_bo_unmap(adev, bo_va, entry->va);

+	/* VM entity stopped if process killed, don't clear freed pt bo */
+	if (!amdgpu_vm_ready(vm))
+		return 0;
+
 	(void)amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);

 	(void)amdgpu_sync_fence(sync, bo_va->last_pt_update, GFP_KERNEL);
@@ -5243,10 +5243,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
 	if (amdgpu_sriov_vf(adev))
 		amdgpu_virt_release_full_gpu(adev, false);

-	r = amdgpu_dpm_notify_rlc_state(adev, false);
-	if (r)
-		return r;
-
 	return 0;
 }
@@ -2632,9 +2632,14 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev)
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
+	int r;

-	if (amdgpu_acpi_should_gpu_reset(adev))
-		return amdgpu_asic_reset(adev);
+	if (amdgpu_acpi_should_gpu_reset(adev)) {
+		amdgpu_device_lock_reset_domain(adev->reset_domain);
+		r = amdgpu_asic_reset(adev);
+		amdgpu_device_unlock_reset_domain(adev->reset_domain);
+		return r;
+	}

 	return 0;
 }
@@ -2355,8 +2355,11 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
 	if (!ret && !psp->securedisplay_context.context.resp_status) {
 		psp->securedisplay_context.context.initialized = true;
 		mutex_init(&psp->securedisplay_context.mutex);
-	} else
+	} else {
 		/* don't try again */
 		psp->securedisplay_context.context.bin_desc.size_bytes = 0;
+		return ret;
+	}

 	mutex_lock(&psp->securedisplay_context.mutex);
@@ -407,7 +407,8 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
 		return -EINVAL;
 	}

-	if (adev->kfd.init_complete && !amdgpu_in_reset(adev))
+	if (adev->kfd.init_complete && !amdgpu_in_reset(adev) &&
+	    !adev->in_suspend)
 		flags |= AMDGPU_XCP_OPS_KFD;

 	if (flags & AMDGPU_XCP_OPS_KFD) {
@@ -3102,6 +3102,11 @@ static int gfx_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
 		return r;
 	}

+	adev->gfx.gfx_supported_reset =
+		amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
+	adev->gfx.compute_supported_reset =
+		amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+
 	return r;
 }
@@ -4399,6 +4399,11 @@ static int gfx_v7_0_sw_init(struct amdgpu_ip_block *ip_block)

 	gfx_v7_0_gpu_early_init(adev);

+	adev->gfx.gfx_supported_reset =
+		amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
+	adev->gfx.compute_supported_reset =
+		amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+
 	return r;
 }
@@ -2023,6 +2023,11 @@ static int gfx_v8_0_sw_init(struct amdgpu_ip_block *ip_block)
 	if (r)
 		return r;

+	adev->gfx.gfx_supported_reset =
+		amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
+	adev->gfx.compute_supported_reset =
+		amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+
 	return 0;
 }
@@ -2292,7 +2292,9 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev)
 		r = amdgpu_xcp_init(adev->xcp_mgr, num_xcp, mode);

 	} else {
-		if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
+		if (adev->in_suspend)
+			amdgpu_xcp_restore_partition_mode(adev->xcp_mgr);
+		else if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
 			AMDGPU_XCP_FL_NONE) ==
 			AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
 			r = amdgpu_xcp_switch_partition_mode(
@@ -142,13 +142,37 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
 	return err;
 }

+static int psp_v11_wait_for_tos_unload(struct psp_context *psp)
+{
+	struct amdgpu_device *adev = psp->adev;
+	uint32_t sol_reg1, sol_reg2;
+	int retry_loop;
+
+	/* Wait for the TOS to be unloaded */
+	for (retry_loop = 0; retry_loop < 20; retry_loop++) {
+		sol_reg1 = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+		usleep_range(1000, 2000);
+		sol_reg2 = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+		if (sol_reg1 == sol_reg2)
+			return 0;
+	}
+	dev_err(adev->dev, "TOS unload failed, C2PMSG_33: %x C2PMSG_81: %x",
+		RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_33),
+		RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81));
+
+	return -ETIME;
+}
+
 static int psp_v11_0_wait_for_bootloader(struct psp_context *psp)
 {
 	struct amdgpu_device *adev = psp->adev;

 	int ret;
 	int retry_loop;

+	/* For a reset done at the end of S3, only wait for TOS to be unloaded */
+	if (adev->in_s3 && !(adev->flags & AMD_IS_APU) && amdgpu_in_reset(adev))
+		return psp_v11_wait_for_tos_unload(psp);
+
 	for (retry_loop = 0; retry_loop < 20; retry_loop++) {
 		/* Wait for bootloader to signify that is
 		    ready having bit 31 of C2PMSG_35 set to 1 */
@@ -3563,6 +3563,7 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
 	/* Do mst topology probing after resuming cached state*/
 	drm_connector_list_iter_begin(ddev, &iter);
 	drm_for_each_connector_iter(connector, &iter) {
+		bool init = false;

 		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
 			continue;
@@ -3572,7 +3573,14 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
 		    aconnector->mst_root)
 			continue;

-		drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr);
+		scoped_guard(mutex, &aconnector->mst_mgr.lock) {
+			init = !aconnector->mst_mgr.mst_primary;
+		}
+		if (init)
+			dm_helpers_dp_mst_start_top_mgr(aconnector->dc_link->ctx,
+							aconnector->dc_link, false);
+		else
+			drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr);
 	}
 	drm_connector_list_iter_end(&iter);
@@ -8030,7 +8038,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
 			 "mode %dx%d@%dHz is not native, enabling scaling\n",
 			 adjusted_mode->hdisplay, adjusted_mode->vdisplay,
 			 drm_mode_vrefresh(adjusted_mode));
-		dm_new_connector_state->scaling = RMX_FULL;
+		dm_new_connector_state->scaling = RMX_ASPECT;
 	}
 	return 0;
 }
@@ -1302,7 +1302,8 @@ static int odm_combine_segments_show(struct seq_file *m, void *unused)
 	if (connector->status != connector_status_connected)
 		return -ENODEV;

-	if (pipe_ctx != NULL && pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments)
+	if (pipe_ctx && pipe_ctx->stream_res.tg &&
+	    pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments)
 		pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments(pipe_ctx->stream_res.tg, &segments);

 	seq_printf(m, "%d\n", segments);
@@ -1141,6 +1141,7 @@ static bool detect_link_and_local_sink(struct dc_link *link,
 			 !sink->edid_caps.edid_hdmi)
 			sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
 		else if (dc_is_dvi_signal(sink->sink_signal) &&
+			 dc_is_dvi_signal(link->connector_signal) &&
 			 aud_support->hdmi_audio_native &&
 			 sink->edid_caps.edid_hdmi)
 			sink->sink_signal = SIGNAL_TYPE_HDMI_TYPE_A;
@@ -195,24 +195,6 @@ int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
 	return ret;
 }

-int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en)
-{
-	int ret = 0;
-	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
-	if (pp_funcs && pp_funcs->notify_rlc_state) {
-		mutex_lock(&adev->pm.mutex);
-
-		ret = pp_funcs->notify_rlc_state(
-				adev->powerplay.pp_handle,
-				en);
-
-		mutex_unlock(&adev->pm.mutex);
-	}
-
-	return ret;
-}
-
 int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
 {
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
@@ -4724,14 +4724,14 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 		ret = devm_device_add_group(adev->dev,
 					    &amdgpu_pm_policy_attr_group);
 		if (ret)
-			goto err_out0;
+			goto err_out1;
 	}

 	if (amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_GPUBOARD)) {
 		ret = devm_device_add_group(adev->dev,
 					    &amdgpu_board_attr_group);
 		if (ret)
-			goto err_out0;
+			goto err_out1;

 		if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT,
 						 (void *)&tmp) != -EOPNOTSUPP) {
 			sysfs_add_file_to_group(&adev->dev->kobj,
@@ -424,8 +424,6 @@ int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev);
 int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
 			     enum pp_mp1_state mp1_state);

-int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en);
-
 int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev);

 int amdgpu_dpm_baco_exit(struct amdgpu_device *adev);
@@ -2040,6 +2040,12 @@ static int smu_disable_dpms(struct smu_context *smu)
 	    smu->is_apu && (amdgpu_in_reset(adev) || adev->in_s0ix))
 		return 0;

+	/* vangogh s0ix */
+	if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 0) ||
+	     amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 2)) &&
+	    adev->in_s0ix)
+		return 0;
+
 	/*
 	 * For gpu reset, runpm and hibernation through BACO,
 	 * BACO feature has to be kept enabled.
@@ -2217,6 +2217,9 @@ static int vangogh_post_smu_init(struct smu_context *smu)
 	uint32_t total_cu = adev->gfx.config.max_cu_per_sh *
 		adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines;

+	if (adev->in_s0ix)
+		return 0;
+
 	/* allow message will be sent after enable message on Vangogh*/
 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
 	    (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
@@ -413,7 +413,7 @@ obj-$(CONFIG_DRM_I915_GVT_KVMGT) += kvmgt.o
 #
 # Enable locally for CONFIG_DRM_I915_WERROR=y. See also scripts/Makefile.build
 ifdef CONFIG_DRM_I915_WERROR
-    cmd_checkdoc = PYTHONDONTWRITEBYTECODE=1 $(KERNELDOC) -none -Werror $<
+    cmd_checkdoc = PYTHONDONTWRITEBYTECODE=1 $(PYTHON3) $(KERNELDOC) -none -Werror $<
 endif

 # header test
@@ -205,7 +205,7 @@ static u64 div_u64_roundup(u64 nom, u32 den)

 u64 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u64 count)
 {
-	return div_u64_roundup(count * NSEC_PER_SEC, gt->clock_frequency);
+	return mul_u64_u32_div(count, NSEC_PER_SEC, gt->clock_frequency);
 }

 u64 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u64 count)
@@ -215,7 +215,7 @@ u64 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u64 count)

 u64 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u64 ns)
 {
-	return div_u64_roundup(gt->clock_frequency * ns, NSEC_PER_SEC);
+	return mul_u64_u32_div(ns, gt->clock_frequency, NSEC_PER_SEC);
 }

 u64 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u64 ns)
@@ -1595,8 +1595,20 @@ err_unlock:
 err_vma_res:
 	i915_vma_resource_free(vma_res);
 err_fence:
-	if (work)
-		dma_fence_work_commit_imm(&work->base);
+	if (work) {
+		/*
+		 * When pinning VMA to GGTT on CHV or BXT with VTD enabled,
+		 * commit VMA binding asynchronously to avoid risk of lock
+		 * inversion among reservation_ww locks held here and
+		 * cpu_hotplug_lock acquired from stop_machine(), which we
+		 * wrap around GGTT updates when running in those environments.
+		 */
+		if (i915_vma_is_ggtt(vma) &&
+		    intel_vm_no_concurrent_access_wa(vma->vm->i915))
+			dma_fence_work_commit(&work->base);
+		else
+			dma_fence_work_commit_imm(&work->base);
+	}
 err_rpm:
 	intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
@@ -7,6 +7,7 @@ config DRM_POWERVR
 	depends on DRM
 	depends on MMU
 	depends on PM
+	depends on POWER_SEQUENCING || !POWER_SEQUENCING
 	select DRM_EXEC
 	select DRM_GEM_SHMEM_HELPER
 	select DRM_SCHED
@@ -283,6 +283,10 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
 	unsigned int i;
 	unsigned long flags;

+	/* release GCE HW usage and start autosuspend */
+	pm_runtime_mark_last_busy(cmdq_cl->chan->mbox->dev);
+	pm_runtime_put_autosuspend(cmdq_cl->chan->mbox->dev);
+
 	if (data->sta < 0)
 		return;
@@ -618,6 +622,9 @@ static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank)
 	mtk_crtc->config_updating = false;
 	spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);

+	if (pm_runtime_resume_and_get(mtk_crtc->cmdq_client.chan->mbox->dev) < 0)
+		goto update_config_out;
+
 	mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle);
 	mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
 	goto update_config_out;
@@ -21,9 +21,6 @@

 static const u64 modifiers[] = {
 	DRM_FORMAT_MOD_LINEAR,
-	DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
-				AFBC_FORMAT_MOD_SPLIT |
-				AFBC_FORMAT_MOD_SPARSE),
 	DRM_FORMAT_MOD_INVALID,
 };
@@ -71,26 +68,7 @@ static bool mtk_plane_format_mod_supported(struct drm_plane *plane,
 					   uint32_t format,
 					   uint64_t modifier)
 {
-	if (modifier == DRM_FORMAT_MOD_LINEAR)
-		return true;
-
-	if (modifier != DRM_FORMAT_MOD_ARM_AFBC(
-				AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
-				AFBC_FORMAT_MOD_SPLIT |
-				AFBC_FORMAT_MOD_SPARSE))
-		return false;
-
-	if (format != DRM_FORMAT_XRGB8888 &&
-	    format != DRM_FORMAT_ARGB8888 &&
-	    format != DRM_FORMAT_BGRX8888 &&
-	    format != DRM_FORMAT_BGRA8888 &&
-	    format != DRM_FORMAT_ABGR8888 &&
-	    format != DRM_FORMAT_XBGR8888 &&
-	    format != DRM_FORMAT_RGB888 &&
-	    format != DRM_FORMAT_BGR888)
-		return false;
-
-	return true;
+	return modifier == DRM_FORMAT_MOD_LINEAR;
 }

 static void mtk_plane_destroy_state(struct drm_plane *plane,
@@ -2867,7 +2867,9 @@ nv50_display_create(struct drm_device *dev)
 	}

 	/* Assign the correct format modifiers */
-	if (disp->disp->object.oclass >= TU102_DISP)
+	if (disp->disp->object.oclass >= GB202_DISP)
+		nouveau_display(dev)->format_modifiers = wndwca7e_modifiers;
+	else if (disp->disp->object.oclass >= TU102_DISP)
 		nouveau_display(dev)->format_modifiers = wndwc57e_modifiers;
 	else
 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
@@ -104,4 +104,5 @@ struct nouveau_encoder *nv50_real_outp(struct drm_encoder *encoder);
 extern const u64 disp50xx_modifiers[];
 extern const u64 disp90xx_modifiers[];
 extern const u64 wndwc57e_modifiers[];
+extern const u64 wndwca7e_modifiers[];
 #endif
@@ -786,13 +786,14 @@ nv50_wndw_destroy(struct drm_plane *plane)
 }

 /* This function assumes the format has already been validated against the plane
- * and the modifier was validated against the device-wides modifier list at FB
+ * and the modifier was validated against the device-wide modifier list at FB
  * creation time.
 */
 static bool nv50_plane_format_mod_supported(struct drm_plane *plane,
 					    u32 format, u64 modifier)
 {
 	struct nouveau_drm *drm = nouveau_drm(plane->dev);
+	const struct drm_format_info *info = drm_format_info(format);
 	uint8_t i;

 	/* All chipsets can display all formats in linear layout */
@@ -800,13 +801,32 @@ static bool nv50_plane_format_mod_supported(struct drm_plane *plane,
 		return true;

 	if (drm->client.device.info.chipset < 0xc0) {
-		const struct drm_format_info *info = drm_format_info(format);
 		const uint8_t kind = (modifier >> 12) & 0xff;

 		if (!format) return false;

 		for (i = 0; i < info->num_planes; i++)
 			if ((info->cpp[i] != 4) && kind != 0x70) return false;
+	} else if (drm->client.device.info.chipset >= 0x1b2) {
+		const uint8_t slayout = ((modifier >> 22) & 0x1) |
+					((modifier >> 25) & 0x6);
+
+		if (!format)
+			return false;
+
+		/*
+		 * Note in practice this implies only formats where cpp is equal
+		 * for each plane, or >= 4 for all planes, are supported.
+		 */
+		for (i = 0; i < info->num_planes; i++) {
+			if (((info->cpp[i] == 2) && slayout != 3) ||
+			    ((info->cpp[i] == 1) && slayout != 2) ||
+			    ((info->cpp[i] >= 4) && slayout != 1))
+				return false;
+
+			/* 24-bit not supported. It has yet another layout */
+			WARN_ON(info->cpp[i] == 3);
+		}
+	}

 	return true;
@@ -179,6 +179,39 @@ wndwca7e_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 	return 0;
 }

+/****************************************************************
+ * Log2(block height) ----------------------------+ *
+ * Page Kind ----------------------------------+ | *
+ * Gob Height/Page Kind Generation ------+ | | *
+ * Sector layout -------+ | | | *
+ * Compression ------+ | | | | */
+const u64 wndwca7e_modifiers[] = { /* | | | | | */
+	/* 4cpp+ modifiers */
+	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 0),
+	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 1),
+	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 2),
+	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 3),
+	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 4),
+	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 5),
+	/* 1cpp/8bpp modifiers */
+	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 2, 2, 0x06, 0),
+	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 2, 2, 0x06, 1),
+	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 2, 2, 0x06, 2),
+	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 2, 2, 0x06, 3),
+	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 2, 2, 0x06, 4),
+	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 2, 2, 0x06, 5),
+	/* 2cpp/16bpp modifiers */
+	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 3, 2, 0x06, 0),
+	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 3, 2, 0x06, 1),
+	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 3, 2, 0x06, 2),
+	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 3, 2, 0x06, 3),
+	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 3, 2, 0x06, 4),
+	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 3, 2, 0x06, 5),
+	/* All formats support linear */
+	DRM_FORMAT_MOD_LINEAR,
+	DRM_FORMAT_MOD_INVALID
+};
+
 static const struct nv50_wndw_func
 wndwca7e = {
 	.acquire = wndwc37e_acquire,
@@ -173,26 +173,15 @@ int drm_sched_entity_error(struct drm_sched_entity *entity)
 }
 EXPORT_SYMBOL(drm_sched_entity_error);

+static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
+					  struct dma_fence_cb *cb);
+
 static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
 {
 	struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
-
-	drm_sched_fence_scheduled(job->s_fence, NULL);
-	drm_sched_fence_finished(job->s_fence, -ESRCH);
-	WARN_ON(job->s_fence->parent);
-	job->sched->ops->free_job(job);
-}
-
-/* Signal the scheduler finished fence when the entity in question is killed. */
-static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
-					  struct dma_fence_cb *cb)
-{
-	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
-						 finish_cb);
-
-	dma_fence_put(f);
+	struct dma_fence *f;
+	unsigned long index;

 	/* Wait for all dependencies to avoid data corruptions */
 	xa_for_each(&job->dependencies, index, f) {
 		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
@@ -220,6 +209,21 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
 		dma_fence_put(f);
 	}

+	drm_sched_fence_scheduled(job->s_fence, NULL);
+	drm_sched_fence_finished(job->s_fence, -ESRCH);
+	WARN_ON(job->s_fence->parent);
+	job->sched->ops->free_job(job);
+}
+
+/* Signal the scheduler finished fence when the entity in question is killed. */
+static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
+					  struct dma_fence_cb *cb)
+{
+	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
+						 finish_cb);
+
+	dma_fence_put(f);
+
 	INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
 	schedule_work(&job->work);
 }
@@ -85,6 +85,7 @@ config DRM_PANEL_MIPI_DBI

 config DRM_PIXPAPER
 	tristate "DRM support for PIXPAPER display panels"
 	depends on DRM && SPI
+	depends on MMU
 	select DRM_CLIENT_SELECTION
 	select DRM_GEM_SHMEM_HELPER
 	select DRM_KMS_HELPER
@@ -988,16 +988,16 @@ void xe_device_shutdown(struct xe_device *xe)

 	drm_dbg(&xe->drm, "Shutting down device\n");

-	if (xe_driver_flr_disabled(xe)) {
-		xe_display_pm_shutdown(xe);
+	xe_display_pm_shutdown(xe);

-		xe_irq_suspend(xe);
+	xe_irq_suspend(xe);

-		for_each_gt(gt, xe, id)
-			xe_gt_shutdown(gt);
+	for_each_gt(gt, xe, id)
+		xe_gt_shutdown(gt);

-		xe_display_pm_shutdown_late(xe);
-	} else {
+	xe_display_pm_shutdown_late(xe);
+
+	if (!xe_driver_flr_disabled(xe)) {
 		/* BOOM! */
 		__xe_driver_flr(xe);
 	}
@@ -165,7 +165,8 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)

 	for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
 		err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
-					  &syncs_user[num_syncs], SYNC_PARSE_FLAG_EXEC |
+					  &syncs_user[num_syncs], NULL, 0,
+					  SYNC_PARSE_FLAG_EXEC |
 					  (xe_vm_in_lr_mode(vm) ?
 					   SYNC_PARSE_FLAG_LR_MODE : 0));
 		if (err)
@@ -10,6 +10,7 @@
 #include <drm/drm_device.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_file.h>
+#include <drm/drm_syncobj.h>
 #include <uapi/drm/xe_drm.h>

 #include "xe_dep_scheduler.h"
@@ -324,6 +325,16 @@ struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
 	}
 	xe_vm_put(migrate_vm);

+	if (!IS_ERR(q)) {
+		int err = drm_syncobj_create(&q->ufence_syncobj,
+					     DRM_SYNCOBJ_CREATE_SIGNALED,
+					     NULL);
+		if (err) {
+			xe_exec_queue_put(q);
+			return ERR_PTR(err);
+		}
+	}
+
 	return q;
 }
 ALLOW_ERROR_INJECTION(xe_exec_queue_create_bind, ERRNO);
@@ -333,6 +344,9 @@ void xe_exec_queue_destroy(struct kref *ref)
 	struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
 	struct xe_exec_queue *eq, *next;

+	if (q->ufence_syncobj)
+		drm_syncobj_put(q->ufence_syncobj);
+
 	if (xe_exec_queue_uses_pxp(q))
 		xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);
@@ -15,6 +15,7 @@
 #include "xe_hw_fence_types.h"
 #include "xe_lrc_types.h"

+struct drm_syncobj;
 struct xe_execlist_exec_queue;
 struct xe_gt;
 struct xe_guc_exec_queue;
@@ -155,6 +156,12 @@ struct xe_exec_queue {
 		struct list_head link;
 	} pxp;

+	/** @ufence_syncobj: User fence syncobj */
+	struct drm_syncobj *ufence_syncobj;
+
+	/** @ufence_timeline_value: User fence timeline value */
+	u64 ufence_timeline_value;
+
 	/** @ops: submission backend exec queue operations */
 	const struct xe_exec_queue_ops *ops;
@@ -200,6 +200,9 @@ static void guc_ct_fini(struct drm_device *drm, void *arg)
 {
 	struct xe_guc_ct *ct = arg;

+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+	cancel_work_sync(&ct->dead.worker);
+#endif
 	ct_exit_safe_mode(ct);
 	destroy_workqueue(ct->g2h_wq);
 	xa_destroy(&ct->fence_lookup);
@@ -10,6 +10,7 @@

 #include <drm/drm_drv.h>
 #include <drm/drm_managed.h>
+#include <drm/drm_syncobj.h>
 #include <uapi/drm/xe_drm.h>

 #include <generated/xe_wa_oob.h>
@@ -1389,7 +1390,9 @@ static int xe_oa_user_extensions(struct xe_oa *oa, enum xe_oa_user_extn_from fro
 	return 0;
 }

-static int xe_oa_parse_syncs(struct xe_oa *oa, struct xe_oa_open_param *param)
+static int xe_oa_parse_syncs(struct xe_oa *oa,
+			     struct xe_oa_stream *stream,
+			     struct xe_oa_open_param *param)
 {
 	int ret, num_syncs, num_ufence = 0;

@@ -1409,7 +1412,9 @@ static int xe_oa_parse_syncs(struct xe_oa *oa,

 	for (num_syncs = 0; num_syncs < param->num_syncs; num_syncs++) {
 		ret = xe_sync_entry_parse(oa->xe, param->xef, &param->syncs[num_syncs],
-					  &param->syncs_user[num_syncs], 0);
+					  &param->syncs_user[num_syncs],
+					  stream->ufence_syncobj,
+					  ++stream->ufence_timeline_value, 0);
 		if (ret)
 			goto err_syncs;

@@ -1539,7 +1544,7 @@ static long xe_oa_config_locked(struct xe_oa_stream *stream, u64 arg)
 		return -ENODEV;

 	param.xef = stream->xef;
-	err = xe_oa_parse_syncs(stream->oa, &param);
+	err = xe_oa_parse_syncs(stream->oa, stream, &param);
 	if (err)
 		goto err_config_put;

@@ -1635,6 +1640,7 @@ static void xe_oa_destroy_locked(struct xe_oa_stream *stream)
 	if (stream->exec_q)
 		xe_exec_queue_put(stream->exec_q);

+	drm_syncobj_put(stream->ufence_syncobj);
 	kfree(stream);
 }

@@ -1826,6 +1832,7 @@ static int xe_oa_stream_open_ioctl_locked(struct xe_oa *oa,
 					  struct xe_oa_open_param *param)
 {
 	struct xe_oa_stream *stream;
+	struct drm_syncobj *ufence_syncobj;
 	int stream_fd;
 	int ret;

@@ -1836,17 +1843,31 @@ static int xe_oa_stream_open_ioctl_locked(struct xe_oa *oa,
 		goto exit;
 	}

+	ret = drm_syncobj_create(&ufence_syncobj, DRM_SYNCOBJ_CREATE_SIGNALED,
+				 NULL);
+	if (ret)
+		goto exit;
+
 	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
 	if (!stream) {
 		ret = -ENOMEM;
-		goto exit;
+		goto err_syncobj;
 	}

+	stream->ufence_syncobj = ufence_syncobj;
 	stream->oa = oa;
-	ret = xe_oa_stream_init(stream, param);
+
+	ret = xe_oa_parse_syncs(oa, stream, param);
 	if (ret)
 		goto err_free;

+	ret = xe_oa_stream_init(stream, param);
+	if (ret) {
+		while (param->num_syncs--)
+			xe_sync_entry_cleanup(&param->syncs[param->num_syncs]);
+		kfree(param->syncs);
+		goto err_free;
+	}
+
 	if (!param->disabled) {
 		ret = xe_oa_enable_locked(stream);
 		if (ret)
@@ -1870,6 +1891,8 @@ err_destroy:
 	xe_oa_stream_destroy(stream);
 err_free:
 	kfree(stream);
+err_syncobj:
+	drm_syncobj_put(ufence_syncobj);
 exit:
 	return ret;
 }
@@ -2083,22 +2106,14 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
 		goto err_exec_q;
 	}

-	ret = xe_oa_parse_syncs(oa, &param);
-	if (ret)
-		goto err_exec_q;
-
 	mutex_lock(&param.hwe->gt->oa.gt_lock);
 	ret = xe_oa_stream_open_ioctl_locked(oa, &param);
 	mutex_unlock(&param.hwe->gt->oa.gt_lock);
 	if (ret < 0)
-		goto err_sync_cleanup;
+		goto err_exec_q;

 	return ret;

-err_sync_cleanup:
-	while (param.num_syncs--)
-		xe_sync_entry_cleanup(&param.syncs[param.num_syncs]);
-	kfree(param.syncs);
 err_exec_q:
 	if (param.exec_q)
 		xe_exec_queue_put(param.exec_q);
@@ -15,6 +15,8 @@
 #include "regs/xe_reg_defs.h"
 #include "xe_hw_engine_types.h"

+struct drm_syncobj;
+
 #define DEFAULT_XE_OA_BUFFER_SIZE SZ_16M

 enum xe_oa_report_header {
@@ -248,6 +250,12 @@ struct xe_oa_stream {
 	/** @xef: xe_file with which the stream was opened */
 	struct xe_file *xef;

+	/** @ufence_syncobj: User fence syncobj */
+	struct drm_syncobj *ufence_syncobj;
+
+	/** @ufence_timeline_value: User fence timeline value */
+	u64 ufence_timeline_value;
+
 	/** @last_fence: fence to use in stream destroy when needed */
 	struct dma_fence *last_fence;
@@ -113,6 +113,8 @@ static void user_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
|
||||
int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
|
||||
struct xe_sync_entry *sync,
|
||||
struct drm_xe_sync __user *sync_user,
|
||||
struct drm_syncobj *ufence_syncobj,
|
||||
u64 ufence_timeline_value,
|
||||
unsigned int flags)
|
||||
{
|
||||
struct drm_xe_sync sync_in;
|
||||
@@ -192,10 +194,15 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
|
||||
if (exec) {
|
||||
sync->addr = sync_in.addr;
|
||||
} else {
|
||||
sync->ufence_timeline_value = ufence_timeline_value;
|
||||
sync->ufence = user_fence_create(xe, sync_in.addr,
|
||||
sync_in.timeline_value);
|
||||
if (XE_IOCTL_DBG(xe, IS_ERR(sync->ufence)))
|
||||
return PTR_ERR(sync->ufence);
|
||||
sync->ufence_chain_fence = dma_fence_chain_alloc();
|
||||
if (!sync->ufence_chain_fence)
|
||||
return -ENOMEM;
|
||||
sync->ufence_syncobj = ufence_syncobj;
|
||||
}
|
||||
|
||||
break;
|
||||
@@ -239,7 +246,12 @@ void xe_sync_entry_signal(struct xe_sync_entry *sync, struct dma_fence *fence)
|
||||
} else if (sync->ufence) {
|
||||
int err;
|
||||
|
||||
dma_fence_get(fence);
|
||||
drm_syncobj_add_point(sync->ufence_syncobj,
|
||||
sync->ufence_chain_fence,
|
||||
fence, sync->ufence_timeline_value);
|
||||
sync->ufence_chain_fence = NULL;
|
||||
|
||||
fence = drm_syncobj_fence_get(sync->ufence_syncobj);
|
||||
user_fence_get(sync->ufence);
|
||||
err = dma_fence_add_callback(fence, &sync->ufence->cb,
|
||||
user_fence_cb);
|
||||
@@ -259,7 +271,8 @@ void xe_sync_entry_cleanup(struct xe_sync_entry *sync)
|
||||
drm_syncobj_put(sync->syncobj);
|
||||
dma_fence_put(sync->fence);
|
||||
dma_fence_chain_free(sync->chain_fence);
|
||||
if (sync->ufence)
|
||||
dma_fence_chain_free(sync->ufence_chain_fence);
|
||||
if (!IS_ERR_OR_NULL(sync->ufence))
|
||||
user_fence_put(sync->ufence);
|
||||
}
|
||||
|
||||
|
||||
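The signal path above publishes the hardware fence as the next point on the stream's timeline, then waits on the syncobj's composite fence rather than the raw fence, so earlier points on the timeline are honoured as well. A condensed sketch of that sequence, with reference handling and callback-failure cleanup elided (my_cb and attach_timeline_point are hypothetical names):

#include <drm/drm_syncobj.h>
#include <linux/dma-fence.h>
#include <linux/dma-fence-chain.h>

/* Hypothetical completion callback; in the patch this role is played
 * by user_fence_cb(), which writes the user fence value. */
static void my_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	/* Signal the user-visible fence here, then drop references. */
}

static int attach_timeline_point(struct drm_syncobj *syncobj,
				 struct dma_fence_chain *chain,
				 struct dma_fence *hw_fence, u64 point,
				 struct dma_fence_cb *cb)
{
	struct dma_fence *composite;

	/* add_point takes over both the chain node and a fence reference,
	 * hence the get before handing hw_fence in. */
	dma_fence_get(hw_fence);
	drm_syncobj_add_point(syncobj, chain, hw_fence, point);

	/* Wait on the syncobj's latest fence, not the raw hw_fence, so the
	 * callback fires only once all prior points have signaled.
	 * Dropping the composite reference is elided for brevity;
	 * -ENOENT from add_callback means it already signaled. */
	composite = drm_syncobj_fence_get(syncobj);
	return dma_fence_add_callback(composite, cb, my_cb);
}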
@@ -8,6 +8,7 @@

#include "xe_sync_types.h"

struct drm_syncobj;
struct xe_device;
struct xe_exec_queue;
struct xe_file;
@@ -21,6 +22,8 @@ struct xe_vm;
int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
struct xe_sync_entry *sync,
struct drm_xe_sync __user *sync_user,
struct drm_syncobj *ufence_syncobj,
u64 ufence_timeline_value,
unsigned int flags);
int xe_sync_entry_add_deps(struct xe_sync_entry *sync,
struct xe_sched_job *job);

@@ -18,9 +18,12 @@ struct xe_sync_entry {
struct drm_syncobj *syncobj;
struct dma_fence *fence;
struct dma_fence_chain *chain_fence;
struct dma_fence_chain *ufence_chain_fence;
struct drm_syncobj *ufence_syncobj;
struct xe_user_fence *ufence;
u64 addr;
u64 timeline_value;
u64 ufence_timeline_value;
u32 type;
u32 flags;
};

@@ -3606,8 +3606,12 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)

syncs_user = u64_to_user_ptr(args->syncs);
for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
struct xe_exec_queue *__q = q ?: vm->q[0];

err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
&syncs_user[num_syncs],
__q->ufence_syncobj,
++__q->ufence_timeline_value,
(xe_vm_in_lr_mode(vm) ?
SYNC_PARSE_FLAG_LR_MODE : 0) |
(!args->num_binds ?
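Note how the bind ioctl reserves a point per parsed sync with a pre-increment of the owning queue's counter, which keeps values unique and monotonic per queue while leaving point 0 for the initially-signaled state of the syncobj. Reduced to its core (the queue type here is hypothetical):

#include <linux/types.h>

/* Hypothetical per-queue state, mirroring the exec-queue fields used above. */
struct example_queue {
	u64 ufence_timeline_value;
};

/* Each caller takes the next timeline point; point 0 stays reserved for
 * the pre-signaled state established at syncobj creation. */
static u64 example_reserve_timeline_point(struct example_queue *q)
{
	return ++q->ufence_timeline_value;
}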
@@ -118,6 +118,7 @@ struct pca954x {
raw_spinlock_t lock;
struct regulator *supply;

struct gpio_desc *reset_gpio;
struct reset_control *reset_cont;
};

@@ -315,25 +316,6 @@ static u8 pca954x_regval(struct pca954x *data, u8 chan)
return 1 << chan;
}

static void pca954x_reset_assert(struct pca954x *data)
{
if (data->reset_cont)
reset_control_assert(data->reset_cont);
}

static void pca954x_reset_deassert(struct pca954x *data)
{
if (data->reset_cont)
reset_control_deassert(data->reset_cont);
}

static void pca954x_reset_mux(struct pca954x *data)
{
pca954x_reset_assert(data);
udelay(1);
pca954x_reset_deassert(data);
}

static int pca954x_select_chan(struct i2c_mux_core *muxc, u32 chan)
{
struct pca954x *data = i2c_mux_priv(muxc);
@@ -347,8 +329,6 @@ static int pca954x_select_chan(struct i2c_mux_core *muxc, u32 chan)
ret = pca954x_reg_write(muxc->parent, client, regval);
data->last_chan = ret < 0 ? 0 : regval;
}
if (ret == -ETIMEDOUT && data->reset_cont)
pca954x_reset_mux(data);

return ret;
}
@@ -358,7 +338,6 @@ static int pca954x_deselect_mux(struct i2c_mux_core *muxc, u32 chan)
struct pca954x *data = i2c_mux_priv(muxc);
struct i2c_client *client = data->client;
s32 idle_state;
int ret = 0;

idle_state = READ_ONCE(data->idle_state);
if (idle_state >= 0)
@@ -368,10 +347,8 @@ static int pca954x_deselect_mux(struct i2c_mux_core *muxc, u32 chan)
if (idle_state == MUX_IDLE_DISCONNECT) {
/* Deselect active channel */
data->last_chan = 0;
ret = pca954x_reg_write(muxc->parent, client,
data->last_chan);
if (ret == -ETIMEDOUT && data->reset_cont)
pca954x_reset_mux(data);
return pca954x_reg_write(muxc->parent, client,
data->last_chan);
}

/* otherwise leave as-is */
@@ -550,10 +527,29 @@ static int pca954x_get_reset(struct device *dev, struct pca954x *data)
if (IS_ERR(data->reset_cont))
return dev_err_probe(dev, PTR_ERR(data->reset_cont),
"Failed to get reset\n");
else if (data->reset_cont)
return 0;

/*
* fallback to legacy reset-gpios
*/
data->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(data->reset_gpio)) {
return dev_err_probe(dev, PTR_ERR(data->reset_gpio),
"Failed to get reset gpio");
}

return 0;
}

static void pca954x_reset_deassert(struct pca954x *data)
{
if (data->reset_cont)
reset_control_deassert(data->reset_cont);
else
gpiod_set_value_cansleep(data->reset_gpio, 0);
}

/*
* I2C init/probing/exit functions
*/
@@ -593,7 +589,7 @@ static int pca954x_probe(struct i2c_client *client)
if (ret)
goto fail_cleanup;

if (data->reset_cont) {
if (data->reset_cont || data->reset_gpio) {
udelay(1);
pca954x_reset_deassert(data);
/* Give the chip some time to recover. */
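The pca954x probe path now prefers a reset controller and falls back to the legacy reset-gpios binding only when none is provided. A condensed sketch of that lookup order, assuming a standard probe context; the exact reset_control getter is not visible in this hunk, so the _optional_shared variant below is an assumption, and the example_ names are hypothetical:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/reset.h>

struct example_mux {
	struct gpio_desc *reset_gpio;
	struct reset_control *reset_cont;
};

static int example_get_reset(struct device *dev, struct example_mux *data)
{
	/* Prefer a dedicated reset controller when the firmware has one;
	 * the optional getter returns NULL (not an error) if absent. */
	data->reset_cont = devm_reset_control_get_optional_shared(dev, NULL);
	if (IS_ERR(data->reset_cont))
		return dev_err_probe(dev, PTR_ERR(data->reset_cont),
				     "Failed to get reset\n");
	if (data->reset_cont)
		return 0;

	/* Fall back to the legacy reset-gpios binding. */
	data->reset_gpio = devm_gpiod_get_optional(dev, "reset",
						   GPIOD_OUT_HIGH);
	if (IS_ERR(data->reset_gpio))
		return dev_err_probe(dev, PTR_ERR(data->reset_gpio),
				     "Failed to get reset gpio");

	return 0;
}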
@@ -206,6 +206,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
return ret;

err_free:
ib_umem_release(umem);
rdma_restrack_put(&cq->res);
kfree(cq);
err_event_file:

@@ -913,7 +913,7 @@ void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
}

static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
static void bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
{
struct bnxt_re_qp *gsi_sqp;
struct bnxt_re_ah *gsi_sah;
@@ -933,10 +933,9 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)

ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
if (rc) {
if (rc)
ibdev_err(&rdev->ibdev, "Destroy Shadow QP failed");
goto fail;
}

bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);

/* remove from active qp list */
@@ -951,10 +950,6 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
rdev->gsi_ctx.gsi_sqp = NULL;
rdev->gsi_ctx.gsi_sah = NULL;
rdev->gsi_ctx.sqp_tbl = NULL;

return 0;
fail:
return rc;
}

static void bnxt_re_del_unique_gid(struct bnxt_re_dev *rdev)

@@ -1216,13 +1216,13 @@ int efa_create_cq_umem(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
if (umem->length < cq->size) {
ibdev_dbg(&dev->ibdev, "External memory too small\n");
err = -EINVAL;
goto err_free_mem;
goto err_out;
}

if (!ib_umem_is_contiguous(umem)) {
ibdev_dbg(&dev->ibdev, "Non contiguous CQ unsupported\n");
err = -EINVAL;
goto err_free_mem;
goto err_out;
}

cq->cpu_addr = NULL;
@@ -1251,7 +1251,7 @@ int efa_create_cq_umem(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,

err = efa_com_create_cq(&dev->edev, &params, &result);
if (err)
goto err_free_mem;
goto err_free_mapped;

resp.db_off = result.db_off;
resp.cq_idx = result.cq_idx;
@@ -1299,12 +1299,10 @@ err_remove_mmap:
efa_cq_user_mmap_entries_remove(cq);
err_destroy_cq:
efa_destroy_cq_idx(dev, cq->cq_idx);
err_free_mem:
if (umem)
ib_umem_release(umem);
else
efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size, DMA_FROM_DEVICE);

err_free_mapped:
if (!umem)
efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
DMA_FROM_DEVICE);
err_out:
atomic64_inc(&dev->stats.create_cq_err);
return err;
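The efa fix above works by giving each acquired resource its own unwind label (err_free_mapped versus the catch-all err_out), so a failure releases exactly what was set up before it and nothing more. The same ordered-label pattern in a self-contained userspace sketch (the resource names are illustrative):

#include <stdio.h>
#include <stdlib.h>

/* Each acquisition gets a label; jumping to a label unwinds everything
 * acquired up to that point and nothing acquired after it. */
static int example_create(char **out_a, char **out_b)
{
	char *a, *b;
	int err;

	a = malloc(16);
	if (!a) {
		err = -1;
		goto err_out;		/* nothing to unwind yet */
	}

	b = malloc(16);
	if (!b) {
		err = -1;
		goto err_free_a;	/* only 'a' exists so far */
	}

	/* On success the caller owns both resources. */
	*out_a = a;
	*out_b = b;
	return 0;

err_free_a:
	free(a);
err_out:
	fprintf(stderr, "create failed\n");
	return err;
}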
@@ -30,6 +30,7 @@
* SOFTWARE.
*/

#include <linux/pci.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_device.h"
@@ -37,6 +38,43 @@
#include "hns_roce_hem.h"
#include "hns_roce_common.h"

void hns_roce_put_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx)
{
struct hns_roce_dev *hr_dev = to_hr_dev(uctx->ibucontext.device);
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;

if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)
return;

mutex_lock(&cq_table->bank_mutex);
cq_table->ctx_num[uctx->cq_bank_id]--;
mutex_unlock(&cq_table->bank_mutex);
}

void hns_roce_get_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx)
{
struct hns_roce_dev *hr_dev = to_hr_dev(uctx->ibucontext.device);
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
u32 least_load = cq_table->ctx_num[0];
u8 bankid = 0;
u8 i;

if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)
return;

mutex_lock(&cq_table->bank_mutex);
for (i = 1; i < HNS_ROCE_CQ_BANK_NUM; i++) {
if (cq_table->ctx_num[i] < least_load) {
least_load = cq_table->ctx_num[i];
bankid = i;
}
}
cq_table->ctx_num[bankid]++;
mutex_unlock(&cq_table->bank_mutex);

uctx->cq_bank_id = bankid;
}

static u8 get_least_load_bankid_for_cq(struct hns_roce_bank *bank)
{
u32 least_load = bank[0].inuse;
@@ -55,7 +93,21 @@ static u8 get_least_load_bankid_for_cq(struct hns_roce_bank *bank)
return bankid;
}

static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
static u8 select_cq_bankid(struct hns_roce_dev *hr_dev,
struct hns_roce_bank *bank, struct ib_udata *udata)
{
struct hns_roce_ucontext *uctx = udata ?
rdma_udata_to_drv_context(udata, struct hns_roce_ucontext,
ibucontext) : NULL;

if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
return uctx ? uctx->cq_bank_id : 0;

return get_least_load_bankid_for_cq(bank);
}

static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
struct ib_udata *udata)
{
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
struct hns_roce_bank *bank;
@@ -63,7 +115,7 @@ static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
int id;

mutex_lock(&cq_table->bank_mutex);
bankid = get_least_load_bankid_for_cq(cq_table->bank);
bankid = select_cq_bankid(hr_dev, cq_table->bank, udata);
bank = &cq_table->bank[bankid];

id = ida_alloc_range(&bank->ida, bank->min, bank->max, GFP_KERNEL);
@@ -396,7 +448,7 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
goto err_cq_buf;
}

ret = alloc_cqn(hr_dev, hr_cq);
ret = alloc_cqn(hr_dev, hr_cq, udata);
if (ret) {
ibdev_err(ibdev, "failed to alloc CQN, ret = %d.\n", ret);
goto err_cq_db;
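On HIP09 and newer, the CQ bank is now fixed once per user context, chosen as the bank serving the fewest contexts: an argmin scan plus accounting under the bank mutex. The selection reduces to the following standalone sketch (the bank count and type names are illustrative, not the driver's):

#include <linux/mutex.h>
#include <linux/types.h>

#define EXAMPLE_BANK_NUM 4	/* illustrative; the driver uses HNS_ROCE_CQ_BANK_NUM */

struct example_cq_table {
	struct mutex bank_mutex;
	u32 ctx_num[EXAMPLE_BANK_NUM];
};

/* Pick the bank currently serving the fewest contexts and account for
 * the new one under the same lock, so concurrent opens stay balanced. */
static u8 example_get_bankid(struct example_cq_table *t)
{
	u32 least;
	u8 bankid = 0;
	u8 i;

	mutex_lock(&t->bank_mutex);
	least = t->ctx_num[0];
	for (i = 1; i < EXAMPLE_BANK_NUM; i++) {
		if (t->ctx_num[i] < least) {
			least = t->ctx_num[i];
			bankid = i;
		}
	}
	t->ctx_num[bankid]++;
	mutex_unlock(&t->bank_mutex);

	return bankid;
}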
@@ -217,6 +217,7 @@ struct hns_roce_ucontext {
struct mutex page_mutex;
struct hns_user_mmap_entry *db_mmap_entry;
u32 config;
u8 cq_bank_id;
};

struct hns_roce_pd {
@@ -495,6 +496,7 @@ struct hns_roce_cq_table {
struct hns_roce_hem_table table;
struct hns_roce_bank bank[HNS_ROCE_CQ_BANK_NUM];
struct mutex bank_mutex;
u32 ctx_num[HNS_ROCE_CQ_BANK_NUM];
};

struct hns_roce_srq_table {
@@ -1305,5 +1307,7 @@ hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
size_t length,
enum hns_roce_mmap_type mmap_type);
bool check_sl_valid(struct hns_roce_dev *hr_dev, u8 sl);
void hns_roce_put_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx);
void hns_roce_get_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx);

#endif /* _HNS_ROCE_DEVICE_H */

@@ -165,6 +165,8 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
hr_reg_write(fseg, FRMR_PBL_BUF_PG_SZ,
to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
hr_reg_clear(fseg, FRMR_BLK_MODE);
hr_reg_clear(fseg, FRMR_BLOCK_SIZE);
hr_reg_clear(fseg, FRMR_ZBVA);
}

static void set_atomic_seg(const struct ib_send_wr *wr,
@@ -339,9 +341,6 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
int j = 0;
int i;

hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX,
(*sge_ind) & (qp->sge.sge_cnt - 1));

hr_reg_write(rc_sq_wqe, RC_SEND_WQE_INLINE,
!!(wr->send_flags & IB_SEND_INLINE));
if (wr->send_flags & IB_SEND_INLINE)
@@ -586,6 +585,9 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
hr_reg_write(rc_sq_wqe, RC_SEND_WQE_CQE,
(wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);

hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX,
curr_idx & (qp->sge.sge_cnt - 1));

if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
if (msg_len != ATOMIC_WR_LEN)
@@ -734,6 +736,9 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
owner_bit =
~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);

/* RC and UD share the same DirectWQE field layout */
((struct hns_roce_v2_rc_send_wqe *)wqe)->byte_4 = 0;

/* Corresponding to the QP type, wqe process separately */
if (ibqp->qp_type == IB_QPT_RC)
ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);
@@ -7048,7 +7053,6 @@ static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
goto error_failed_roce_init;
}

handle->priv = hr_dev;

return 0;

@@ -425,6 +425,8 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
if (ret)
goto error_fail_copy_to_udata;

hns_roce_get_cq_bankid_for_uctx(context);

return 0;

error_fail_copy_to_udata:
@@ -447,6 +449,8 @@ static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
struct hns_roce_dev *hr_dev = to_hr_dev(ibcontext->device);

hns_roce_put_cq_bankid_for_uctx(context);

if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
mutex_destroy(&context->page_mutex);

@@ -662,7 +662,6 @@ static int set_user_sq_size(struct hns_roce_dev *hr_dev,

hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
hr_qp->sq.wqe_cnt = cnt;
cap->max_send_sge = hr_qp->sq.max_gs;

return 0;
}
@@ -744,7 +743,6 @@ static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,

/* sync the parameters of kernel QP to user's configuration */
cap->max_send_wr = cnt;
cap->max_send_sge = hr_qp->sq.max_gs;

return 0;
}

@@ -71,7 +71,7 @@ int irdma_hmc_init_pble(struct irdma_sc_dev *dev,
static void get_sd_pd_idx(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct sd_pd_idx *idx)
{
idx->sd_idx = (u32)pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
idx->sd_idx = pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr / IRDMA_HMC_PAGED_BP_SIZE);
idx->rel_pd_idx = (idx->pd_idx % IRDMA_HMC_PD_CNT_IN_SD);
}

@@ -706,7 +706,7 @@ struct irdma_sc_dev {
u32 vchnl_ver;
u16 num_vfs;
u16 hmc_fn_id;
u8 vf_id;
u16 vf_id;
bool privileged:1;
bool vchnl_up:1;
bool ceq_valid:1;

@@ -2503,6 +2503,7 @@ static int irdma_create_cq(struct ib_cq *ibcq,
spin_lock_init(&iwcq->lock);
INIT_LIST_HEAD(&iwcq->resize_list);
INIT_LIST_HEAD(&iwcq->cmpl_generated);
iwcq->cq_num = cq_num;
info.dev = dev;
ukinfo->cq_size = max(entries, 4);
ukinfo->cq_id = cq_num;

@@ -140,7 +140,7 @@ struct irdma_srq {
struct irdma_cq {
struct ib_cq ibcq;
struct irdma_sc_cq sc_cq;
u16 cq_num;
u32 cq_num;
bool user_mode;
atomic_t armed;
enum irdma_cmpl_notify last_notify;
@@ -707,7 +707,8 @@ static int iopt_unmap_iova_range(struct io_pagetable *iopt, unsigned long start,
struct iopt_area *area;
unsigned long unmapped_bytes = 0;
unsigned int tries = 0;
int rc = -ENOENT;
/* If there are no mapped entries then success */
int rc = 0;

/*
* The domains_rwsem must be held in read mode any time any area->pages
@@ -777,8 +778,6 @@ again:

down_write(&iopt->iova_rwsem);
}
if (unmapped_bytes)
rc = 0;

out_unlock_iova:
up_write(&iopt->iova_rwsem);
@@ -815,13 +814,8 @@ int iopt_unmap_iova(struct io_pagetable *iopt, unsigned long iova,

int iopt_unmap_all(struct io_pagetable *iopt, unsigned long *unmapped)
{
int rc;

rc = iopt_unmap_iova_range(iopt, 0, ULONG_MAX, unmapped);
/* If the IOVAs are empty then unmap all succeeds */
if (rc == -ENOENT)
return 0;
return rc;
return iopt_unmap_iova_range(iopt, 0, ULONG_MAX, unmapped);
}

/* The caller must always free all the nodes in the allowed_iova rb_root. */

@@ -367,6 +367,10 @@ int iommufd_ioas_unmap(struct iommufd_ucmd *ucmd)
&unmapped);
if (rc)
goto out_put;
if (!unmapped) {
rc = -ENOENT;
goto out_put;
}
}

cmd->length = unmapped;

@@ -130,9 +130,8 @@ struct iova_bitmap {
static unsigned long iova_bitmap_offset_to_index(struct iova_bitmap *bitmap,
unsigned long iova)
{
unsigned long pgsize = 1UL << bitmap->mapped.pgshift;

return iova / (BITS_PER_TYPE(*bitmap->bitmap) * pgsize);
return (iova >> bitmap->mapped.pgshift) /
BITS_PER_TYPE(*bitmap->bitmap);
}

/*
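The iova_bitmap change keeps the same index but reorders the arithmetic: shifting the IOVA down first and then dividing by the word width avoids forming the bits-times-pagesize product, which can overflow an unsigned long for large page shifts. A quick equivalence check in plain C (the values are arbitrary):

#include <assert.h>
#include <stdint.h>

#define BITS_PER_WORD 64	/* stands in for BITS_PER_TYPE(*bitmap->bitmap) */

int main(void)
{
	uint64_t iova = 0x123456789000ULL;
	unsigned int pgshift = 12;	/* 4 KiB pages */

	uint64_t idx_old = iova / (BITS_PER_WORD * (1ULL << pgshift));
	uint64_t idx_new = (iova >> pgshift) / BITS_PER_WORD;

	/* floor(floor(iova / 2^s) / w) == floor(iova / (w * 2^s)), so both
	 * forms give the same index, but the new one never computes the
	 * potentially overflowing w * 2^s product. */
	assert(idx_old == idx_new);
	return 0;
}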
@@ -1904,13 +1904,13 @@ out:
mISDN_freebchannel(&hw->bch[1]);
mISDN_freebchannel(&hw->bch[0]);
mISDN_freedchannel(&hw->dch);
kfree(hw);
return err;
}

static int
hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
int err;
struct hfcsusb *hw;
struct usb_device *dev = interface_to_usbdev(intf);
struct usb_host_interface *iface = intf->cur_altsetting;
@@ -2101,20 +2101,28 @@ hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id)
if (!hw->ctrl_urb) {
pr_warn("%s: No memory for control urb\n",
driver_info->vend_name);
kfree(hw);
return -ENOMEM;
err = -ENOMEM;
goto err_free_hw;
}

pr_info("%s: %s: detected \"%s\" (%s, if=%d alt=%d)\n",
hw->name, __func__, driver_info->vend_name,
conf_str[small_match], ifnum, alt_used);

if (setup_instance(hw, dev->dev.parent))
return -EIO;
if (setup_instance(hw, dev->dev.parent)) {
err = -EIO;
goto err_free_urb;
}

hw->intf = intf;
usb_set_intfdata(hw->intf, hw);
return 0;

err_free_urb:
usb_free_urb(hw->ctrl_urb);
err_free_hw:
kfree(hw);
return err;
}

/* function called when an active device is removed */

@@ -1010,6 +1010,11 @@ int vb2_ioctl_remove_bufs(struct file *file, void *priv,
if (vb2_queue_is_busy(vdev->queue, file))
return -EBUSY;

if (vb2_fileio_is_active(vdev->queue)) {
dprintk(vdev->queue, 1, "file io in progress\n");
return -EBUSY;
}

return vb2_core_remove_bufs(vdev->queue, d->index, d->count);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_remove_bufs);
@@ -1136,11 +1136,8 @@ int cx18_init_on_first_open(struct cx18 *cx)
int video_input;
int fw_retry_count = 3;
struct v4l2_frequency vf;
struct cx18_open_id fh;
v4l2_std_id std;

fh.cx = cx;

if (test_bit(CX18_F_I_FAILED, &cx->i_flags))
return -ENXIO;

@@ -1220,14 +1217,14 @@ int cx18_init_on_first_open(struct cx18 *cx)

video_input = cx->active_input;
cx->active_input++; /* Force update of input */
cx18_s_input(NULL, &fh, video_input);
cx18_do_s_input(cx, video_input);

/* Let the VIDIOC_S_STD ioctl do all the work, keeps the code
in one place. */
cx->std++; /* Force full standard initialization */
std = (cx->tuner_std == V4L2_STD_ALL) ? V4L2_STD_NTSC_M : cx->tuner_std;
cx18_s_std(NULL, &fh, std);
cx18_s_frequency(NULL, &fh, &vf);
cx18_do_s_std(cx, std);
cx18_do_s_frequency(cx, &vf);
return 0;
}

@@ -521,10 +521,8 @@ static int cx18_g_input(struct file *file, void *fh, unsigned int *i)
return 0;
}

int cx18_s_input(struct file *file, void *fh, unsigned int inp)
int cx18_do_s_input(struct cx18 *cx, unsigned int inp)
{
struct cx18_open_id *id = file2id(file);
struct cx18 *cx = id->cx;
v4l2_std_id std = V4L2_STD_ALL;
const struct cx18_card_video_input *card_input =
cx->card->video_inputs + inp;
@@ -558,6 +556,11 @@ int cx18_s_input(struct file *file, void *fh, unsigned int inp)
return 0;
}

static int cx18_s_input(struct file *file, void *fh, unsigned int inp)
{
return cx18_do_s_input(file2id(file)->cx, inp);
}

static int cx18_g_frequency(struct file *file, void *fh,
struct v4l2_frequency *vf)
{
@@ -570,11 +573,8 @@ static int cx18_g_frequency(struct file *file, void *fh,
return 0;
}

int cx18_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
int cx18_do_s_frequency(struct cx18 *cx, const struct v4l2_frequency *vf)
{
struct cx18_open_id *id = file2id(file);
struct cx18 *cx = id->cx;

if (vf->tuner != 0)
return -EINVAL;

@@ -585,6 +585,12 @@ int cx18_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *v
return 0;
}

static int cx18_s_frequency(struct file *file, void *fh,
const struct v4l2_frequency *vf)
{
return cx18_do_s_frequency(file2id(file)->cx, vf);
}

static int cx18_g_std(struct file *file, void *fh, v4l2_std_id *std)
{
struct cx18 *cx = file2id(file)->cx;
@@ -593,11 +599,8 @@ static int cx18_g_std(struct file *file, void *fh, v4l2_std_id *std)
return 0;
}

int cx18_s_std(struct file *file, void *fh, v4l2_std_id std)
int cx18_do_s_std(struct cx18 *cx, v4l2_std_id std)
{
struct cx18_open_id *id = file2id(file);
struct cx18 *cx = id->cx;

if ((std & V4L2_STD_ALL) == 0)
return -EINVAL;

@@ -642,6 +645,11 @@ int cx18_s_std(struct file *file, void *fh, v4l2_std_id std)
return 0;
}

static int cx18_s_std(struct file *file, void *fh, v4l2_std_id std)
{
return cx18_do_s_std(file2id(file)->cx, std);
}

static int cx18_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
{
struct cx18_open_id *id = file2id(file);

@@ -12,6 +12,8 @@ u16 cx18_service2vbi(int type);
void cx18_expand_service_set(struct v4l2_sliced_vbi_format *fmt, int is_pal);
u16 cx18_get_service_set(struct v4l2_sliced_vbi_format *fmt);
void cx18_set_funcs(struct video_device *vdev);
int cx18_s_std(struct file *file, void *fh, v4l2_std_id std);
int cx18_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf);
int cx18_s_input(struct file *file, void *fh, unsigned int inp);

struct cx18;
int cx18_do_s_std(struct cx18 *cx, v4l2_std_id std);
int cx18_do_s_frequency(struct cx18 *cx, const struct v4l2_frequency *vf);
int cx18_do_s_input(struct cx18 *cx, unsigned int inp);
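The cx18 changes above (and the ivtv changes below) split each setter into a do_-style core that takes the driver struct directly, so first-open initialization no longer has to fabricate a fake file handle just to call its own ioctl handlers. The shape of that refactor in a self-contained sketch (all example_ names are hypothetical):

#include <linux/errno.h>

struct example_dev {
	unsigned int active_input;
};

/* Per-open handle, as the ioctl path would carry. */
struct example_open_id {
	struct example_dev *dev;
};

/* Core helper: operates on device state alone, so init code can call
 * it without a per-open handle. */
static int example_do_s_input(struct example_dev *dev, unsigned int inp)
{
	if (inp > 7)	/* illustrative bounds check */
		return -EINVAL;
	dev->active_input = inp;
	return 0;
}

/* Thin ioctl-facing wrapper: unwrap the handle, then delegate. */
static int example_s_input(struct example_open_id *id, unsigned int inp)
{
	return example_do_s_input(id->dev, inp);
}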
@@ -1247,15 +1247,12 @@ err:

int ivtv_init_on_first_open(struct ivtv *itv)
{
struct v4l2_frequency vf;
/* Needed to call ioctls later */
struct ivtv_open_id fh;
struct ivtv_stream *s = &itv->streams[IVTV_ENC_STREAM_TYPE_MPG];
struct v4l2_frequency vf;
int fw_retry_count = 3;
int video_input;

fh.itv = itv;
fh.type = IVTV_ENC_STREAM_TYPE_MPG;

if (test_bit(IVTV_F_I_FAILED, &itv->i_flags))
return -ENXIO;

@@ -1297,13 +1294,13 @@ int ivtv_init_on_first_open(struct ivtv *itv)

video_input = itv->active_input;
itv->active_input++; /* Force update of input */
ivtv_s_input(NULL, &fh, video_input);
ivtv_do_s_input(itv, video_input);

/* Let the VIDIOC_S_STD ioctl do all the work, keeps the code
in one place. */
itv->std++; /* Force full standard initialization */
itv->std_out = itv->std;
ivtv_s_frequency(NULL, &fh, &vf);
ivtv_do_s_frequency(s, &vf);

if (itv->card->v4l2_capabilities & V4L2_CAP_VIDEO_OUTPUT) {
/* Turn on the TV-out: ivtv_init_mpeg_decoder() initializes
Some files were not shown because too many files have changed in this diff.