Merge 6.18-rc3 into tty-next

We need the tty/serial fixes in here as well to build on top of.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Greg Kroah-Hartman committed 2025-10-27 08:17:40 +01:00
258 changed files with 2584 additions and 1553 deletions


@@ -27,6 +27,7 @@ Alan Cox <alan@lxorguk.ukuu.org.uk>
Alan Cox <root@hraefn.swansea.linux.org.uk>
Aleksandar Markovic <aleksandar.markovic@mips.com> <aleksandar.markovic@imgtec.com>
Aleksey Gorelov <aleksey_gorelov@phoenix.com>
Alex Williamson <alex@shazbot.org> <alex.williamson@redhat.com>
Alexander Lobakin <alobakin@pm.me> <alobakin@dlink.ru>
Alexander Lobakin <alobakin@pm.me> <alobakin@marvell.com>
Alexander Lobakin <alobakin@pm.me> <bloodyreaper@yandex.ru>


@@ -142,7 +142,9 @@ allOf:
required:
- orientation-switch
then:
$ref: /schemas/usb/usb-switch.yaml#
allOf:
- $ref: /schemas/usb/usb-switch.yaml#
- $ref: /schemas/usb/usb-switch-ports.yaml#
unevaluatedProperties: false


@@ -24,6 +24,10 @@ properties:
- enum:
- qcom,qcs8300-qmp-ufs-phy
- const: qcom,sa8775p-qmp-ufs-phy
- items:
- enum:
- qcom,kaanapali-qmp-ufs-phy
- const: qcom,sm8750-qmp-ufs-phy
- enum:
- qcom,msm8996-qmp-ufs-phy
- qcom,msm8998-qmp-ufs-phy


@@ -125,7 +125,9 @@ allOf:
contains:
const: google,gs101-usb31drd-phy
then:
$ref: /schemas/usb/usb-switch.yaml#
allOf:
- $ref: /schemas/usb/usb-switch.yaml#
- $ref: /schemas/usb/usb-switch-ports.yaml#
properties:
clocks:


@@ -197,6 +197,7 @@ allOf:
- renesas,rcar-gen2-scif
- renesas,rcar-gen3-scif
- renesas,rcar-gen4-scif
- renesas,rcar-gen5-scif
then:
properties:
interrupts:


@@ -14,9 +14,14 @@ allOf:
properties:
compatible:
enum:
- cdns,spi-r1p6
- xlnx,zynq-spi-r1p6
oneOf:
- enum:
- xlnx,zynq-spi-r1p6
- items:
- enum:
- xlnx,zynqmp-spi-r1p6
- xlnx,versal-net-spi-r1p6
- const: cdns,spi-r1p6
reg:
maxItems: 1


@@ -34,6 +34,7 @@ properties:
- rockchip,rk3328-spi
- rockchip,rk3368-spi
- rockchip,rk3399-spi
- rockchip,rk3506-spi
- rockchip,rk3528-spi
- rockchip,rk3562-spi
- rockchip,rk3568-spi


@@ -15,6 +15,7 @@ select:
compatible:
contains:
enum:
- qcom,kaanapali-ufshc
- qcom,sm8650-ufshc
- qcom,sm8750-ufshc
required:
@@ -24,6 +25,7 @@ properties:
compatible:
items:
- enum:
- qcom,kaanapali-ufshc
- qcom,sm8650-ufshc
- qcom,sm8750-ufshc
- const: qcom,ufshc


@@ -76,6 +76,7 @@ required:
allOf:
- $ref: usb-switch.yaml#
- $ref: usb-switch-ports.yaml#
additionalProperties: false


@@ -89,13 +89,21 @@ required:
- reg
- "#address-cells"
- "#size-cells"
- dma-ranges
- ranges
- clocks
- clock-names
- interrupts
- power-domains
allOf:
- if:
properties:
compatible:
const: fsl,imx8mp-dwc3
then:
required:
- dma-ranges
additionalProperties: false
examples:


@@ -52,6 +52,7 @@ required:
allOf:
- $ref: usb-switch.yaml#
- $ref: usb-switch-ports.yaml#
- if:
required:
- mode-switch


@@ -46,6 +46,7 @@ required:
allOf:
- $ref: usb-switch.yaml#
- $ref: usb-switch-ports.yaml#
additionalProperties: false


@@ -91,6 +91,7 @@ required:
allOf:
- $ref: usb-switch.yaml#
- $ref: usb-switch-ports.yaml#
additionalProperties: false


@@ -81,6 +81,7 @@ required:
allOf:
- $ref: usb-switch.yaml#
- $ref: usb-switch-ports.yaml#
additionalProperties: false


@@ -68,6 +68,7 @@ properties:
- qcom,sm8550-dwc3
- qcom,sm8650-dwc3
- qcom,x1e80100-dwc3
- qcom,x1e80100-dwc3-mp
- const: qcom,snps-dwc3
reg:
@@ -460,8 +461,10 @@ allOf:
then:
properties:
interrupts:
minItems: 4
maxItems: 5
interrupt-names:
minItems: 4
items:
- const: dwc_usb3
- const: pwr_event


@@ -60,6 +60,7 @@ required:
allOf:
- $ref: usb-switch.yaml#
- $ref: usb-switch-ports.yaml#
additionalProperties: false


@@ -11,6 +11,7 @@ maintainers:
allOf:
- $ref: usb-switch.yaml#
- $ref: usb-switch-ports.yaml#
properties:
compatible:


@@ -0,0 +1,68 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/usb/usb-switch-ports.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: USB Orientation and Mode Switches Ports Graph Properties
maintainers:
- Greg Kroah-Hartman <gregkh@linuxfoundation.org>
description:
Ports Graph properties for devices handling USB mode and orientation switching.
properties:
port:
$ref: /schemas/graph.yaml#/$defs/port-base
description:
A port node to link the device to a TypeC controller for the purpose of
handling altmode muxing and orientation switching.
properties:
endpoint:
$ref: /schemas/graph.yaml#/$defs/endpoint-base
unevaluatedProperties: false
properties:
data-lanes:
$ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 1
maxItems: 8
uniqueItems: true
items:
maximum: 8
ports:
$ref: /schemas/graph.yaml#/properties/ports
properties:
port@0:
$ref: /schemas/graph.yaml#/properties/port
description:
Super Speed (SS) Output endpoint to the Type-C connector
port@1:
$ref: /schemas/graph.yaml#/$defs/port-base
description:
Super Speed (SS) Input endpoint from the Super-Speed PHY
unevaluatedProperties: false
properties:
endpoint:
$ref: /schemas/graph.yaml#/$defs/endpoint-base
unevaluatedProperties: false
properties:
data-lanes:
$ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 1
maxItems: 8
uniqueItems: true
items:
maximum: 8
oneOf:
- required:
- port
- required:
- ports
additionalProperties: true


@@ -25,56 +25,4 @@ properties:
description: Possible handler of SuperSpeed signals retiming
type: boolean
port:
$ref: /schemas/graph.yaml#/$defs/port-base
description:
A port node to link the device to a TypeC controller for the purpose of
handling altmode muxing and orientation switching.
properties:
endpoint:
$ref: /schemas/graph.yaml#/$defs/endpoint-base
unevaluatedProperties: false
properties:
data-lanes:
$ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 1
maxItems: 8
uniqueItems: true
items:
maximum: 8
ports:
$ref: /schemas/graph.yaml#/properties/ports
properties:
port@0:
$ref: /schemas/graph.yaml#/properties/port
description:
Super Speed (SS) Output endpoint to the Type-C connector
port@1:
$ref: /schemas/graph.yaml#/$defs/port-base
description:
Super Speed (SS) Input endpoint from the Super-Speed PHY
unevaluatedProperties: false
properties:
endpoint:
$ref: /schemas/graph.yaml#/$defs/endpoint-base
unevaluatedProperties: false
properties:
data-lanes:
$ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 1
maxItems: 8
uniqueItems: true
items:
maximum: 8
oneOf:
- required:
- port
- required:
- ports
additionalProperties: true


@@ -11,6 +11,7 @@ found on https://linux-ax25.in-berlin.de.
There is a mailing list for discussing Linux amateur radio matters
called linux-hams@vger.kernel.org. To subscribe to it, send a message to
majordomo@vger.kernel.org with the words "subscribe linux-hams" in the body
of the message, the subject field is ignored. You don't need to be
subscribed to post but of course that means you might miss an answer.
linux-hams+subscribe@vger.kernel.org or use the web interface at
https://vger.kernel.org. The subject and body of the message are
ignored. You don't need to be subscribed to post but of course that
means you might miss an answer.


@@ -137,16 +137,20 @@ d. Checksum offload header v5
Checksum offload header fields are in big endian format.
Packet format::
Bit 0 - 6 7 8-15 16-31
Function Header Type Next Header Checksum Valid Reserved
Header Type is to indicate the type of header, this usually is set to CHECKSUM
Header types
= ==========================================
= ===============
0 Reserved
1 Reserved
2 checksum header
= ===============
Checksum Valid is to indicate whether the header checksum is valid. Value of 1
implies that checksum is calculated on this packet and is valid, value of 0
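For illustration only, the header layout described in the table above can be pictured as a single 32-bit big-endian word. The sketch below assumes byte-granular fields exactly as listed in the table; the struct and field names are hypothetical and are not the rmnet driver's own definitions.

#include <linux/types.h>

/*
 * Hypothetical sketch of the v5 checksum offload header described above
 * (fields documented in big-endian order). Not the driver's real struct.
 */
struct map_v5_csum_header_example {
	u8	header_type_next;	/* bits 0-6: header type (CHECKSUM), bit 7: next header */
	u8	csum_valid;		/* bits 8-15: 1 = checksum computed and valid, 0 = not valid */
	__be16	reserved;		/* bits 16-31: reserved */
} __packed;

/* Per the text above: a value of 1 means the checksum was calculated and is valid. */
static inline bool map_v5_csum_is_valid(const struct map_v5_csum_header_example *h)
{
	return h->csum_valid != 0;
}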
@@ -183,9 +187,11 @@ rmnet in a single linear skb. rmnet will process the individual
packets and either ACK the MAP command or deliver the IP packet to the
network stack as needed
MAP header|IP Packet|Optional padding|MAP header|IP Packet|Optional padding....
Packet format::
MAP header|IP Packet|Optional padding|MAP header|Command Packet|Optional pad...
MAP header|IP Packet|Optional padding|MAP header|IP Packet|Optional padding....
MAP header|IP Packet|Optional padding|MAP header|Command Packet|Optional pad...
3. Userspace configuration
==========================


@@ -96,9 +96,8 @@ needed to these network configuration daemons to make sure that an IP is
received only on the 'failover' device.
Below is the patch snippet used with 'cloud-ifupdown-helper' script found on
Debian cloud images:
Debian cloud images::
::
@@ -27,6 +27,8 @@ do_setup() {
local working="$cfgdir/.$INTERFACE"
local final="$cfgdir/$INTERFACE"
@@ -172,9 +171,8 @@ appropriate FDB entry is added.
The following script is executed on the destination hypervisor once migration
completes, and it reattaches the VF to the VM and brings down the virtio-net
interface.
interface::
::
# reattach-vf.sh
#!/bin/bash


@@ -1997,6 +1997,10 @@ F: include/uapi/linux/if_arcnet.h
ARM AND ARM64 SoC SUB-ARCHITECTURES (COMMON PARTS)
M: Arnd Bergmann <arnd@arndb.de>
M: Krzysztof Kozlowski <krzk@kernel.org>
M: Alexandre Belloni <alexandre.belloni@bootlin.com>
M: Linus Walleij <linus.walleij@linaro.org>
R: Drew Fustini <fustini@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: soc@lists.linux.dev
S: Maintained
@@ -3841,6 +3845,7 @@ F: drivers/hwmon/asus-ec-sensors.c
ASUS NOTEBOOKS AND EEEPC ACPI/WMI EXTRAS DRIVERS
M: Corentin Chary <corentin.chary@gmail.com>
M: Luke D. Jones <luke@ljones.dev>
M: Denis Benato <benato.denis96@gmail.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
W: https://asus-linux.org/
@@ -13111,6 +13116,15 @@ F: include/uapi/linux/io_uring.h
F: include/uapi/linux/io_uring/
F: io_uring/
IO_URING ZCRX
M: Pavel Begunkov <asml.silence@gmail.com>
L: io-uring@vger.kernel.org
L: netdev@vger.kernel.org
T: git https://github.com/isilence/linux.git zcrx/for-next
T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux.git
S: Maintained
F: io_uring/zcrx.*
IPMI SUBSYSTEM
M: Corey Minyard <corey@minyard.net>
L: openipmi-developer@lists.sourceforge.net (moderated for non-subscribers)
@@ -14394,6 +14408,7 @@ F: tools/memory-model/
LINUX-NEXT TREE
M: Stephen Rothwell <sfr@canb.auug.org.au>
M: Mark Brown <broonie@kernel.org>
L: linux-next@vger.kernel.org
S: Supported
B: mailto:linux-next@vger.kernel.org and the appropriate development tree
@@ -26885,7 +26900,7 @@ S: Maintained
F: drivers/vfio/cdx/*
VFIO DRIVER
M: Alex Williamson <alex.williamson@redhat.com>
M: Alex Williamson <alex@shazbot.org>
L: kvm@vger.kernel.org
S: Maintained
T: git https://github.com/awilliam/linux-vfio.git
@@ -27048,7 +27063,7 @@ T: git git://linuxtv.org/media.git
F: drivers/media/test-drivers/vimc/*
VIRT LIB
M: Alex Williamson <alex.williamson@redhat.com>
M: Alex Williamson <alex@shazbot.org>
M: Paolo Bonzini <pbonzini@redhat.com>
L: kvm@vger.kernel.org
S: Supported


@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 18
SUBLEVEL = 0
EXTRAVERSION = -rc2
EXTRAVERSION = -rc3
NAME = Baby Opossum Posse
# *DOCUMENTATION*


@@ -77,6 +77,14 @@
/delete-property/ pinctrl-0;
};
&pm {
clocks = <&firmware_clocks 5>,
<&clocks BCM2835_CLOCK_PERI_IMAGE>,
<&clocks BCM2835_CLOCK_H264>,
<&clocks BCM2835_CLOCK_ISP>;
clock-names = "v3d", "peri_image", "h264", "isp";
};
&rmem {
/*
* RPi4's co-processor will copy the board's bootloader configuration


@@ -13,7 +13,16 @@
clock-names = "pixel", "hdmi";
};
&pm {
clocks = <&firmware_clocks 5>,
<&clocks BCM2835_CLOCK_PERI_IMAGE>,
<&clocks BCM2835_CLOCK_H264>,
<&clocks BCM2835_CLOCK_ISP>;
clock-names = "v3d", "peri_image", "h264", "isp";
};
&v3d {
clocks = <&firmware_clocks 5>;
power-domains = <&power RPI_POWER_DOMAIN_V3D>;
};


@@ -326,6 +326,8 @@
<0x7fffe000 0x2000>;
interrupt-controller;
#address-cells = <0>;
interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) |
IRQ_TYPE_LEVEL_HIGH)>;
#interrupt-cells = <3>;
};


@@ -293,7 +293,8 @@ static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
static inline pte_t pte_mkwrite_novma(pte_t pte)
{
pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
if (pte_sw_dirty(pte))
pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
return pte;
}


@@ -35,7 +35,7 @@ void copy_highpage(struct page *to, struct page *from)
from != folio_page(src, 0))
return;
WARN_ON_ONCE(!folio_try_hugetlb_mte_tagging(dst));
folio_try_hugetlb_mte_tagging(dst);
/*
* Populate tags for all subpages.
@@ -51,8 +51,13 @@ void copy_highpage(struct page *to, struct page *from)
}
folio_set_hugetlb_mte_tagged(dst);
} else if (page_mte_tagged(from)) {
/* It's a new page, shouldn't have been tagged yet */
WARN_ON_ONCE(!try_page_mte_tagging(to));
/*
* Most of the time it's a new page that shouldn't have been
* tagged yet. However, folio migration can end up reusing the
* same page without untagging it. Ignore the warning if the
* page is already tagged.
*/
try_page_mte_tagging(to);
mte_copy_page_tags(kto, kfrom);
set_page_mte_tagged(to);


@@ -21,7 +21,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
folio = page_folio(pfn_to_page(pfn));
if (test_and_set_bit(PG_dcache_clean, &folio->flags))
if (test_and_set_bit(PG_dcache_clean, &folio->flags.f))
return;
icache_inv_range(address, address + nr*PAGE_SIZE);


@@ -20,8 +20,8 @@
static inline void flush_dcache_folio(struct folio *folio)
{
if (test_bit(PG_dcache_clean, &folio->flags))
clear_bit(PG_dcache_clean, &folio->flags);
if (test_bit(PG_dcache_clean, &folio->flags.f))
clear_bit(PG_dcache_clean, &folio->flags.f);
}
#define flush_dcache_folio flush_dcache_folio


@@ -47,7 +47,7 @@ static struct resource standard_io_resources[] = {
.name = "keyboard",
.start = 0x60,
.end = 0x6f,
.flags = IORESOURCE_IO | IORESOURCE_BUSY
.flags = IORESOURCE_IO
},
{
.name = "dma page reg",
@@ -213,7 +213,7 @@ void __init plat_mem_setup(void)
/* Request I/O space for devices used on the Malta board. */
for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
request_resource(&ioport_resource, standard_io_resources+i);
insert_resource(&ioport_resource, standard_io_resources + i);
/*
* Enable DMA channel 4 (cascade channel) in the PIIX4 south bridge.


@@ -230,8 +230,7 @@ void __init mips_pcibios_init(void)
}
/* PIIX4 ACPI starts at 0x1000 */
if (controller->io_resource->start < 0x00001000UL)
controller->io_resource->start = 0x00001000UL;
PCIBIOS_MIN_IO = 0x1000;
iomem_resource.end &= 0xfffffffffULL; /* 64 GB */
ioport_resource.end = controller->io_resource->end;


@@ -84,15 +84,9 @@
.endm
#ifdef CONFIG_SMP
#ifdef CONFIG_32BIT
#define PER_CPU_OFFSET_SHIFT 2
#else
#define PER_CPU_OFFSET_SHIFT 3
#endif
.macro asm_per_cpu dst sym tmp
lw \tmp, TASK_TI_CPU_NUM(tp)
slli \tmp, \tmp, PER_CPU_OFFSET_SHIFT
slli \tmp, \tmp, RISCV_LGPTR
la \dst, __per_cpu_offset
add \dst, \dst, \tmp
REG_L \tmp, 0(\dst)


@@ -31,6 +31,8 @@ struct riscv_isainfo {
DECLARE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);
extern const struct seq_operations cpuinfo_op;
/* Per-cpu ISA extensions. */
extern struct riscv_isainfo hart_isa[NR_CPUS];


@@ -42,4 +42,11 @@ static inline bool riscv_hwprobe_pair_cmp(struct riscv_hwprobe *pair,
return pair->value == other_pair->value;
}
#ifdef CONFIG_MMU
void riscv_hwprobe_register_async_probe(void);
void riscv_hwprobe_complete_async_probe(void);
#else
static inline void riscv_hwprobe_register_async_probe(void) {}
static inline void riscv_hwprobe_complete_async_probe(void) {}
#endif
#endif


@@ -69,6 +69,8 @@ typedef struct {
#define PTRS_PER_PMD (PAGE_SIZE / sizeof(pmd_t))
#define MAX_POSSIBLE_PHYSMEM_BITS 56
/*
* rv64 PTE format:
* | 63 | 62 61 | 60 54 | 53 10 | 9 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0


@@ -654,6 +654,8 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
return __pgprot(prot);
}
#define pgprot_dmacoherent pgprot_writecombine
/*
* Both Svade and Svadu control the hardware behavior when the PTE A/D bits need to be set. By
* default the M-mode firmware enables the hardware updating scheme when only Svadu is present in


@@ -12,6 +12,12 @@ struct vdso_arch_data {
/* Boolean indicating all CPUs have the same static hwprobe values. */
__u8 homogeneous_cpus;
/*
* A gate to check and see if the hwprobe data is actually ready, as
* probing is deferred to avoid boot slowdowns.
*/
__u8 ready;
};
#endif /* __RISCV_ASM_VDSO_ARCH_DATA_H */


@@ -62,10 +62,8 @@ int __init riscv_early_of_processor_hartid(struct device_node *node, unsigned lo
return -ENODEV;
}
if (!of_device_is_available(node)) {
pr_info("CPU with hartid=%lu is not available\n", *hart);
if (!of_device_is_available(node))
return -ENODEV;
}
if (of_property_read_string(node, "riscv,isa-base", &isa))
goto old_interface;


@@ -932,9 +932,9 @@ static int has_thead_homogeneous_vlenb(void)
{
int cpu;
u32 prev_vlenb = 0;
u32 vlenb;
u32 vlenb = 0;
/* Ignore thead,vlenb property if xtheavector is not enabled in the kernel */
/* Ignore thead,vlenb property if xtheadvector is not enabled in the kernel */
if (!IS_ENABLED(CONFIG_RISCV_ISA_XTHEADVECTOR))
return 0;


@@ -40,6 +40,17 @@ enum ipi_message_type {
IPI_MAX
};
static const char * const ipi_names[] = {
[IPI_RESCHEDULE] = "Rescheduling interrupts",
[IPI_CALL_FUNC] = "Function call interrupts",
[IPI_CPU_STOP] = "CPU stop interrupts",
[IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts",
[IPI_IRQ_WORK] = "IRQ work interrupts",
[IPI_TIMER] = "Timer broadcast interrupts",
[IPI_CPU_BACKTRACE] = "CPU backtrace interrupts",
[IPI_KGDB_ROUNDUP] = "KGDB roundup interrupts",
};
unsigned long __cpuid_to_hartid_map[NR_CPUS] __ro_after_init = {
[0 ... NR_CPUS-1] = INVALID_HARTID
};
@@ -199,7 +210,7 @@ void riscv_ipi_set_virq_range(int virq, int nr)
/* Request IPIs */
for (i = 0; i < nr_ipi; i++) {
err = request_percpu_irq(ipi_virq_base + i, handle_IPI,
"IPI", &ipi_dummy_dev);
ipi_names[i], &ipi_dummy_dev);
WARN_ON(err);
ipi_desc[i] = irq_to_desc(ipi_virq_base + i);
@@ -210,17 +221,6 @@ void riscv_ipi_set_virq_range(int virq, int nr)
riscv_ipi_enable();
}
static const char * const ipi_names[] = {
[IPI_RESCHEDULE] = "Rescheduling interrupts",
[IPI_CALL_FUNC] = "Function call interrupts",
[IPI_CPU_STOP] = "CPU stop interrupts",
[IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts",
[IPI_IRQ_WORK] = "IRQ work interrupts",
[IPI_TIMER] = "Timer broadcast interrupts",
[IPI_CPU_BACKTRACE] = "CPU backtrace interrupts",
[IPI_KGDB_ROUNDUP] = "KGDB roundup interrupts",
};
void show_ipi_stats(struct seq_file *p, int prec)
{
unsigned int cpu, i;


@@ -5,6 +5,9 @@
* more details.
*/
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/atomic.h>
#include <linux/once.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwprobe.h>
@@ -28,6 +31,11 @@ static void hwprobe_arch_id(struct riscv_hwprobe *pair,
bool first = true;
int cpu;
if (pair->key != RISCV_HWPROBE_KEY_MVENDORID &&
pair->key != RISCV_HWPROBE_KEY_MIMPID &&
pair->key != RISCV_HWPROBE_KEY_MARCHID)
goto out;
for_each_cpu(cpu, cpus) {
u64 cpu_id;
@@ -58,6 +66,7 @@ static void hwprobe_arch_id(struct riscv_hwprobe *pair,
}
}
out:
pair->value = id;
}
@@ -454,28 +463,32 @@ static int hwprobe_get_cpus(struct riscv_hwprobe __user *pairs,
return 0;
}
static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
size_t pair_count, size_t cpusetsize,
unsigned long __user *cpus_user,
unsigned int flags)
{
if (flags & RISCV_HWPROBE_WHICH_CPUS)
return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
cpus_user, flags);
return hwprobe_get_values(pairs, pair_count, cpusetsize,
cpus_user, flags);
}
#ifdef CONFIG_MMU
static int __init init_hwprobe_vdso_data(void)
static DECLARE_COMPLETION(boot_probes_done);
static atomic_t pending_boot_probes = ATOMIC_INIT(1);
void riscv_hwprobe_register_async_probe(void)
{
atomic_inc(&pending_boot_probes);
}
void riscv_hwprobe_complete_async_probe(void)
{
if (atomic_dec_and_test(&pending_boot_probes))
complete(&boot_probes_done);
}
static int complete_hwprobe_vdso_data(void)
{
struct vdso_arch_data *avd = vdso_k_arch_data;
u64 id_bitsmash = 0;
struct riscv_hwprobe pair;
int key;
if (unlikely(!atomic_dec_and_test(&pending_boot_probes)))
wait_for_completion(&boot_probes_done);
/*
* Initialize vDSO data with the answers for the "all CPUs" case, to
* save a syscall in the common case.
@@ -503,13 +516,52 @@ static int __init init_hwprobe_vdso_data(void)
* vDSO should defer to the kernel for exotic cpu masks.
*/
avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
/*
* Make sure all the VDSO values are visible before we look at them.
* This pairs with the implicit "no speculativly visible accesses"
* barrier in the VDSO hwprobe code.
*/
smp_wmb();
avd->ready = true;
return 0;
}
static int __init init_hwprobe_vdso_data(void)
{
struct vdso_arch_data *avd = vdso_k_arch_data;
/*
* Prevent the vDSO cached values from being used, as they're not ready
* yet.
*/
avd->ready = false;
return 0;
}
arch_initcall_sync(init_hwprobe_vdso_data);
#else
static int complete_hwprobe_vdso_data(void) { return 0; }
#endif /* CONFIG_MMU */
static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
size_t pair_count, size_t cpusetsize,
unsigned long __user *cpus_user,
unsigned int flags)
{
DO_ONCE_SLEEPABLE(complete_hwprobe_vdso_data);
if (flags & RISCV_HWPROBE_WHICH_CPUS)
return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
cpus_user, flags);
return hwprobe_get_values(pairs, pair_count, cpusetsize,
cpus_user, flags);
}
SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
size_t, pair_count, size_t, cpusetsize, unsigned long __user *,
cpus, unsigned int, flags)


@@ -379,6 +379,7 @@ free:
static int __init vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused)
{
schedule_on_each_cpu(check_vector_unaligned_access);
riscv_hwprobe_complete_async_probe();
return 0;
}
@@ -473,8 +474,12 @@ static int __init check_unaligned_access_all_cpus(void)
per_cpu(vector_misaligned_access, cpu) = unaligned_vector_speed_param;
} else if (!check_vector_unaligned_access_emulated_all_cpus() &&
IS_ENABLED(CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS)) {
kthread_run(vec_check_unaligned_access_speed_all_cpus,
NULL, "vec_check_unaligned_access_speed_all_cpus");
riscv_hwprobe_register_async_probe();
if (IS_ERR(kthread_run(vec_check_unaligned_access_speed_all_cpus,
NULL, "vec_check_unaligned_access_speed_all_cpus"))) {
pr_warn("Failed to create vec_unalign_check kthread\n");
riscv_hwprobe_complete_async_probe();
}
}
/*


@@ -27,7 +27,7 @@ static int riscv_vdso_get_values(struct riscv_hwprobe *pairs, size_t pair_count,
* homogeneous, then this function can handle requests for arbitrary
* masks.
*/
if ((flags != 0) || (!all_cpus && !avd->homogeneous_cpus))
if (flags != 0 || (!all_cpus && !avd->homogeneous_cpus) || unlikely(!avd->ready))
return riscv_hwprobe(pairs, pair_count, cpusetsize, cpus, flags);
/* This is something we can handle, fill out the pairs. */


@@ -1463,7 +1463,9 @@ static void __init retbleed_update_mitigation(void)
break;
default:
if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF) {
pr_err(RETBLEED_INTEL_MSG);
if (retbleed_mitigation != RETBLEED_MITIGATION_NONE)
pr_err(RETBLEED_INTEL_MSG);
retbleed_mitigation = RETBLEED_MITIGATION_NONE;
}
}
@@ -1825,13 +1827,6 @@ void unpriv_ebpf_notify(int new_state)
}
#endif
static inline bool match_option(const char *arg, int arglen, const char *opt)
{
int len = strlen(opt);
return len == arglen && !strncmp(arg, opt, len);
}
/* The kernel command line selection for spectre v2 */
enum spectre_v2_mitigation_cmd {
SPECTRE_V2_CMD_NONE,


@@ -194,7 +194,7 @@ static bool need_sha_check(u32 cur_rev)
}
switch (cur_rev >> 8) {
case 0x80012: return cur_rev <= 0x800126f; break;
case 0x80012: return cur_rev <= 0x8001277; break;
case 0x80082: return cur_rev <= 0x800820f; break;
case 0x83010: return cur_rev <= 0x830107c; break;
case 0x86001: return cur_rev <= 0x860010e; break;


@@ -458,7 +458,16 @@ int __init rdt_get_mon_l3_config(struct rdt_resource *r)
r->mon.mbm_cfg_mask = ecx & MAX_EVT_CONFIG_BITS;
}
if (rdt_cpu_has(X86_FEATURE_ABMC)) {
/*
* resctrl assumes a system that supports assignable counters can
* switch to "default" mode. Ensure that there is a "default" mode
* to switch to. This enforces a dependency between the independent
* X86_FEATURE_ABMC and X86_FEATURE_CQM_MBM_TOTAL/X86_FEATURE_CQM_MBM_LOCAL
* hardware features.
*/
if (rdt_cpu_has(X86_FEATURE_ABMC) &&
(rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL) ||
rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL))) {
r->mon.mbm_cntr_assignable = true;
cpuid_count(0x80000020, 5, &eax, &ebx, &ecx, &edx);
r->mon.num_mbm_cntrs = (ebx & GENMASK(15, 0)) + 1;


@@ -184,6 +184,16 @@ static int blk_validate_integrity_limits(struct queue_limits *lim)
if (!bi->interval_exp)
bi->interval_exp = ilog2(lim->logical_block_size);
/*
* The PI generation / validation helpers do not expect intervals to
* straddle multiple bio_vecs. Enforce alignment so that those are
* never generated, and that each buffer is aligned as expected.
*/
if (bi->csum_type) {
lim->dma_alignment = max(lim->dma_alignment,
(1U << bi->interval_exp) - 1);
}
return 0;
}


@@ -95,6 +95,11 @@ acpi_tb_print_table_header(acpi_physical_address address,
{
struct acpi_table_header local_header;
#pragma GCC diagnostic push
#if defined(__GNUC__) && __GNUC__ >= 11
#pragma GCC diagnostic ignored "-Wstringop-overread"
#endif
if (ACPI_COMPARE_NAMESEG(header->signature, ACPI_SIG_FACS)) {
/* FACS only has signature and length fields */
@@ -143,4 +148,5 @@ acpi_tb_print_table_header(acpi_physical_address address,
local_header.asl_compiler_id,
local_header.asl_compiler_revision));
}
#pragma GCC diagnostic pop
}


@@ -1107,7 +1107,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
size_t num_args,
struct fwnode_reference_args *args)
{
return acpi_fwnode_get_reference_args(fwnode, propname, NULL, index, num_args, args);
return acpi_fwnode_get_reference_args(fwnode, propname, NULL, num_args, index, args);
}
EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference);


@@ -61,30 +61,6 @@ static int rimt_set_fwnode(struct acpi_rimt_node *rimt_node,
return 0;
}
/**
* rimt_get_fwnode() - Retrieve fwnode associated with an RIMT node
*
* @node: RIMT table node to be looked-up
*
* Returns: fwnode_handle pointer on success, NULL on failure
*/
static struct fwnode_handle *rimt_get_fwnode(struct acpi_rimt_node *node)
{
struct fwnode_handle *fwnode = NULL;
struct rimt_fwnode *curr;
spin_lock(&rimt_fwnode_lock);
list_for_each_entry(curr, &rimt_fwnode_list, list) {
if (curr->rimt_node == node) {
fwnode = curr->fwnode;
break;
}
}
spin_unlock(&rimt_fwnode_lock);
return fwnode;
}
static acpi_status rimt_match_node_callback(struct acpi_rimt_node *node,
void *context)
{
@@ -202,6 +178,67 @@ static struct acpi_rimt_node *rimt_scan_node(enum acpi_rimt_node_type type,
return NULL;
}
/*
* RISC-V supports IOMMU as a PCI device or a platform device.
* When it is a platform device, there should be a namespace device as
* well along with RIMT. To create the link between RIMT information and
* the platform device, the IOMMU driver should register itself with the
* RIMT module. This is true for PCI based IOMMU as well.
*/
int rimt_iommu_register(struct device *dev)
{
struct fwnode_handle *rimt_fwnode;
struct acpi_rimt_node *node;
node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_IOMMU, dev);
if (!node) {
pr_err("Could not find IOMMU node in RIMT\n");
return -ENODEV;
}
if (dev_is_pci(dev)) {
rimt_fwnode = acpi_alloc_fwnode_static();
if (!rimt_fwnode)
return -ENOMEM;
rimt_fwnode->dev = dev;
if (!dev->fwnode)
dev->fwnode = rimt_fwnode;
rimt_set_fwnode(node, rimt_fwnode);
} else {
rimt_set_fwnode(node, dev->fwnode);
}
return 0;
}
#ifdef CONFIG_IOMMU_API
/**
* rimt_get_fwnode() - Retrieve fwnode associated with an RIMT node
*
* @node: RIMT table node to be looked-up
*
* Returns: fwnode_handle pointer on success, NULL on failure
*/
static struct fwnode_handle *rimt_get_fwnode(struct acpi_rimt_node *node)
{
struct fwnode_handle *fwnode = NULL;
struct rimt_fwnode *curr;
spin_lock(&rimt_fwnode_lock);
list_for_each_entry(curr, &rimt_fwnode_list, list) {
if (curr->rimt_node == node) {
fwnode = curr->fwnode;
break;
}
}
spin_unlock(&rimt_fwnode_lock);
return fwnode;
}
static bool rimt_pcie_rc_supports_ats(struct acpi_rimt_node *node)
{
struct acpi_rimt_pcie_rc *pci_rc;
@@ -290,43 +327,6 @@ static struct acpi_rimt_node *rimt_node_get_id(struct acpi_rimt_node *node,
return NULL;
}
/*
* RISC-V supports IOMMU as a PCI device or a platform device.
* When it is a platform device, there should be a namespace device as
* well along with RIMT. To create the link between RIMT information and
* the platform device, the IOMMU driver should register itself with the
* RIMT module. This is true for PCI based IOMMU as well.
*/
int rimt_iommu_register(struct device *dev)
{
struct fwnode_handle *rimt_fwnode;
struct acpi_rimt_node *node;
node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_IOMMU, dev);
if (!node) {
pr_err("Could not find IOMMU node in RIMT\n");
return -ENODEV;
}
if (dev_is_pci(dev)) {
rimt_fwnode = acpi_alloc_fwnode_static();
if (!rimt_fwnode)
return -ENOMEM;
rimt_fwnode->dev = dev;
if (!dev->fwnode)
dev->fwnode = rimt_fwnode;
rimt_set_fwnode(node, rimt_fwnode);
} else {
rimt_set_fwnode(node, dev->fwnode);
}
return 0;
}
#ifdef CONFIG_IOMMU_API
static struct acpi_rimt_node *rimt_node_map_id(struct acpi_rimt_node *node,
u32 id_in, u32 *id_out,
u8 type_mask)


@@ -851,17 +851,8 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
} else {
if (!internal)
node->local_weak_refs++;
if (!node->has_weak_ref && list_empty(&node->work.entry)) {
if (target_list == NULL) {
pr_err("invalid inc weak node for %d\n",
node->debug_id);
return -EINVAL;
}
/*
* See comment above
*/
if (!node->has_weak_ref && target_list && list_empty(&node->work.entry))
binder_enqueue_work_ilocked(&node->work, target_list);
}
}
return 0;
}
@@ -2418,10 +2409,10 @@ err_fd_not_accepted:
/**
* struct binder_ptr_fixup - data to be fixed-up in target buffer
* @offset offset in target buffer to fixup
* @skip_size bytes to skip in copy (fixup will be written later)
* @fixup_data data to write at fixup offset
* @node list node
* @offset: offset in target buffer to fixup
* @skip_size: bytes to skip in copy (fixup will be written later)
* @fixup_data: data to write at fixup offset
* @node: list node
*
* This is used for the pointer fixup list (pf) which is created and consumed
* during binder_transaction() and is only accessed locally. No
@@ -2438,10 +2429,10 @@ struct binder_ptr_fixup {
/**
* struct binder_sg_copy - scatter-gather data to be copied
* @offset offset in target buffer
* @sender_uaddr user address in source buffer
* @length bytes to copy
* @node list node
* @offset: offset in target buffer
* @sender_uaddr: user address in source buffer
* @length: bytes to copy
* @node: list node
*
* This is used for the sg copy list (sgc) which is created and consumed
* during binder_transaction() and is only accessed locally. No
@@ -4063,14 +4054,15 @@ binder_freeze_notification_done(struct binder_proc *proc,
/**
* binder_free_buf() - free the specified buffer
* @proc: binder proc that owns buffer
* @buffer: buffer to be freed
* @is_failure: failed to send transaction
* @proc: binder proc that owns buffer
* @thread: binder thread performing the buffer release
* @buffer: buffer to be freed
* @is_failure: failed to send transaction
*
* If buffer for an async transaction, enqueue the next async
* If the buffer is for an async transaction, enqueue the next async
* transaction from the node.
*
* Cleanup buffer and free it.
* Cleanup the buffer and free it.
*/
static void
binder_free_buf(struct binder_proc *proc,


@@ -106,13 +106,22 @@ impl DeliverToRead for FreezeMessage {
return Ok(true);
}
if freeze.is_clearing {
_removed_listener = freeze_entry.remove_node();
kernel::warn_on!(freeze.num_cleared_duplicates != 0);
if freeze.num_pending_duplicates > 0 {
// The primary freeze listener was deleted, so convert a pending duplicate back
// into the primary one.
freeze.num_pending_duplicates -= 1;
freeze.is_pending = true;
freeze.is_clearing = true;
} else {
_removed_listener = freeze_entry.remove_node();
}
drop(node_refs);
writer.write_code(BR_CLEAR_FREEZE_NOTIFICATION_DONE)?;
writer.write_payload(&self.cookie.0)?;
Ok(true)
} else {
let is_frozen = freeze.node.owner.inner.lock().is_frozen;
let is_frozen = freeze.node.owner.inner.lock().is_frozen.is_fully_frozen();
if freeze.last_is_frozen == Some(is_frozen) {
return Ok(true);
}
@@ -245,8 +254,9 @@ impl Process {
);
return Err(EINVAL);
}
if freeze.is_clearing {
// Immediately send another FreezeMessage for BR_CLEAR_FREEZE_NOTIFICATION_DONE.
let is_frozen = freeze.node.owner.inner.lock().is_frozen.is_fully_frozen();
if freeze.is_clearing || freeze.last_is_frozen != Some(is_frozen) {
// Immediately send another FreezeMessage.
clear_msg = Some(FreezeMessage::init(alloc, cookie));
}
freeze.is_pending = false;


@@ -687,7 +687,7 @@ impl Node {
);
}
if inner.freeze_list.is_empty() {
_unused_capacity = mem::replace(&mut inner.freeze_list, KVVec::new());
_unused_capacity = mem::take(&mut inner.freeze_list);
}
}


@@ -72,6 +72,33 @@ impl Mapping {
const PROC_DEFER_FLUSH: u8 = 1;
const PROC_DEFER_RELEASE: u8 = 2;
#[derive(Copy, Clone)]
pub(crate) enum IsFrozen {
Yes,
No,
InProgress,
}
impl IsFrozen {
/// Whether incoming transactions should be rejected due to freeze.
pub(crate) fn is_frozen(self) -> bool {
match self {
IsFrozen::Yes => true,
IsFrozen::No => false,
IsFrozen::InProgress => true,
}
}
/// Whether freeze notifications consider this process frozen.
pub(crate) fn is_fully_frozen(self) -> bool {
match self {
IsFrozen::Yes => true,
IsFrozen::No => false,
IsFrozen::InProgress => false,
}
}
}
/// The fields of `Process` protected by the spinlock.
pub(crate) struct ProcessInner {
is_manager: bool,
@@ -98,7 +125,7 @@ pub(crate) struct ProcessInner {
/// are woken up.
outstanding_txns: u32,
/// Process is frozen and unable to service binder transactions.
pub(crate) is_frozen: bool,
pub(crate) is_frozen: IsFrozen,
/// Process received sync transactions since last frozen.
pub(crate) sync_recv: bool,
/// Process received async transactions since last frozen.
@@ -124,7 +151,7 @@ impl ProcessInner {
started_thread_count: 0,
defer_work: 0,
outstanding_txns: 0,
is_frozen: false,
is_frozen: IsFrozen::No,
sync_recv: false,
async_recv: false,
binderfs_file: None,
@@ -1260,7 +1287,7 @@ impl Process {
let is_manager = {
let mut inner = self.inner.lock();
inner.is_dead = true;
inner.is_frozen = false;
inner.is_frozen = IsFrozen::No;
inner.sync_recv = false;
inner.async_recv = false;
inner.is_manager
@@ -1346,10 +1373,6 @@ impl Process {
.alloc
.take_for_each(|offset, size, debug_id, odata| {
let ptr = offset + address;
pr_warn!(
"{}: removing orphan mapping {offset}:{size}\n",
self.pid_in_current_ns()
);
let mut alloc =
Allocation::new(self.clone(), debug_id, offset, size, ptr, false);
if let Some(data) = odata {
@@ -1371,7 +1394,7 @@ impl Process {
return;
}
inner.outstanding_txns -= 1;
inner.is_frozen && inner.outstanding_txns == 0
inner.is_frozen.is_frozen() && inner.outstanding_txns == 0
};
if wake {
@@ -1385,7 +1408,7 @@ impl Process {
let mut inner = self.inner.lock();
inner.sync_recv = false;
inner.async_recv = false;
inner.is_frozen = false;
inner.is_frozen = IsFrozen::No;
drop(inner);
msgs.send_messages();
return Ok(());
@@ -1394,7 +1417,7 @@ impl Process {
let mut inner = self.inner.lock();
inner.sync_recv = false;
inner.async_recv = false;
inner.is_frozen = true;
inner.is_frozen = IsFrozen::InProgress;
if info.timeout_ms > 0 {
let mut jiffies = kernel::time::msecs_to_jiffies(info.timeout_ms);
@@ -1408,7 +1431,7 @@ impl Process {
.wait_interruptible_timeout(&mut inner, jiffies)
{
CondVarTimeoutResult::Signal { .. } => {
inner.is_frozen = false;
inner.is_frozen = IsFrozen::No;
return Err(ERESTARTSYS);
}
CondVarTimeoutResult::Woken { jiffies: remaining } => {
@@ -1422,17 +1445,18 @@ impl Process {
}
if inner.txns_pending_locked() {
inner.is_frozen = false;
inner.is_frozen = IsFrozen::No;
Err(EAGAIN)
} else {
drop(inner);
match self.prepare_freeze_messages() {
Ok(batch) => {
self.inner.lock().is_frozen = IsFrozen::Yes;
batch.send_messages();
Ok(())
}
Err(kernel::alloc::AllocError) => {
self.inner.lock().is_frozen = false;
self.inner.lock().is_frozen = IsFrozen::No;
Err(ENOMEM)
}
}


@@ -249,7 +249,7 @@ impl Transaction {
if oneway {
if let Some(target_node) = self.target_node.clone() {
if process_inner.is_frozen {
if process_inner.is_frozen.is_frozen() {
process_inner.async_recv = true;
if self.flags & TF_UPDATE_TXN != 0 {
if let Some(t_outdated) =
@@ -270,7 +270,7 @@ impl Transaction {
}
}
if process_inner.is_frozen {
if process_inner.is_frozen.is_frozen() {
return Err(BinderError::new_frozen_oneway());
} else {
return Ok(());
@@ -280,7 +280,7 @@ impl Transaction {
}
}
if process_inner.is_frozen {
if process_inner.is_frozen.is_frozen() {
process_inner.sync_recv = true;
return Err(BinderError::new_frozen());
}


@@ -292,7 +292,7 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
* frequency (by keeping the initial capacity_freq_ref value).
*/
cpu_clk = of_clk_get(cpu_node, 0);
if (!PTR_ERR_OR_ZERO(cpu_clk)) {
if (!IS_ERR_OR_NULL(cpu_clk)) {
per_cpu(capacity_freq_ref, cpu) =
clk_get_rate(cpu_clk) / HZ_PER_KHZ;
clk_put(cpu_clk);


@@ -1784,7 +1784,7 @@ static int fw_devlink_dev_sync_state(struct device *dev, void *data)
return 0;
if (fw_devlink_sync_state == FW_DEVLINK_SYNC_STATE_STRICT) {
dev_warn(sup, "sync_state() pending due to %s\n",
dev_info(sup, "sync_state() pending due to %s\n",
dev_name(link->consumer));
return 0;
}


@@ -23,50 +23,46 @@ struct devcd_entry {
void *data;
size_t datalen;
/*
* Here, mutex is required to serialize the calls to del_wk work between
* user/kernel space which happens when devcd is added with device_add()
* and that sends uevent to user space. User space reads the uevents,
* and calls to devcd_data_write() which try to modify the work which is
* not even initialized/queued from devcoredump.
* There are 2 races for which mutex is required.
*
* The first race is between device creation and userspace writing to
* schedule immediately destruction.
*
* This race is handled by arming the timer before device creation, but
* when device creation fails the timer still exists.
*
* cpu0(X) cpu1(Y)
* To solve this, hold the mutex during device_add(), and set
* init_completed on success before releasing the mutex.
*
* dev_coredump() uevent sent to user space
* device_add() ======================> user space process Y reads the
* uevents writes to devcd fd
* which results into writes to
* That way the timer will never fire until device_add() is called,
* it will do nothing if init_completed is not set. The timer is also
* cancelled in that case.
*
* devcd_data_write()
* mod_delayed_work()
* try_to_grab_pending()
* timer_delete()
* debug_assert_init()
* INIT_DELAYED_WORK()
* schedule_delayed_work()
*
*
* Also, mutex alone would not be enough to avoid scheduling of
* del_wk work after it get flush from a call to devcd_free()
* mentioned as below.
*
* disabled_store()
* devcd_free()
* mutex_lock() devcd_data_write()
* flush_delayed_work()
* mutex_unlock()
* mutex_lock()
* mod_delayed_work()
* mutex_unlock()
* So, delete_work flag is required.
* The second race involves multiple parallel invocations of devcd_free(),
* add a deleted flag so only 1 can call the destructor.
*/
struct mutex mutex;
bool delete_work;
bool init_completed, deleted;
struct module *owner;
ssize_t (*read)(char *buffer, loff_t offset, size_t count,
void *data, size_t datalen);
void (*free)(void *data);
/*
* If nothing interferes and device_add() was returns success,
* del_wk will destroy the device after the timer fires.
*
* Multiple userspace processes can interfere in the working of the timer:
* - Writing to the coredump will reschedule the timer to run immediately,
* if still armed.
*
* This is handled by using "if (cancel_delayed_work()) {
* schedule_delayed_work() }", to prevent re-arming after having
* been previously fired.
* - Writing to /sys/class/devcoredump/disabled will destroy the
* coredump synchronously.
* This is handled by using disable_delayed_work_sync(), and then
* checking if deleted flag is set with &devcd->mutex held.
*/
struct delayed_work del_wk;
struct device *failing_dev;
};
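For context, the "re-arm only if still armed" idiom that the rewritten comment above relies on can be sketched generically as follows; cancel_delayed_work() returns true only while the work is still pending and not yet running, so a work item that has already fired (or been disabled) is never re-armed. The helper name is made up for illustration and is not devcoredump code.

#include <linux/workqueue.h>

/*
 * Generic illustration (not devcoredump's code): make an armed delayed work
 * item run immediately, without ever re-arming one that has already fired
 * or been disabled. cancel_delayed_work() returns true only if the work
 * was still pending, which is what makes this safe.
 */
static void kick_if_still_armed(struct delayed_work *dwork)
{
	if (cancel_delayed_work(dwork))
		schedule_delayed_work(dwork, 0);
}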
@@ -95,14 +91,27 @@ static void devcd_dev_release(struct device *dev)
kfree(devcd);
}
static void __devcd_del(struct devcd_entry *devcd)
{
devcd->deleted = true;
device_del(&devcd->devcd_dev);
put_device(&devcd->devcd_dev);
}
static void devcd_del(struct work_struct *wk)
{
struct devcd_entry *devcd;
bool init_completed;
devcd = container_of(wk, struct devcd_entry, del_wk.work);
device_del(&devcd->devcd_dev);
put_device(&devcd->devcd_dev);
/* devcd->mutex serializes against dev_coredumpm_timeout */
mutex_lock(&devcd->mutex);
init_completed = devcd->init_completed;
mutex_unlock(&devcd->mutex);
if (init_completed)
__devcd_del(devcd);
}
static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
@@ -122,12 +131,12 @@ static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct devcd_entry *devcd = dev_to_devcd(dev);
mutex_lock(&devcd->mutex);
if (!devcd->delete_work) {
devcd->delete_work = true;
mod_delayed_work(system_wq, &devcd->del_wk, 0);
}
mutex_unlock(&devcd->mutex);
/*
* Although it's tempting to use mod_delayed work here,
* that will cause a reschedule if the timer already fired.
*/
if (cancel_delayed_work(&devcd->del_wk))
schedule_delayed_work(&devcd->del_wk, 0);
return count;
}
@@ -151,11 +160,21 @@ static int devcd_free(struct device *dev, void *data)
{
struct devcd_entry *devcd = dev_to_devcd(dev);
/*
* To prevent a race with devcd_data_write(), disable work and
* complete manually instead.
*
* We cannot rely on the return value of
* disable_delayed_work_sync() here, because it might be in the
* middle of a cancel_delayed_work + schedule_delayed_work pair.
*
* devcd->mutex here guards against multiple parallel invocations
* of devcd_free().
*/
disable_delayed_work_sync(&devcd->del_wk);
mutex_lock(&devcd->mutex);
if (!devcd->delete_work)
devcd->delete_work = true;
flush_delayed_work(&devcd->del_wk);
if (!devcd->deleted)
__devcd_del(devcd);
mutex_unlock(&devcd->mutex);
return 0;
}
@@ -179,12 +198,10 @@ static ssize_t disabled_show(const struct class *class, const struct class_attri
* put_device() <- last reference
* error = fn(dev, data) devcd_dev_release()
* devcd_free(dev, data) kfree(devcd)
* mutex_lock(&devcd->mutex);
*
*
* In the above diagram, it looks like disabled_store() would be racing with parallelly
* running devcd_del() and result in memory abort while acquiring devcd->mutex which
* is called after kfree of devcd memory after dropping its last reference with
* running devcd_del() and result in memory abort after dropping its last reference with
* put_device(). However, this will not happens as fn(dev, data) runs
* with its own reference to device via klist_node so it is not its last reference.
* so, above situation would not occur.
@@ -374,7 +391,7 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
devcd->read = read;
devcd->free = free;
devcd->failing_dev = get_device(dev);
devcd->delete_work = false;
devcd->deleted = false;
mutex_init(&devcd->mutex);
device_initialize(&devcd->devcd_dev);
@@ -383,8 +400,14 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
atomic_inc_return(&devcd_count));
devcd->devcd_dev.class = &devcd_class;
mutex_lock(&devcd->mutex);
dev_set_uevent_suppress(&devcd->devcd_dev, true);
/* devcd->mutex prevents devcd_del() completing until init finishes */
mutex_lock(&devcd->mutex);
devcd->init_completed = false;
INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
schedule_delayed_work(&devcd->del_wk, timeout);
if (device_add(&devcd->devcd_dev))
goto put_device;
@@ -401,13 +424,20 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
dev_set_uevent_suppress(&devcd->devcd_dev, false);
kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD);
INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
schedule_delayed_work(&devcd->del_wk, timeout);
/*
* Safe to run devcd_del() now that we are done with devcd_dev.
* Alternatively we could have taken a ref on devcd_dev before
* dropping the lock.
*/
devcd->init_completed = true;
mutex_unlock(&devcd->mutex);
return;
put_device:
put_device(&devcd->devcd_dev);
mutex_unlock(&devcd->mutex);
cancel_delayed_work_sync(&devcd->del_wk);
put_device(&devcd->devcd_dev);
put_module:
module_put(owner);
free:


@@ -52,6 +52,7 @@
static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static struct workqueue_struct *nbd_del_wq;
static struct cred *nbd_cred;
static int nbd_total_devices = 0;
struct nbd_sock {
@@ -554,6 +555,7 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
int result;
struct msghdr msg = {} ;
unsigned int noreclaim_flag;
const struct cred *old_cred;
if (unlikely(!sock)) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
@@ -562,6 +564,8 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
return -EINVAL;
}
old_cred = override_creds(nbd_cred);
msg.msg_iter = *iter;
noreclaim_flag = memalloc_noreclaim_save();
@@ -586,6 +590,8 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
memalloc_noreclaim_restore(noreclaim_flag);
revert_creds(old_cred);
return result;
}
@@ -2677,7 +2683,15 @@ static int __init nbd_init(void)
return -ENOMEM;
}
nbd_cred = prepare_kernel_cred(&init_task);
if (!nbd_cred) {
destroy_workqueue(nbd_del_wq);
unregister_blkdev(NBD_MAJOR, "nbd");
return -ENOMEM;
}
if (genl_register_family(&nbd_genl_family)) {
put_cred(nbd_cred);
destroy_workqueue(nbd_del_wq);
unregister_blkdev(NBD_MAJOR, "nbd");
return -EINVAL;
@@ -2732,6 +2746,7 @@ static void __exit nbd_cleanup(void)
/* Also wait for nbd_dev_remove_work() completes */
destroy_workqueue(nbd_del_wq);
put_cred(nbd_cred);
idr_destroy(&nbd_index_idr);
unregister_blkdev(NBD_MAJOR, "nbd");
}


@@ -317,7 +317,7 @@ static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
unsigned int count = 0;
const unsigned int num_sample_bytes = comedi_bytes_per_sample(s);
if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) {
if (!s->munge || (async->cmd.flags & CMDF_RAWDATA) || async->cmd.chanlist_len == 0) {
async->munge_count += num_bytes;
return num_bytes;
}


@@ -1614,7 +1614,11 @@ static int amd_pstate_cpu_offline(struct cpufreq_policy *policy)
* min_perf value across kexec reboots. If this CPU is just onlined normally after this, the
* limits, epp and desired perf will get reset to the cached values in cpudata struct
*/
return amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
return amd_pstate_update_perf(policy, perf.bios_min_perf,
FIELD_GET(AMD_CPPC_DES_PERF_MASK, cpudata->cppc_req_cached),
FIELD_GET(AMD_CPPC_MAX_PERF_MASK, cpudata->cppc_req_cached),
FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached),
false);
}
static int amd_pstate_suspend(struct cpufreq_policy *policy)


@@ -188,20 +188,17 @@ again:
*
* This can deal with workloads that have long pauses interspersed
* with sporadic activity with a bunch of short pauses.
*
* However, if the number of remaining samples is too small to exclude
* any more outliers, allow the deepest available idle state to be
* selected because there are systems where the time spent by CPUs in
* deep idle states is correlated to the maximum frequency the CPUs
* can get to. On those systems, shallow idle states should be avoided
* unless there is a clear indication that the given CPU is most likley
* going to be woken up shortly.
*/
if (divisor * 4 <= INTERVALS * 3) {
/*
* If there are sufficiently many data points still under
* consideration after the outliers have been eliminated,
* returning without a prediction would be a mistake because it
* is likely that the next interval will not exceed the current
* maximum, so return the latter in that case.
*/
if (divisor >= INTERVALS / 2)
return max;
if (divisor * 4 <= INTERVALS * 3)
return UINT_MAX;
}
/* Update the thresholds for the next round. */
if (avg - min > max - avg)


@@ -269,7 +269,7 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
}
static int allocate_tlabel(struct fw_card *card)
__must_hold(&card->transactions_lock)
__must_hold(&card->transactions.lock)
{
int tlabel;

View File

@@ -167,6 +167,7 @@ static inline void __init init_ohci1394_initialize(struct ohci *ohci)
/**
* init_ohci1394_wait_for_busresets - wait until bus resets are completed
* @ohci: Pointer to the OHCI-1394 controller structure
*
* OHCI1394 initialization itself and any device going on- or offline
* and any cable issue cause a IEEE1394 bus reset. The OHCI1394 spec
@@ -189,6 +190,8 @@ static inline void __init init_ohci1394_wait_for_busresets(struct ohci *ohci)
/**
* init_ohci1394_enable_physical_dma - Enable physical DMA for remote debugging
* @ohci: Pointer to the OHCI-1394 controller structure
*
* This enables remote DMA access over IEEE1394 from every host for the low
* 4GB of address space. DMA accesses above 4GB are not available currently.
*/
@@ -201,6 +204,8 @@ static inline void __init init_ohci1394_enable_physical_dma(struct ohci *ohci)
/**
* init_ohci1394_reset_and_init_dma - init controller and enable DMA
* @ohci: Pointer to the OHCI-1394 controller structure
*
* This initializes the given controller and enables physical DMA engine in it.
*/
static inline void __init init_ohci1394_reset_and_init_dma(struct ohci *ohci)
@@ -230,6 +235,10 @@ static inline void __init init_ohci1394_reset_and_init_dma(struct ohci *ohci)
/**
* init_ohci1394_controller - Map the registers of the controller and init DMA
* @num: PCI bus number
* @slot: PCI device number
* @func: PCI function number
*
* This maps the registers of the specified controller and initializes it
*/
static inline void __init init_ohci1394_controller(int num, int slot, int func)
@@ -284,6 +293,7 @@ void __init init_ohci1394_dma_on_all_controllers(void)
/**
* setup_ohci1394_dma - enables early OHCI1394 DMA initialization
* @opt: Kernel command line parameter string
*/
static int __init setup_ohci1394_dma(char *opt)
{


@@ -649,6 +649,26 @@ static u16 ffa_memory_attributes_get(u32 func_id)
return FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK | FFA_MEM_INNER_SHAREABLE;
}
static void ffa_emad_impdef_value_init(u32 version, void *dst, void *src)
{
struct ffa_mem_region_attributes *ep_mem_access;
if (FFA_EMAD_HAS_IMPDEF_FIELD(version))
memcpy(dst, src, sizeof(ep_mem_access->impdef_val));
}
static void
ffa_mem_region_additional_setup(u32 version, struct ffa_mem_region *mem_region)
{
if (!FFA_MEM_REGION_HAS_EP_MEM_OFFSET(version)) {
mem_region->ep_mem_size = 0;
} else {
mem_region->ep_mem_size = ffa_emad_size_get(version);
mem_region->ep_mem_offset = sizeof(*mem_region);
memset(mem_region->reserved, 0, 12);
}
}
static int
ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
struct ffa_mem_ops_args *args)
@@ -667,27 +687,24 @@ ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
mem_region->flags = args->flags;
mem_region->sender_id = drv_info->vm_id;
mem_region->attributes = ffa_memory_attributes_get(func_id);
ep_mem_access = buffer +
ffa_mem_desc_offset(buffer, 0, drv_info->version);
composite_offset = ffa_mem_desc_offset(buffer, args->nattrs,
drv_info->version);
for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) {
for (idx = 0; idx < args->nattrs; idx++) {
ep_mem_access = buffer +
ffa_mem_desc_offset(buffer, idx, drv_info->version);
ep_mem_access->receiver = args->attrs[idx].receiver;
ep_mem_access->attrs = args->attrs[idx].attrs;
ep_mem_access->composite_off = composite_offset;
ep_mem_access->flag = 0;
ep_mem_access->reserved = 0;
ffa_emad_impdef_value_init(drv_info->version,
ep_mem_access->impdef_val,
args->attrs[idx].impdef_val);
}
mem_region->handle = 0;
mem_region->ep_count = args->nattrs;
if (drv_info->version <= FFA_VERSION_1_0) {
mem_region->ep_mem_size = 0;
} else {
mem_region->ep_mem_size = sizeof(*ep_mem_access);
mem_region->ep_mem_offset = sizeof(*mem_region);
memset(mem_region->reserved, 0, 12);
}
ffa_mem_region_additional_setup(drv_info->version, mem_region);
composite = buffer + composite_offset;
composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg);


@@ -309,16 +309,36 @@ enum debug_counters {
SCMI_DEBUG_COUNTERS_LAST
};
static inline void scmi_inc_count(atomic_t *arr, int stat)
/**
* struct scmi_debug_info - Debug common info
* @top_dentry: A reference to the top debugfs dentry
* @name: Name of this SCMI instance
* @type: Type of this SCMI instance
* @is_atomic: Flag to state if the transport of this instance is atomic
* @counters: An array of atomic_c's used for tracking statistics (if enabled)
*/
struct scmi_debug_info {
struct dentry *top_dentry;
const char *name;
const char *type;
bool is_atomic;
atomic_t counters[SCMI_DEBUG_COUNTERS_LAST];
};
static inline void scmi_inc_count(struct scmi_debug_info *dbg, int stat)
{
if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS))
atomic_inc(&arr[stat]);
if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) {
if (dbg)
atomic_inc(&dbg->counters[stat]);
}
}
static inline void scmi_dec_count(atomic_t *arr, int stat)
static inline void scmi_dec_count(struct scmi_debug_info *dbg, int stat)
{
if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS))
atomic_dec(&arr[stat]);
if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) {
if (dbg)
atomic_dec(&dbg->counters[stat]);
}
}
enum scmi_bad_msg {


@@ -115,22 +115,6 @@ struct scmi_protocol_instance {
#define ph_to_pi(h) container_of(h, struct scmi_protocol_instance, ph)
/**
* struct scmi_debug_info - Debug common info
* @top_dentry: A reference to the top debugfs dentry
* @name: Name of this SCMI instance
* @type: Type of this SCMI instance
* @is_atomic: Flag to state if the transport of this instance is atomic
* @counters: An array of atomic_c's used for tracking statistics (if enabled)
*/
struct scmi_debug_info {
struct dentry *top_dentry;
const char *name;
const char *type;
bool is_atomic;
atomic_t counters[SCMI_DEBUG_COUNTERS_LAST];
};
/**
* struct scmi_info - Structure representing a SCMI instance
*
@@ -610,7 +594,7 @@ scmi_xfer_inflight_register_unlocked(struct scmi_xfer *xfer,
/* Set in-flight */
set_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq);
scmi_inc_count(info->dbg->counters, XFERS_INFLIGHT);
scmi_inc_count(info->dbg, XFERS_INFLIGHT);
xfer->pending = true;
}
@@ -819,8 +803,9 @@ __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
hash_del(&xfer->node);
xfer->pending = false;
scmi_dec_count(info->dbg->counters, XFERS_INFLIGHT);
scmi_dec_count(info->dbg, XFERS_INFLIGHT);
}
xfer->flags = 0;
hlist_add_head(&xfer->node, &minfo->free_xfers);
}
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
@@ -839,8 +824,6 @@ void scmi_xfer_raw_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
struct scmi_info *info = handle_to_scmi_info(handle);
xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW;
xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET;
return __scmi_xfer_put(&info->tx_minfo, xfer);
}
@@ -1034,7 +1017,7 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNEXPECTED);
scmi_inc_count(info->dbg->counters, ERR_MSG_UNEXPECTED);
scmi_inc_count(info->dbg, ERR_MSG_UNEXPECTED);
return xfer;
}
@@ -1062,7 +1045,7 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
msg_type, xfer_id, msg_hdr, xfer->state);
scmi_bad_message_trace(cinfo, msg_hdr, MSG_INVALID);
scmi_inc_count(info->dbg->counters, ERR_MSG_INVALID);
scmi_inc_count(info->dbg, ERR_MSG_INVALID);
/* On error the refcount incremented above has to be dropped */
__scmi_xfer_put(minfo, xfer);
@@ -1107,7 +1090,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
PTR_ERR(xfer));
scmi_bad_message_trace(cinfo, msg_hdr, MSG_NOMEM);
scmi_inc_count(info->dbg->counters, ERR_MSG_NOMEM);
scmi_inc_count(info->dbg, ERR_MSG_NOMEM);
scmi_clear_channel(info, cinfo);
return;
@@ -1123,7 +1106,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
xfer->hdr.id, "NOTI", xfer->hdr.seq,
xfer->hdr.status, xfer->rx.buf, xfer->rx.len);
scmi_inc_count(info->dbg->counters, NOTIFICATION_OK);
scmi_inc_count(info->dbg, NOTIFICATION_OK);
scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
@@ -1183,10 +1166,10 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
scmi_clear_channel(info, cinfo);
complete(xfer->async_done);
scmi_inc_count(info->dbg->counters, DELAYED_RESPONSE_OK);
scmi_inc_count(info->dbg, DELAYED_RESPONSE_OK);
} else {
complete(&xfer->done);
scmi_inc_count(info->dbg->counters, RESPONSE_OK);
scmi_inc_count(info->dbg, RESPONSE_OK);
}
if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
@@ -1296,7 +1279,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
"timed out in resp(caller: %pS) - polling\n",
(void *)_RET_IP_);
ret = -ETIMEDOUT;
scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_POLLED_TIMEOUT);
scmi_inc_count(info->dbg, XFERS_RESPONSE_POLLED_TIMEOUT);
}
}
@@ -1321,7 +1304,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
"RESP" : "resp",
xfer->hdr.seq, xfer->hdr.status,
xfer->rx.buf, xfer->rx.len);
scmi_inc_count(info->dbg->counters, RESPONSE_POLLED_OK);
scmi_inc_count(info->dbg, RESPONSE_POLLED_OK);
if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
scmi_raw_message_report(info->raw, xfer,
@@ -1336,7 +1319,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
dev_err(dev, "timed out in resp(caller: %pS)\n",
(void *)_RET_IP_);
ret = -ETIMEDOUT;
scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_TIMEOUT);
scmi_inc_count(info->dbg, XFERS_RESPONSE_TIMEOUT);
}
}
@@ -1420,13 +1403,13 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
!is_transport_polling_capable(info->desc)) {
dev_warn_once(dev,
"Polling mode is not supported by transport.\n");
scmi_inc_count(info->dbg->counters, SENT_FAIL_POLLING_UNSUPPORTED);
scmi_inc_count(info->dbg, SENT_FAIL_POLLING_UNSUPPORTED);
return -EINVAL;
}
cinfo = idr_find(&info->tx_idr, pi->proto->id);
if (unlikely(!cinfo)) {
scmi_inc_count(info->dbg->counters, SENT_FAIL_CHANNEL_NOT_FOUND);
scmi_inc_count(info->dbg, SENT_FAIL_CHANNEL_NOT_FOUND);
return -EINVAL;
}
/* True ONLY if also supported by transport. */
@@ -1461,19 +1444,19 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
ret = info->desc->ops->send_message(cinfo, xfer);
if (ret < 0) {
dev_dbg(dev, "Failed to send message %d\n", ret);
scmi_inc_count(info->dbg->counters, SENT_FAIL);
scmi_inc_count(info->dbg, SENT_FAIL);
return ret;
}
trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
xfer->hdr.id, "CMND", xfer->hdr.seq,
xfer->hdr.status, xfer->tx.buf, xfer->tx.len);
scmi_inc_count(info->dbg->counters, SENT_OK);
scmi_inc_count(info->dbg, SENT_OK);
ret = scmi_wait_for_message_response(cinfo, xfer);
if (!ret && xfer->hdr.status) {
ret = scmi_to_linux_errno(xfer->hdr.status);
scmi_inc_count(info->dbg->counters, ERR_PROTOCOL);
scmi_inc_count(info->dbg, ERR_PROTOCOL);
}
if (info->desc->ops->mark_txdone)
@@ -3044,9 +3027,6 @@ static int scmi_debugfs_raw_mode_setup(struct scmi_info *info)
u8 channels[SCMI_MAX_CHANNELS] = {};
DECLARE_BITMAP(protos, SCMI_MAX_CHANNELS) = {};
if (!info->dbg)
return -EINVAL;
/* Enumerate all channels to collect their ids */
idr_for_each_entry(&info->tx_idr, cinfo, id) {
/*
@@ -3218,7 +3198,7 @@ static int scmi_probe(struct platform_device *pdev)
if (!info->dbg)
dev_warn(dev, "Failed to setup SCMI debugfs.\n");
if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
if (info->dbg && IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
ret = scmi_debugfs_raw_mode_setup(info);
if (!coex) {
if (ret)
@@ -3423,6 +3403,9 @@ int scmi_inflight_count(const struct scmi_handle *handle)
if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) {
struct scmi_info *info = handle_to_scmi_info(handle);
if (!info->dbg)
return 0;
return atomic_read(&info->dbg->counters[XFERS_INFLIGHT]);
} else {
return 0;

View File

@@ -59,6 +59,7 @@ static const struct regmap_config idio_16_regmap_config = {
.reg_stride = 1,
.val_bits = 8,
.io_port = true,
.max_register = 0x5,
.wr_table = &idio_16_wr_table,
.rd_table = &idio_16_rd_table,
.volatile_table = &idio_16_rd_table,

View File

@@ -6,6 +6,7 @@
#define DEFAULT_SYMBOL_NAMESPACE "GPIO_IDIO_16"
#include <linux/bitmap.h>
#include <linux/bits.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -107,6 +108,7 @@ int devm_idio_16_regmap_register(struct device *const dev,
struct idio_16_data *data;
struct regmap_irq_chip *chip;
struct regmap_irq_chip_data *chip_data;
DECLARE_BITMAP(fixed_direction_output, IDIO_16_NGPIO);
if (!config->parent)
return -EINVAL;
@@ -164,6 +166,9 @@ int devm_idio_16_regmap_register(struct device *const dev,
gpio_config.irq_domain = regmap_irq_get_domain(chip_data);
gpio_config.reg_mask_xlate = idio_16_reg_mask_xlate;
bitmap_from_u64(fixed_direction_output, GENMASK_U64(15, 0));
gpio_config.fixed_direction_output = fixed_direction_output;
return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &gpio_config));
}
EXPORT_SYMBOL_GPL(devm_idio_16_regmap_register);

View File

@@ -286,22 +286,14 @@ static void ljca_gpio_event_cb(void *context, u8 cmd, const void *evt_data,
{
const struct ljca_gpio_packet *packet = evt_data;
struct ljca_gpio_dev *ljca_gpio = context;
int i, irq;
int i;
if (cmd != LJCA_GPIO_INT_EVENT)
return;
for (i = 0; i < packet->num; i++) {
irq = irq_find_mapping(ljca_gpio->gc.irq.domain,
packet->item[i].index);
if (!irq) {
dev_err(ljca_gpio->gc.parent,
"gpio_id %u does not mapped to IRQ yet\n",
packet->item[i].index);
return;
}
generic_handle_domain_irq(ljca_gpio->gc.irq.domain, irq);
generic_handle_domain_irq(ljca_gpio->gc.irq.domain,
packet->item[i].index);
set_bit(packet->item[i].index, ljca_gpio->reenable_irqs);
}

View File

@@ -41,6 +41,7 @@ static const struct regmap_config idio_16_regmap_config = {
.reg_stride = 1,
.val_bits = 8,
.io_port = true,
.max_register = 0x7,
.wr_table = &idio_16_wr_table,
.rd_table = &idio_16_rd_table,
.volatile_table = &idio_16_rd_table,

View File

@@ -31,6 +31,7 @@ struct gpio_regmap {
unsigned int reg_clr_base;
unsigned int reg_dir_in_base;
unsigned int reg_dir_out_base;
unsigned long *fixed_direction_output;
#ifdef CONFIG_REGMAP_IRQ
int regmap_irq_line;
@@ -134,6 +135,13 @@ static int gpio_regmap_get_direction(struct gpio_chip *chip,
unsigned int base, val, reg, mask;
int invert, ret;
if (gpio->fixed_direction_output) {
if (test_bit(offset, gpio->fixed_direction_output))
return GPIO_LINE_DIRECTION_OUT;
else
return GPIO_LINE_DIRECTION_IN;
}
if (gpio->reg_dat_base && !gpio->reg_set_base)
return GPIO_LINE_DIRECTION_IN;
if (gpio->reg_set_base && !gpio->reg_dat_base)
@@ -284,6 +292,17 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
goto err_free_gpio;
}
if (config->fixed_direction_output) {
gpio->fixed_direction_output = bitmap_alloc(chip->ngpio,
GFP_KERNEL);
if (!gpio->fixed_direction_output) {
ret = -ENOMEM;
goto err_free_gpio;
}
bitmap_copy(gpio->fixed_direction_output,
config->fixed_direction_output, chip->ngpio);
}
/* if not set, assume there is only one register */
gpio->ngpio_per_reg = config->ngpio_per_reg;
if (!gpio->ngpio_per_reg)
@@ -300,7 +319,7 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
ret = gpiochip_add_data(chip, gpio);
if (ret < 0)
goto err_free_gpio;
goto err_free_bitmap;
#ifdef CONFIG_REGMAP_IRQ
if (config->regmap_irq_chip) {
@@ -309,7 +328,7 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
config->regmap_irq_line, config->regmap_irq_flags,
0, config->regmap_irq_chip, &gpio->irq_chip_data);
if (ret)
goto err_free_gpio;
goto err_free_bitmap;
irq_domain = regmap_irq_get_domain(gpio->irq_chip_data);
} else
@@ -326,6 +345,8 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
err_remove_gpiochip:
gpiochip_remove(chip);
err_free_bitmap:
bitmap_free(gpio->fixed_direction_output);
err_free_gpio:
kfree(gpio);
return ERR_PTR(ret);
@@ -344,6 +365,7 @@ void gpio_regmap_unregister(struct gpio_regmap *gpio)
#endif
gpiochip_remove(&gpio->gpio_chip);
bitmap_free(gpio->fixed_direction_output);
kfree(gpio);
}
EXPORT_SYMBOL_GPL(gpio_regmap_unregister);
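Since gpio_regmap_register() copies the bitmap into its own allocation, a provider can build it in a local variable; a rough sketch of a consumer (names illustrative, equivalent to the GENMASK_U64 form used by the idio-16 code above):

	DECLARE_BITMAP(fixed_out, 32);

	bitmap_zero(fixed_out, 32);
	bitmap_set(fixed_out, 0, 16);	/* lines 0..15 are output-only */
	gpio_config.fixed_direction_output = fixed_out;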

View File

@@ -291,6 +291,19 @@ acpi_gpio_to_gpiod_flags(const struct acpi_resource_gpio *agpio, int polarity)
return GPIOD_ASIS;
}
static void acpi_gpio_set_debounce_timeout(struct gpio_desc *desc,
unsigned int acpi_debounce)
{
int ret;
/* ACPI uses hundredths of milliseconds units */
acpi_debounce *= 10;
ret = gpio_set_debounce_timeout(desc, acpi_debounce);
if (ret)
gpiod_warn(desc, "Failed to set debounce-timeout %u: %d\n",
acpi_debounce, ret);
}
static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
struct acpi_resource_gpio *agpio,
unsigned int index,
@@ -300,18 +313,12 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
enum gpiod_flags flags = acpi_gpio_to_gpiod_flags(agpio, polarity);
unsigned int pin = agpio->pin_table[index];
struct gpio_desc *desc;
int ret;
desc = gpiochip_request_own_desc(chip, pin, label, polarity, flags);
if (IS_ERR(desc))
return desc;
/* ACPI uses hundredths of milliseconds units */
ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout * 10);
if (ret)
dev_warn(chip->parent,
"Failed to set debounce-timeout for pin 0x%04X, err %d\n",
pin, ret);
acpi_gpio_set_debounce_timeout(desc, agpio->debounce_timeout);
return desc;
}
@@ -375,8 +382,8 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
desc = acpi_request_own_gpiod(chip, agpio, 0, "ACPI:Event");
if (IS_ERR(desc)) {
dev_err(chip->parent,
"Failed to request GPIO for pin 0x%04X, err %ld\n",
pin, PTR_ERR(desc));
"Failed to request GPIO for pin 0x%04X, err %pe\n",
pin, desc);
return AE_OK;
}
@@ -944,7 +951,6 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
bool can_fallback = acpi_can_fallback_to_crs(adev, con_id);
struct acpi_gpio_info info = {};
struct gpio_desc *desc;
int ret;
desc = __acpi_find_gpio(fwnode, con_id, idx, can_fallback, &info);
if (IS_ERR(desc))
@@ -959,10 +965,7 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
acpi_gpio_update_gpiod_flags(dflags, &info);
acpi_gpio_update_gpiod_lookup_flags(lookupflags, &info);
/* ACPI uses hundredths of milliseconds units */
ret = gpio_set_debounce_timeout(desc, info.debounce * 10);
if (ret)
return ERR_PTR(ret);
acpi_gpio_set_debounce_timeout(desc, info.debounce);
return desc;
}
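The factor of ten converts ACPI's hundredths-of-a-millisecond unit into the microseconds expected by gpio_set_debounce_timeout(): an ACPI DebounceTimeout of 100, for example, becomes 100 * 10 = 1000 us, i.e. 1 ms.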

View File

@@ -551,13 +551,13 @@ static void schedule_dc_vmin_vmax(struct amdgpu_device *adev,
struct dc_stream_state *stream,
struct dc_crtc_timing_adjust *adjust)
{
struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_KERNEL);
struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_NOWAIT);
if (!offload_work) {
drm_dbg_driver(adev_to_drm(adev), "Failed to allocate vupdate_offload_work\n");
return;
}
struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_KERNEL);
struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_NOWAIT);
if (!adjust_copy) {
drm_dbg_driver(adev_to_drm(adev), "Failed to allocate adjust_copy\n");
kfree(offload_work);

View File

@@ -200,6 +200,9 @@ void dcn401_init_hw(struct dc *dc)
*/
struct dc_link *link = dc->links[i];
if (link->ep_type != DISPLAY_ENDPOINT_PHY)
continue;
link->link_enc->funcs->hw_init(link->link_enc);
/* Check for enabled DIG to identify enabled display */

View File

@@ -44,7 +44,13 @@
*/
#define MAX_PIPES 6
#define MAX_PHANTOM_PIPES (MAX_PIPES / 2)
#define MAX_LINKS (MAX_PIPES * 2 +2)
#define MAX_DPIA 6
#define MAX_CONNECTOR 6
#define MAX_VIRTUAL_LINKS 4
#define MAX_LINKS (MAX_DPIA + MAX_CONNECTOR + MAX_VIRTUAL_LINKS)
#define MAX_DIG_LINK_ENCODERS 7
#define MAX_DWB_PIPES 1
#define MAX_HPO_DP2_ENCODERS 4
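With MAX_PIPES at 6, the old formula allowed 6 * 2 + 2 = 14 links, while the new sum gives 6 + 6 + 4 = 16.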

View File

@@ -78,6 +78,7 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
struct audio_output audio_output[MAX_PIPES];
struct dc_stream_state *streams_on_link[MAX_PIPES];
int num_streams_on_link = 0;
struct dc *dc = (struct dc *)link->dc;
needs_divider_update = (link->dc->link_srv->dp_get_encoding_format(link_setting) !=
link->dc->link_srv->dp_get_encoding_format((const struct dc_link_settings *) &link->cur_link_settings));
@@ -150,7 +151,7 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
if (streams_on_link[i] && streams_on_link[i]->link && streams_on_link[i]->link == link) {
stream_update.stream = streams_on_link[i];
stream_update.dpms_off = &dpms_off;
dc_update_planes_and_stream(state->clk_mgr->ctx->dc, NULL, 0, streams_on_link[i], &stream_update);
dc_update_planes_and_stream(dc, NULL, 0, streams_on_link[i], &stream_update);
}
}
}

View File

@@ -174,6 +174,33 @@ static void drm_panic_write_pixel24(void *vaddr, unsigned int offset, u32 color)
*p = color & 0xff;
}
/*
* Special case if the pixel crosses page boundaries
*/
static void drm_panic_write_pixel24_xpage(void *vaddr, struct page *next_page,
unsigned int offset, u32 color)
{
u8 *vaddr2;
u8 *p = vaddr + offset;
vaddr2 = kmap_local_page_try_from_panic(next_page);
*p++ = color & 0xff;
color >>= 8;
if (offset == PAGE_SIZE - 1)
p = vaddr2;
*p++ = color & 0xff;
color >>= 8;
if (offset == PAGE_SIZE - 2)
p = vaddr2;
*p = color & 0xff;
kunmap_local(vaddr2);
}
static void drm_panic_write_pixel32(void *vaddr, unsigned int offset, u32 color)
{
u32 *p = vaddr + offset;
@@ -231,7 +258,14 @@ static void drm_panic_blit_page(struct page **pages, unsigned int dpitch,
page = new_page;
vaddr = kmap_local_page_try_from_panic(pages[page]);
}
if (vaddr)
if (!vaddr)
continue;
// Special case for 24bit, as a pixel might cross page boundaries
if (cpp == 3 && offset + 3 > PAGE_SIZE)
drm_panic_write_pixel24_xpage(vaddr, pages[page + 1],
offset, fg32);
else
drm_panic_write_pixel(vaddr, offset, fg32, cpp);
}
}
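Only the 24-bit case needs this: assuming a 4 KiB page, a 3-byte pixel starting at offset 4094 puts its first two bytes at 4094 and 4095 and its last byte at offset 0 of the next page, which is exactly the offset + 3 > PAGE_SIZE case the code checks for; 16- and 32-bit pixels divide the page size evenly, so with pixel-aligned scanlines they never straddle a boundary.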
@@ -321,7 +355,15 @@ static void drm_panic_fill_page(struct page **pages, unsigned int dpitch,
page = new_page;
vaddr = kmap_local_page_try_from_panic(pages[page]);
}
drm_panic_write_pixel(vaddr, offset, color, cpp);
if (!vaddr)
continue;
// Special case for 24bit, as a pixel might cross page boundaries
if (cpp == 3 && offset + 3 > PAGE_SIZE)
drm_panic_write_pixel24_xpage(vaddr, pages[page + 1],
offset, color);
else
drm_panic_write_pixel(vaddr, offset, color, cpp);
}
}
if (vaddr)
@@ -429,6 +471,9 @@ static void drm_panic_logo_rect(struct drm_rect *rect, const struct font_desc *f
static void drm_panic_logo_draw(struct drm_scanout_buffer *sb, struct drm_rect *rect,
const struct font_desc *font, u32 fg_color)
{
if (rect->x2 > sb->width || rect->y2 > sb->height)
return;
if (logo_mono)
drm_panic_blit(sb, rect, logo_mono->data,
DIV_ROUND_UP(drm_rect_width(rect), 8), 1, fg_color);
@@ -477,7 +522,7 @@ static int draw_line_with_wrap(struct drm_scanout_buffer *sb, const struct font_
struct drm_panic_line *line, int yoffset, u32 fg_color)
{
int chars_per_row = sb->width / font->width;
struct drm_rect r_txt = DRM_RECT_INIT(0, yoffset, sb->width, sb->height);
struct drm_rect r_txt = DRM_RECT_INIT(0, yoffset, sb->width, font->height);
struct drm_panic_line line_wrap;
if (line->len > chars_per_row) {
@@ -520,7 +565,7 @@ static void draw_panic_static_kmsg(struct drm_scanout_buffer *sb)
struct drm_panic_line line;
int yoffset;
if (!font)
if (!font || font->width > sb->width)
return;
yoffset = sb->height - font->height - (sb->height % font->height) / 2;
@@ -733,7 +778,10 @@ static int _draw_panic_static_qr_code(struct drm_scanout_buffer *sb)
pr_debug("QR width %d and scale %d\n", qr_width, scale);
r_qr_canvas = DRM_RECT_INIT(0, 0, qr_canvas_width * scale, qr_canvas_width * scale);
v_margin = (sb->height - drm_rect_height(&r_qr_canvas) - drm_rect_height(&r_msg)) / 5;
v_margin = sb->height - drm_rect_height(&r_qr_canvas) - drm_rect_height(&r_msg);
if (v_margin < 0)
return -ENOSPC;
v_margin /= 5;
drm_rect_translate(&r_qr_canvas, (sb->width - r_qr_canvas.x2) / 2, 2 * v_margin);
r_qr = DRM_RECT_INIT(r_qr_canvas.x1 + QR_MARGIN * scale, r_qr_canvas.y1 + QR_MARGIN * scale,
@@ -746,7 +794,7 @@ static int _draw_panic_static_qr_code(struct drm_scanout_buffer *sb)
/* Fill with the background color, and draw text on top */
drm_panic_fill(sb, &r_screen, bg_color);
if (!drm_rect_overlap(&r_logo, &r_msg) && !drm_rect_overlap(&r_logo, &r_qr))
if (!drm_rect_overlap(&r_logo, &r_msg) && !drm_rect_overlap(&r_logo, &r_qr_canvas))
drm_panic_logo_draw(sb, &r_logo, font, fg_color);
draw_txt_rectangle(sb, font, panic_msg, panic_msg_lines, true, &r_msg, fg_color);

View File

@@ -2117,6 +2117,7 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
intel_frontbuffer_put(intel_fb->frontbuffer);
kfree(intel_fb->panic);
kfree(intel_fb);
}
@@ -2215,16 +2216,22 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
struct intel_display *display = to_intel_display(obj->dev);
struct drm_framebuffer *fb = &intel_fb->base;
u32 max_stride;
int ret = -EINVAL;
int ret;
int i;
intel_fb->panic = intel_panic_alloc();
if (!intel_fb->panic)
return -ENOMEM;
/*
* intel_frontbuffer_get() must be done before
* intel_fb_bo_framebuffer_init() to avoid set_tiling vs. addfb race.
*/
intel_fb->frontbuffer = intel_frontbuffer_get(obj);
if (!intel_fb->frontbuffer)
return -ENOMEM;
if (!intel_fb->frontbuffer) {
ret = -ENOMEM;
goto err_free_panic;
}
ret = intel_fb_bo_framebuffer_init(fb, obj, mode_cmd);
if (ret)
@@ -2323,6 +2330,9 @@ err_bo_framebuffer_fini:
intel_fb_bo_framebuffer_fini(obj);
err_frontbuffer_put:
intel_frontbuffer_put(intel_fb->frontbuffer);
err_free_panic:
kfree(intel_fb->panic);
return ret;
}
@@ -2349,20 +2359,11 @@ intel_user_framebuffer_create(struct drm_device *dev,
struct intel_framebuffer *intel_framebuffer_alloc(void)
{
struct intel_framebuffer *intel_fb;
struct intel_panic *panic;
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb)
return NULL;
panic = intel_panic_alloc();
if (!panic) {
kfree(intel_fb);
return NULL;
}
intel_fb->panic = panic;
return intel_fb;
}

View File

@@ -1175,10 +1175,14 @@ panthor_vm_op_ctx_prealloc_vmas(struct panthor_vm_op_ctx *op_ctx)
break;
case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
/* Partial unmaps might trigger a remap with either a prev or a next VA,
* but not both.
/* Two VMAs can be needed for an unmap, as an unmap can happen
* in the middle of a drm_gpuva, requiring a remap with both
* prev & next VA. Or an unmap can span more than one drm_gpuva
* where the first and last ones are covered partially, requiring
* a remap for the first with a prev VA and remap for the last
* with a next VA.
*/
vma_count = 1;
vma_count = 2;
break;
default:

View File

@@ -361,7 +361,7 @@ static void dw_hdmi_rk3228_setup_hpd(struct dw_hdmi *dw_hdmi, void *data)
regmap_write(hdmi->regmap, RK3228_GRF_SOC_CON2,
FIELD_PREP_WM16(RK3228_HDMI_SDAIN_MSK, 1) |
FIELD_PREP_WM16(RK3328_HDMI_SCLIN_MSK, 1));
FIELD_PREP_WM16(RK3228_HDMI_SCLIN_MSK, 1));
}
static enum drm_connector_status

View File

@@ -292,6 +292,9 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
ggtt->pt_ops = &xelp_pt_ops;
ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, WQ_MEM_RECLAIM);
if (!ggtt->wq)
return -ENOMEM;
__xe_ggtt_init_early(ggtt, xe_wopcm_size(xe));
err = drmm_add_action_or_reset(&xe->drm, ggtt_fini_early, ggtt);

View File

@@ -2022,7 +2022,7 @@ static int op_prepare(struct xe_vm *vm,
case DRM_GPUVA_OP_MAP:
if ((!op->map.immediate && xe_vm_in_fault_mode(vm) &&
!op->map.invalidate_on_bind) ||
op->map.is_cpu_addr_mirror)
(op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR))
break;
err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma,
@@ -2252,7 +2252,7 @@ static void op_commit(struct xe_vm *vm,
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
if ((!op->map.immediate && xe_vm_in_fault_mode(vm)) ||
op->map.is_cpu_addr_mirror)
(op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR))
break;
bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence,

View File

@@ -302,6 +302,11 @@ static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 range_start, u64
if (!vma)
return -EINVAL;
if (!(vma->gpuva.flags & XE_VMA_MADV_AUTORESET)) {
drm_dbg(&vm->xe->drm, "Skipping madvise reset for vma.\n");
return 0;
}
if (xe_vma_has_default_mem_attrs(vma))
return 0;

View File

@@ -616,6 +616,13 @@ static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask,
vops->pt_update_ops[i].num_ops += inc_val;
}
#define XE_VMA_CREATE_MASK ( \
XE_VMA_READ_ONLY | \
XE_VMA_DUMPABLE | \
XE_VMA_SYSTEM_ALLOCATOR | \
DRM_GPUVA_SPARSE | \
XE_VMA_MADV_AUTORESET)
static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
u8 tile_mask)
{
@@ -628,8 +635,7 @@ static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
op->base.map.gem.offset = vma->gpuva.gem.offset;
op->map.vma = vma;
op->map.immediate = true;
op->map.dumpable = vma->gpuva.flags & XE_VMA_DUMPABLE;
op->map.is_null = xe_vma_is_null(vma);
op->map.vma_flags = vma->gpuva.flags & XE_VMA_CREATE_MASK;
}
static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,
@@ -932,11 +938,6 @@ static void xe_vma_free(struct xe_vma *vma)
kfree(vma);
}
#define VMA_CREATE_FLAG_READ_ONLY BIT(0)
#define VMA_CREATE_FLAG_IS_NULL BIT(1)
#define VMA_CREATE_FLAG_DUMPABLE BIT(2)
#define VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR BIT(3)
static struct xe_vma *xe_vma_create(struct xe_vm *vm,
struct xe_bo *bo,
u64 bo_offset_or_userptr,
@@ -947,11 +948,8 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
struct xe_vma *vma;
struct xe_tile *tile;
u8 id;
bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE);
bool is_cpu_addr_mirror =
(flags & VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR);
bool is_null = (flags & DRM_GPUVA_SPARSE);
bool is_cpu_addr_mirror = (flags & XE_VMA_SYSTEM_ALLOCATOR);
xe_assert(vm->xe, start < end);
xe_assert(vm->xe, end < vm->size);
@@ -972,10 +970,6 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
if (!vma)
return ERR_PTR(-ENOMEM);
if (is_cpu_addr_mirror)
vma->gpuva.flags |= XE_VMA_SYSTEM_ALLOCATOR;
if (is_null)
vma->gpuva.flags |= DRM_GPUVA_SPARSE;
if (bo)
vma->gpuva.gem.obj = &bo->ttm.base;
}
@@ -986,10 +980,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
vma->gpuva.vm = &vm->gpuvm;
vma->gpuva.va.addr = start;
vma->gpuva.va.range = end - start + 1;
if (read_only)
vma->gpuva.flags |= XE_VMA_READ_ONLY;
if (dumpable)
vma->gpuva.flags |= XE_VMA_DUMPABLE;
vma->gpuva.flags = flags;
for_each_tile(tile, vm->xe, id)
vma->tile_mask |= 0x1 << id;
@@ -2272,12 +2263,16 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
if (__op->op == DRM_GPUVA_OP_MAP) {
op->map.immediate =
flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
op->map.read_only =
flags & DRM_XE_VM_BIND_FLAG_READONLY;
op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
op->map.is_cpu_addr_mirror = flags &
DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR;
op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
if (flags & DRM_XE_VM_BIND_FLAG_READONLY)
op->map.vma_flags |= XE_VMA_READ_ONLY;
if (flags & DRM_XE_VM_BIND_FLAG_NULL)
op->map.vma_flags |= DRM_GPUVA_SPARSE;
if (flags & DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR)
op->map.vma_flags |= XE_VMA_SYSTEM_ALLOCATOR;
if (flags & DRM_XE_VM_BIND_FLAG_DUMPABLE)
op->map.vma_flags |= XE_VMA_DUMPABLE;
if (flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET)
op->map.vma_flags |= XE_VMA_MADV_AUTORESET;
op->map.pat_index = pat_index;
op->map.invalidate_on_bind =
__xe_vm_needs_clear_scratch_pages(vm, flags);
@@ -2590,14 +2585,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
.pat_index = op->map.pat_index,
};
flags |= op->map.read_only ?
VMA_CREATE_FLAG_READ_ONLY : 0;
flags |= op->map.is_null ?
VMA_CREATE_FLAG_IS_NULL : 0;
flags |= op->map.dumpable ?
VMA_CREATE_FLAG_DUMPABLE : 0;
flags |= op->map.is_cpu_addr_mirror ?
VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
flags |= op->map.vma_flags & XE_VMA_CREATE_MASK;
vma = new_vma(vm, &op->base.map, &default_attr,
flags);
@@ -2606,7 +2594,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
op->map.vma = vma;
if (((op->map.immediate || !xe_vm_in_fault_mode(vm)) &&
!op->map.is_cpu_addr_mirror) ||
!(op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR)) ||
op->map.invalidate_on_bind)
xe_vma_ops_incr_pt_update_ops(vops,
op->tile_mask, 1);
@@ -2637,18 +2625,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
op->remap.start = xe_vma_start(old);
op->remap.range = xe_vma_size(old);
flags |= op->base.remap.unmap->va->flags &
XE_VMA_READ_ONLY ?
VMA_CREATE_FLAG_READ_ONLY : 0;
flags |= op->base.remap.unmap->va->flags &
DRM_GPUVA_SPARSE ?
VMA_CREATE_FLAG_IS_NULL : 0;
flags |= op->base.remap.unmap->va->flags &
XE_VMA_DUMPABLE ?
VMA_CREATE_FLAG_DUMPABLE : 0;
flags |= xe_vma_is_cpu_addr_mirror(old) ?
VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
flags |= op->base.remap.unmap->va->flags & XE_VMA_CREATE_MASK;
if (op->base.remap.prev) {
vma = new_vma(vm, op->base.remap.prev,
&old->attr, flags);
@@ -3279,7 +3256,8 @@ ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_execute, ERRNO);
DRM_XE_VM_BIND_FLAG_NULL | \
DRM_XE_VM_BIND_FLAG_DUMPABLE | \
DRM_XE_VM_BIND_FLAG_CHECK_PXP | \
DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR)
DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR | \
DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET)
#ifdef TEST_VM_OPS_ERROR
#define SUPPORTED_FLAGS (SUPPORTED_FLAGS_STUB | FORCE_OP_ERROR)
@@ -3394,7 +3372,9 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
XE_IOCTL_DBG(xe, (prefetch_region != DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC &&
!(BIT(prefetch_region) & xe->info.mem_region_mask))) ||
XE_IOCTL_DBG(xe, obj &&
op == DRM_XE_VM_BIND_OP_UNMAP)) {
op == DRM_XE_VM_BIND_OP_UNMAP) ||
XE_IOCTL_DBG(xe, (flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET) &&
(!is_cpu_addr_mirror || op != DRM_XE_VM_BIND_OP_MAP))) {
err = -EINVAL;
goto free_bind_ops;
}
@@ -4212,7 +4192,7 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
struct xe_vma_ops vops;
struct drm_gpuva_ops *ops = NULL;
struct drm_gpuva_op *__op;
bool is_cpu_addr_mirror = false;
unsigned int vma_flags = 0;
bool remap_op = false;
struct xe_vma_mem_attr tmp_attr;
u16 default_pat;
@@ -4242,15 +4222,17 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
vma = gpuva_to_vma(op->base.unmap.va);
XE_WARN_ON(!xe_vma_has_default_mem_attrs(vma));
default_pat = vma->attr.default_pat_index;
vma_flags = vma->gpuva.flags;
}
if (__op->op == DRM_GPUVA_OP_REMAP) {
vma = gpuva_to_vma(op->base.remap.unmap->va);
default_pat = vma->attr.default_pat_index;
vma_flags = vma->gpuva.flags;
}
if (__op->op == DRM_GPUVA_OP_MAP) {
op->map.is_cpu_addr_mirror = true;
op->map.vma_flags |= vma_flags & XE_VMA_CREATE_MASK;
op->map.pat_index = default_pat;
}
} else {
@@ -4259,11 +4241,7 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
xe_assert(vm->xe, !remap_op);
xe_assert(vm->xe, xe_vma_has_no_bo(vma));
remap_op = true;
if (xe_vma_is_cpu_addr_mirror(vma))
is_cpu_addr_mirror = true;
else
is_cpu_addr_mirror = false;
vma_flags = vma->gpuva.flags;
}
if (__op->op == DRM_GPUVA_OP_MAP) {
@@ -4272,10 +4250,10 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
/*
* In case of madvise ops DRM_GPUVA_OP_MAP is
* always after DRM_GPUVA_OP_REMAP, so ensure
* we assign op->map.is_cpu_addr_mirror true
* if REMAP is for xe_vma_is_cpu_addr_mirror vma
* to propagate the flags from the vma we're
* unmapping.
*/
op->map.is_cpu_addr_mirror = is_cpu_addr_mirror;
op->map.vma_flags |= vma_flags & XE_VMA_CREATE_MASK;
}
}
print_op(vm->xe, __op);

View File

@@ -46,6 +46,7 @@ struct xe_vm_pgtable_update_op;
#define XE_VMA_PTE_COMPACT (DRM_GPUVA_USERBITS << 7)
#define XE_VMA_DUMPABLE (DRM_GPUVA_USERBITS << 8)
#define XE_VMA_SYSTEM_ALLOCATOR (DRM_GPUVA_USERBITS << 9)
#define XE_VMA_MADV_AUTORESET (DRM_GPUVA_USERBITS << 10)
/**
* struct xe_vma_mem_attr - memory attributes associated with vma
@@ -345,17 +346,10 @@ struct xe_vm {
struct xe_vma_op_map {
/** @vma: VMA to map */
struct xe_vma *vma;
unsigned int vma_flags;
/** @immediate: Immediate bind */
bool immediate;
/** @read_only: Read only */
bool read_only;
/** @is_null: is NULL binding */
bool is_null;
/** @is_cpu_addr_mirror: is CPU address mirror binding */
bool is_cpu_addr_mirror;
/** @dumpable: whether BO is dumped on GPU hang */
bool dumpable;
/** @invalidate: invalidate the VMA before bind */
bool invalidate_on_bind;
/** @pat_index: The pat index to use for this operation. */
u16 pat_index;

View File

@@ -107,6 +107,9 @@ static int cgbc_hwmon_probe_sensors(struct device *dev, struct cgbc_hwmon_data *
nb_sensors = data[0];
hwmon->sensors = devm_kzalloc(dev, sizeof(*hwmon->sensors) * nb_sensors, GFP_KERNEL);
if (!hwmon->sensors)
return -ENOMEM;
sensor = hwmon->sensors;
for (i = 0; i < nb_sensors; i++) {

View File

@@ -615,14 +615,14 @@ static int gpd_fan_probe(struct platform_device *pdev)
const struct device *hwdev;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (IS_ERR(res))
return dev_err_probe(dev, PTR_ERR(res),
if (!res)
return dev_err_probe(dev, -EINVAL,
"Failed to get platform resource\n");
region = devm_request_region(dev, res->start,
resource_size(res), DRIVER_NAME);
if (IS_ERR(region))
return dev_err_probe(dev, PTR_ERR(region),
if (!region)
return dev_err_probe(dev, -EBUSY,
"Failed to request region\n");
hwdev = devm_hwmon_device_register_with_info(dev,
@@ -631,7 +631,7 @@ static int gpd_fan_probe(struct platform_device *pdev)
&gpd_fan_chip_info,
NULL);
if (IS_ERR(hwdev))
return dev_err_probe(dev, PTR_ERR(region),
return dev_err_probe(dev, PTR_ERR(hwdev),
"Failed to register hwmon device\n");
return 0;

View File

@@ -336,10 +336,9 @@ static int isl68137_probe_from_dt(struct device *dev,
struct isl68137_data *data)
{
const struct device_node *np = dev->of_node;
struct device_node *child;
int err;
for_each_child_of_node(np, child) {
for_each_child_of_node_scoped(np, child) {
if (strcmp(child->name, "channel"))
continue;

View File

@@ -336,18 +336,18 @@ static struct pmbus_driver_info max34440_info[] = {
.format[PSC_CURRENT_IN] = direct,
.format[PSC_CURRENT_OUT] = direct,
.format[PSC_TEMPERATURE] = direct,
.m[PSC_VOLTAGE_IN] = 1,
.m[PSC_VOLTAGE_IN] = 125,
.b[PSC_VOLTAGE_IN] = 0,
.R[PSC_VOLTAGE_IN] = 0,
.m[PSC_VOLTAGE_OUT] = 1,
.m[PSC_VOLTAGE_OUT] = 125,
.b[PSC_VOLTAGE_OUT] = 0,
.R[PSC_VOLTAGE_OUT] = 0,
.m[PSC_CURRENT_IN] = 1,
.m[PSC_CURRENT_IN] = 250,
.b[PSC_CURRENT_IN] = 0,
.R[PSC_CURRENT_IN] = 2,
.m[PSC_CURRENT_OUT] = 1,
.R[PSC_CURRENT_IN] = -1,
.m[PSC_CURRENT_OUT] = 250,
.b[PSC_CURRENT_OUT] = 0,
.R[PSC_CURRENT_OUT] = 2,
.R[PSC_CURRENT_OUT] = -1,
.m[PSC_TEMPERATURE] = 1,
.b[PSC_TEMPERATURE] = 0,
.R[PSC_TEMPERATURE] = 2,
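For reference, PMBus 'direct' format readings decode as X = (Y * 10^-R - b) / m, so with m = 250, b = 0 and R = -1 a raw word of 2500 decodes to (2500 * 10) / 250 = 100 in the sensor's base unit (example numbers only).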

View File

@@ -291,24 +291,26 @@ out:
return data;
}
static int temp1_input_read(struct device *dev)
static int temp1_input_read(struct device *dev, long *temp)
{
struct sht3x_data *data = sht3x_update_client(dev);
if (IS_ERR(data))
return PTR_ERR(data);
return data->temperature;
*temp = data->temperature;
return 0;
}
static int humidity1_input_read(struct device *dev)
static int humidity1_input_read(struct device *dev, long *humidity)
{
struct sht3x_data *data = sht3x_update_client(dev);
if (IS_ERR(data))
return PTR_ERR(data);
return data->humidity;
*humidity = data->humidity;
return 0;
}
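Returning the measurement through an out-parameter keeps error codes out of the reported value: the old helpers folded PTR_ERR() results into the returned reading, so an I/O error could end up exposed as a sensor value. A caller now looks roughly like this (illustrative only):

	long temp;
	int ret = temp1_input_read(dev, &temp);

	if (ret)
		return ret;	/* propagate -EIO and friends instead of storing them */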
/*
@@ -706,6 +708,7 @@ static int sht3x_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
enum sht3x_limits index;
int ret;
switch (type) {
case hwmon_chip:
@@ -720,10 +723,12 @@ static int sht3x_read(struct device *dev, enum hwmon_sensor_types type,
case hwmon_temp:
switch (attr) {
case hwmon_temp_input:
*val = temp1_input_read(dev);
break;
return temp1_input_read(dev, val);
case hwmon_temp_alarm:
*val = temp1_alarm_read(dev);
ret = temp1_alarm_read(dev);
if (ret < 0)
return ret;
*val = ret;
break;
case hwmon_temp_max:
index = limit_max;
@@ -748,10 +753,12 @@ static int sht3x_read(struct device *dev, enum hwmon_sensor_types type,
case hwmon_humidity:
switch (attr) {
case hwmon_humidity_input:
*val = humidity1_input_read(dev);
break;
return humidity1_input_read(dev, val);
case hwmon_humidity_alarm:
*val = humidity1_alarm_read(dev);
ret = humidity1_alarm_read(dev);
if (ret < 0)
return ret;
*val = ret;
break;
case hwmon_humidity_max:
index = limit_max;

View File

@@ -2,9 +2,11 @@
config AMD_SBRMI_I2C
tristate "AMD side band RMI support"
depends on I2C
depends on ARM || ARM64 || COMPILE_TEST
select REGMAP_I2C
help
Side band RMI over I2C support for AMD out of band management.
This driver is intended to run on the BMC, not the managed node.
This driver can also be built as a module. If so, the module will
be called sbrmi-i2c.

View File

@@ -381,6 +381,8 @@ static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd,
}
spin_unlock(&fl->lock);
dma_buf_put(buf);
return ret;
}

View File

@@ -120,6 +120,8 @@
#define MEI_DEV_ID_PTL_H 0xE370 /* Panther Lake H */
#define MEI_DEV_ID_PTL_P 0xE470 /* Panther Lake P */
#define MEI_DEV_ID_WCL_P 0x4D70 /* Wildcat Lake P */
/*
* MEI HW Section
*/

View File

@@ -134,8 +134,7 @@ static bool mei_lb_check_response(const struct device *dev, ssize_t bytes,
return true;
}
static int mei_lb_push_payload(struct device *dev,
enum intel_lb_type type, u32 flags,
static int mei_lb_push_payload(struct device *dev, u32 type, u32 flags,
const void *payload, size_t payload_size)
{
struct mei_cl_device *cldev;

View File

@@ -127,6 +127,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_PTL_H, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_PTL_P, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_WCL_P, MEI_ME_PCH15_CFG)},
/* required last entry */
{0, }
};

View File

@@ -109,19 +109,19 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto end;
}
err = mei_register(dev, &pdev->dev);
if (err)
goto release_irq;
if (mei_start(dev)) {
dev_err(&pdev->dev, "init hw failure.\n");
err = -ENODEV;
goto release_irq;
goto deregister;
}
pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
err = mei_register(dev, &pdev->dev);
if (err)
goto stop;
pci_set_drvdata(pdev, dev);
/*
@@ -144,8 +144,8 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
stop:
mei_stop(dev);
deregister:
mei_deregister(dev);
release_irq:
mei_cancel_work(dev);
mei_disable_interrupts(dev);

View File

@@ -1737,7 +1737,7 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
{
unsigned long status, flags;
struct vmballoon *b;
int ret;
int ret = 0;
b = container_of(b_dev_info, struct vmballoon, b_dev_info);
@@ -1796,17 +1796,15 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
* A failure happened. While we can deflate the page we just
* inflated, this deflation can also encounter an error. Instead
* we will decrease the size of the balloon to reflect the
* change and report failure.
* change.
*/
atomic64_dec(&b->size);
ret = -EBUSY;
} else {
/*
* Success. Take a reference for the page, and we will add it to
* the list after acquiring the lock.
*/
get_page(newpage);
ret = 0;
}
/* Update the balloon list under the @pages_lock */
@@ -1817,7 +1815,7 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
* If we succeed just insert it to the list and update the statistics
* under the lock.
*/
if (!ret) {
if (status == VMW_BALLOON_SUCCESS) {
balloon_page_insert(&b->b_dev_info, newpage);
__count_vm_event(BALLOON_MIGRATE);
}

Some files were not shown because too many files have changed in this diff.