Mirror of https://github.com/torvalds/linux.git (synced 2025-12-07 11:56:58 +00:00)

Compare commits: 416f99c3b1...5e5ea7f616 (246 commits)

@@ -621,3 +621,84 @@ Description:
		number extended capability. The file is read only and due to
		the possible sensitivity of accessible serial numbers, admin
		only.

What:		/sys/bus/pci/devices/.../tsm/
Contact:	linux-coco@lists.linux.dev
Description:
		This directory only appears if a physical device function
		supports authentication (PCIe CMA-SPDM), interface security
		(PCIe TDISP), and is accepted for secure operation by the
		platform TSM driver. The attribute directory appears
		dynamically after the platform TSM driver loads, so only after
		the /sys/class/tsm/tsm0 device arrives can tools assume that
		devices without a tsm/ attribute directory will never have one;
		before that, the security capabilities of the device relative
		to the platform TSM are unknown. See
		Documentation/ABI/testing/sysfs-class-tsm.

What:		/sys/bus/pci/devices/.../tsm/connect
Contact:	linux-coco@lists.linux.dev
Description:
		(RW) Write the name of a TSM (TEE Security Manager) device from
		/sys/class/tsm to this file to establish a connection with the
		device. This typically includes an SPDM (DMTF Security
		Protocols and Data Models) session over PCIe DOE (Data Object
		Exchange) and may also include PCIe IDE (Integrity and Data
		Encryption) establishment. Reads from this attribute return the
		name of the connected TSM, or the empty string if not
		connected. A TSM device signals its readiness to accept PCI
		connections via a KOBJ_CHANGE event.

What:		/sys/bus/pci/devices/.../tsm/disconnect
Contact:	linux-coco@lists.linux.dev
Description:
		(WO) Write the name of the TSM device that was specified to
		'connect' to tear down the connection.

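A minimal shell session sketching the connect/disconnect flow described
above (the device address 0000:3a:00.0 and the TSM name tsm0 are
hypothetical placeholders, not values taken from this document)::

    # establish a connection through the platform TSM
    echo tsm0 > /sys/bus/pci/devices/0000:3a:00.0/tsm/connect

    # verify: reads back the connected TSM name, or "" if not connected
    cat /sys/bus/pci/devices/0000:3a:00.0/tsm/connect

    # tear the connection down again
    echo tsm0 > /sys/bus/pci/devices/0000:3a:00.0/tsm/disconnect
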
What:		/sys/bus/pci/devices/.../tsm/dsm
Contact:	linux-coco@lists.linux.dev
Description:	(RO) Return the PCI device name of this device's DSM (Device
		Security Manager). When a device is in the connected state,
		this indicates that the platform TSM (TEE Security Manager)
		has made a secure-session connection with the device's DSM. A
		DSM is always physical function 0, and when the device
		supports TDISP (TEE Device Interface Security Protocol) its
		managed functions also populate this tsm/dsm attribute. The
		managed functions of a DSM are SR-IOV (Single Root I/O
		Virtualization) virtual functions, non-zero functions of a
		multi-function device, or downstream endpoints, depending on
		whether the DSM is an SR-IOV physical function, function 0 of
		a multi-function device, or an upstream PCIe switch port. This
		is a "link" TSM attribute, see
		Documentation/ABI/testing/sysfs-class-tsm.

What:		/sys/bus/pci/devices/.../tsm/bound
Contact:	linux-coco@lists.linux.dev
Description:	(RO) Return the device name of the TSM when the device is in a
		TDISP (TEE Device Interface Security Protocol) operational
		state (LOCKED, RUN, or ERROR, but not UNLOCKED). Bound devices
		consume platform TSM resources and depend on the device's
		configuration (e.g. BME (Bus Master Enable) and MSE (Memory
		Space Enable), among other settings) remaining stable for the
		duration of the bound state. This attribute is only visible
		for devices that support TDISP operation, and it is only
		populated after a successful connect and TSM bind. The TSM
		bind operation is initiated by VFIO/IOMMUFD. This is a "link"
		TSM attribute, see
		Documentation/ABI/testing/sysfs-class-tsm.

What:		/sys/bus/pci/devices/.../authenticated
Contact:	linux-pci@vger.kernel.org
Description:
		When the device's tsm/ directory is present, device
		authentication (PCIe CMA-SPDM) and link encryption (PCIe IDE)
		are handled by the platform TSM (TEE Security Manager). When
		the tsm/ directory is not present, this attribute reflects
		only the native CMA-SPDM authentication state with the
		kernel's certificate store.

		If the attribute is not present, either authentication is
		unsupported by the device or the TSM has no available
		authentication methods for the device.

		When both this attribute and the tsm/ attribute directory are
		present, the authenticated attribute is an alias for the
		device's 'connect' state. See the 'tsm/connect' attribute for
		more details.

Documentation/ABI/testing/sysfs-class-tsm (new file, 19 lines)
@@ -0,0 +1,19 @@
What:		/sys/class/tsm/tsmN
Contact:	linux-coco@lists.linux.dev
Description:
		"tsmN" is a device that represents the generic attributes of a
		platform TEE Security Manager. It is typically a child of a
		platform-enumerated TSM device. /sys/class/tsm/tsmN/uevent
		signals when the PCI layer is able to support establishment of
		link encryption and other device-security features coordinated
		through a platform TSM.

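Since per-device tsm/ directories only appear after this class device
arrives, tooling may want to wait for it. A sketch using udevadm,
assuming the class device announces itself via the uevent described
above::

    # watch for a tsm class device announcing itself
    udevadm monitor --kernel --subsystem-match=tsm
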
What:		/sys/class/tsm/tsmN/streamH.R.E
Contact:	linux-pci@vger.kernel.org
Description:
		(RO) This symlink appears when a host bridge has established a
		secure connection via the platform TSM. Its primary purpose is
		to provide a system-global view of TSM resource consumption
		across host bridges. The link points to the endpoint PCI
		device and matches the same link published by the host bridge.
		See Documentation/ABI/testing/sysfs-devices-pci-host-bridge.

Documentation/ABI/testing/sysfs-devices-pci-host-bridge (new file, 45 lines)
@@ -0,0 +1,45 @@
What:		/sys/devices/pciDDDD:BB
		/sys/devices/.../pciDDDD:BB
Contact:	linux-pci@vger.kernel.org
Description:
		A PCI host bridge device parents a PCI bus device topology.
		PCI controllers may also parent host bridges. The DDDD:BB
		format conveys the PCI domain (ACPI segment) number and root
		bus number (in hexadecimal) of the host bridge. Note that for
		emulated host bridges the domain number may be larger than the
		16 bits that the "DDDD" format implies.

What:		pciDDDD:BB/firmware_node
Contact:	linux-pci@vger.kernel.org
Description:
		(RO) Symlink to the platform firmware device object
		"companion" of the host bridge. For example, an ACPI device
		with an _HID of PNP0A08
		(/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00). See the
		/sys/devices/pciDDDD:BB entry for details about the DDDD:BB
		format.

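For illustration, resolving a host bridge's firmware companion from the
shell (the pci0000:00 instance name is a hypothetical example)::

    readlink -f /sys/devices/pci0000:00/firmware_node
    # e.g. /sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00
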
What:		pciDDDD:BB/streamH.R.E
Contact:	linux-pci@vger.kernel.org
Description:
		(RO) When a platform has established a secure connection (PCIe
		IDE) between two Partner Ports, this symlink appears. A stream
		consumes a Stream ID slot in each of the Host bridge (H), Root
		Port (R), and Endpoint (E). The link points to the Endpoint
		PCI device in the Selective IDE Stream pairing. Specifically,
		"R" and "E" represent the assigned Selective IDE Stream
		Register Block in the Root Port and Endpoint, and "H"
		represents a platform-specific pool of stream resources shared
		by the Root Ports in a host bridge. See the
		/sys/devices/pciDDDD:BB entry for details about the DDDD:BB
		format.

What:		pciDDDD:BB/available_secure_streams
Contact:	linux-pci@vger.kernel.org
Description:
		(RO) When a host bridge has Root Ports that support PCIe IDE
		(link encryption and integrity protection), there may be a
		limited number of Selective IDE Streams that can be used for
		establishing new end-to-end secure links. This attribute
		decrements upon secure link setup, and increments upon secure
		link teardown. The in-use stream count is determined by
		counting stream symlinks. See the /sys/devices/pciDDDD:BB
		entry for details about the DDDD:BB format.

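A sketch of auditing stream usage for one host bridge from the shell
(pci0000:00 is again a hypothetical instance name)::

    # streams still available for new secure links
    cat /sys/devices/pci0000:00/available_secure_streams

    # in-use streams: one symlink per established Selective IDE Stream
    ls -l /sys/devices/pci0000:00/stream* 2>/dev/null | wc -l
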
@@ -57,8 +57,7 @@ properties:
       - prstb
       - intb-only
 
-  timeout-sec:
-    maxItems: 2
+  timeout-sec: true
 
   regulators:
     $ref: /schemas/regulator/rohm,bd96801-regulator.yaml

@@ -72,7 +71,10 @@ required:
   - interrupt-names
   - regulators
 
-additionalProperties: false
+allOf:
+  - $ref: /schemas/watchdog/watchdog.yaml
+
+unevaluatedProperties: false
 
 examples:
   - |

@@ -24,6 +24,7 @@ properties:
       - qcom,msm8998-adsp-pas
       - qcom,msm8998-slpi-pas
       - qcom,sdm660-adsp-pas
+      - qcom,sdm660-cdsp-pas
       - qcom,sdm845-adsp-pas
       - qcom,sdm845-cdsp-pas
       - qcom,sdm845-slpi-pas

@@ -31,9 +32,6 @@ properties:
   reg:
     maxItems: 1
 
-  cx-supply:
-    description: Phandle to the CX regulator
-
   px-supply:
     description: Phandle to the PX regulator
 

@@ -69,6 +67,8 @@ allOf:
           - qcom,msm8996-slpi-pil
           - qcom,msm8998-adsp-pas
           - qcom,msm8998-slpi-pas
+          - qcom,sdm660-adsp-pas
+          - qcom,sdm660-cdsp-pas
           - qcom,sdm845-adsp-pas
           - qcom,sdm845-cdsp-pas
           - qcom,sdm845-slpi-pas

@@ -93,6 +93,8 @@ allOf:
           - qcom,msm8996-slpi-pil
           - qcom,msm8998-adsp-pas
           - qcom,msm8998-slpi-pas
+          - qcom,sdm660-adsp-pas
+          - qcom,sdm660-cdsp-pas
           - qcom,sdm845-adsp-pas
           - qcom,sdm845-cdsp-pas
           - qcom,sdm845-slpi-pas

@@ -103,16 +105,6 @@ allOf:
       interrupt-names:
         maxItems: 5
 
-  - if:
-      properties:
-        compatible:
-          contains:
-            enum:
-              - qcom,msm8974-adsp-pil
-    then:
-      required:
-        - cx-supply
-
   - if:
       properties:
         compatible:

@@ -120,8 +112,11 @@ allOf:
             enum:
               - qcom,msm8226-adsp-pil
               - qcom,msm8953-adsp-pil
+              - qcom,msm8974-adsp-pil
               - qcom,msm8996-adsp-pil
               - qcom,msm8998-adsp-pas
+              - qcom,sdm660-adsp-pas
+              - qcom,sdm660-cdsp-pas
     then:
       properties:
         power-domains:

@@ -178,6 +173,7 @@ allOf:
               - qcom,msm8998-adsp-pas
               - qcom,msm8998-slpi-pas
               - qcom,sdm660-adsp-pas
+              - qcom,sdm660-cdsp-pas
     then:
       properties:
         qcom,qmp: false

@@ -187,6 +183,7 @@ examples:
     #include <dt-bindings/clock/qcom,rpmcc.h>
     #include <dt-bindings/interrupt-controller/arm-gic.h>
     #include <dt-bindings/interrupt-controller/irq.h>
+    #include <dt-bindings/power/qcom-rpmpd.h>
 
     adsp {
         compatible = "qcom,msm8974-adsp-pil";

@@ -204,7 +201,8 @@ examples:
         clocks = <&rpmcc RPM_CXO_CLK>;
         clock-names = "xo";
 
-        cx-supply = <&pm8841_s2>;
+        power-domains = <&rpmpd MSM8974_VDDCX>;
+        power-domain-names = "cx";
 
         memory-region = <&adsp_region>;

@@ -91,9 +91,13 @@ allOf:
         power-domains:
           items:
             - description: NSP power domain
             - description: CX power domain
             - description: MXC power domain
         power-domain-names:
           items:
             - const: nsp
             - const: cx
             - const: mxc
 
 unevaluatedProperties: false

@@ -14,7 +14,11 @@ allOf:
 
 properties:
   compatible:
-    const: airoha,en7581-wdt
+    oneOf:
+      - items:
+          - const: airoha,an7583-wdt
+          - const: airoha,en7581-wdt
+      - const: airoha,en7581-wdt
 
   reg:
     maxItems: 1

@@ -15,6 +15,7 @@ properties:
       - aspeed,ast2400-wdt
       - aspeed,ast2500-wdt
       - aspeed,ast2600-wdt
+      - aspeed,ast2700-wdt
 
   reg:
     maxItems: 1

@@ -87,13 +88,15 @@ properties:
   aspeed,reset-mask:
     $ref: /schemas/types.yaml#/definitions/uint32-array
     minItems: 1
-    maxItems: 2
+    maxItems: 5
     description: >
       A bitmask indicating which peripherals will be reset if the watchdog
       timer expires. On AST2500 SoCs this should be a single word defined using
       the AST2500_WDT_RESET_* macros; on AST2600 SoCs this should be a two-word
       array with the first word defined using the AST2600_WDT_RESET1_* macros,
-      and the second word defined using the AST2600_WDT_RESET2_* macros.
+      and the second word defined using the AST2600_WDT_RESET2_* macros; on
+      AST2700 SoCs this should be a five-word array, with the words defined
+      using the AST2700_WDT_RESET1_* through AST2700_WDT_RESET5_* macros.
 
 required:
   - compatible

@@ -114,6 +117,7 @@ allOf:
         enum:
           - aspeed,ast2500-wdt
           - aspeed,ast2600-wdt
+          - aspeed,ast2700-wdt
   - if:
       required:
         - aspeed,ext-active-high

Documentation/devicetree/bindings/watchdog/lantiq,wdt.yaml (new file, 57 lines)
@@ -0,0 +1,57 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/watchdog/lantiq,wdt.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Lantiq WDT watchdog

maintainers:
  - Hauke Mehrtens <hauke@hauke-m.de>

properties:
  compatible:
    oneOf:
      - enum:
          - lantiq,falcon-wdt
          - lantiq,wdt
          - lantiq,xrx100-wdt
      - items:
          - enum:
              - lantiq,xrx200-wdt
          - const: lantiq,xrx100-wdt

  reg:
    maxItems: 1

  lantiq,rcu:
    $ref: /schemas/types.yaml#/definitions/phandle
    description: Phandle to the RCU syscon node

required:
  - compatible
  - reg

allOf:
  - $ref: watchdog.yaml#
  - if:
      properties:
        compatible:
          contains:
            enum:
              - lantiq,xrx100-wdt
              - lantiq,falcon-wdt
    then:
      required:
        - lantiq,rcu

unevaluatedProperties: false

examples:
  - |
    watchdog@803f0 {
        compatible = "lantiq,xrx200-wdt", "lantiq,xrx100-wdt";
        reg = <0x803f0 0x10>;

        lantiq,rcu = <&rcu0>;
    };

@@ -1,24 +0,0 @@
Lantiq WDT watchdog binding
============================

This describes the binding of the Lantiq watchdog driver.

-------------------------------------------------------------------------------
Required properties:
- compatible	: Should be one of
			"lantiq,wdt"
			"lantiq,xrx100-wdt"
			"lantiq,xrx200-wdt", "lantiq,xrx100-wdt"
			"lantiq,falcon-wdt"
- reg		: Address of the watchdog block
- lantiq,rcu	: A phandle to the RCU syscon (required for
		  "lantiq,falcon-wdt" and "lantiq,xrx100-wdt")

-------------------------------------------------------------------------------
Example for the watchdog on the xRX200 SoCs:
	watchdog@803f0 {
		compatible = "lantiq,xrx200-wdt", "lantiq,xrx100-wdt";
		reg = <0x803f0 0x10>;

		lantiq,rcu = <&rcu0>;
	};

@@ -4,7 +4,7 @@
 $id: http://devicetree.org/schemas/watchdog/loongson,ls1x-wdt.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
-title: Loongson-1 Watchdog Timer
+title: Loongson Watchdog Timer
 
 maintainers:
   - Keguang Zhang <keguang.zhang@gmail.com>

@@ -17,6 +17,7 @@ properties:
     enum:
       - loongson,ls1b-wdt
       - loongson,ls1c-wdt
+      - loongson,ls2k0300-wdt
 
   reg:
     maxItems: 1

@@ -1,45 +0,0 @@
* Marvell Orion Watchdog Timer

Required Properties:

- Compatibility : "marvell,orion-wdt"
		  "marvell,armada-370-wdt"
		  "marvell,armada-xp-wdt"
		  "marvell,armada-375-wdt"
		  "marvell,armada-380-wdt"

- reg		: Should contain two entries: first one with the
		  timer control address, second one with the
		  rstout enable address.

For "marvell,armada-375-wdt" and "marvell,armada-380-wdt":

- reg		: A third entry is mandatory and should contain the
		  shared mask/unmask RSTOUT address.

Clocks required for compatibles = "marvell,orion-wdt",
				  "marvell,armada-370-wdt":
- clocks	: Must contain a single entry describing the clock input

Clocks required for compatibles = "marvell,armada-xp-wdt"
				  "marvell,armada-375-wdt"
				  "marvell,armada-380-wdt":
- clocks	: Must contain an entry for each entry in clock-names.
- clock-names	: Must include the following entries:
		  "nbclk" (L2/coherency fabric clock),
		  "fixed" (Reference 25 MHz fixed-clock).

Optional properties:

- interrupts	: Contains the IRQ for watchdog expiration
- timeout-sec	: Contains the watchdog timeout in seconds

Example:

	wdt@20300 {
		compatible = "marvell,orion-wdt";
		reg = <0x20300 0x28>, <0x20108 0x4>;
		interrupts = <3>;
		timeout-sec = <10>;
		clocks = <&gate_clk 7>;
	};

@@ -0,0 +1,100 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/watchdog/marvell,orion-wdt.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Marvell Orion Watchdog Timer

maintainers:
  - Andrew Lunn <andrew@lunn.ch>
  - Gregory Clement <gregory.clement@bootlin.com>

properties:
  compatible:
    enum:
      - marvell,orion-wdt
      - marvell,armada-370-wdt
      - marvell,armada-xp-wdt
      - marvell,armada-375-wdt
      - marvell,armada-380-wdt

  reg:
    minItems: 2
    items:
      - description: Timer control register address
      - description: RSTOUT enable register address
      - description: Shared mask/unmask RSTOUT register address

  clocks:
    minItems: 1
    items:
      - description: L2/coherency fabric clock input
      - description: Reference 25 MHz fixed-clock supply

  clock-names:
    minItems: 1
    items:
      - const: nbclk
      - const: fixed

  interrupts:
    minItems: 1
    items:
      - description: timeout
      - description: pre-timeout

allOf:
  - $ref: watchdog.yaml#
  - if:
      properties:
        compatible:
          contains:
            enum:
              - marvell,armada-375-wdt
              - marvell,armada-380-wdt
    then:
      properties:
        reg:
          minItems: 3
    else:
      properties:
        reg:
          maxItems: 2

  - if:
      properties:
        compatible:
          contains:
            enum:
              - marvell,armada-xp-wdt
              - marvell,armada-375-wdt
              - marvell,armada-380-wdt
    then:
      properties:
        clocks:
          minItems: 2
        clock-names:
          minItems: 2
      required:
        - clock-names

required:
  - compatible
  - reg
  - clocks

unevaluatedProperties: false

examples:
  - |
    watchdog@20300 {
        compatible = "marvell,orion-wdt";
        reg = <0x20300 0x28>, <0x20108 0x4>;
        interrupts = <3>;
        timeout-sec = <10>;
        clocks = <&gate_clk 7>;
    };

@@ -41,6 +41,8 @@ properties:
           - mediatek,mt7623-wdt
           - mediatek,mt7629-wdt
           - mediatek,mt8173-wdt
+          - mediatek,mt8188-wdt
+          - mediatek,mt8189-wdt
           - mediatek,mt8365-wdt
           - mediatek,mt8516-wdt
       - const: mediatek,mt6589-wdt

@@ -1,15 +0,0 @@
TI Watchdog Timer (WDT) Controller for OMAP

Required properties:
- compatible	: "ti,omap3-wdt" for OMAP3 or "ti,omap4-wdt" for OMAP4
- ti,hwmods	: Name of the hwmod associated to the WDT

Optional properties:
- timeout-sec	: default watchdog timeout in seconds

Examples:

	wdt2: wdt@4a314000 {
		compatible = "ti,omap4-wdt", "ti,omap3-wdt";
		ti,hwmods = "wd_timer2";
	};

@@ -22,6 +22,7 @@ properties:
       - qcom,apss-wdt-ipq5332
       - qcom,apss-wdt-ipq5424
       - qcom,apss-wdt-ipq9574
+      - qcom,apss-wdt-kaanapali
       - qcom,apss-wdt-msm8226
       - qcom,apss-wdt-msm8974
       - qcom,apss-wdt-msm8994

@@ -0,0 +1,99 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/watchdog/renesas,r9a09g057-wdt.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Renesas RZ/V2H(P) Watchdog Timer (WDT) Controller

maintainers:
  - Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>

properties:
  compatible:
    oneOf:
      - items:
          - enum:
              - renesas,r9a09g047-wdt # RZ/G3E
              - renesas,r9a09g056-wdt # RZ/V2N
          - const: renesas,r9a09g057-wdt # RZ/V2H(P)

      - items:
          - const: renesas,r9a09g087-wdt # RZ/N2H
          - const: renesas,r9a09g077-wdt # RZ/T2H

      - enum:
          - renesas,r9a09g057-wdt # RZ/V2H(P)
          - renesas,r9a09g077-wdt # RZ/T2H

  reg:
    minItems: 1
    maxItems: 2

  clocks:
    minItems: 1
    items:
      - description: Register access clock
      - description: Main clock

  clock-names:
    minItems: 1
    items:
      - const: pclk
      - const: oscclk

  power-domains:
    maxItems: 1

  resets:
    maxItems: 1

  timeout-sec: true

required:
  - compatible
  - reg
  - clocks
  - clock-names
  - power-domains

allOf:
  - $ref: watchdog.yaml#

  - if:
      properties:
        compatible:
          contains:
            const: renesas,r9a09g057-wdt
    then:
      properties:
        reg:
          maxItems: 1
        clocks:
          minItems: 2
        clock-names:
          minItems: 2
    else:
      properties:
        clocks:
          maxItems: 1
        clock-names:
          maxItems: 1
        reg:
          minItems: 2
        resets: false

additionalProperties: false

examples:
  - |
    #include <dt-bindings/clock/renesas,r9a09g057-cpg.h>

    watchdog@11c00400 {
        compatible = "renesas,r9a09g057-wdt";
        reg = <0x11c00400 0x400>;
        clocks = <&cpg CPG_MOD 0x4b>, <&cpg CPG_MOD 0x4c>;
        clock-names = "pclk", "oscclk";
        resets = <&cpg 0x75>;
        power-domains = <&cpg>;
    };

@@ -0,0 +1,114 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/watchdog/renesas,rcar-gen3-wwdt.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Renesas Window Watchdog Timer (WWDT) Controller

maintainers:
  - Wolfram Sang <wsa+renesas@sang-engineering.com>

properties:
  compatible:
    oneOf:
      - items:
          - enum:
              - renesas,r8a77970-wwdt # R-Car V3M
              - renesas,r8a77980-wwdt # R-Car V3H
          - const: renesas,rcar-gen3-wwdt

      - items:
          - enum:
              - renesas,r8a779a0-wwdt # R-Car V3U
              - renesas,r8a779f0-wwdt # R-Car S4
              - renesas,r8a779g0-wwdt # R-Car V4H
              - renesas,r8a779h0-wwdt # R-Car V4M
          - const: renesas,rcar-gen4-wwdt

  reg:
    maxItems: 1

  interrupts:
    items:
      - description: Pretimeout, 75% of overflow reached
      - description: Error occurred

  interrupt-names:
    items:
      - const: pretimeout
      - const: error

  clocks:
    items:
      - description: Counting clock
      - description: Bus clock

  clock-names:
    items:
      - const: cnt
      - const: bus

  resets:
    minItems: 1
    maxItems: 2

  reset-names:
    minItems: 1
    items:
      - const: cnt
      - const: bus

  power-domains:
    maxItems: 1

required:
  - compatible
  - reg
  - interrupts
  - interrupt-names
  - clocks
  - clock-names
  - resets
  - reset-names
  - power-domains

allOf:
  - $ref: watchdog.yaml#

  - if:
      properties:
        compatible:
          contains:
            enum:
              - renesas,r8a779a0-wwdt
              - renesas,r8a779f0-wwdt
    then:
      properties:
        resets:
          minItems: 2
        reset-names:
          minItems: 2

additionalProperties: false

examples:
  - |
    #include <dt-bindings/clock/r8a779g0-cpg-mssr.h>
    #include <dt-bindings/power/r8a779g0-sysc.h>
    #include <dt-bindings/interrupt-controller/arm-gic.h>

    watchdog@ffc90000 {
        compatible = "renesas,r8a779g0-wwdt",
                     "renesas,rcar-gen4-wwdt";
        reg = <0xffc90000 0x10>;
        interrupts = <GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH>;
        interrupt-names = "pretimeout", "error";
        clocks = <&cpg CPG_CORE R8A779G0_CLK_R>,
                 <&cpg CPG_CORE R8A779G0_CLK_SASYNCRT>;
        clock-names = "cnt", "bus";
        power-domains = <&sysc R8A779G0_PD_ALWAYS_ON>;
        resets = <&cpg 1200>;
        reset-names = "cnt";
    };

@@ -0,0 +1,51 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/watchdog/renesas,rza-wdt.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Renesas RZ/A Watchdog Timer (WDT) Controller

maintainers:
  - Wolfram Sang <wsa+renesas@sang-engineering.com>

properties:
  compatible:
    items:
      - enum:
          - renesas,r7s72100-wdt # RZ/A1
          - renesas,r7s9210-wdt # RZ/A2
      - const: renesas,rza-wdt # RZ/A

  reg:
    maxItems: 1

  interrupts:
    maxItems: 1

  clocks:
    maxItems: 1

  timeout-sec: true

required:
  - compatible
  - reg
  - clocks

allOf:
  - $ref: watchdog.yaml#

additionalProperties: false

examples:
  - |
    #include <dt-bindings/clock/r7s72100-clock.h>
    #include <dt-bindings/interrupt-controller/arm-gic.h>

    watchdog@fcfe0000 {
        compatible = "renesas,r7s72100-wdt", "renesas,rza-wdt";
        reg = <0xfcfe0000 0x6>;
        interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&p0_clk>;
    };

@@ -0,0 +1,111 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/watchdog/renesas,rzg2l-wdt.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Renesas RZ/G2L Watchdog Timer (WDT) Controller

maintainers:
  - Biju Das <biju.das.jz@bp.renesas.com>

properties:
  compatible:
    oneOf:
      - items:
          - enum:
              - renesas,r9a07g043-wdt # RZ/G2UL and RZ/Five
              - renesas,r9a07g044-wdt # RZ/G2{L,LC}
              - renesas,r9a07g054-wdt # RZ/V2L
              - renesas,r9a08g045-wdt # RZ/G3S
          - const: renesas,rzg2l-wdt

      - items:
          - const: renesas,r9a09g011-wdt # RZ/V2M
          - const: renesas,rzv2m-wdt # RZ/V2M

  reg:
    maxItems: 1

  interrupts:
    minItems: 1
    items:
      - description: Timeout
      - description: Parity error

  interrupt-names:
    minItems: 1
    items:
      - const: wdt
      - const: perrout

  clocks:
    items:
      - description: Register access clock
      - description: Main clock

  clock-names:
    items:
      - const: pclk
      - const: oscclk

  power-domains:
    maxItems: 1

  resets:
    maxItems: 1

  timeout-sec: true

required:
  - compatible
  - reg
  - interrupts
  - clocks
  - clock-names
  - power-domains
  - resets

allOf:
  - $ref: watchdog.yaml#

  - if:
      properties:
        compatible:
          contains:
            const: renesas,rzg2l-wdt
    then:
      properties:
        interrupts:
          minItems: 2
        interrupt-names:
          minItems: 2
      required:
        - interrupt-names
    else:
      properties:
        interrupts:
          maxItems: 1
        interrupt-names:
          maxItems: 1

additionalProperties: false

examples:
  - |
    #include <dt-bindings/clock/r9a07g044-cpg.h>
    #include <dt-bindings/interrupt-controller/arm-gic.h>

    watchdog@12800800 {
        compatible = "renesas,r9a07g044-wdt",
                     "renesas,rzg2l-wdt";
        reg = <0x12800800 0x400>;
        clocks = <&cpg CPG_MOD R9A07G044_WDT0_PCLK>,
                 <&cpg CPG_MOD R9A07G044_WDT0_CLK>;
        clock-names = "pclk", "oscclk";
        interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
        interrupt-names = "wdt", "perrout";
        resets = <&cpg R9A07G044_WDT0_PRESETN>;
        power-domains = <&cpg>;
    };

@@ -0,0 +1,50 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/watchdog/renesas,rzn1-wdt.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Renesas RZ/N1 Watchdog Timer (WDT) Controller

maintainers:
  - Wolfram Sang <wsa+renesas@sang-engineering.com>

properties:
  compatible:
    items:
      - const: renesas,r9a06g032-wdt # RZ/N1D
      - const: renesas,rzn1-wdt # RZ/N1

  reg:
    maxItems: 1

  interrupts:
    maxItems: 1

  clocks:
    maxItems: 1

  timeout-sec: true

required:
  - compatible
  - reg
  - interrupts
  - clocks

allOf:
  - $ref: watchdog.yaml#

additionalProperties: false

examples:
  - |
    #include <dt-bindings/clock/r9a06g032-sysctrl.h>
    #include <dt-bindings/interrupt-controller/arm-gic.h>

    watchdog@40008000 {
        compatible = "renesas,r9a06g032-wdt", "renesas,rzn1-wdt";
        reg = <0x40008000 0x1000>;
        interrupts = <GIC_SPI 73 IRQ_TYPE_EDGE_RISING>;
        clocks = <&sysctrl R9A06G032_CLK_WATCHDOG>;
    };

@@ -13,30 +13,6 @@ maintainers:
 properties:
   compatible:
     oneOf:
-      - items:
-          - enum:
-              - renesas,r7s72100-wdt # RZ/A1
-              - renesas,r7s9210-wdt # RZ/A2
-          - const: renesas,rza-wdt # RZ/A
-
-      - items:
-          - enum:
-              - renesas,r9a06g032-wdt # RZ/N1D
-          - const: renesas,rzn1-wdt # RZ/N1
-
-      - items:
-          - enum:
-              - renesas,r9a07g043-wdt # RZ/G2UL and RZ/Five
-              - renesas,r9a07g044-wdt # RZ/G2{L,LC}
-              - renesas,r9a07g054-wdt # RZ/V2L
-              - renesas,r9a08g045-wdt # RZ/G3S
-          - const: renesas,rzg2l-wdt
-
-      - items:
-          - enum:
-              - renesas,r9a09g011-wdt # RZ/V2M
-          - const: renesas,rzv2m-wdt # RZ/V2M
-
       - items:
           - enum:
               - renesas,r8a7742-wdt # RZ/G1H

@@ -75,47 +51,14 @@ properties:
               - renesas,r8a779h0-wdt # R-Car V4M
           - const: renesas,rcar-gen4-wdt # R-Car Gen4
 
-      - items:
-          - enum:
-              - renesas,r9a09g047-wdt # RZ/G3E
-              - renesas,r9a09g056-wdt # RZ/V2N
-          - const: renesas,r9a09g057-wdt # RZ/V2H(P)
-
-      - enum:
-          - renesas,r9a09g057-wdt # RZ/V2H(P)
-          - renesas,r9a09g077-wdt # RZ/T2H
-
-      - items:
-          - const: renesas,r9a09g087-wdt # RZ/N2H
-          - const: renesas,r9a09g077-wdt # RZ/T2H
-
   reg:
-    minItems: 1
-    maxItems: 2
+    maxItems: 1
 
   interrupts:
-    minItems: 1
-    items:
-      - description: Timeout
-      - description: Parity error
-
-  interrupt-names:
-    minItems: 1
-    items:
-      - const: wdt
-      - const: perrout
+    maxItems: 1
 
   clocks:
-    minItems: 1
-    items:
-      - description: Register access clock
-      - description: Main clock
-
-  clock-names:
-    minItems: 1
-    items:
-      - const: pclk
-      - const: oscclk
+    maxItems: 1
 
   power-domains:
     maxItems: 1

@@ -129,6 +72,8 @@ required:
   - compatible
   - reg
   - clocks
+  - interrupts
+  - power-domains
 
 allOf:
   - $ref: watchdog.yaml#

@@ -138,90 +83,11 @@ allOf:
       properties:
         compatible:
           contains:
-            enum:
-              - renesas,r9a09g077-wdt
-              - renesas,rza-wdt
-              - renesas,rzn1-wdt
+            const: renesas,r8a77980-wdt
     then:
       required:
         - power-domains
         - resets
 
-  - if:
-      properties:
-        compatible:
-          contains:
-            enum:
-              - renesas,r9a09g057-wdt
-              - renesas,rzg2l-wdt
-              - renesas,rzv2m-wdt
-    then:
-      properties:
-        clocks:
-          minItems: 2
-        clock-names:
-          minItems: 2
-      required:
-        - clock-names
-    else:
-      properties:
-        clocks:
-          maxItems: 1
-
-  - if:
-      properties:
-        compatible:
-          contains:
-            enum:
-              - renesas,rzg2l-wdt
-    then:
-      properties:
-        interrupts:
-          minItems: 2
-        interrupt-names:
-          minItems: 2
-      required:
-        - interrupt-names
-    else:
-      properties:
-        interrupts:
-          maxItems: 1
-
-  - if:
-      properties:
-        compatible:
-          contains:
-            enum:
-              - renesas,r9a09g057-wdt
-              - renesas,r9a09g077-wdt
-    then:
-      properties:
-        interrupts: false
-        interrupt-names: false
-    else:
-      required:
-        - interrupts
-
-  - if:
-      properties:
-        compatible:
-          contains:
-            const: renesas,r9a09g077-wdt
-    then:
-      properties:
-        resets: false
-        clock-names:
-          maxItems: 1
-        reg:
-          minItems: 2
-      required:
-        - clock-names
-        - power-domains
-    else:
-      properties:
-        reg:
-          maxItems: 1
 
 additionalProperties: false
 
 examples:

@@ -28,6 +28,7 @@ properties:
       - rockchip,rk3328-wdt
       - rockchip,rk3368-wdt
       - rockchip,rk3399-wdt
+      - rockchip,rk3506-wdt
       - rockchip,rk3562-wdt
       - rockchip,rk3568-wdt
       - rockchip,rk3576-wdt

Documentation/devicetree/bindings/watchdog/ti,omap2-wdt.yaml (new file, 51 lines)
@@ -0,0 +1,51 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/watchdog/ti,omap2-wdt.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: TI OMAP Watchdog Timer Controller

maintainers:
  - Aaro Koskinen <aaro.koskinen@iki.fi>

allOf:
  - $ref: watchdog.yaml#

properties:
  compatible:
    oneOf:
      - enum:
          - ti,omap2-wdt
          - ti,omap3-wdt
      - items:
          - enum:
              - ti,am4372-wdt
              - ti,omap4-wdt
              - ti,omap5-wdt
          - const: ti,omap3-wdt

  reg:
    maxItems: 1

  interrupts:
    maxItems: 1

  ti,hwmods:
    description: Name of the hardware module associated with the watchdog.
    $ref: /schemas/types.yaml#/definitions/string
    deprecated: true

required:
  - compatible
  - reg

unevaluatedProperties: false

examples:
  - |
    watchdog@48314000 {
        compatible = "ti,omap3-wdt";
        reg = <0x48314000 0x80>;
        ti,hwmods = "wd_timer2";
    };

@@ -21,9 +21,10 @@ select:
 
 properties:
   $nodename:
-    pattern: "^(timer|watchdog)(@.*|-([0-9]|[1-9][0-9]+))?$"
+    pattern: "^(pmic|timer|watchdog)(@.*|-([0-9]|[1-9][0-9]+))?$"
 
   timeout-sec:
     maxItems: 1
     description:
       Contains the watchdog timeout in seconds.

@@ -83,7 +83,7 @@ flags, and the remaining form the internal block number.
 ======== =============================================================
 Bit      Description
 ======== =============================================================
-31 - 30  Error and Zero flags - Used in the following way::
+31 - 30  Error and Zero flags - Used in the following way:
 
          == == ====================================================
          31 30 Description

@@ -10,6 +10,7 @@ The Linux PCI driver implementer's API guide
 
    pci
    p2pdma
+   tsm
 
 .. only:: subproject and html

Documentation/driver-api/pci/tsm.rst (new file, 21 lines)
@@ -0,0 +1,21 @@
.. SPDX-License-Identifier: GPL-2.0
.. include:: <isonum.txt>

========================================================
PCI Trusted Execution Environment Security Manager (TSM)
========================================================

Subsystem Interfaces
====================

.. kernel-doc:: include/linux/pci-ide.h
   :internal:

.. kernel-doc:: drivers/pci/ide.c
   :export:

.. kernel-doc:: include/linux/pci-tsm.h
   :internal:

.. kernel-doc:: drivers/pci/tsm.c
   :export:

@@ -13,5 +13,6 @@ NFS
    rpc-cache
    rpc-server-gss
    nfs41-server
+   nfsd-io-modes
    knfsd-stats
    reexport

Documentation/filesystems/nfs/nfsd-io-modes.rst (new file, 153 lines)
@@ -0,0 +1,153 @@
.. SPDX-License-Identifier: GPL-2.0

=============
NFSD IO MODES
=============

Overview
========

NFSD has historically always used buffered IO when servicing READ and
WRITE operations. BUFFERED is NFSD's default IO mode, but it is possible
to override that default to use either the DONTCACHE or DIRECT IO modes.

Experimental NFSD debugfs interfaces are available to allow the NFSD IO
mode used for READ and WRITE to be configured independently. See both:

- /sys/kernel/debug/nfsd/io_cache_read
- /sys/kernel/debug/nfsd/io_cache_write

The default value for both io_cache_read and io_cache_write reflects
NFSD's default IO mode (which is NFSD_IO_BUFFERED=0).

Based on the configured settings, NFSD's IO will either be:

- cached using the page cache (NFSD_IO_BUFFERED=0)
- cached but removed from the page cache on completion (NFSD_IO_DONTCACHE=1)
- not cached, stable_how=NFS_UNSTABLE (NFSD_IO_DIRECT=2)

To set an NFSD IO mode, write a supported value (0 - 2) to the
corresponding IO operation's debugfs interface, e.g.::

  echo 2 > /sys/kernel/debug/nfsd/io_cache_read
  echo 2 > /sys/kernel/debug/nfsd/io_cache_write

To check which IO mode NFSD is using for READ or WRITE, simply read the
corresponding IO operation's debugfs interface, e.g.::

  cat /sys/kernel/debug/nfsd/io_cache_read
  cat /sys/kernel/debug/nfsd/io_cache_write

If you experiment with NFSD's IO modes on a recent kernel and have
interesting results, please report them to linux-nfs@vger.kernel.org.

NFSD DONTCACHE
==============

DONTCACHE offers a hybrid approach to servicing IO that aims to offer
the benefits of using DIRECT IO without any of the strict alignment
requirements that DIRECT IO imposes. To achieve this, buffered IO is
used, but the IO is flagged to "drop behind" (meaning the associated
pages are dropped from the page cache) when the IO completes.

DONTCACHE aims to avoid what has proven to be a fairly significant
limitation of Linux's memory management subsystem if/when large amounts
of data are infrequently accessed (e.g. read once _or_ written once but
not read until much later). Such use-cases are particularly problematic
because the page cache will eventually become a bottleneck to servicing
new IO requests.

For more context on DONTCACHE, please see these Linux commit headers:

- Overview: 9ad6344568cc3 ("mm/filemap: change filemap_create_folio()
  to take a struct kiocb")
- for READ: 8026e49bff9b1 ("mm/filemap: add read support for
  RWF_DONTCACHE")
- for WRITE: 974c5e6139db3 ("xfs: flag as supporting FOP_DONTCACHE")

NFSD_IO_DONTCACHE will fall back to NFSD_IO_BUFFERED if the underlying
filesystem doesn't indicate support by setting FOP_DONTCACHE.

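As a quick sketch, selecting DONTCACHE for both operations and
verifying the setting took effect (values as defined above)::

  # 1 == NFSD_IO_DONTCACHE
  echo 1 > /sys/kernel/debug/nfsd/io_cache_read
  echo 1 > /sys/kernel/debug/nfsd/io_cache_write
  cat /sys/kernel/debug/nfsd/io_cache_read /sys/kernel/debug/nfsd/io_cache_write

Note that, per the fallback just described, the effective behavior
still depends on the exported filesystem advertising FOP_DONTCACHE.
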
NFSD DIRECT
===========

DIRECT IO doesn't make use of the page cache; as such, it is able to
avoid the Linux memory management's page reclaim scalability problems
without resorting to the hybrid use of the page cache that DONTCACHE
makes.

Some workloads benefit from NFSD avoiding the page cache, particularly
those with a working set that is significantly larger than available
system memory. The pathological worst-case workload that NFSD DIRECT has
proven to help most is: an NFS client issuing large sequential IO to a
file that is 2-3 times larger than the NFS server's available system
memory. The reason for such improvement is that NFSD DIRECT eliminates a
lot of work that the memory management subsystem would otherwise be
required to perform (e.g. page allocation, dirty writeback, page
reclaim). When using NFSD DIRECT, kswapd and kcompactd are no longer
commanding CPU time trying to find adequate free pages so that forward
IO progress can be made.

The performance win associated with using NFSD DIRECT was previously
discussed on linux-nfs, see:
https://lore.kernel.org/linux-nfs/aEslwqa9iMeZjjlV@kernel.org/

But in summary:

- NFSD DIRECT can significantly reduce memory requirements
- NFSD DIRECT can reduce CPU load by avoiding costly page reclaim work
- NFSD DIRECT can offer more deterministic IO performance

As always, your mileage may vary, so it is important to carefully
consider if/when it is beneficial to make use of NFSD DIRECT. When
assessing the comparative performance of your workload, please be sure
to log relevant performance metrics during testing (e.g. memory usage,
CPU usage, IO performance). Using perf to collect data that may be used
to generate a "flamegraph" of the work Linux must perform on behalf of
your test is a really meaningful way to compare the relative health of
the system and how switching NFSD's IO mode changes what is observed.

If NFSD_IO_DIRECT is specified by writing 2 (or 3 and 4 for WRITE) to
NFSD's debugfs interfaces, ideally the IO will be aligned relative to
the underlying block device's logical_block_size. Also, the memory
buffer used to store the READ or WRITE payload must be aligned relative
to the underlying block device's dma_alignment.

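For reference, both limits can be read from the block device's queue
attributes in sysfs. A sketch (sda is a placeholder device name, and the
dma_alignment attribute is assumed to be present on recent kernels)::

  cat /sys/block/sda/queue/logical_block_size
  cat /sys/block/sda/queue/dma_alignment
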
But NFSD DIRECT does handle misaligned IO, in terms of O_DIRECT, as best
it can:

Misaligned READ:
  If NFSD_IO_DIRECT is used, expand any misaligned READ to the next
  DIO-aligned block (on either end of the READ). The expanded READ is
  verified with proper offset/len (logical_block_size) and
  dma_alignment checking.

Misaligned WRITE:
  If NFSD_IO_DIRECT is used, split any misaligned WRITE into a start,
  middle and end as needed. The large middle segment is DIO-aligned,
  and the start and/or end are misaligned. Buffered IO is used for the
  misaligned segments, and O_DIRECT is used for the middle DIO-aligned
  segment. DONTCACHE buffered IO is _not_ used for the misaligned
  segments because using normal buffered IO offers a significant RMW
  performance benefit when handling streaming misaligned WRITEs.

Tracing:
  The nfsd_read_direct trace event shows how NFSD expands any
  misaligned READ to the next DIO-aligned block (on either end of the
  original READ, as needed).

  This combination of trace events is useful for READs::

    echo 1 > /sys/kernel/tracing/events/nfsd/nfsd_read_vector/enable
    echo 1 > /sys/kernel/tracing/events/nfsd/nfsd_read_direct/enable
    echo 1 > /sys/kernel/tracing/events/nfsd/nfsd_read_io_done/enable
    echo 1 > /sys/kernel/tracing/events/xfs/xfs_file_direct_read/enable

  The nfsd_write_direct trace event shows how NFSD splits a given
  misaligned WRITE into a DIO-aligned middle segment.

  This combination of trace events is useful for WRITEs::

    echo 1 > /sys/kernel/tracing/events/nfsd/nfsd_write_opened/enable
    echo 1 > /sys/kernel/tracing/events/nfsd/nfsd_write_direct/enable
    echo 1 > /sys/kernel/tracing/events/nfsd/nfsd_write_io_done/enable
    echo 1 > /sys/kernel/tracing/events/xfs/xfs_file_direct_write/enable

Documentation/filesystems/nfs/nfsd-maintainer-entry-profile.rst (new file, 547 lines)
@@ -0,0 +1,547 @@
NFSD Maintainer Entry Profile
=============================

A Maintainer Entry Profile supplements the top-level process
documents (found in Documentation/process/) with customs that are
specific to a subsystem and its maintainers. A contributor may use
this document to set their expectations and avoid common mistakes.
A maintainer may use these profiles to look across subsystems for
opportunities to converge on best common practices.

Overview
--------
The Network File System (NFS) is a standardized family of network
protocols that enable access to files across a set of
network-connected peer hosts. Applications on NFS clients access
files that reside on file systems that are shared by NFS servers. A
single network peer can act as both an NFS client and an NFS server.

NFSD refers to the NFS server implementation included in the Linux
kernel. An in-kernel NFS server has fast access to files stored
in file systems local to that server. NFSD can share files stored
on most of the file system types native to Linux, including xfs,
ext4, btrfs, and tmpfs.

Mailing list
------------
The linux-nfs@vger.kernel.org mailing list is a public list. Its
purpose is to enable collaboration among developers working on the
Linux NFS stack, both client and server. It is not a place for
conversations that are not related directly to the Linux NFS stack.

The linux-nfs mailing list is archived on `lore.kernel.org <https://lore.kernel.org/linux-nfs/>`_.

The Linux NFS community does not have any chat room.

Reporting bugs
--------------
If you experience an NFSD-related bug on a distribution-built
kernel, please start by working with your Linux distributor.

Bug reports against upstream Linux code bases are welcome on the
linux-nfs@vger.kernel.org mailing list, where some active triage
can be done. NFSD bugs may also be reported in the Linux kernel
community's bugzilla at:

    https://bugzilla.kernel.org

Please file NFSD-related bugs under the "Filesystems/NFSD"
component. In general, including as much detail as possible is a
good start, including pertinent system log messages from both
the client and the server.

User space software related to NFSD, such as mountd or the exportfs
command, is contained in the nfs-utils package. Report problems
with those components to linux-nfs@vger.kernel.org. You might be
directed to move the report to a specific bug tracker.

Contributor's Guide
-------------------

Standards compliance
~~~~~~~~~~~~~~~~~~~~
The priority is for NFSD to interoperate fully with the Linux NFS
client. We also test against other popular NFS client implementations
regularly at NFS bake-a-thon events (also known as plugfests).
Non-Linux NFS clients are not part of upstream NFSD CI/CD.

The NFSD community strives to provide an NFS server implementation
that interoperates with all standards-compliant NFS client
implementations. This is done by staying as close as is sensible to
the normative mandates in the IETF's published NFS, RPC, and GSS-API
standards.

It is always useful to reference an RFC and section number in a code
comment where behavior deviates from the standard (and even when the
behavior is compliant but the implementation is obfuscatory).

On the rare occasion when a deviation from standard-mandated
behavior is needed, brief documentation of the use case or the
deficiencies in the standard is a required part of the in-code
documentation.

Care must always be taken to avoid leaking local error codes (i.e.,
errnos) to clients of NFSD. A proper NFS status code is always
required in NFS protocol replies.

NFSD administrative interfaces
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
NFSD administrative interfaces include:

- an NFSD or SUNRPC module parameter

- export options in /etc/exports

- files under /proc/fs/nfsd/ or /proc/sys/sunrpc/

- the NFSD netlink protocol

Frequently, a request is made to introduce or modify one of NFSD's
traditional administrative interfaces. Certainly it is technically
easy to introduce a new administrative setting. However, there are
good reasons why the NFSD maintainers prefer to leave that as a last
resort:

- As with any API, administrative interfaces are difficult to get
  right.

- Once they are documented and have a legacy of use, administrative
  interfaces become difficult to modify or remove.

- Every new administrative setting multiplies the NFSD test matrix.

- The cost of one administrative interface is incremental, but costs
  add up across all of the existing interfaces.

It is often better for everyone if effort is made up front to
understand the underlying requirement behind the new setting, and
then to make it tune itself (or become otherwise unnecessary).

If a new setting is indeed necessary, first consider adding it to
the NFSD netlink protocol. Or, if it doesn't need to be a reliable
long-term user space feature, it can be added to NFSD's menagerie of
experimental settings, which reside under /sys/kernel/debug/nfsd/.
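
As a sketch of that experimental route, such a knob can be exposed
with the generic debugfs helpers. The file name and backing
variable below are invented for illustration::

    #include <linux/debugfs.h>

    /* Hypothetical experimental tunable, for illustration only. */
    static u32 nfsd_sample_tunable;

    /* Appears as /sys/kernel/debug/nfsd/sample_tunable */
    static void nfsd_sample_debugfs_init(struct dentry *nfsd_debugfs_dir)
    {
            debugfs_create_u32("sample_tunable", 0644,
                               nfsd_debugfs_dir, &nfsd_sample_tunable);
    }

Because such files live under debugfs, they carry no long-term ABI
promise and can be changed or removed without a deprecation period.
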
Field observability
~~~~~~~~~~~~~~~~~~~
NFSD employs several different mechanisms for observing operation,
including counters, printks, WARNings, and static trace points. Each
has its strengths and weaknesses. Contributors should select the
most appropriate tool for their task:

- BUG must be avoided if at all possible, as it will frequently
  result in a full system crash.

- WARN is appropriate only when a full stack trace is useful.

- printk can show detailed information, but must not be used in
  code paths where it can be triggered repeatedly by remote users.

- dprintk can show detailed information, but can be enabled only in
  pre-set groups. The overhead of emitting output makes dprintk
  inappropriate for frequent operations like I/O.

- Counters are always on, but provide little information about
  individual events other than how frequently they occur.

- Static trace points can be enabled individually or in groups (via
  a glob). These are generally low overhead, and thus are favored
  for use in hot paths (see the sketch after this list).

- Dynamic tracing, such as kprobes or eBPF, is quite flexible, but
  cannot be used in certain environments (e.g., full kernel
  lockdown).
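
As a rough illustration of why static trace points are cheap and
selectable, here is a minimal sketch in the style of fs/nfsd/trace.h;
the event name and fields are invented::

    /* Hypothetical trace point, for illustration only. */
    TRACE_EVENT(nfsd_sample_event,
            TP_PROTO(u32 xid, int status),
            TP_ARGS(xid, status),
            TP_STRUCT__entry(
                    __field(u32, xid)
                    __field(int, status)
            ),
            TP_fast_assign(
                    __entry->xid = xid;
                    __entry->status = status;
            ),
            TP_printk("xid=0x%08x status=%d",
                    __entry->xid, __entry->status)
    );

Such an event costs almost nothing until an administrator switches
it on at run time through tracefs, either individually or together
with other events via a glob, without rebuilding the kernel.
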
Testing
~~~~~~~
The kdevops project

  https://github.com/linux-kdevops/kdevops

contains several NFS-specific workflows, as well as the community
standard fstests suite. These workflows are based on open source
testing tools such as ltp and fio. Contributors are encouraged
either to use these tools directly or to install and use kdevops
themselves to verify their patches before submission.

Coding style
~~~~~~~~~~~~
Follow the coding style preferences described in

  Documentation/process/coding-style.rst

with the following exceptions:

- Add new local variables to a function in reverse Christmas tree
  order.

- Use the kdoc comment style for:

  + non-static functions
  + static inline functions
  + static functions that are callbacks/virtual functions

- All new non-NFS-version-specific function names start with
  ``nfsd_``.

- New function names that are specific to NFSv2 or NFSv3, or that
  are used by all minor versions of NFSv4, use ``nfsdN_``, where N
  is the version.

- New function names specific to an NFSv4 minor version can be
  named with ``nfsd4M_``, where M is the minor version.

A sketch combining these conventions appears after this list.
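
The fragment below is invented for illustration (the function and
its locals do not exist in NFSD); it shows the kdoc comment style,
reverse Christmas tree ordering of local variables, and the
version-specific naming convention (here, ``nfsd4_`` for a function
used by all NFSv4 minor versions)::

    /**
     * nfsd4_sample_op - illustrate local coding conventions
     * @rqstp: RPC transaction context
     * @flags: hypothetical operation flags
     *
     * Return: nfs_ok on success, or an NFS status code on failure.
     */
    static __be32 nfsd4_sample_op(struct svc_rqst *rqstp, int flags)
    {
            unsigned long expiration = jiffies;     /* longest line first */
            struct nfsd_file *nf = NULL;
            int err = 0;

            /* ... the body of the operation would go here ... */
            if (err)
                    return nfserrno(err);
            return nfs_ok;
    }
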
Patch preparation
~~~~~~~~~~~~~~~~~
Read and follow all guidelines in

  Documentation/process/submitting-patches.rst

Use tagging to identify all patch authors. However, reviewers and
testers should be added by replying to the email patch submission.
Email is used extensively in order to publicly archive review and
testing attributions. These tags are automatically inserted into
your patches when they are applied.

The code in the body of the diff already shows /what/ is being
changed. Thus it is not necessary to repeat that in the patch
description. Instead, the description should contain one or more
of:

- A brief problem statement ("what is this patch trying to fix?")
  with a root-cause analysis.

- End-user visible symptoms, or items that a support engineer might
  use to search for the patch, like stack traces.

- A brief explanation of why the patch is the best way to address
  the problem.

- Any context that reviewers might need to understand the changes
  made by the patch.

- Any relevant benchmarking and/or functional test results.

As detailed in Documentation/process/submitting-patches.rst,
identify the point in history at which the issue being addressed
was introduced, using a Fixes: tag.

Mention in the patch description if that point in history cannot be
determined -- that is, if no Fixes: tag can be provided. In this
case, please make it clear to maintainers whether an LTS backport
is needed even though there is no Fixes: tag.

The NFSD maintainers prefer to add stable tagging themselves, after
public discussion in response to the patch submission. Contributors
may suggest stable tagging, but be aware that many version
management tools add such stable Cc's when you post your patches.
Don't add "Cc: stable" during the initial submission process unless
you are absolutely sure the patch needs to go to stable.

Patch submission
~~~~~~~~~~~~~~~~
Patches to NFSD are submitted via the kernel's email-based review
process that is common to most other kernel subsystems.

Just before each submission, rebase your patch or series on the
nfsd-testing branch at

  https://git.kernel.org/pub/scm/linux/kernel/git/cel/linux.git

The NFSD subsystem is maintained separately from the Linux in-kernel
NFS client. The NFSD maintainers do not normally take submissions
for client changes, nor can they respond authoritatively to bug
reports or feature requests for NFS client code.

This means that contributors might be asked to resubmit patches if
they were emailed to the incorrect set of maintainers and reviewers.
This is not a rejection, but simply a correction of the submission
process.

When in doubt, consult the NFSD entry in the MAINTAINERS file to
see which files and directories fall under the NFSD subsystem.

The proper set of email addresses for NFSD patches is:

  To: the NFSD maintainers and reviewers listed in MAINTAINERS
  Cc: linux-nfs@vger.kernel.org and optionally linux-kernel@

If there are other subsystems involved in the patches (for example
MM or RDMA), their primary mailing list address can be included in
the Cc: field. Other contributors and interested parties may be
included there as well.

In general we prefer that contributors use common patch email tools
such as "git send-email" or "stg email format/send", which tend to
get the details right without a lot of fuss.

A series consisting of a single patch is not required to have a
cover letter. However, a cover letter can be included if there is
substantial context that is not appropriate to include in the
patch description.

Please note that, with an email-based submission process, series
cover letters are not part of the work that is committed to the
kernel source code base or its commit history. Therefore always try
to keep pertinent information in the patch descriptions.

Design documentation is welcome, but as cover letters are not
preserved, a better option is often to include a patch that adds
such documentation under Documentation/filesystems/nfs/.

Reviewers will ask about test coverage and what use cases the
patches are expected to address. Please be prepared to answer these
questions.

Review comments from maintainers might be politely stated, but in
general they are not optional to address when they are actionable.
If necessary, the maintainers retain the right to not apply patches
when contributors refuse to address reasonable requests.

Post changes to kernel source code and user space source code as
separate series. You can connect the two series with comments in
your cover letters.

Generally the NFSD maintainers ask for a repost even for simple
modifications, in order to publicly archive the request and the
resulting repost before it is pulled into the NFSD trees. This
also enables us to rebuild a patch series quickly without missing
changes that might have been discussed via email.

Avoid frequently reposting large series with only small changes. As
a rule of thumb, posting substantial changes more than once a week
will result in reviewer overload.

Remember, there are only a handful of subsystem maintainers and
reviewers, but potentially many sources of contributions. The
maintainers and reviewers, therefore, are always the less scalable
resource. Be kind to your friendly neighborhood maintainer.

Patch Acceptance
~~~~~~~~~~~~~~~~
There isn't a formal review process for NFSD, but we like to see
at least two Reviewed-by: notices for patches that are more than
simple clean-ups. Reviews are done in public on
linux-nfs@vger.kernel.org and are archived on lore.kernel.org.

Currently the NFSD patch queues are maintained in branches here:

  https://git.kernel.org/pub/scm/linux/kernel/git/cel/linux.git

The NFSD maintainers apply patches initially to the nfsd-testing
branch, which is always open to new submissions. Patches can be
applied while review is ongoing. nfsd-testing is a topic branch,
so it can change frequently: it will be rebased, and your patch
might get dropped if there is a problem with it.

Generally a script-generated "thank you" email will indicate when
your patch has been added to the nfsd-testing branch. You can track
the progress of your patch using the linux-nfs patchwork instance:

  https://patchwork.kernel.org/project/linux-nfs/list/

While your patch is in nfsd-testing, it is exposed to a variety of
test environments, including community zero-day bots, static
analysis tools, and NFSD continuous integration testing. The soak
period is three to four weeks.

Each patch that survives in nfsd-testing for the soak period without
changes is moved to the nfsd-next branch.

The nfsd-next branch is automatically merged into linux-next and
fs-next on a nightly basis.

Patches that survive in nfsd-next are included in the next NFSD
merge window pull request. These windows typically occur once every
63 days (nine weeks).

When the upstream merge window closes, the nfsd-next branch is
renamed nfsd-fixes, and a new nfsd-next branch is created, based on
the upstream -rc1 tag.

Fixes that are destined for an upstream -rc release also run the
nfsd-testing gauntlet, but are then applied to the nfsd-fixes
branch. That branch is made available for Linus to pull after a
short time. In order to limit the risk of introducing regressions,
we limit such fixes to emergency situations or to breakage that
occurred during the most recent upstream merge.

Please make it clear when submitting an emergency patch that
immediate action (either application to -rc or an LTS backport) is
needed.

Sensitive patch submissions and bug reports
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
CVEs are generated by specific members of the Linux kernel community
and by several external entities. The Linux NFS community does not
emit or assign CVEs. CVEs are assigned after an issue and its fix
are known.

However, the NFSD maintainers sometimes receive sensitive security
reports, and at times these are significant enough to need to be
embargoed. In such rare cases, fixes can be developed and reviewed
out of the public eye.

Please be aware that many version management tools add the stable
Cc's when you post your patches. This is generally a nuisance, but
it can also accidentally disclose an embargoed security issue.
Don't add "Cc: stable" during the initial submission process unless
you are absolutely sure the patch needs to go to stable@.

Patches that are merged without ever appearing on any list, and
which carry a Reported-by: or Fixes: tag, are flagged as suspicious
by security-focused people. We therefore encourage that, after any
private review, security-sensitive patches be posted to linux-nfs@
for the usual public review, archiving, and test period.

LLM-generated submissions
~~~~~~~~~~~~~~~~~~~~~~~~~
The Linux kernel community as a whole is still exploring the new
world of LLM-generated code. The NFSD maintainers will entertain
submission of patches that are partially or wholly generated by
LLM-based development tools. Such submissions are held to the
same standards as submissions created entirely by human authors:

- The human contributor identifies themselves via a Signed-off-by:
  tag. This tag serves as the contributor's Developer's Certificate
  of Origin (DCO).

- The human contributor is solely responsible for code provenance
  and for any contamination by inadvertently-included code with a
  conflicting license, as usual.

- The human contributor must be able to answer and address review
  questions. A patch description such as "This fixed my problem
  but I don't know why" is not acceptable.

- The contribution is subjected to the same test regimen as all
  other submissions.

- An indication (via a Generated-by: tag or otherwise) that the
  contribution is LLM-generated is not required.

It is easy to address review comments and fix requests in
LLM-generated code. So easy, in fact, that it becomes tempting to
repost refreshed code immediately. Please resist that temptation.

As always, please avoid reposting series revisions more than once
every 24 hours.

Clean-up patches
~~~~~~~~~~~~~~~~
The NFSD maintainers discourage patches that perform simple
clean-ups that are not in the context of other work. For example:

* Addressing ``checkpatch.pl`` warnings after merge
* Addressing :ref:`Local variable ordering<rcs>` issues
* Addressing long-standing whitespace damage

This is because the churn that such changes produce is felt to come
at a greater cost than the value of the clean-ups themselves.

Conversely, spelling and grammar fixes are encouraged.

Stable and LTS support
----------------------
Upstream NFSD continuous integration testing runs against LTS trees
whenever they are updated.

Please indicate when a patch containing a fix needs to be considered
for LTS kernels, either via a Fixes: tag or by explicit mention.

Feature requests
----------------
There is no one way to make an official feature request, but
discussion about the request should eventually make its way to
the linux-nfs@vger.kernel.org mailing list for public review by
the community.

Subsystem boundaries
~~~~~~~~~~~~~~~~~~~~
NFSD itself is not much more than a protocol engine. Its primary
responsibility is to translate the NFS protocol into API calls in
the Linux kernel. For example, NFSD is not responsible for knowing
exactly how bytes or file attributes are managed on a block device;
it relies on other kernel subsystems for that.

If the subsystems on which NFSD relies do not implement a particular
feature, then even if the standard NFS protocols do support that
feature, NFSD usually cannot provide it without substantial
development work in other areas of the kernel.

Specificity
~~~~~~~~~~~
Feature requests can come from anywhere, and thus can often be
nebulous. A requester might not understand what a "use case" or a
"user story" is. These descriptive paradigms are often used by
developers and architects to understand what is required of a
design, but they are terms of art in the software trade, not in
everyday use.

In order to prevent contributors and maintainers from becoming
overwhelmed, we won't be afraid of politely saying "no" to
underspecified requests.

Community roles and their authority
-----------------------------------
The purpose of Linux subsystem communities is to provide expertise
and active stewardship of a narrow set of source files in the Linux
kernel. This can include managing user space tooling as well.

To contextualize the structure of the Linux NFS community that
is responsible for stewardship of the NFS server code base, we
define the community roles here.

- **Contributor**: Anyone who submits a code change, bug fix,
  recommendation, documentation fix, and so on. A contributor can
  submit regularly or infrequently.

- **Outside Contributor**: A contributor who is not a regular actor
  in the Linux NFS community. This can mean someone who contributes
  to other parts of the kernel, or someone who just noticed a
  misspelling in a comment and sent a patch.

- **Reviewer**: Someone who is named in the MAINTAINERS file as a
  reviewer is an area expert who can request changes to contributed
  code, and who expects that contributors will address the request.

- **External Reviewer**: Someone who is not named in the
  MAINTAINERS file as a reviewer, but who is an area expert.
  Examples include Linux kernel contributors with networking,
  security, or persistent storage expertise, or developers who
  contribute primarily to other NFS implementations.

One or more people will take on the following roles. These people
are often generically referred to as "maintainers", and are
identified in the MAINTAINERS file with the "M:" tag under the NFSD
subsystem.

- **Upstream Release Manager**: This role is responsible for
  curating contributions into a branch, reviewing test results, and
  then sending a pull request during merge windows. There is a
  trust relationship between the release manager and Linus.

- **Bug Triager**: A first responder to bug reports submitted to
  the linux-nfs mailing list or bug trackers, who helps
  troubleshoot and identify next steps.

- **Security Lead**: The security lead handles contacts from the
  security community to resolve immediate issues, as well as
  dealing with long-term security concerns such as supply chain
  integrity. For upstream, that is usually whether contributions
  violate licensing or other intellectual property agreements.

- **Testing Lead**: The testing lead builds and runs the test
  infrastructure for the subsystem. The testing lead may ask for
  patches to be dropped because of ongoing high defect rates.

- **LTS Maintainer**: The LTS maintainer is responsible for
  managing the Fixes: and Cc: stable annotations on patches, and
  for seeing that patches that cannot be automatically applied to
  LTS kernels get proper manual backports as necessary.

- **Community Manager**: This umpire role can be asked to call
  balls and strikes during conflicts, but is also responsible for
  ensuring the health of the relationships within the community and
  for facilitating discussions on long-term topics such as how to
  manage growing technical debt.

@@ -110,5 +110,6 @@ to do something different in the near future.
   ../process/maintainer-netdev
   ../driver-api/vfio-pci-device-specific-driver-acceptance
   ../nvme/feature-and-quirk-policy
   ../filesystems/nfs/nfsd-maintainer-entry-profile
   ../filesystems/xfs/xfs-maintainer-entry-profile
   ../mm/damon/maintainer-profile

@@ -7,7 +7,7 @@ Landlock LSM: kernel documentation
==================================

:Author: Mickaël Salaün
:Date: March 2025
:Date: September 2025

Landlock's goal is to create scoped access-control (i.e. sandboxing). To
harden a whole system, this feature should be available to any process,

@@ -110,6 +110,12 @@ Filesystem
.. kernel-doc:: security/landlock/fs.h
    :identifiers:

Process credential
------------------

.. kernel-doc:: security/landlock/cred.h
    :identifiers:

Ruleset and domain
------------------

@@ -128,6 +134,9 @@ makes the reasoning much easier and helps avoid pitfalls.
.. kernel-doc:: security/landlock/ruleset.h
    :identifiers:

.. kernel-doc:: security/landlock/domain.h
    :identifiers:

Additional documentation
========================

MAINTAINERS

@@ -4432,6 +4432,7 @@ F: arch/*/lib/bitops.c
F: include/asm-generic/bitops
F: include/asm-generic/bitops.h
F: include/linux/bitops.h
F: lib/hweight.c
F: lib/test_bitops.c
F: tools/*/bitops*

@@ -13653,6 +13654,7 @@ R: Dai Ngo <Dai.Ngo@oracle.com>
R: Tom Talpey <tom@talpey.com>
L: linux-nfs@vger.kernel.org
S: Supported
P: Documentation/filesystems/nfs/nfsd-maintainer-entry-profile.rst
B: https://bugzilla.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/cel/linux.git
F: Documentation/filesystems/nfs/

@@ -13672,6 +13674,10 @@ F: include/uapi/linux/sunrpc/
F: net/sunrpc/
F: tools/net/sunrpc/

KERNEL NFSD BLOCK and SCSI LAYOUT DRIVER
R: Christoph Hellwig <hch@lst.de>
F: fs/nfsd/blocklayout*

KERNEL PACMAN PACKAGING (in addition to generic KERNEL BUILD)
M: Thomas Weißschuh <linux@weissschuh.net>
R: Christian Heusel <christian@heusel.eu>

@@ -17522,6 +17528,7 @@ M: Luis Chamberlain <mcgrof@kernel.org>
M: Petr Pavlu <petr.pavlu@suse.com>
M: Daniel Gomez <da.gomez@kernel.org>
R: Sami Tolvanen <samitolvanen@google.com>
R: Aaron Tomlin <atomlin@atomlin.com>
L: linux-modules@vger.kernel.org
L: linux-kernel@vger.kernel.org
S: Maintained

@@ -17531,6 +17538,8 @@ F: include/linux/module*.h
F: kernel/module/
F: lib/test_kmod.c
F: lib/tests/module/
F: rust/kernel/module_param.rs
F: rust/macros/module.rs
F: scripts/module*
F: tools/testing/selftests/kmod/
F: tools/testing/selftests/module/

@@ -20089,6 +20098,7 @@ Q: https://patchwork.kernel.org/project/linux-pci/list/
B: https://bugzilla.kernel.org
C: irc://irc.oftc.net/linux-pci
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci.git
F: Documentation/ABI/testing/sysfs-devices-pci-host-bridge
F: Documentation/PCI/
F: Documentation/devicetree/bindings/pci/
F: arch/x86/kernel/early-quirks.c

@@ -26388,14 +26398,16 @@ M: David Lechner <dlechner@baylibre.com>
S: Maintained
F: Documentation/devicetree/bindings/trigger-source/*

TRUSTED SECURITY MODULE (TSM) INFRASTRUCTURE
TRUSTED EXECUTION ENVIRONMENT SECURITY MANAGER (TSM)
M: Dan Williams <dan.j.williams@intel.com>
L: linux-coco@lists.linux.dev
S: Maintained
F: Documentation/ABI/testing/configfs-tsm-report
F: Documentation/driver-api/coco/
F: Documentation/driver-api/pci/tsm.rst
F: drivers/pci/tsm.c
F: drivers/virt/coco/guest/
F: include/linux/tsm*.h
F: include/linux/*tsm*.h
F: samples/tsm-mr/

TRUSTED SERVICES TEE DRIVER

@@ -224,28 +224,26 @@ static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
   until either pci_unmap_single or pci_dma_sync_single is performed. */

static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
pci_map_single_1(struct pci_dev *pdev, phys_addr_t paddr, size_t size,
                 int dac_allowed)
{
        struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
        dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
        unsigned long offset = offset_in_page(paddr);
        struct pci_iommu_arena *arena;
        long npages, dma_ofs, i;
        unsigned long paddr;
        dma_addr_t ret;
        unsigned int align = 0;
        struct device *dev = pdev ? &pdev->dev : NULL;

        paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
        /* First check to see if we can use the direct map window. */
        if (paddr + size + __direct_map_base - 1 <= max_dma
            && paddr + size <= __direct_map_size) {
                ret = paddr + __direct_map_base;

                DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %ps\n",
                      cpu_addr, size, ret, __builtin_return_address(0));
                DBGA2("pci_map_single: [%pa,%zx] -> direct %llx from %ps\n",
                      &paddr, size, ret, __builtin_return_address(0));

                return ret;
        }

@@ -255,8 +253,8 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
        if (dac_allowed) {
                ret = paddr + alpha_mv.pci_dac_offset;

                DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %ps\n",
                      cpu_addr, size, ret, __builtin_return_address(0));
                DBGA2("pci_map_single: [%pa,%zx] -> DAC %llx from %ps\n",
                      &paddr, size, ret, __builtin_return_address(0));

                return ret;
        }

@@ -290,10 +288,10 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
        arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

        ret = arena->dma_base + dma_ofs * PAGE_SIZE;
        ret += (unsigned long)cpu_addr & ~PAGE_MASK;
        ret += offset;

        DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %ps\n",
              cpu_addr, size, npages, ret, __builtin_return_address(0));
        DBGA2("pci_map_single: [%pa,%zx] np %ld -> sg %llx from %ps\n",
              &paddr, size, npages, ret, __builtin_return_address(0));

        return ret;
}

@@ -322,19 +320,18 @@ static struct pci_dev *alpha_gendev_to_pci(struct device *dev)
        return NULL;
}

static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction dir,
static dma_addr_t alpha_pci_map_phys(struct device *dev, phys_addr_t phys,
                                     size_t size, enum dma_data_direction dir,
                                     unsigned long attrs)
{
        struct pci_dev *pdev = alpha_gendev_to_pci(dev);
        int dac_allowed;

        BUG_ON(dir == DMA_NONE);
        if (unlikely(attrs & DMA_ATTR_MMIO))
                return DMA_MAPPING_ERROR;

        dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
        return pci_map_single_1(pdev, (char *)page_address(page) + offset,
                                size, dac_allowed);
        dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
        return pci_map_single_1(pdev, phys, size, dac_allowed);
}

/* Unmap a single streaming mode DMA translation. The DMA_ADDR and
@@ -343,7 +340,7 @@ static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there. */

static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
static void alpha_pci_unmap_phys(struct device *dev, dma_addr_t dma_addr,
                                 size_t size, enum dma_data_direction dir,
                                 unsigned long attrs)
{
@@ -353,8 +350,6 @@ static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
        struct pci_iommu_arena *arena;
        long dma_ofs, npages;

        BUG_ON(dir == DMA_NONE);

        if (dma_addr >= __direct_map_base
            && dma_addr < __direct_map_base + __direct_map_size) {
                /* Nothing to do. */
@@ -429,7 +424,7 @@ try_again:
        }
        memset(cpu_addr, 0, size);

        *dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
        *dma_addrp = pci_map_single_1(pdev, virt_to_phys(cpu_addr), size, 0);
        if (*dma_addrp == DMA_MAPPING_ERROR) {
                free_pages((unsigned long)cpu_addr, order);
                if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
@@ -643,9 +638,8 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
        /* Fast path single entry scatterlists. */
        if (nents == 1) {
                sg->dma_length = sg->length;
                sg->dma_address
                  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
                                     sg->length, dac_allowed);
                sg->dma_address = pci_map_single_1(pdev, sg_phys(sg),
                                                   sg->length, dac_allowed);
                if (sg->dma_address == DMA_MAPPING_ERROR)
                        return -EIO;
                return 1;
@@ -917,8 +911,8 @@ iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
const struct dma_map_ops alpha_pci_ops = {
        .alloc = alpha_pci_alloc_coherent,
        .free = alpha_pci_free_coherent,
        .map_page = alpha_pci_map_page,
        .unmap_page = alpha_pci_unmap_page,
        .map_phys = alpha_pci_map_phys,
        .unmap_phys = alpha_pci_unmap_phys,
        .map_sg = alpha_pci_map_sg,
        .unmap_sg = alpha_pci_unmap_sg,
        .dma_supported = alpha_pci_supported,

@@ -624,16 +624,14 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
        kfree(buf);
}

static void dma_cache_maint_page(struct page *page, unsigned long offset,
                size_t size, enum dma_data_direction dir,
static void dma_cache_maint_page(phys_addr_t phys, size_t size,
                enum dma_data_direction dir,
                void (*op)(const void *, size_t, int))
{
        unsigned long pfn;
        unsigned long offset = offset_in_page(phys);
        unsigned long pfn = __phys_to_pfn(phys);
        size_t left = size;

        pfn = page_to_pfn(page) + offset / PAGE_SIZE;
        offset %= PAGE_SIZE;

        /*
         * A single sg entry may refer to multiple physically contiguous
         * pages. But we still need to process highmem pages individually.
@@ -644,17 +642,18 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
                size_t len = left;
                void *vaddr;

                page = pfn_to_page(pfn);

                if (PageHighMem(page)) {
                phys = __pfn_to_phys(pfn);
                if (PhysHighMem(phys)) {
                        if (len + offset > PAGE_SIZE)
                                len = PAGE_SIZE - offset;

                        if (cache_is_vipt_nonaliasing()) {
                                vaddr = kmap_atomic(page);
                                vaddr = kmap_atomic_pfn(pfn);
                                op(vaddr + offset, len, dir);
                                kunmap_atomic(vaddr);
                        } else {
                                struct page *page = phys_to_page(phys);

                                vaddr = kmap_high_get(page);
                                if (vaddr) {
                                        op(vaddr + offset, len, dir);
@@ -662,7 +661,8 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
                                }
                        }
                } else {
                        vaddr = page_address(page) + offset;
                        phys += offset;
                        vaddr = phys_to_virt(phys);
                        op(vaddr, len, dir);
                }
                offset = 0;
@@ -676,14 +676,11 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 * Note: Drivers should NOT use this function directly.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
                size_t size, enum dma_data_direction dir)
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir)
{
        phys_addr_t paddr;
        dma_cache_maint_page(paddr, size, dir, dmac_map_area);

        dma_cache_maint_page(page, off, size, dir, dmac_map_area);

        paddr = page_to_phys(page) + off;
        if (dir == DMA_FROM_DEVICE) {
                outer_inv_range(paddr, paddr + size);
        } else {
@@ -692,17 +689,15 @@ static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
        /* FIXME: non-speculating: flush on bidirectional mappings? */
}

static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
                size_t size, enum dma_data_direction dir)
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir)
{
        phys_addr_t paddr = page_to_phys(page) + off;

        /* FIXME: non-speculating: not required */
        /* in any case, don't bother invalidating if DMA to device */
        if (dir != DMA_TO_DEVICE) {
                outer_inv_range(paddr, paddr + size);

        dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
        dma_cache_maint_page(paddr, size, dir, dmac_unmap_area);
}

/*
@@ -737,6 +732,9 @@ static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
        if (attrs & DMA_ATTR_PRIVILEGED)
                prot |= IOMMU_PRIV;

        if (attrs & DMA_ATTR_MMIO)
                prot |= IOMMU_MMIO;

        switch (dir) {
        case DMA_BIDIRECTIONAL:
                return prot | IOMMU_READ | IOMMU_WRITE;
@@ -1205,7 +1203,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
        unsigned int len = PAGE_ALIGN(s->offset + s->length);

        if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
                arch_sync_dma_for_device(sg_phys(s), s->length, dir);

        prot = __dma_info_to_prot(dir, attrs);

@@ -1307,8 +1305,7 @@ static void arm_iommu_unmap_sg(struct device *dev,
                __iommu_remove_mapping(dev, sg_dma_address(s),
                                       sg_dma_len(s));
                if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                        __dma_page_dev_to_cpu(sg_page(s), s->offset,
                                              s->length, dir);
                        arch_sync_dma_for_cpu(sg_phys(s), s->length, dir);
        }
}

@@ -1330,7 +1327,7 @@ static void arm_iommu_sync_sg_for_cpu(struct device *dev,
                return;

        for_each_sg(sg, s, nents, i)
                __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
                arch_sync_dma_for_cpu(sg_phys(s), s->length, dir);

}

@@ -1352,29 +1349,31 @@ static void arm_iommu_sync_sg_for_device(struct device *dev,
                return;

        for_each_sg(sg, s, nents, i)
                __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
                arch_sync_dma_for_device(sg_phys(s), s->length, dir);
}

/**
 * arm_iommu_map_page
 * arm_iommu_map_phys
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @phys: physical address that buffer resides in
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 * @attrs: DMA mapping attributes
 *
 * IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
             unsigned long offset, size_t size, enum dma_data_direction dir,
             unsigned long attrs)
static dma_addr_t arm_iommu_map_phys(struct device *dev, phys_addr_t phys,
             size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
        int len = PAGE_ALIGN(size + offset_in_page(phys));
        phys_addr_t addr = phys & PAGE_MASK;
        dma_addr_t dma_addr;
        int ret, prot, len = PAGE_ALIGN(size + offset);
        int ret, prot;

        if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                __dma_page_cpu_to_dev(page, offset, size, dir);
        if (!dev->dma_coherent &&
            !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
                arch_sync_dma_for_device(phys, size, dir);

        dma_addr = __alloc_iova(mapping, len);
        if (dma_addr == DMA_MAPPING_ERROR)
@@ -1382,12 +1381,11 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,

        prot = __dma_info_to_prot(dir, attrs);

        ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len,
                        prot, GFP_KERNEL);
        ret = iommu_map(mapping->domain, dma_addr, addr, len, prot, GFP_KERNEL);
        if (ret < 0)
                goto fail;

        return dma_addr + offset;
        return dma_addr + offset_in_page(phys);
fail:
        __free_iova(mapping, dma_addr, len);
        return DMA_MAPPING_ERROR;
@@ -1399,100 +1397,45 @@ fail:
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 * @attrs: DMA mapping attributes
 *
 * IOMMU aware version of arm_dma_unmap_page()
 * IOMMU aware version of arm_dma_unmap_phys()
 */
static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
static void arm_iommu_unmap_phys(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
        dma_addr_t iova = handle & PAGE_MASK;
        struct page *page;
        int offset = handle & ~PAGE_MASK;
        int len = PAGE_ALIGN(size + offset);

        if (!iova)
                return;

        if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
                page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
                __dma_page_dev_to_cpu(page, offset, size, dir);
        if (!dev->dma_coherent &&
            !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO))) {
                phys_addr_t phys = iommu_iova_to_phys(mapping->domain, iova);

                arch_sync_dma_for_cpu(phys + offset, size, dir);
        }

        iommu_unmap(mapping->domain, iova, len);
        __free_iova(mapping, iova, len);
}

/**
 * arm_iommu_map_resource - map a device resource for DMA
 * @dev: valid struct device pointer
 * @phys_addr: physical address of resource
 * @size: size of resource to map
 * @dir: DMA transfer direction
 */
static dma_addr_t arm_iommu_map_resource(struct device *dev,
                phys_addr_t phys_addr, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
        dma_addr_t dma_addr;
        int ret, prot;
        phys_addr_t addr = phys_addr & PAGE_MASK;
        unsigned int offset = phys_addr & ~PAGE_MASK;
        size_t len = PAGE_ALIGN(size + offset);

        dma_addr = __alloc_iova(mapping, len);
        if (dma_addr == DMA_MAPPING_ERROR)
                return dma_addr;

        prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;

        ret = iommu_map(mapping->domain, dma_addr, addr, len, prot, GFP_KERNEL);
        if (ret < 0)
                goto fail;

        return dma_addr + offset;
fail:
        __free_iova(mapping, dma_addr, len);
        return DMA_MAPPING_ERROR;
}

/**
 * arm_iommu_unmap_resource - unmap a device DMA resource
 * @dev: valid struct device pointer
 * @dma_handle: DMA address to resource
 * @size: size of resource to map
 * @dir: DMA transfer direction
 */
static void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle,
                size_t size, enum dma_data_direction dir,
                unsigned long attrs)
{
        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
        dma_addr_t iova = dma_handle & PAGE_MASK;
        unsigned int offset = dma_handle & ~PAGE_MASK;
        size_t len = PAGE_ALIGN(size + offset);

        if (!iova)
                return;

        iommu_unmap(mapping->domain, iova, len);
        __free_iova(mapping, iova, len);
}

static void arm_iommu_sync_single_for_cpu(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
        dma_addr_t iova = handle & PAGE_MASK;
        struct page *page;
        unsigned int offset = handle & ~PAGE_MASK;
        phys_addr_t phys;

        if (dev->dma_coherent || !iova)
                return;

        page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
        __dma_page_dev_to_cpu(page, offset, size, dir);
        phys = iommu_iova_to_phys(mapping->domain, iova);
        arch_sync_dma_for_cpu(phys + offset, size, dir);
}

static void arm_iommu_sync_single_for_device(struct device *dev,
@@ -1500,14 +1443,14 @@ static void arm_iommu_sync_single_for_device(struct device *dev,
{
        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
        dma_addr_t iova = handle & PAGE_MASK;
        struct page *page;
        unsigned int offset = handle & ~PAGE_MASK;
        phys_addr_t phys;

        if (dev->dma_coherent || !iova)
                return;

        page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
        __dma_page_cpu_to_dev(page, offset, size, dir);
        phys = iommu_iova_to_phys(mapping->domain, iova);
        arch_sync_dma_for_device(phys + offset, size, dir);
}

static const struct dma_map_ops iommu_ops = {
@@ -1516,8 +1459,8 @@ static const struct dma_map_ops iommu_ops = {
        .mmap = arm_iommu_mmap_attrs,
        .get_sgtable = arm_iommu_get_sgtable,

        .map_page = arm_iommu_map_page,
        .unmap_page = arm_iommu_unmap_page,
        .map_phys = arm_iommu_map_phys,
        .unmap_phys = arm_iommu_unmap_phys,
        .sync_single_for_cpu = arm_iommu_sync_single_for_cpu,
        .sync_single_for_device = arm_iommu_sync_single_for_device,

@@ -1525,9 +1468,6 @@ static const struct dma_map_ops iommu_ops = {
        .unmap_sg = arm_iommu_unmap_sg,
        .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
        .sync_sg_for_device = arm_iommu_sync_sg_for_device,

        .map_resource = arm_iommu_map_resource,
        .unmap_resource = arm_iommu_unmap_resource,
};

/**
@@ -1794,20 +1734,6 @@ void arch_teardown_dma_ops(struct device *dev)
        set_dma_ops(dev, NULL);
}

void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir)
{
        __dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
                              size, dir);
}

void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir)
{
        __dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
                              size, dir);
}

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp, unsigned long attrs)
{

@@ -521,18 +521,24 @@ static void jazz_dma_free(struct device *dev, size_t size, void *vaddr,
        __free_pages(virt_to_page(vaddr), get_order(size));
}

static dma_addr_t jazz_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir,
                unsigned long attrs)
static dma_addr_t jazz_dma_map_phys(struct device *dev, phys_addr_t phys,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        phys_addr_t phys = page_to_phys(page) + offset;
        if (unlikely(attrs & DMA_ATTR_MMIO))
                /*
                 * This check is included because older versions of the code lacked
                 * MMIO path support, and my ability to test this path is limited.
                 * However, from a software technical standpoint, there is no restriction,
                 * as the following code operates solely on physical addresses.
                 */
                return DMA_MAPPING_ERROR;

        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                arch_sync_dma_for_device(phys, size, dir);
        return vdma_alloc(phys, size);
}

static void jazz_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
static void jazz_dma_unmap_phys(struct device *dev, dma_addr_t dma_addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
@@ -607,8 +613,8 @@ static void jazz_dma_sync_sg_for_cpu(struct device *dev,
const struct dma_map_ops jazz_dma_ops = {
        .alloc = jazz_dma_alloc,
        .free = jazz_dma_free,
        .map_page = jazz_dma_map_page,
        .unmap_page = jazz_dma_unmap_page,
        .map_phys = jazz_dma_map_phys,
        .unmap_phys = jazz_dma_unmap_phys,
        .map_sg = jazz_dma_map_sg,
        .unmap_sg = jazz_dma_unmap_sg,
        .sync_single_for_cpu = jazz_dma_sync_single_for_cpu,

@@ -274,12 +274,12 @@ extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
|
||||
unsigned long mask, gfp_t flag, int node);
|
||||
extern void iommu_free_coherent(struct iommu_table *tbl, size_t size,
|
||||
void *vaddr, dma_addr_t dma_handle);
|
||||
extern dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
|
||||
struct page *page, unsigned long offset,
|
||||
size_t size, unsigned long mask,
|
||||
extern dma_addr_t iommu_map_phys(struct device *dev, struct iommu_table *tbl,
|
||||
phys_addr_t phys, size_t size,
|
||||
unsigned long mask,
|
||||
enum dma_data_direction direction,
|
||||
unsigned long attrs);
|
||||
extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
|
||||
extern void iommu_unmap_phys(struct iommu_table *tbl, dma_addr_t dma_handle,
|
||||
size_t size, enum dma_data_direction direction,
|
||||
unsigned long attrs);
|
||||
|
||||
|
||||
@@ -93,28 +93,26 @@ static void dma_iommu_free_coherent(struct device *dev, size_t size,
|
||||
|
||||
/* Creates TCEs for a user provided buffer. The user buffer must be
|
||||
* contiguous real kernel storage (not vmalloc). The address passed here
|
||||
* comprises a page address and offset into that page. The dma_addr_t
|
||||
* returned will point to the same byte within the page as was passed in.
|
||||
* is a physical address to that page. The dma_addr_t returned will point
|
||||
* to the same byte within the page as was passed in.
|
||||
*/
|
||||
static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
|
||||
unsigned long offset, size_t size,
|
||||
static dma_addr_t dma_iommu_map_phys(struct device *dev, phys_addr_t phys,
|
||||
size_t size,
|
||||
enum dma_data_direction direction,
|
||||
unsigned long attrs)
|
||||
{
|
||||
return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
|
||||
size, dma_get_mask(dev), direction, attrs);
|
||||
return iommu_map_phys(dev, get_iommu_table_base(dev), phys, size,
|
||||
dma_get_mask(dev), direction, attrs);
|
||||
}
|
||||
|
||||
|
||||
static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
|
||||
static void dma_iommu_unmap_phys(struct device *dev, dma_addr_t dma_handle,
|
||||
size_t size, enum dma_data_direction direction,
|
||||
unsigned long attrs)
|
||||
{
|
||||
iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction,
|
||||
iommu_unmap_phys(get_iommu_table_base(dev), dma_handle, size, direction,
|
||||
attrs);
|
||||
}
|
||||
|
||||
|
||||
static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
|
||||
int nelems, enum dma_data_direction direction,
|
||||
unsigned long attrs)
|
||||
@@ -211,8 +209,8 @@ const struct dma_map_ops dma_iommu_ops = {
|
||||
.map_sg = dma_iommu_map_sg,
|
||||
.unmap_sg = dma_iommu_unmap_sg,
|
||||
.dma_supported = dma_iommu_dma_supported,
|
||||
.map_page = dma_iommu_map_page,
|
||||
.unmap_page = dma_iommu_unmap_page,
|
||||
.map_phys = dma_iommu_map_phys,
|
||||
.unmap_phys = dma_iommu_unmap_phys,
|
||||
.get_required_mask = dma_iommu_get_required_mask,
|
||||
.mmap = dma_common_mmap,
|
||||
.get_sgtable = dma_common_get_sgtable,
|
||||
|
||||
@@ -848,12 +848,12 @@ EXPORT_SYMBOL_GPL(iommu_tce_table_put);
|
||||
|
||||
/* Creates TCEs for a user provided buffer. The user buffer must be
|
||||
* contiguous real kernel storage (not vmalloc). The address passed here
|
||||
* comprises a page address and offset into that page. The dma_addr_t
|
||||
* returned will point to the same byte within the page as was passed in.
|
||||
* is physical address into that page. The dma_addr_t returned will point
|
||||
* to the same byte within the page as was passed in.
|
||||
*/
|
||||
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
|
||||
struct page *page, unsigned long offset, size_t size,
|
||||
unsigned long mask, enum dma_data_direction direction,
|
||||
dma_addr_t iommu_map_phys(struct device *dev, struct iommu_table *tbl,
|
||||
phys_addr_t phys, size_t size, unsigned long mask,
|
||||
enum dma_data_direction direction,
|
||||
unsigned long attrs)
|
||||
{
|
||||
dma_addr_t dma_handle = DMA_MAPPING_ERROR;
|
||||
@@ -863,7 +863,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
|
||||
|
||||
BUG_ON(direction == DMA_NONE);
|
||||
|
||||
vaddr = page_address(page) + offset;
|
||||
vaddr = phys_to_virt(phys);
|
||||
uaddr = (unsigned long)vaddr;
|
||||
|
||||
if (tbl) {
|
||||
@@ -890,7 +890,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
|
||||
return dma_handle;
|
||||
}
|
||||
|
||||
void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
|
||||
void iommu_unmap_phys(struct iommu_table *tbl, dma_addr_t dma_handle,
|
||||
size_t size, enum dma_data_direction direction,
|
||||
unsigned long attrs)
|
||||
{
|
||||
|
||||
@@ -551,18 +551,20 @@ static void ps3_free_coherent(struct device *_dev, size_t size, void *vaddr,
|
||||
|
||||
/* Creates TCEs for a user provided buffer. The user buffer must be
|
||||
* contiguous real kernel storage (not vmalloc). The address passed here
|
||||
* comprises a page address and offset into that page. The dma_addr_t
|
||||
* returned will point to the same byte within the page as was passed in.
|
||||
* is physical address to that hat page. The dma_addr_t returned will point
|
||||
* to the same byte within the page as was passed in.
|
||||
*/
|
||||
|
||||
static dma_addr_t ps3_sb_map_page(struct device *_dev, struct page *page,
|
||||
unsigned long offset, size_t size, enum dma_data_direction direction,
|
||||
unsigned long attrs)
|
||||
static dma_addr_t ps3_sb_map_phys(struct device *_dev, phys_addr_t phys,
|
||||
size_t size, enum dma_data_direction direction, unsigned long attrs)
|
||||
{
|
||||
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
|
||||
int result;
|
||||
dma_addr_t bus_addr;
|
||||
void *ptr = page_address(page) + offset;
|
||||
void *ptr = phys_to_virt(phys);
|
||||
|
||||
if (unlikely(attrs & DMA_ATTR_MMIO))
|
||||
return DMA_MAPPING_ERROR;
|
||||
|
||||
result = ps3_dma_map(dev->d_region, (unsigned long)ptr, size,
|
||||
&bus_addr,
|
||||
@@ -577,8 +579,8 @@ static dma_addr_t ps3_sb_map_page(struct device *_dev, struct page *page,
|
||||
return bus_addr;
|
||||
}
|
||||
|
||||
static dma_addr_t ps3_ioc0_map_page(struct device *_dev, struct page *page,
|
||||
unsigned long offset, size_t size,
|
||||
static dma_addr_t ps3_ioc0_map_phys(struct device *_dev, phys_addr_t phys,
|
||||
size_t size,
|
||||
enum dma_data_direction direction,
|
||||
unsigned long attrs)
|
||||
{
|
||||
@@ -586,7 +588,10 @@ static dma_addr_t ps3_ioc0_map_page(struct device *_dev, struct page *page,
|
||||
int result;
|
||||
dma_addr_t bus_addr;
|
||||
u64 iopte_flag;
|
||||
void *ptr = page_address(page) + offset;
|
||||
void *ptr = phys_to_virt(phys);
|
||||
|
||||
if (unlikely(attrs & DMA_ATTR_MMIO))
|
||||
return DMA_MAPPING_ERROR;
|
||||
|
||||
iopte_flag = CBE_IOPTE_M;
|
||||
switch (direction) {
|
||||
@@ -613,7 +618,7 @@ static dma_addr_t ps3_ioc0_map_page(struct device *_dev, struct page *page,
|
||||
return bus_addr;
|
||||
}
|
||||
|
||||
static void ps3_unmap_page(struct device *_dev, dma_addr_t dma_addr,
|
||||
static void ps3_unmap_phys(struct device *_dev, dma_addr_t dma_addr,
|
||||
size_t size, enum dma_data_direction direction, unsigned long attrs)
|
||||
{
|
||||
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
|
||||
@@ -690,8 +695,8 @@ static const struct dma_map_ops ps3_sb_dma_ops = {
|
||||
.map_sg = ps3_sb_map_sg,
|
||||
.unmap_sg = ps3_sb_unmap_sg,
|
||||
.dma_supported = ps3_dma_supported,
|
||||
.map_page = ps3_sb_map_page,
|
||||
.unmap_page = ps3_unmap_page,
|
||||
.map_phys = ps3_sb_map_phys,
|
||||
.unmap_phys = ps3_unmap_phys,
|
||||
.mmap = dma_common_mmap,
|
||||
.get_sgtable = dma_common_get_sgtable,
|
||||
.alloc_pages_op = dma_common_alloc_pages,
|
||||
@@ -704,8 +709,8 @@ static const struct dma_map_ops ps3_ioc0_dma_ops = {
|
||||
.map_sg = ps3_ioc0_map_sg,
|
||||
.unmap_sg = ps3_ioc0_unmap_sg,
|
||||
.dma_supported = ps3_dma_supported,
|
||||
.map_page = ps3_ioc0_map_page,
|
||||
.unmap_page = ps3_unmap_page,
|
||||
.map_phys = ps3_ioc0_map_phys,
|
||||
.unmap_phys = ps3_unmap_phys,
|
||||
.mmap = dma_common_mmap,
|
||||
.get_sgtable = dma_common_get_sgtable,
|
||||
.alloc_pages_op = dma_common_alloc_pages,
|
||||
|
||||
@@ -86,17 +86,18 @@ static void ibmebus_free_coherent(struct device *dev,
|
||||
kfree(vaddr);
|
||||
}
|
||||
|
||||
static dma_addr_t ibmebus_map_page(struct device *dev,
|
||||
struct page *page,
|
||||
unsigned long offset,
|
||||
static dma_addr_t ibmebus_map_phys(struct device *dev, phys_addr_t phys,
|
||||
size_t size,
|
||||
enum dma_data_direction direction,
|
||||
unsigned long attrs)
|
||||
{
|
||||
return (dma_addr_t)(page_address(page) + offset);
|
||||
if (attrs & DMA_ATTR_MMIO)
|
||||
return DMA_MAPPING_ERROR;
|
||||
|
||||
return (dma_addr_t)(phys_to_virt(phys));
|
||||
}
|
||||
|
||||
static void ibmebus_unmap_page(struct device *dev,
|
||||
static void ibmebus_unmap_phys(struct device *dev,
|
||||
dma_addr_t dma_addr,
|
||||
size_t size,
|
||||
enum dma_data_direction direction,
|
||||
@@ -146,8 +147,8 @@ static const struct dma_map_ops ibmebus_dma_ops = {
|
||||
.unmap_sg = ibmebus_unmap_sg,
|
||||
.dma_supported = ibmebus_dma_supported,
|
||||
.get_required_mask = ibmebus_dma_get_required_mask,
|
||||
.map_page = ibmebus_map_page,
|
||||
.unmap_page = ibmebus_unmap_page,
|
||||
.map_phys = ibmebus_map_phys,
|
||||
.unmap_phys = ibmebus_unmap_phys,
|
||||
};
|
||||
|
||||
static int ibmebus_match_path(struct device *dev, const void *data)
|
||||
|
||||
@@ -512,18 +512,21 @@ static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
 	vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
 }
 
-static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
-					 unsigned long offset, size_t size,
-					 enum dma_data_direction direction,
-					 unsigned long attrs)
+static dma_addr_t vio_dma_iommu_map_phys(struct device *dev, phys_addr_t phys,
+					 size_t size,
+					 enum dma_data_direction direction,
+					 unsigned long attrs)
 {
 	struct vio_dev *viodev = to_vio_dev(dev);
 	struct iommu_table *tbl = get_iommu_table_base(dev);
 	dma_addr_t ret = DMA_MAPPING_ERROR;
 
+	if (unlikely(attrs & DMA_ATTR_MMIO))
+		return ret;
+
 	if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))))
 		goto out_fail;
-	ret = iommu_map_page(dev, tbl, page, offset, size, dma_get_mask(dev),
+	ret = iommu_map_phys(dev, tbl, phys, size, dma_get_mask(dev),
 			     direction, attrs);
 	if (unlikely(ret == DMA_MAPPING_ERROR))
 		goto out_deallocate;
@@ -536,7 +539,7 @@ out_fail:
 	return DMA_MAPPING_ERROR;
 }
 
-static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
+static void vio_dma_iommu_unmap_phys(struct device *dev, dma_addr_t dma_handle,
 				     size_t size,
 				     enum dma_data_direction direction,
 				     unsigned long attrs)
@@ -544,7 +547,7 @@ static void vio_dma_iommu_unmap_phys(struct device *dev, dma_addr_t dma_handle,
 	struct vio_dev *viodev = to_vio_dev(dev);
 	struct iommu_table *tbl = get_iommu_table_base(dev);
 
-	iommu_unmap_page(tbl, dma_handle, size, direction, attrs);
+	iommu_unmap_phys(tbl, dma_handle, size, direction, attrs);
 	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
 }
 
@@ -605,8 +608,8 @@ static const struct dma_map_ops vio_dma_mapping_ops = {
 	.free = vio_dma_iommu_free_coherent,
 	.map_sg = vio_dma_iommu_map_sg,
 	.unmap_sg = vio_dma_iommu_unmap_sg,
-	.map_page = vio_dma_iommu_map_page,
-	.unmap_page = vio_dma_iommu_unmap_page,
+	.map_phys = vio_dma_iommu_map_phys,
+	.unmap_phys = vio_dma_iommu_unmap_phys,
 	.dma_supported = dma_iommu_dma_supported,
 	.get_required_mask = dma_iommu_get_required_mask,
 	.mmap = dma_common_mmap,

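vio wraps each mapping in CMO entitlement accounting, so failure paths must unwind in reverse order: entitlement reserved with vio_cmo_alloc() is released again if the IOMMU mapping itself fails. The control flow in isolation (the out_deallocate/out_fail bodies are inferred from the label names and surrounding hunks, so treat this as a sketch):

	if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))))
		goto out_fail;			/* no entitlement, nothing to undo */
	ret = iommu_map_phys(dev, tbl, phys, size, dma_get_mask(dev),
			     direction, attrs);
	if (unlikely(ret == DMA_MAPPING_ERROR))
		goto out_deallocate;		/* undo the CMO reservation */
	return ret;
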
@@ -260,26 +260,35 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
 	free_pages((unsigned long)cpu, order);
 }
 
-static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
-				  unsigned long offset, size_t sz,
-				  enum dma_data_direction direction,
+static dma_addr_t dma_4u_map_phys(struct device *dev, phys_addr_t phys,
+				  size_t sz, enum dma_data_direction direction,
 				  unsigned long attrs)
 {
 	struct iommu *iommu;
 	struct strbuf *strbuf;
 	iopte_t *base;
 	unsigned long flags, npages, oaddr;
-	unsigned long i, base_paddr, ctx;
+	unsigned long i, ctx;
 	u32 bus_addr, ret;
 	unsigned long iopte_protection;
 
+	if (unlikely(attrs & DMA_ATTR_MMIO))
+		/*
+		 * This check is included because older versions of the code
+		 * lacked MMIO path support, and my ability to test this path
+		 * is limited. However, from a software technical standpoint,
+		 * there is no restriction, as the following code operates
+		 * solely on physical addresses.
+		 */
+		goto bad_no_ctx;
+
 	iommu = dev->archdata.iommu;
 	strbuf = dev->archdata.stc;
 
 	if (unlikely(direction == DMA_NONE))
 		goto bad_no_ctx;
 
-	oaddr = (unsigned long)(page_address(page) + offset);
+	oaddr = (unsigned long)(phys_to_virt(phys));
 	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;
 
@@ -296,7 +305,6 @@ static dma_addr_t dma_4u_map_phys(struct device *dev, phys_addr_t phys,
 	bus_addr = (iommu->tbl.table_map_base +
 		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
 	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
-	base_paddr = __pa(oaddr & IO_PAGE_MASK);
 	if (strbuf->strbuf_enabled)
 		iopte_protection = IOPTE_STREAMING(ctx);
 	else
@@ -304,8 +312,8 @@ static dma_addr_t dma_4u_map_phys(struct device *dev, phys_addr_t phys,
 	if (direction != DMA_TO_DEVICE)
 		iopte_protection |= IOPTE_WRITE;
 
-	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
-		iopte_val(*base) = iopte_protection | base_paddr;
+	for (i = 0; i < npages; i++, base++, phys += IO_PAGE_SIZE)
+		iopte_val(*base) = iopte_protection | phys;
 
 	return ret;
 
@@ -383,7 +391,7 @@ do_flush_sync:
 			  vaddr, ctx, npages);
 }
 
-static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
+static void dma_4u_unmap_phys(struct device *dev, dma_addr_t bus_addr,
 			      size_t sz, enum dma_data_direction direction,
 			      unsigned long attrs)
 {
@@ -753,8 +761,8 @@ static int dma_4u_supported(struct device *dev, u64 device_mask)
 static const struct dma_map_ops sun4u_dma_ops = {
 	.alloc = dma_4u_alloc_coherent,
 	.free = dma_4u_free_coherent,
-	.map_page = dma_4u_map_page,
-	.unmap_page = dma_4u_unmap_page,
+	.map_phys = dma_4u_map_phys,
+	.unmap_phys = dma_4u_unmap_phys,
 	.map_sg = dma_4u_map_sg,
 	.unmap_sg = dma_4u_unmap_sg,
 	.sync_single_for_cpu = dma_4u_sync_single_for_cpu,

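The npages computation rounds the byte range [oaddr, oaddr + sz) outward to IO page boundaries before shifting down to a page count. A worked example, assuming sparc64's 8 KiB IO pages (IO_PAGE_SHIFT == 13):

	unsigned long oaddr = 0x103ff0, sz = 0x30;	/* straddles an IO page boundary */
	unsigned long npages;

	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	/* 0x106000 - 0x102000 == 0x4000 */
	npages >>= IO_PAGE_SHIFT;			/* 2 pages for a 0x30-byte buffer */
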
@@ -352,9 +352,8 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
 	free_pages((unsigned long)cpu, order);
 }
 
-static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
-				  unsigned long offset, size_t sz,
-				  enum dma_data_direction direction,
+static dma_addr_t dma_4v_map_phys(struct device *dev, phys_addr_t phys,
+				  size_t sz, enum dma_data_direction direction,
 				  unsigned long attrs)
 {
 	struct iommu *iommu;
@@ -362,18 +361,27 @@ static dma_addr_t dma_4v_map_phys(struct device *dev, phys_addr_t phys,
 	struct iommu_map_table *tbl;
 	u64 mask;
 	unsigned long flags, npages, oaddr;
-	unsigned long i, base_paddr;
-	unsigned long prot;
+	unsigned long i, prot;
 	dma_addr_t bus_addr, ret;
 	long entry;
 
+	if (unlikely(attrs & DMA_ATTR_MMIO))
+		/*
+		 * This check is included because older versions of the code
+		 * lacked MMIO path support, and my ability to test this path
+		 * is limited. However, from a software technical standpoint,
+		 * there is no restriction, as the following code operates
+		 * solely on physical addresses.
+		 */
+		goto bad;
+
 	iommu = dev->archdata.iommu;
 	atu = iommu->atu;
 
 	if (unlikely(direction == DMA_NONE))
 		goto bad;
 
-	oaddr = (unsigned long)(page_address(page) + offset);
+	oaddr = (unsigned long)(phys_to_virt(phys));
 	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;
 
@@ -391,7 +399,6 @@ static dma_addr_t dma_4v_map_phys(struct device *dev, phys_addr_t phys,
 
 	bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
 	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
-	base_paddr = __pa(oaddr & IO_PAGE_MASK);
 	prot = HV_PCI_MAP_ATTR_READ;
 	if (direction != DMA_TO_DEVICE)
 		prot |= HV_PCI_MAP_ATTR_WRITE;
@@ -403,8 +410,8 @@ static dma_addr_t dma_4v_map_phys(struct device *dev, phys_addr_t phys,
 
 	iommu_batch_start(dev, prot, entry);
 
-	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
-		long err = iommu_batch_add(base_paddr, mask);
+	for (i = 0; i < npages; i++, phys += IO_PAGE_SIZE) {
+		long err = iommu_batch_add(phys, mask);
 		if (unlikely(err < 0L))
 			goto iommu_map_fail;
 	}
@@ -426,7 +433,7 @@ iommu_map_fail:
 	return DMA_MAPPING_ERROR;
 }
 
-static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
+static void dma_4v_unmap_phys(struct device *dev, dma_addr_t bus_addr,
 			      size_t sz, enum dma_data_direction direction,
 			      unsigned long attrs)
 {
@@ -686,8 +693,8 @@ static int dma_4v_supported(struct device *dev, u64 device_mask)
 static const struct dma_map_ops sun4v_dma_ops = {
 	.alloc = dma_4v_alloc_coherent,
 	.free = dma_4v_free_coherent,
-	.map_page = dma_4v_map_page,
-	.unmap_page = dma_4v_unmap_page,
+	.map_phys = dma_4v_map_phys,
+	.unmap_phys = dma_4v_unmap_phys,
 	.map_sg = dma_4v_map_sg,
 	.unmap_sg = dma_4v_unmap_sg,
 	.dma_supported = dma_4v_supported,

@@ -94,13 +94,14 @@ static int __init iounit_init(void)
 subsys_initcall(iounit_init);
 
 /* One has to hold iounit->lock to call this */
-static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
+static dma_addr_t iounit_get_area(struct iounit_struct *iounit,
+				  phys_addr_t phys, int size)
 {
 	int i, j, k, npages;
 	unsigned long rotor, scan, limit;
 	iopte_t iopte;
 
-	npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
+	npages = (offset_in_page(phys) + size + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 
 	/* A tiny bit of magic ingredience :) */
 	switch (npages) {
@@ -109,7 +110,7 @@ static dma_addr_t iounit_get_area(struct iounit_struct *iounit,
 	default: i = 0x0213; break;
 	}
 
-	IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));
+	IOD(("%s(%pa,%d[%d])=", __func__, &phys, size, npages));
 
 next:	j = (i & 15);
 	rotor = iounit->rotor[j - 1];
@@ -124,7 +125,8 @@ nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
 		}
 		i >>= 4;
 		if (!(i & 15))
-			panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
+			panic("iounit_get_area: Couldn't find free iopte slots for (%pa,%d)\n",
+			      &phys, size);
 		goto next;
 	}
 	for (k = 1, scan++; k < npages; k++)
@@ -132,30 +134,29 @@ nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
 			goto nexti;
 	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
 	scan -= npages;
-	iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
-	vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
+	iopte = MKIOPTE(phys & PAGE_MASK);
+	phys = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + offset_in_page(phys);
 	for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
 		set_bit(scan, iounit->bmap);
 		sbus_writel(iopte_val(iopte), &iounit->page_table[scan]);
 	}
-	IOD(("%08lx\n", vaddr));
-	return vaddr;
+	IOD(("%pa\n", &phys));
+	return phys;
 }
 
-static dma_addr_t iounit_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t len, enum dma_data_direction dir,
-		unsigned long attrs)
+static dma_addr_t iounit_map_phys(struct device *dev, phys_addr_t phys,
+		size_t len, enum dma_data_direction dir, unsigned long attrs)
 {
-	void *vaddr = page_address(page) + offset;
 	struct iounit_struct *iounit = dev->archdata.iommu;
-	unsigned long ret, flags;
+	unsigned long flags;
+	dma_addr_t ret;
 
 	/* XXX So what is maxphys for us and how do drivers know it? */
 	if (!len || len > 256 * 1024)
 		return DMA_MAPPING_ERROR;
 
 	spin_lock_irqsave(&iounit->lock, flags);
-	ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
+	ret = iounit_get_area(iounit, phys, len);
 	spin_unlock_irqrestore(&iounit->lock, flags);
 	return ret;
 }
@@ -171,14 +172,15 @@ static int iounit_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
 	/* FIXME: Cache some resolved pages - often several sg entries are to the same page */
 	spin_lock_irqsave(&iounit->lock, flags);
 	for_each_sg(sgl, sg, nents, i) {
-		sg->dma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length);
+		sg->dma_address =
+			iounit_get_area(iounit, sg_phys(sg), sg->length);
 		sg->dma_length = sg->length;
 	}
 	spin_unlock_irqrestore(&iounit->lock, flags);
 	return nents;
 }
 
-static void iounit_unmap_page(struct device *dev, dma_addr_t vaddr, size_t len,
+static void iounit_unmap_phys(struct device *dev, dma_addr_t vaddr, size_t len,
 		enum dma_data_direction dir, unsigned long attrs)
 {
 	struct iounit_struct *iounit = dev->archdata.iommu;
@@ -279,8 +281,8 @@ static const struct dma_map_ops iounit_dma_ops = {
 	.alloc = iounit_alloc,
 	.free = iounit_free,
 #endif
-	.map_page = iounit_map_page,
-	.unmap_page = iounit_unmap_page,
+	.map_phys = iounit_map_phys,
+	.unmap_phys = iounit_unmap_phys,
 	.map_sg = iounit_map_sg,
 	.unmap_sg = iounit_unmap_sg,
 };

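offset_in_page() is just the low bits of the address, so the converted expressions are bit-for-bit the old (vaddr & ~PAGE_MASK) arithmetic applied to the physical address; nothing about the slot search changes. For reference, the helper expands to:

	/* from <linux/mm.h>: offset_in_page(p) == ((unsigned long)(p) & ~PAGE_MASK) */
	npages = (offset_in_page(phys) + size + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
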
@@ -181,18 +181,20 @@ static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
 	}
 }
 
-static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t len, bool per_page_flush)
+static dma_addr_t __sbus_iommu_map_phys(struct device *dev, phys_addr_t paddr,
+		size_t len, bool per_page_flush, unsigned long attrs)
 {
 	struct iommu_struct *iommu = dev->archdata.iommu;
-	phys_addr_t paddr = page_to_phys(page) + offset;
-	unsigned long off = paddr & ~PAGE_MASK;
+	unsigned long off = offset_in_page(paddr);
 	unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	unsigned long pfn = __phys_to_pfn(paddr);
 	unsigned int busa, busa0;
 	iopte_t *iopte, *iopte0;
 	int ioptex, i;
 
+	if (unlikely(attrs & DMA_ATTR_MMIO))
+		return DMA_MAPPING_ERROR;
+
 	/* XXX So what is maxphys for us and how do drivers know it? */
 	if (!len || len > 256 * 1024)
 		return DMA_MAPPING_ERROR;
@@ -202,10 +204,10 @@ static dma_addr_t __sbus_iommu_map_phys(struct device *dev, phys_addr_t paddr,
 	 * XXX Is this a good assumption?
 	 * XXX What if someone else unmaps it here and races us?
 	 */
-	if (per_page_flush && !PageHighMem(page)) {
+	if (per_page_flush && !PhysHighMem(paddr)) {
 		unsigned long vaddr, p;
 
-		vaddr = (unsigned long)page_address(page) + offset;
+		vaddr = (unsigned long)phys_to_virt(paddr);
 		for (p = vaddr & PAGE_MASK; p < vaddr + len; p += PAGE_SIZE)
 			flush_page_for_dma(p);
 	}
@@ -231,19 +233,19 @@ static dma_addr_t __sbus_iommu_map_phys(struct device *dev, phys_addr_t paddr,
 	return busa0 + off;
 }
 
-static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev,
-		struct page *page, unsigned long offset, size_t len,
-		enum dma_data_direction dir, unsigned long attrs)
+static dma_addr_t sbus_iommu_map_phys_gflush(struct device *dev,
+		phys_addr_t phys, size_t len, enum dma_data_direction dir,
+		unsigned long attrs)
 {
 	flush_page_for_dma(0);
-	return __sbus_iommu_map_page(dev, page, offset, len, false);
+	return __sbus_iommu_map_phys(dev, phys, len, false, attrs);
 }
 
-static dma_addr_t sbus_iommu_map_page_pflush(struct device *dev,
-		struct page *page, unsigned long offset, size_t len,
-		enum dma_data_direction dir, unsigned long attrs)
+static dma_addr_t sbus_iommu_map_phys_pflush(struct device *dev,
+		phys_addr_t phys, size_t len, enum dma_data_direction dir,
+		unsigned long attrs)
 {
-	return __sbus_iommu_map_page(dev, page, offset, len, true);
+	return __sbus_iommu_map_phys(dev, phys, len, true, attrs);
 }
 
 static int __sbus_iommu_map_sg(struct device *dev, struct scatterlist *sgl,
@@ -254,8 +256,8 @@ static int __sbus_iommu_map_sg(struct device *dev, struct scatterlist *sgl,
 	int j;
 
 	for_each_sg(sgl, sg, nents, j) {
-		sg->dma_address =__sbus_iommu_map_page(dev, sg_page(sg),
-				sg->offset, sg->length, per_page_flush);
+		sg->dma_address = __sbus_iommu_map_phys(dev, sg_phys(sg),
+				sg->length, per_page_flush, attrs);
 		if (sg->dma_address == DMA_MAPPING_ERROR)
 			return -EIO;
 		sg->dma_length = sg->length;
@@ -277,7 +279,7 @@ static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sgl,
 	return __sbus_iommu_map_sg(dev, sgl, nents, dir, attrs, true);
 }
 
-static void sbus_iommu_unmap_page(struct device *dev, dma_addr_t dma_addr,
+static void sbus_iommu_unmap_phys(struct device *dev, dma_addr_t dma_addr,
 		size_t len, enum dma_data_direction dir, unsigned long attrs)
 {
 	struct iommu_struct *iommu = dev->archdata.iommu;
@@ -303,7 +305,7 @@ static void sbus_iommu_unmap_sg(struct device *dev, struct scatterlist *sgl,
 	int i;
 
 	for_each_sg(sgl, sg, nents, i) {
-		sbus_iommu_unmap_page(dev, sg->dma_address, sg->length, dir,
+		sbus_iommu_unmap_phys(dev, sg->dma_address, sg->length, dir,
 				      attrs);
 		sg->dma_address = 0x21212121;
 	}
@@ -426,8 +428,8 @@ static const struct dma_map_ops sbus_iommu_dma_gflush_ops = {
 	.alloc = sbus_iommu_alloc,
 	.free = sbus_iommu_free,
 #endif
-	.map_page = sbus_iommu_map_page_gflush,
-	.unmap_page = sbus_iommu_unmap_page,
+	.map_phys = sbus_iommu_map_phys_gflush,
+	.unmap_phys = sbus_iommu_unmap_phys,
 	.map_sg = sbus_iommu_map_sg_gflush,
 	.unmap_sg = sbus_iommu_unmap_sg,
 };
@@ -437,8 +439,8 @@ static const struct dma_map_ops sbus_iommu_dma_pflush_ops = {
 	.alloc = sbus_iommu_alloc,
 	.free = sbus_iommu_free,
 #endif
-	.map_page = sbus_iommu_map_page_pflush,
-	.unmap_page = sbus_iommu_unmap_page,
+	.map_phys = sbus_iommu_map_phys_pflush,
+	.unmap_phys = sbus_iommu_unmap_phys,
 	.map_sg = sbus_iommu_map_sg_pflush,
 	.unmap_sg = sbus_iommu_unmap_sg,
 };

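The sg loops above switch from sg_page(sg) plus sg->offset to sg_phys(sg), which folds the same pair into one physical address. The helper in <linux/scatterlist.h> is essentially:

	static inline phys_addr_t sg_phys_example(struct scatterlist *sg)
	{
		/* same value __sbus_iommu_map_phys() now receives directly */
		return page_to_phys(sg_page(sg)) + sg->offset;
	}

(sg_phys_example is a renamed copy for illustration; the real helper is sg_phys().)
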
@@ -2,6 +2,8 @@
 #ifndef _ASM_X86_CPUMASK_H
 #define _ASM_X86_CPUMASK_H
 #ifndef __ASSEMBLER__
+
+#include <linux/compiler.h>
 #include <linux/cpumask.h>
 
 extern void setup_cpu_local_masks(void);

@@ -222,13 +222,14 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
 }
 
 /* Map a single area into the IOMMU */
-static dma_addr_t gart_map_page(struct device *dev, struct page *page,
-				unsigned long offset, size_t size,
-				enum dma_data_direction dir,
+static dma_addr_t gart_map_phys(struct device *dev, phys_addr_t paddr,
+				size_t size, enum dma_data_direction dir,
 				unsigned long attrs)
 {
 	unsigned long bus;
-	phys_addr_t paddr = page_to_phys(page) + offset;
+
+	if (unlikely(attrs & DMA_ATTR_MMIO))
+		return DMA_MAPPING_ERROR;
 
 	if (!need_iommu(dev, paddr, size))
 		return paddr;
@@ -242,7 +243,7 @@ static dma_addr_t gart_map_phys(struct device *dev, phys_addr_t paddr,
 /*
  * Free a DMA mapping.
  */
-static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
+static void gart_unmap_phys(struct device *dev, dma_addr_t dma_addr,
 			    size_t size, enum dma_data_direction dir,
 			    unsigned long attrs)
 {
@@ -282,7 +283,7 @@ static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 	for_each_sg(sg, s, nents, i) {
 		if (!s->dma_length || !s->length)
 			break;
-		gart_unmap_page(dev, s->dma_address, s->dma_length, dir, 0);
+		gart_unmap_phys(dev, s->dma_address, s->dma_length, dir, 0);
 	}
 }
 
@@ -487,7 +488,7 @@ static void
 gart_free_coherent(struct device *dev, size_t size, void *vaddr,
 		   dma_addr_t dma_addr, unsigned long attrs)
 {
-	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0);
+	gart_unmap_phys(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0);
 	dma_direct_free(dev, size, vaddr, dma_addr, attrs);
 }
 
@@ -672,8 +673,8 @@ static __init int init_amd_gatt(struct agp_kern_info *info)
 static const struct dma_map_ops gart_dma_ops = {
 	.map_sg = gart_map_sg,
 	.unmap_sg = gart_unmap_sg,
-	.map_page = gart_map_page,
-	.unmap_page = gart_unmap_page,
+	.map_phys = gart_map_phys,
+	.unmap_phys = gart_unmap_phys,
 	.alloc = gart_alloc_coherent,
 	.free = gart_free_coherent,
 	.mmap = dma_common_mmap,

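gart only allocates aperture space when the device cannot already reach the buffer; when need_iommu() says no, the physical address is handed back unchanged as the bus address. A sketch of that decision, with device_can_reach() as a hypothetical stand-in for the mask check need_iommu() performs:

	/* hypothetical helper mirroring need_iommu()'s mask comparison */
	static bool device_can_reach(struct device *dev, phys_addr_t paddr,
				     size_t size)
	{
		return paddr + size - 1 <= *dev->dma_mask;
	}

So the common case costs nothing, and only buffers above the device's DMA mask go through the GART aperture remap.
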
@@ -160,7 +160,7 @@ obj-$(CONFIG_RPMSG)		+= rpmsg/
 obj-$(CONFIG_SOUNDWIRE)	+= soundwire/
 
 # Virtualization drivers
-obj-$(CONFIG_VIRT_DRIVERS)	+= virt/
+obj-y				+= virt/
 obj-$(CONFIG_HYPERV)		+= hv/
 
 obj-$(CONFIG_PM_DEVFREQ)	+= devfreq/

@@ -19,6 +19,7 @@ use kernel::{
     cred::Credential,
     error::Error,
     fs::file::{self, File},
+    id_pool::IdPool,
     list::{List, ListArc, ListArcField, ListLinks},
     mm,
     prelude::*,
@@ -394,6 +395,8 @@ kernel::list::impl_list_item! {
 struct ProcessNodeRefs {
     /// Used to look up nodes using the 32-bit id that this process knows it by.
     by_handle: RBTree<u32, ListArc<NodeRefInfo, { NodeRefInfo::LIST_PROC }>>,
+    /// Used to quickly find unused ids in `by_handle`.
+    handle_is_present: IdPool,
     /// Used to look up nodes without knowing their local 32-bit id. The usize is the address of
     /// the underlying `Node` struct as returned by `Node::global_id`.
     by_node: RBTree<usize, u32>,
@@ -408,6 +411,7 @@ impl ProcessNodeRefs {
     fn new() -> Self {
         Self {
             by_handle: RBTree::new(),
+            handle_is_present: IdPool::new(),
             by_node: RBTree::new(),
             freeze_listeners: RBTree::new(),
         }
@@ -802,7 +806,7 @@ impl Process {
     pub(crate) fn insert_or_update_handle(
         self: ArcBorrow<'_, Process>,
         node_ref: NodeRef,
-        is_mananger: bool,
+        is_manager: bool,
     ) -> Result<u32> {
         {
             let mut refs = self.node_refs.lock();
@@ -821,7 +825,33 @@ impl Process {
         let reserve2 = RBTreeNodeReservation::new(GFP_KERNEL)?;
         let info = UniqueArc::new_uninit(GFP_KERNEL)?;
 
-        let mut refs = self.node_refs.lock();
+        let mut refs_lock = self.node_refs.lock();
+        let mut refs = &mut *refs_lock;
+
+        let (unused_id, by_handle_slot) = loop {
+            // ID 0 may only be used by the manager.
+            let start = if is_manager { 0 } else { 1 };
+
+            if let Some(res) = refs.handle_is_present.find_unused_id(start) {
+                match refs.by_handle.entry(res.as_u32()) {
+                    rbtree::Entry::Vacant(entry) => break (res, entry),
+                    rbtree::Entry::Occupied(_) => {
+                        pr_err!("Detected mismatch between handle_is_present and by_handle");
+                        res.acquire();
+                        kernel::warn_on!(true);
+                        return Err(EINVAL);
+                    }
+                }
+            }
+
+            let grow_request = refs.handle_is_present.grow_request().ok_or(ENOMEM)?;
+            drop(refs_lock);
+            let resizer = grow_request.realloc(GFP_KERNEL)?;
+            refs_lock = self.node_refs.lock();
+            refs = &mut *refs_lock;
+            refs.handle_is_present.grow(resizer);
+        };
+        let handle = unused_id.as_u32();
 
         // Do a lookup again as node may have been inserted before the lock was reacquired.
         if let Some(handle_ref) = refs.by_node.get(&node_ref.node.global_id()) {
@@ -831,20 +861,9 @@ impl Process {
             return Ok(handle);
         }
 
-        // Find id.
-        let mut target: u32 = if is_mananger { 0 } else { 1 };
-        for handle in refs.by_handle.keys() {
-            if *handle > target {
-                break;
-            }
-            if *handle == target {
-                target = target.checked_add(1).ok_or(ENOMEM)?;
-            }
-        }
-
         let gid = node_ref.node.global_id();
         let (info_proc, info_node) = {
-            let info_init = NodeRefInfo::new(node_ref, target, self.into());
+            let info_init = NodeRefInfo::new(node_ref, handle, self.into());
             match info.pin_init_with(info_init) {
                 Ok(info) => ListArc::pair_from_pin_unique(info),
                 // error is infallible
@@ -865,9 +884,10 @@ impl Process {
         // `info_node` into the right node's `refs` list.
         unsafe { info_proc.node_ref2().node.insert_node_info(info_node) };
 
-        refs.by_node.insert(reserve1.into_node(gid, target));
-        refs.by_handle.insert(reserve2.into_node(target, info_proc));
-        Ok(target)
+        refs.by_node.insert(reserve1.into_node(gid, handle));
+        by_handle_slot.insert(info_proc, reserve2);
+        unused_id.acquire();
+        Ok(handle)
     }
 
     pub(crate) fn get_transaction_node(&self, handle: u32) -> BinderResult<NodeRef> {
@@ -932,6 +952,16 @@ impl Process {
             let id = info.node_ref().node.global_id();
             refs.by_handle.remove(&handle);
             refs.by_node.remove(&id);
+            refs.handle_is_present.release_id(handle as usize);
+
+            if let Some(shrink) = refs.handle_is_present.shrink_request() {
+                drop(refs);
+                // This intentionally ignores allocation failures.
+                if let Ok(new_bitmap) = shrink.realloc(GFP_KERNEL) {
+                    refs = self.node_refs.lock();
+                    refs.handle_is_present.shrink(new_bitmap);
+                }
+            }
         }
     } else {
         // All refs are cleared in process exit, so this warning is expected in that case.

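The new allocation loop cannot grow the IdPool's bitmap while holding the node_refs lock, so it drops the lock, allocates, retakes the lock, and retries the ID search, tolerating whatever changed in between. The same pattern in C, as a hedged sketch with hypothetical names:

	/* grow outside the lock, search again under it */
	for (;;) {
		spin_lock(&pool->lock);
		id = pool_find_unused(pool);		/* hypothetical */
		if (id >= 0)
			break;				/* success, lock still held */
		spin_unlock(&pool->lock);
		if (pool_grow(pool, GFP_KERNEL))	/* may sleep */
			return -ENOMEM;
		/* another thread may have raced us; loop and re-check */
	}
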
@@ -334,6 +334,19 @@ static struct device *next_device(struct klist_iter *i)
 	return dev;
 }
 
+static struct device *prev_device(struct klist_iter *i)
+{
+	struct klist_node *n = klist_prev(i);
+	struct device *dev = NULL;
+	struct device_private *dev_prv;
+
+	if (n) {
+		dev_prv = to_device_private_bus(n);
+		dev = dev_prv->device;
+	}
+	return dev;
+}
+
 /**
  * bus_for_each_dev - device iterator.
  * @bus: bus type.
@@ -414,6 +427,31 @@ struct device *bus_find_device(const struct bus_type *bus,
 }
 EXPORT_SYMBOL_GPL(bus_find_device);
 
+struct device *bus_find_device_reverse(const struct bus_type *bus,
+				       struct device *start, const void *data,
+				       device_match_t match)
+{
+	struct subsys_private *sp = bus_to_subsys(bus);
+	struct klist_iter i;
+	struct device *dev;
+
+	if (!sp)
+		return NULL;
+
+	klist_iter_init_node(&sp->klist_devices, &i,
+			     (start ? &start->p->knode_bus : NULL));
+	while ((dev = prev_device(&i))) {
+		if (match(dev, data)) {
+			get_device(dev);
+			break;
+		}
+	}
+	klist_iter_exit(&i);
+	subsys_put(sp);
+	return dev;
+}
+EXPORT_SYMBOL_GPL(bus_find_device_reverse);
+
 static struct device_driver *next_driver(struct klist_iter *i)
 {
 	struct klist_node *n = klist_next(i);

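bus_find_device_reverse() mirrors bus_find_device() but walks the bus klist from the tail, which helps when the most recently registered device is the likely match. A usage sketch (platform_bus_type and device_match_name are existing kernel symbols; the device name is made up):

	struct device *dev;

	dev = bus_find_device_reverse(&platform_bus_type, NULL, "soc@0",
				      device_match_name);
	if (dev) {
		/* ... a reference was taken via get_device() ... */
		put_device(dev);
	}
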
@@ -3,6 +3,7 @@
  * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
  */
 
+#include <linux/bitfield.h>
 #include <linux/bitops.h>
 #include <linux/clk-provider.h>
 #include <linux/clkdev.h>

@@ -117,9 +117,6 @@ struct at91_clk_pms {
 	unsigned int parent;
 };
 
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
-
 #define ndck(a, s) (a[s - 1].id + 1)
 #define nck(a) (a[ARRAY_SIZE(a) - 1].id + 1)
 

|
||||
* Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
|
||||
*/
|
||||
|
||||
#include <linux/bitfield.h>
|
||||
#include <linux/clk-provider.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/io.h>
|
||||
@@ -171,8 +172,7 @@ static u8 cpg_div6_clock_get_parent(struct clk_hw *hw)
|
||||
if (clock->src_mask == 0)
|
||||
return 0;
|
||||
|
||||
hw_index = (readl(clock->reg) & clock->src_mask) >>
|
||||
__ffs(clock->src_mask);
|
||||
hw_index = field_get(clock->src_mask, readl(clock->reg));
|
||||
for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
|
||||
if (clock->parents[i] == hw_index)
|
||||
return i;
|
||||
@@ -191,7 +191,7 @@ static int cpg_div6_clock_set_parent(struct clk_hw *hw, u8 index)
|
||||
if (index >= clk_hw_get_num_parents(hw))
|
||||
return -EINVAL;
|
||||
|
||||
src = clock->parents[index] << __ffs(clock->src_mask);
|
||||
src = field_prep(clock->src_mask, clock->parents[index]);
|
||||
writel((readl(clock->reg) & ~clock->src_mask) | src, clock->reg);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -54,10 +54,8 @@ static unsigned long cpg_pll_clk_recalc_rate(struct clk_hw *hw,
|
||||
{
|
||||
struct cpg_pll_clk *pll_clk = to_pll_clk(hw);
|
||||
unsigned int mult;
|
||||
u32 val;
|
||||
|
||||
val = readl(pll_clk->pllcr_reg) & CPG_PLLnCR_STC_MASK;
|
||||
mult = (val >> __ffs(CPG_PLLnCR_STC_MASK)) + 1;
|
||||
mult = FIELD_GET(CPG_PLLnCR_STC_MASK, readl(pll_clk->pllcr_reg)) + 1;
|
||||
|
||||
return parent_rate * mult * pll_clk->fixed_mult;
|
||||
}
|
||||
@@ -94,7 +92,7 @@ static int cpg_pll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
|
||||
|
||||
val = readl(pll_clk->pllcr_reg);
|
||||
val &= ~CPG_PLLnCR_STC_MASK;
|
||||
val |= (mult - 1) << __ffs(CPG_PLLnCR_STC_MASK);
|
||||
val |= FIELD_PREP(CPG_PLLnCR_STC_MASK, mult - 1);
|
||||
writel(val, pll_clk->pllcr_reg);
|
||||
|
||||
for (i = 1000; i; i--) {
|
||||
@@ -176,11 +174,7 @@ static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
|
||||
unsigned long parent_rate)
|
||||
{
|
||||
struct cpg_z_clk *zclk = to_z_clk(hw);
|
||||
unsigned int mult;
|
||||
u32 val;
|
||||
|
||||
val = readl(zclk->reg) & zclk->mask;
|
||||
mult = 32 - (val >> __ffs(zclk->mask));
|
||||
unsigned int mult = 32 - field_get(zclk->mask, readl(zclk->reg));
|
||||
|
||||
return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult,
|
||||
32 * zclk->fixed_div);
|
||||
@@ -231,7 +225,8 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
|
||||
if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
|
||||
return -EBUSY;
|
||||
|
||||
cpg_reg_modify(zclk->reg, zclk->mask, (32 - mult) << __ffs(zclk->mask));
|
||||
cpg_reg_modify(zclk->reg, zclk->mask,
|
||||
field_prep(zclk->mask, 32 - mult));
|
||||
|
||||
/*
|
||||
* Set KICK bit in FRQCRB to update hardware setting and wait for
|
||||
|
||||
@@ -279,11 +279,7 @@ static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
|
||||
unsigned long parent_rate)
|
||||
{
|
||||
struct cpg_z_clk *zclk = to_z_clk(hw);
|
||||
unsigned int mult;
|
||||
u32 val;
|
||||
|
||||
val = readl(zclk->reg) & zclk->mask;
|
||||
mult = 32 - (val >> __ffs(zclk->mask));
|
||||
unsigned int mult = 32 - field_get(zclk->mask, readl(zclk->reg));
|
||||
|
||||
return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult,
|
||||
32 * zclk->fixed_div);
|
||||
@@ -334,7 +330,8 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
|
||||
if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
|
||||
return -EBUSY;
|
||||
|
||||
cpg_reg_modify(zclk->reg, zclk->mask, (32 - mult) << __ffs(zclk->mask));
|
||||
cpg_reg_modify(zclk->reg, zclk->mask,
|
||||
field_prep(zclk->mask, 32 - mult));
|
||||
|
||||
/*
|
||||
* Set KICK bit in FRQCRB to update hardware setting and wait for
|
||||
|
||||
@@ -39,6 +39,7 @@ config CRYPTO_DEV_SP_PSP
|
||||
bool "Platform Security Processor (PSP) device"
|
||||
default y
|
||||
depends on CRYPTO_DEV_CCP_DD && X86_64 && AMD_IOMMU
|
||||
select PCI_TSM if PCI
|
||||
help
|
||||
Provide support for the AMD Platform Security Processor (PSP).
|
||||
The PSP is a dedicated processor that provides support for key
|
||||
|
||||
@@ -16,6 +16,10 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \
|
||||
hsti.o \
|
||||
sfs.o
|
||||
|
||||
ifeq ($(CONFIG_PCI_TSM),y)
|
||||
ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += sev-dev-tsm.o sev-dev-tio.o
|
||||
endif
|
||||
|
||||
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
|
||||
ccp-crypto-objs := ccp-crypto-main.o \
|
||||
ccp-crypto-aes.o \
|
||||
|
||||
drivers/crypto/ccp/sev-dev-tio.c (new file, 864 lines)
@@ -0,0 +1,864 @@
// SPDX-License-Identifier: GPL-2.0-only

// Interface to PSP for CCP/SEV-TIO/SNP-VM

#include <linux/pci.h>
#include <linux/tsm.h>
#include <linux/psp.h>
#include <linux/vmalloc.h>
#include <linux/bitfield.h>
#include <linux/pci-doe.h>
#include <asm/sev-common.h>
#include <asm/sev.h>
#include <asm/page.h>
#include "sev-dev.h"
#include "sev-dev-tio.h"

#define to_tio_status(dev_data) \
	(container_of((dev_data), struct tio_dsm, data)->sev->tio_status)

#define SLA_PAGE_TYPE_DATA	0
#define SLA_PAGE_TYPE_SCATTER	1
#define SLA_PAGE_SIZE_4K	0
#define SLA_PAGE_SIZE_2M	1
#define SLA_SZ(s)		((s).page_size == SLA_PAGE_SIZE_2M ? SZ_2M : SZ_4K)
#define SLA_SCATTER_LEN(s)	(SLA_SZ(s) / sizeof(struct sla_addr_t))
#define SLA_EOL			((struct sla_addr_t) { .pfn = ((1UL << 40) - 1) })
#define SLA_NULL		((struct sla_addr_t) { 0 })
#define IS_SLA_NULL(s)		((s).sla == SLA_NULL.sla)
#define IS_SLA_EOL(s)		((s).sla == SLA_EOL.sla)

static phys_addr_t sla_to_pa(struct sla_addr_t sla)
{
	u64 pfn = sla.pfn;
	u64 pa = pfn << PAGE_SHIFT;

	return pa;
}

static void *sla_to_va(struct sla_addr_t sla)
{
	void *va = __va(__sme_clr(sla_to_pa(sla)));

	return va;
}

#define sla_to_pfn(sla)		(__pa(sla_to_va(sla)) >> PAGE_SHIFT)
#define sla_to_page(sla)	virt_to_page(sla_to_va(sla))

static struct sla_addr_t make_sla(struct page *pg, bool stp)
{
	u64 pa = __sme_set(page_to_phys(pg));
	struct sla_addr_t ret = {
		.pfn = pa >> PAGE_SHIFT,
		.page_size = SLA_PAGE_SIZE_4K, /* Do not do SLA_PAGE_SIZE_2M ATM */
		.page_type = stp ? SLA_PAGE_TYPE_SCATTER : SLA_PAGE_TYPE_DATA
	};

	return ret;
}

/* the BUFFER Structure */
#define SLA_BUFFER_FLAG_ENCRYPTION	BIT(0)

/*
 * struct sla_buffer_hdr - Scatter list address buffer header
 *
 * @capacity_sz: Total capacity of the buffer in bytes
 * @payload_sz: Size of buffer payload in bytes, must be multiple of 32B
 * @flags: Buffer flags (SLA_BUFFER_FLAG_ENCRYPTION: buffer is encrypted)
 * @iv: Initialization vector used for encryption
 * @authtag: Authentication tag for encrypted buffer
 */
struct sla_buffer_hdr {
	u32 capacity_sz;
	u32 payload_sz; /* The size of BUFFER_PAYLOAD in bytes. Must be multiple of 32B */
	u32 flags;
	u8 reserved1[4];
	u8 iv[16]; /* IV used for the encryption of this buffer */
	u8 authtag[16]; /* Authentication tag for this buffer */
	u8 reserved2[16];
} __packed;

enum spdm_data_type_t {
	DOBJ_DATA_TYPE_SPDM = 0x1,
	DOBJ_DATA_TYPE_SECURE_SPDM = 0x2,
};

struct spdm_dobj_hdr_req {
	struct spdm_dobj_hdr hdr; /* hdr.id == SPDM_DOBJ_ID_REQ */
	u8 data_type; /* spdm_data_type_t */
	u8 reserved2[5];
} __packed;

struct spdm_dobj_hdr_resp {
	struct spdm_dobj_hdr hdr; /* hdr.id == SPDM_DOBJ_ID_RESP */
	u8 data_type; /* spdm_data_type_t */
	u8 reserved2[5];
} __packed;

/* Defined in sev-dev-tio.h so sev-dev-tsm.c can read types of blobs */
struct spdm_dobj_hdr_cert;
struct spdm_dobj_hdr_meas;
struct spdm_dobj_hdr_report;

/* Used in all SPDM-aware TIO commands */
struct spdm_ctrl {
	struct sla_addr_t req;
	struct sla_addr_t resp;
	struct sla_addr_t scratch;
	struct sla_addr_t output;
} __packed;

static size_t sla_dobj_id_to_size(u8 id)
{
	size_t n;

	BUILD_BUG_ON(sizeof(struct spdm_dobj_hdr_resp) != 0x10);
	switch (id) {
	case SPDM_DOBJ_ID_REQ:
		n = sizeof(struct spdm_dobj_hdr_req);
		break;
	case SPDM_DOBJ_ID_RESP:
		n = sizeof(struct spdm_dobj_hdr_resp);
		break;
	default:
		WARN_ON(1);
		n = 0;
		break;
	}

	return n;
}

#define SPDM_DOBJ_HDR_SIZE(hdr)	sla_dobj_id_to_size((hdr)->id)
#define SPDM_DOBJ_DATA(hdr)	((u8 *)(hdr) + SPDM_DOBJ_HDR_SIZE(hdr))
#define SPDM_DOBJ_LEN(hdr)	((hdr)->length - SPDM_DOBJ_HDR_SIZE(hdr))

#define sla_to_dobj_resp_hdr(buf)	((struct spdm_dobj_hdr_resp *) \
					 sla_to_dobj_hdr_check((buf), SPDM_DOBJ_ID_RESP))
#define sla_to_dobj_req_hdr(buf)	((struct spdm_dobj_hdr_req *) \
					 sla_to_dobj_hdr_check((buf), SPDM_DOBJ_ID_REQ))

static struct spdm_dobj_hdr *sla_to_dobj_hdr(struct sla_buffer_hdr *buf)
{
	if (!buf)
		return NULL;

	return (struct spdm_dobj_hdr *) &buf[1];
}

static struct spdm_dobj_hdr *sla_to_dobj_hdr_check(struct sla_buffer_hdr *buf, u32 check_dobjid)
{
	struct spdm_dobj_hdr *hdr = sla_to_dobj_hdr(buf);

	if (WARN_ON_ONCE(!hdr))
		return NULL;

	if (hdr->id != check_dobjid) {
		pr_err("! ERROR: expected %d, found %d\n", check_dobjid, hdr->id);
		return NULL;
	}

	return hdr;
}

static void *sla_to_data(struct sla_buffer_hdr *buf, u32 dobjid)
{
	struct spdm_dobj_hdr *hdr = sla_to_dobj_hdr(buf);

	if (WARN_ON_ONCE(dobjid != SPDM_DOBJ_ID_REQ && dobjid != SPDM_DOBJ_ID_RESP))
		return NULL;

	if (!hdr)
		return NULL;

	return (u8 *) hdr + sla_dobj_id_to_size(dobjid);
}

/*
 * struct sev_data_tio_status - SEV_CMD_TIO_STATUS command
 *
 * @length: Length of this command buffer in bytes
 * @status_paddr: System physical address of the TIO_STATUS structure
 */
struct sev_data_tio_status {
	u32 length;
	u8 reserved[4];
	u64 status_paddr;
} __packed;

/* TIO_INIT */
struct sev_data_tio_init {
	u32 length;
	u8 reserved[12];
} __packed;

/*
 * struct sev_data_tio_dev_create - TIO_DEV_CREATE command
 *
 * @length: Length in bytes of this command buffer
 * @dev_ctx_sla: Scatter list address pointing to a buffer to be used as a device context buffer
 * @device_id: PCIe Routing Identifier of the device to connect to
 * @root_port_id: PCIe Routing Identifier of the root port of the device
 * @segment_id: PCIe Segment Identifier of the device to connect to
 */
struct sev_data_tio_dev_create {
	u32 length;
	u8 reserved1[4];
	struct sla_addr_t dev_ctx_sla;
	u16 device_id;
	u16 root_port_id;
	u8 segment_id;
	u8 reserved2[11];
} __packed;

/*
 * struct sev_data_tio_dev_connect - TIO_DEV_CONNECT command
 *
 * @length: Length in bytes of this command buffer
 * @spdm_ctrl: SPDM control structure defined in Section 5.1
 * @dev_ctx_sla: Scatter list address of the device context buffer
 * @tc_mask: Bitmask of the traffic classes to initialize for SEV-TIO usage.
 *           Setting the kth bit of the TC_MASK to 1 indicates that the traffic
 *           class k will be initialized
 * @cert_slot: Slot number of the certificate requested for constructing the SPDM session
 * @ide_stream_id: IDE stream IDs to be associated with this device.
 *                 Valid only if corresponding bit in TC_MASK is set
 */
struct sev_data_tio_dev_connect {
	u32 length;
	u8 reserved1[4];
	struct spdm_ctrl spdm_ctrl;
	u8 reserved2[8];
	struct sla_addr_t dev_ctx_sla;
	u8 tc_mask;
	u8 cert_slot;
	u8 reserved3[6];
	u8 ide_stream_id[8];
	u8 reserved4[8];
} __packed;

/*
 * struct sev_data_tio_dev_disconnect - TIO_DEV_DISCONNECT command
 *
 * @length: Length in bytes of this command buffer
 * @flags: Command flags (TIO_DEV_DISCONNECT_FLAG_FORCE: force disconnect)
 * @spdm_ctrl: SPDM control structure defined in Section 5.1
 * @dev_ctx_sla: Scatter list address of the device context buffer
 */
#define TIO_DEV_DISCONNECT_FLAG_FORCE	BIT(0)

struct sev_data_tio_dev_disconnect {
	u32 length;
	u32 flags;
	struct spdm_ctrl spdm_ctrl;
	struct sla_addr_t dev_ctx_sla;
} __packed;

/*
 * struct sev_data_tio_dev_meas - TIO_DEV_MEASUREMENTS command
 *
 * @length: Length in bytes of this command buffer
 * @flags: Command flags (TIO_DEV_MEAS_FLAG_RAW_BITSTREAM: request raw measurements)
 * @spdm_ctrl: SPDM control structure defined in Section 5.1
 * @dev_ctx_sla: Scatter list address of the device context buffer
 * @meas_nonce: Nonce for measurement freshness verification
 */
#define TIO_DEV_MEAS_FLAG_RAW_BITSTREAM	BIT(0)

struct sev_data_tio_dev_meas {
	u32 length;
	u32 flags;
	struct spdm_ctrl spdm_ctrl;
	struct sla_addr_t dev_ctx_sla;
	u8 meas_nonce[32];
} __packed;

/*
 * struct sev_data_tio_dev_certs - TIO_DEV_CERTIFICATES command
 *
 * @length: Length in bytes of this command buffer
 * @spdm_ctrl: SPDM control structure defined in Section 5.1
 * @dev_ctx_sla: Scatter list address of the device context buffer
 */
struct sev_data_tio_dev_certs {
	u32 length;
	u8 reserved[4];
	struct spdm_ctrl spdm_ctrl;
	struct sla_addr_t dev_ctx_sla;
} __packed;

/*
 * struct sev_data_tio_dev_reclaim - TIO_DEV_RECLAIM command
 *
 * @length: Length in bytes of this command buffer
 * @dev_ctx_sla: Scatter list address of the device context buffer
 *
 * This command reclaims resources associated with a device context.
 */
struct sev_data_tio_dev_reclaim {
	u32 length;
	u8 reserved[4];
	struct sla_addr_t dev_ctx_sla;
} __packed;

static struct sla_buffer_hdr *sla_buffer_map(struct sla_addr_t sla)
{
	struct sla_buffer_hdr *buf;

	BUILD_BUG_ON(sizeof(struct sla_buffer_hdr) != 0x40);
	if (IS_SLA_NULL(sla))
		return NULL;

	if (sla.page_type == SLA_PAGE_TYPE_SCATTER) {
		struct sla_addr_t *scatter = sla_to_va(sla);
		unsigned int i, npages = 0;

		for (i = 0; i < SLA_SCATTER_LEN(sla); ++i) {
			if (WARN_ON_ONCE(SLA_SZ(scatter[i]) > SZ_4K))
				return NULL;

			if (WARN_ON_ONCE(scatter[i].page_type == SLA_PAGE_TYPE_SCATTER))
				return NULL;

			if (IS_SLA_EOL(scatter[i])) {
				npages = i;
				break;
			}
		}
		if (WARN_ON_ONCE(!npages))
			return NULL;

		struct page **pp = kmalloc_array(npages, sizeof(pp[0]), GFP_KERNEL);

		if (!pp)
			return NULL;

		for (i = 0; i < npages; ++i)
			pp[i] = sla_to_page(scatter[i]);

		buf = vm_map_ram(pp, npages, 0);
		kfree(pp);
	} else {
		struct page *pg = sla_to_page(sla);

		buf = vm_map_ram(&pg, 1, 0);
	}

	return buf;
}

static void sla_buffer_unmap(struct sla_addr_t sla, struct sla_buffer_hdr *buf)
{
	if (!buf)
		return;

	if (sla.page_type == SLA_PAGE_TYPE_SCATTER) {
		struct sla_addr_t *scatter = sla_to_va(sla);
		unsigned int i, npages = 0;

		for (i = 0; i < SLA_SCATTER_LEN(sla); ++i) {
			if (IS_SLA_EOL(scatter[i])) {
				npages = i;
				break;
			}
		}
		if (!npages)
			return;

		vm_unmap_ram(buf, npages);
	} else {
		vm_unmap_ram(buf, 1);
	}
}

static void dobj_response_init(struct sla_buffer_hdr *buf)
{
	struct spdm_dobj_hdr *dobj = sla_to_dobj_hdr(buf);

	dobj->id = SPDM_DOBJ_ID_RESP;
	dobj->version.major = 0x1;
	dobj->version.minor = 0;
	dobj->length = 0;
	buf->payload_sz = sla_dobj_id_to_size(dobj->id) + dobj->length;
}

static void sla_free(struct sla_addr_t sla, size_t len, bool firmware_state)
{
	unsigned int npages = PAGE_ALIGN(len) >> PAGE_SHIFT;
	struct sla_addr_t *scatter = NULL;
	int ret = 0, i;

	if (IS_SLA_NULL(sla))
		return;

	if (firmware_state) {
		if (sla.page_type == SLA_PAGE_TYPE_SCATTER) {
			scatter = sla_to_va(sla);

			for (i = 0; i < npages; ++i) {
				if (IS_SLA_EOL(scatter[i]))
					break;

				ret = snp_reclaim_pages(sla_to_pa(scatter[i]), 1, false);
				if (ret)
					break;
			}
		} else {
			ret = snp_reclaim_pages(sla_to_pa(sla), 1, false);
		}
	}

	if (WARN_ON(ret))
		return;

	if (scatter) {
		for (i = 0; i < npages; ++i) {
			if (IS_SLA_EOL(scatter[i]))
				break;
			free_page((unsigned long)sla_to_va(scatter[i]));
		}
	}

	free_page((unsigned long)sla_to_va(sla));
}

static struct sla_addr_t sla_alloc(size_t len, bool firmware_state)
{
	unsigned long i, npages = PAGE_ALIGN(len) >> PAGE_SHIFT;
	struct sla_addr_t *scatter = NULL;
	struct sla_addr_t ret = SLA_NULL;
	struct sla_buffer_hdr *buf;
	struct page *pg;

	if (npages == 0)
		return ret;

	if (WARN_ON_ONCE(npages > ((PAGE_SIZE / sizeof(struct sla_addr_t)) + 1)))
		return ret;

	BUILD_BUG_ON(PAGE_SIZE < SZ_4K);

	if (npages > 1) {
		pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!pg)
			return SLA_NULL;

		ret = make_sla(pg, true);
		scatter = page_to_virt(pg);
		for (i = 0; i < npages; ++i) {
			pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
			if (!pg)
				goto no_reclaim_exit;

			scatter[i] = make_sla(pg, false);
		}
		scatter[i] = SLA_EOL;
	} else {
		pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!pg)
			return SLA_NULL;

		ret = make_sla(pg, false);
	}

	buf = sla_buffer_map(ret);
	if (!buf)
		goto no_reclaim_exit;

	buf->capacity_sz = (npages << PAGE_SHIFT);
	sla_buffer_unmap(ret, buf);

	if (firmware_state) {
		if (scatter) {
			for (i = 0; i < npages; ++i) {
				if (rmp_make_private(sla_to_pfn(scatter[i]), 0,
						     PG_LEVEL_4K, 0, true))
					goto free_exit;
			}
		} else {
			if (rmp_make_private(sla_to_pfn(ret), 0, PG_LEVEL_4K, 0, true))
				goto no_reclaim_exit;
		}
	}

	return ret;

no_reclaim_exit:
	firmware_state = false;
free_exit:
	sla_free(ret, len, firmware_state);
	return SLA_NULL;
}

/* Expands a buffer, only firmware owned buffers allowed for now */
static int sla_expand(struct sla_addr_t *sla, size_t *len)
{
	struct sla_buffer_hdr *oldbuf = sla_buffer_map(*sla), *newbuf;
	struct sla_addr_t oldsla = *sla, newsla;
	size_t oldlen = *len, newlen;

	if (!oldbuf)
		return -EFAULT;

	newlen = oldbuf->capacity_sz;
	if (oldbuf->capacity_sz == oldlen) {
		/* This buffer does not require expansion, must be another buffer */
		sla_buffer_unmap(oldsla, oldbuf);
		return 1;
	}

	pr_notice("Expanding BUFFER from %ld to %ld bytes\n", oldlen, newlen);

	newsla = sla_alloc(newlen, true);
	if (IS_SLA_NULL(newsla))
		return -ENOMEM;

	newbuf = sla_buffer_map(newsla);
	if (!newbuf) {
		sla_free(newsla, newlen, true);
		return -EFAULT;
	}

	memcpy(newbuf, oldbuf, oldlen);

	sla_buffer_unmap(newsla, newbuf);
	sla_free(oldsla, oldlen, true);
	*sla = newsla;
	*len = newlen;

	return 0;
}

static int sev_tio_do_cmd(int cmd, void *data, size_t data_len, int *psp_ret,
			  struct tsm_dsm_tio *dev_data)
{
	int rc;

	*psp_ret = 0;
	rc = sev_do_cmd(cmd, data, psp_ret);

	if (WARN_ON(!rc && *psp_ret == SEV_RET_SPDM_REQUEST))
		return -EIO;

	if (rc == 0 && *psp_ret == SEV_RET_EXPAND_BUFFER_LENGTH_REQUEST) {
		int rc1, rc2;

		rc1 = sla_expand(&dev_data->output, &dev_data->output_len);
		if (rc1 < 0)
			return rc1;

		rc2 = sla_expand(&dev_data->scratch, &dev_data->scratch_len);
		if (rc2 < 0)
			return rc2;

		if (!rc1 && !rc2)
			/* Neither buffer requires expansion, this is wrong */
			return -EFAULT;

		*psp_ret = 0;
		rc = sev_do_cmd(cmd, data, psp_ret);
	}

	if ((rc == 0 || rc == -EIO) && *psp_ret == SEV_RET_SPDM_REQUEST) {
		struct spdm_dobj_hdr_resp *resp_hdr;
		struct spdm_dobj_hdr_req *req_hdr;
		struct sev_tio_status *tio_status = to_tio_status(dev_data);
		size_t resp_len = tio_status->spdm_req_size_max -
			(sla_dobj_id_to_size(SPDM_DOBJ_ID_RESP) + sizeof(struct sla_buffer_hdr));

		if (!dev_data->cmd) {
			if (WARN_ON_ONCE(!data_len || (data_len != *(u32 *) data)))
				return -EINVAL;
			if (WARN_ON(data_len > sizeof(dev_data->cmd_data)))
				return -EFAULT;
			memcpy(dev_data->cmd_data, data, data_len);
			memset(&dev_data->cmd_data[data_len], 0xFF,
			       sizeof(dev_data->cmd_data) - data_len);
			dev_data->cmd = cmd;
		}

		req_hdr = sla_to_dobj_req_hdr(dev_data->reqbuf);
		resp_hdr = sla_to_dobj_resp_hdr(dev_data->respbuf);
		switch (req_hdr->data_type) {
		case DOBJ_DATA_TYPE_SPDM:
			rc = PCI_DOE_FEATURE_CMA;
			break;
		case DOBJ_DATA_TYPE_SECURE_SPDM:
			rc = PCI_DOE_FEATURE_SSESSION;
			break;
		default:
			return -EINVAL;
		}
		resp_hdr->data_type = req_hdr->data_type;
		dev_data->spdm.req_len = req_hdr->hdr.length -
			sla_dobj_id_to_size(SPDM_DOBJ_ID_REQ);
		dev_data->spdm.rsp_len = resp_len;
	} else if (dev_data && dev_data->cmd) {
		/* For either error or success just stop the bouncing */
		memset(dev_data->cmd_data, 0, sizeof(dev_data->cmd_data));
		dev_data->cmd = 0;
	}

	return rc;
}

int sev_tio_continue(struct tsm_dsm_tio *dev_data)
{
	struct spdm_dobj_hdr_resp *resp_hdr;
	int ret;

	if (!dev_data || !dev_data->cmd)
		return -EINVAL;

	resp_hdr = sla_to_dobj_resp_hdr(dev_data->respbuf);
	resp_hdr->hdr.length = ALIGN(sla_dobj_id_to_size(SPDM_DOBJ_ID_RESP) +
				     dev_data->spdm.rsp_len, 32);
	dev_data->respbuf->payload_sz = resp_hdr->hdr.length;

	ret = sev_tio_do_cmd(dev_data->cmd, dev_data->cmd_data, 0,
			     &dev_data->psp_ret, dev_data);
	if (ret)
		return ret;

	if (dev_data->psp_ret != SEV_RET_SUCCESS)
		return -EINVAL;

	return 0;
}

static void spdm_ctrl_init(struct spdm_ctrl *ctrl, struct tsm_dsm_tio *dev_data)
{
	ctrl->req = dev_data->req;
	ctrl->resp = dev_data->resp;
	ctrl->scratch = dev_data->scratch;
	ctrl->output = dev_data->output;
}

static void spdm_ctrl_free(struct tsm_dsm_tio *dev_data)
{
	struct sev_tio_status *tio_status = to_tio_status(dev_data);
	size_t len = tio_status->spdm_req_size_max -
		(sla_dobj_id_to_size(SPDM_DOBJ_ID_RESP) +
		 sizeof(struct sla_buffer_hdr));
	struct tsm_spdm *spdm = &dev_data->spdm;

	sla_buffer_unmap(dev_data->resp, dev_data->respbuf);
	sla_buffer_unmap(dev_data->req, dev_data->reqbuf);
	spdm->rsp = NULL;
	spdm->req = NULL;
	sla_free(dev_data->req, len, true);
	sla_free(dev_data->resp, len, false);
	sla_free(dev_data->scratch, tio_status->spdm_scratch_size_max, true);

	dev_data->req.sla = 0;
	dev_data->resp.sla = 0;
	dev_data->scratch.sla = 0;
	dev_data->respbuf = NULL;
	dev_data->reqbuf = NULL;
	sla_free(dev_data->output, tio_status->spdm_out_size_max, true);
}

static int spdm_ctrl_alloc(struct tsm_dsm_tio *dev_data)
{
	struct sev_tio_status *tio_status = to_tio_status(dev_data);
	struct tsm_spdm *spdm = &dev_data->spdm;
	int ret;

	dev_data->req = sla_alloc(tio_status->spdm_req_size_max, true);
	dev_data->resp = sla_alloc(tio_status->spdm_req_size_max, false);
	dev_data->scratch_len = tio_status->spdm_scratch_size_max;
	dev_data->scratch = sla_alloc(dev_data->scratch_len, true);
	dev_data->output_len = tio_status->spdm_out_size_max;
	dev_data->output = sla_alloc(dev_data->output_len, true);

	if (IS_SLA_NULL(dev_data->req) || IS_SLA_NULL(dev_data->resp) ||
	    IS_SLA_NULL(dev_data->scratch) || IS_SLA_NULL(dev_data->dev_ctx)) {
		ret = -ENOMEM;
		goto free_spdm_exit;
	}

	dev_data->reqbuf = sla_buffer_map(dev_data->req);
	dev_data->respbuf = sla_buffer_map(dev_data->resp);
	if (!dev_data->reqbuf || !dev_data->respbuf) {
		ret = -EFAULT;
		goto free_spdm_exit;
	}

	spdm->req = sla_to_data(dev_data->reqbuf, SPDM_DOBJ_ID_REQ);
	spdm->rsp = sla_to_data(dev_data->respbuf, SPDM_DOBJ_ID_RESP);
	if (!spdm->req || !spdm->rsp) {
		ret = -EFAULT;
		goto free_spdm_exit;
	}

	dobj_response_init(dev_data->respbuf);

	return 0;

free_spdm_exit:
	spdm_ctrl_free(dev_data);
	return ret;
}

int sev_tio_init_locked(void *tio_status_page)
{
	struct sev_tio_status *tio_status = tio_status_page;
	struct sev_data_tio_status data_status = {
		.length = sizeof(data_status),
	};
	int ret, psp_ret;

	data_status.status_paddr = __psp_pa(tio_status_page);
	ret = __sev_do_cmd_locked(SEV_CMD_TIO_STATUS, &data_status, &psp_ret);
	if (ret)
		return ret;

	if (tio_status->length < offsetofend(struct sev_tio_status, tdictx_size) ||
	    tio_status->reserved)
		return -EFAULT;

	if (!tio_status->tio_en && !tio_status->tio_init_done)
		return -ENOENT;

	if (tio_status->tio_init_done)
		return -EBUSY;

	struct sev_data_tio_init ti = { .length = sizeof(ti) };

	ret = __sev_do_cmd_locked(SEV_CMD_TIO_INIT, &ti, &psp_ret);
	if (ret)
		return ret;

	ret = __sev_do_cmd_locked(SEV_CMD_TIO_STATUS, &data_status, &psp_ret);
	if (ret)
		return ret;

	return 0;
}

int sev_tio_dev_create(struct tsm_dsm_tio *dev_data, u16 device_id,
		       u16 root_port_id, u8 segment_id)
{
	struct sev_tio_status *tio_status = to_tio_status(dev_data);
	struct sev_data_tio_dev_create create = {
		.length = sizeof(create),
		.device_id = device_id,
		.root_port_id = root_port_id,
		.segment_id = segment_id,
	};
	void *data_pg;
	int ret;

	dev_data->dev_ctx = sla_alloc(tio_status->devctx_size, true);
	if (IS_SLA_NULL(dev_data->dev_ctx))
		return -ENOMEM;

	data_pg = snp_alloc_firmware_page(GFP_KERNEL_ACCOUNT);
	if (!data_pg) {
		ret = -ENOMEM;
		goto free_ctx_exit;
	}

	create.dev_ctx_sla = dev_data->dev_ctx;
	ret = sev_do_cmd(SEV_CMD_TIO_DEV_CREATE, &create, &dev_data->psp_ret);
	if (ret)
		goto free_data_pg_exit;

	dev_data->data_pg = data_pg;

	return 0;

free_data_pg_exit:
	snp_free_firmware_page(data_pg);
free_ctx_exit:
	sla_free(create.dev_ctx_sla, tio_status->devctx_size, true);
	return ret;
}

int sev_tio_dev_reclaim(struct tsm_dsm_tio *dev_data)
{
	struct sev_tio_status *tio_status = to_tio_status(dev_data);
	struct sev_data_tio_dev_reclaim r = {
		.length = sizeof(r),
		.dev_ctx_sla = dev_data->dev_ctx,
	};
	int ret;

	if (dev_data->data_pg) {
		snp_free_firmware_page(dev_data->data_pg);
		dev_data->data_pg = NULL;
	}

	if (IS_SLA_NULL(dev_data->dev_ctx))
		return 0;

	ret = sev_do_cmd(SEV_CMD_TIO_DEV_RECLAIM, &r, &dev_data->psp_ret);

	sla_free(dev_data->dev_ctx, tio_status->devctx_size, true);
	dev_data->dev_ctx = SLA_NULL;

	spdm_ctrl_free(dev_data);

	return ret;
}

int sev_tio_dev_connect(struct tsm_dsm_tio *dev_data, u8 tc_mask, u8 ids[8], u8 cert_slot)
{
	struct sev_data_tio_dev_connect connect = {
		.length = sizeof(connect),
		.tc_mask = tc_mask,
		.cert_slot = cert_slot,
		.dev_ctx_sla = dev_data->dev_ctx,
		.ide_stream_id = {
			ids[0], ids[1], ids[2], ids[3],
			ids[4], ids[5], ids[6], ids[7]
		},
	};
	int ret;

	if (WARN_ON(IS_SLA_NULL(dev_data->dev_ctx)))
		return -EFAULT;
	if (!(tc_mask & 1))
		return -EINVAL;

	ret = spdm_ctrl_alloc(dev_data);
	if (ret)
		return ret;

	spdm_ctrl_init(&connect.spdm_ctrl, dev_data);

	return sev_tio_do_cmd(SEV_CMD_TIO_DEV_CONNECT, &connect, sizeof(connect),
			      &dev_data->psp_ret, dev_data);
}

int sev_tio_dev_disconnect(struct tsm_dsm_tio *dev_data, bool force)
{
	struct sev_data_tio_dev_disconnect dc = {
		.length = sizeof(dc),
		.dev_ctx_sla = dev_data->dev_ctx,
		.flags = force ? TIO_DEV_DISCONNECT_FLAG_FORCE : 0,
	};

	if (WARN_ON_ONCE(IS_SLA_NULL(dev_data->dev_ctx)))
		return -EFAULT;

	spdm_ctrl_init(&dc.spdm_ctrl, dev_data);

	return sev_tio_do_cmd(SEV_CMD_TIO_DEV_DISCONNECT, &dc, sizeof(dc),
			      &dev_data->psp_ret, dev_data);
}

int sev_tio_cmd_buffer_len(int cmd)
{
	switch (cmd) {
	case SEV_CMD_TIO_STATUS:	 return sizeof(struct sev_data_tio_status);
	case SEV_CMD_TIO_INIT:		 return sizeof(struct sev_data_tio_init);
	case SEV_CMD_TIO_DEV_CREATE:	 return sizeof(struct sev_data_tio_dev_create);
	case SEV_CMD_TIO_DEV_RECLAIM:	 return sizeof(struct sev_data_tio_dev_reclaim);
	case SEV_CMD_TIO_DEV_CONNECT:	 return sizeof(struct sev_data_tio_dev_connect);
	case SEV_CMD_TIO_DEV_DISCONNECT: return sizeof(struct sev_data_tio_dev_disconnect);
	default:			 return 0;
	}
}

drivers/crypto/ccp/sev-dev-tio.h (new file, +123)
@@ -0,0 +1,123 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __PSP_SEV_TIO_H__
#define __PSP_SEV_TIO_H__

#include <linux/pci-tsm.h>
#include <linux/pci-ide.h>
#include <linux/tsm.h>
#include <uapi/linux/psp-sev.h>

struct sla_addr_t {
	union {
		u64 sla;
		struct {
			u64 page_type :1,
			    page_size :1,
			    reserved1 :10,
			    pfn :40,
			    reserved2 :12;
		};
	};
} __packed;
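The rest of the series manipulates these scatter-list addresses through SLA helpers (sla_alloc(), sla_free(), sla_to_data(), SLA_NULL, IS_SLA_NULL()) that are defined outside this excerpt. A minimal sketch of the null-handling pair, assuming purely for illustration that an all-ones value encodes the null SLA:

/*
 * Illustration only: the real definitions live elsewhere in the series,
 * and the all-ones null encoding is an assumption.
 */
#define SLA_NULL	((struct sla_addr_t) { .sla = ~0ULL })
#define IS_SLA_NULL(s)	((s).sla == SLA_NULL.sla)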

#define SEV_TIO_MAX_COMMAND_LENGTH	128

/* SPDM control structure for DOE */
struct tsm_spdm {
	unsigned long req_len;
	void *req;
	unsigned long rsp_len;
	void *rsp;
};

/* Describes TIO device */
struct tsm_dsm_tio {
	u8 cert_slot;
	struct sla_addr_t dev_ctx;
	struct sla_addr_t req;
	struct sla_addr_t resp;
	struct sla_addr_t scratch;
	struct sla_addr_t output;
	size_t output_len;
	size_t scratch_len;
	struct tsm_spdm spdm;
	struct sla_buffer_hdr *reqbuf;	/* vmap'ed @req for DOE */
	struct sla_buffer_hdr *respbuf;	/* vmap'ed @resp for DOE */

	int cmd;
	int psp_ret;
	u8 cmd_data[SEV_TIO_MAX_COMMAND_LENGTH];
	void *data_pg;	/* Data page for DEV_STATUS/TDI_STATUS/TDI_INFO/ASID_FENCE */

#define TIO_IDE_MAX_TC	8
	struct pci_ide *ide[TIO_IDE_MAX_TC];
};

/* Describes TSM structure for PF0 pointed by pci_dev->tsm */
struct tio_dsm {
	struct pci_tsm_pf0 tsm;
	struct tsm_dsm_tio data;
	struct sev_device *sev;
};

/* Data object IDs */
#define SPDM_DOBJ_ID_NONE	0
#define SPDM_DOBJ_ID_REQ	1
#define SPDM_DOBJ_ID_RESP	2

struct spdm_dobj_hdr {
	u32 id;		/* Data object type identifier */
	u32 length;	/* Length of the data object, INCLUDING THIS HEADER */
	struct {	/* Version of the data object structure */
		u8 minor;
		u8 major;
	} version;
} __packed;
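sev-dev-tio.c resolves the vmap'ed request/response buffers to payloads of these objects via sla_to_data(buf, SPDM_DOBJ_ID_*). A hedged sketch of the header check such a lookup implies; this helper is hypothetical and not part of the patch:

static bool spdm_dobj_hdr_valid(const struct spdm_dobj_hdr *hdr, u32 id,
				size_t buf_len)
{
	if (buf_len < sizeof(*hdr) || hdr->id != id)
		return false;
	/* @length counts the header itself, so it can never be smaller */
	return hdr->length >= sizeof(*hdr) && hdr->length <= buf_len;
}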

/**
 * struct sev_tio_status - TIO_STATUS command's status_paddr buffer
 *
 * @length: Length of this structure in bytes
 * @tio_en: Indicates that SNP_INIT_EX initialized the RMP for SEV-TIO
 * @tio_init_done: Indicates TIO_INIT has been invoked
 * @spdm_req_size_min: Minimum SPDM request buffer size in bytes
 * @spdm_req_size_max: Maximum SPDM request buffer size in bytes
 * @spdm_scratch_size_min: Minimum SPDM scratch buffer size in bytes
 * @spdm_scratch_size_max: Maximum SPDM scratch buffer size in bytes
 * @spdm_out_size_min: Minimum SPDM output buffer size in bytes
 * @spdm_out_size_max: Maximum SPDM output buffer size in bytes
 * @spdm_rsp_size_min: Minimum SPDM response buffer size in bytes
 * @spdm_rsp_size_max: Maximum SPDM response buffer size in bytes
 * @devctx_size: Size of a device context buffer in bytes
 * @tdictx_size: Size of a TDI context buffer in bytes
 * @tio_crypto_alg: TIO crypto algorithms supported
 */
struct sev_tio_status {
	u32 length;
	u32 tio_en :1,
	    tio_init_done :1,
	    reserved :30;
	u32 spdm_req_size_min;
	u32 spdm_req_size_max;
	u32 spdm_scratch_size_min;
	u32 spdm_scratch_size_max;
	u32 spdm_out_size_min;
	u32 spdm_out_size_max;
	u32 spdm_rsp_size_min;
	u32 spdm_rsp_size_max;
	u32 devctx_size;
	u32 tdictx_size;
	u32 tio_crypto_alg;
	u8 reserved2[12];
} __packed;

int sev_tio_init_locked(void *tio_status_page);
int sev_tio_continue(struct tsm_dsm_tio *dev_data);

int sev_tio_dev_create(struct tsm_dsm_tio *dev_data, u16 device_id, u16 root_port_id,
		       u8 segment_id);
int sev_tio_dev_connect(struct tsm_dsm_tio *dev_data, u8 tc_mask, u8 ids[8], u8 cert_slot);
int sev_tio_dev_disconnect(struct tsm_dsm_tio *dev_data, bool force);
int sev_tio_dev_reclaim(struct tsm_dsm_tio *dev_data);

#endif /* __PSP_SEV_TIO_H__ */
drivers/crypto/ccp/sev-dev-tsm.c (new file, +405)
@@ -0,0 +1,405 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
|
||||
// Interface to CCP/SEV-TIO for generic PCIe TDISP module
|
||||
|
||||
#include <linux/pci.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/tsm.h>
|
||||
#include <linux/iommu.h>
|
||||
#include <linux/pci-doe.h>
|
||||
#include <linux/bitfield.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
#include <asm/sev-common.h>
|
||||
#include <asm/sev.h>
|
||||
|
||||
#include "psp-dev.h"
|
||||
#include "sev-dev.h"
|
||||
#include "sev-dev-tio.h"
|
||||
|
||||
MODULE_IMPORT_NS("PCI_IDE");
|
||||
|
||||
#define TIO_DEFAULT_NR_IDE_STREAMS 1
|
||||
|
||||
static uint nr_ide_streams = TIO_DEFAULT_NR_IDE_STREAMS;
|
||||
module_param_named(ide_nr, nr_ide_streams, uint, 0644);
|
||||
MODULE_PARM_DESC(ide_nr, "Set the maximum number of IDE streams per PHB");
|
||||
|
||||
#define dev_to_sp(dev) ((struct sp_device *)dev_get_drvdata(dev))
|
||||
#define dev_to_psp(dev) ((struct psp_device *)(dev_to_sp(dev)->psp_data))
|
||||
#define dev_to_sev(dev) ((struct sev_device *)(dev_to_psp(dev)->sev_data))
|
||||
#define tsm_dev_to_sev(tsmdev) dev_to_sev((tsmdev)->dev.parent)
|
||||
|
||||
#define pdev_to_tio_dsm(pdev) (container_of((pdev)->tsm, struct tio_dsm, tsm.base_tsm))
|
||||
|
||||
static int sev_tio_spdm_cmd(struct tio_dsm *dsm, int ret)
|
||||
{
|
||||
struct tsm_dsm_tio *dev_data = &dsm->data;
|
||||
struct tsm_spdm *spdm = &dev_data->spdm;
|
||||
|
||||
/* Check the main command handler response before entering the loop */
|
||||
if (ret == 0 && dev_data->psp_ret != SEV_RET_SUCCESS)
|
||||
return -EINVAL;
|
||||
|
||||
if (ret <= 0)
|
||||
return ret;
|
||||
|
||||
/* ret > 0 means "SPDM requested" */
|
||||
while (ret == PCI_DOE_FEATURE_CMA || ret == PCI_DOE_FEATURE_SSESSION) {
|
||||
ret = pci_doe(dsm->tsm.doe_mb, PCI_VENDOR_ID_PCI_SIG, ret,
|
||||
spdm->req, spdm->req_len, spdm->rsp, spdm->rsp_len);
|
||||
if (ret < 0)
|
||||
break;
|
||||
|
||||
WARN_ON_ONCE(ret == 0); /* The response should never be empty */
|
||||
spdm->rsp_len = ret;
|
||||
ret = sev_tio_continue(dev_data);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int stream_enable(struct pci_ide *ide)
|
||||
{
|
||||
struct pci_dev *rp = pcie_find_root_port(ide->pdev);
|
||||
int ret;
|
||||
|
||||
ret = pci_ide_stream_enable(rp, ide);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = pci_ide_stream_enable(ide->pdev, ide);
|
||||
if (ret)
|
||||
pci_ide_stream_disable(rp, ide);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int streams_enable(struct pci_ide **ide)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
for (int i = 0; i < TIO_IDE_MAX_TC; ++i) {
|
||||
if (ide[i]) {
|
||||
ret = stream_enable(ide[i]);
|
||||
if (ret)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void stream_disable(struct pci_ide *ide)
|
||||
{
|
||||
pci_ide_stream_disable(ide->pdev, ide);
|
||||
pci_ide_stream_disable(pcie_find_root_port(ide->pdev), ide);
|
||||
}
|
||||
|
||||
static void streams_disable(struct pci_ide **ide)
|
||||
{
|
||||
for (int i = 0; i < TIO_IDE_MAX_TC; ++i)
|
||||
if (ide[i])
|
||||
stream_disable(ide[i]);
|
||||
}
|
||||
|
||||
static void stream_setup(struct pci_ide *ide)
|
||||
{
|
||||
struct pci_dev *rp = pcie_find_root_port(ide->pdev);
|
||||
|
||||
ide->partner[PCI_IDE_EP].rid_start = 0;
|
||||
ide->partner[PCI_IDE_EP].rid_end = 0xffff;
|
||||
ide->partner[PCI_IDE_RP].rid_start = 0;
|
||||
ide->partner[PCI_IDE_RP].rid_end = 0xffff;
|
||||
|
||||
ide->pdev->ide_cfg = 0;
|
||||
ide->pdev->ide_tee_limit = 1;
|
||||
rp->ide_cfg = 1;
|
||||
rp->ide_tee_limit = 0;
|
||||
|
||||
pci_warn(ide->pdev, "Forcing CFG/TEE for %s", pci_name(rp));
|
||||
pci_ide_stream_setup(ide->pdev, ide);
|
||||
pci_ide_stream_setup(rp, ide);
|
||||
}
|
||||
|
||||
static u8 streams_setup(struct pci_ide **ide, u8 *ids)
|
||||
{
|
||||
bool def = false;
|
||||
u8 tc_mask = 0;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < TIO_IDE_MAX_TC; ++i) {
|
||||
if (!ide[i]) {
|
||||
ids[i] = 0xFF;
|
||||
continue;
|
||||
}
|
||||
|
||||
tc_mask |= BIT(i);
|
||||
ids[i] = ide[i]->stream_id;
|
||||
|
||||
if (!def) {
|
||||
struct pci_ide_partner *settings;
|
||||
|
||||
settings = pci_ide_to_settings(ide[i]->pdev, ide[i]);
|
||||
settings->default_stream = 1;
|
||||
def = true;
|
||||
}
|
||||
|
||||
stream_setup(ide[i]);
|
||||
}
|
||||
|
||||
return tc_mask;
|
||||
}
|
||||
|
||||
static int streams_register(struct pci_ide **ide)
|
||||
{
|
||||
int ret = 0, i;
|
||||
|
||||
for (i = 0; i < TIO_IDE_MAX_TC; ++i) {
|
||||
if (ide[i]) {
|
||||
ret = pci_ide_stream_register(ide[i]);
|
||||
if (ret)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void streams_unregister(struct pci_ide **ide)
|
||||
{
|
||||
for (int i = 0; i < TIO_IDE_MAX_TC; ++i)
|
||||
if (ide[i])
|
||||
pci_ide_stream_unregister(ide[i]);
|
||||
}
|
||||
|
||||
static void stream_teardown(struct pci_ide *ide)
|
||||
{
|
||||
pci_ide_stream_teardown(ide->pdev, ide);
|
||||
pci_ide_stream_teardown(pcie_find_root_port(ide->pdev), ide);
|
||||
}
|
||||
|
||||
static void streams_teardown(struct pci_ide **ide)
|
||||
{
|
||||
for (int i = 0; i < TIO_IDE_MAX_TC; ++i) {
|
||||
if (ide[i]) {
|
||||
stream_teardown(ide[i]);
|
||||
pci_ide_stream_free(ide[i]);
|
||||
ide[i] = NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int stream_alloc(struct pci_dev *pdev, struct pci_ide **ide,
|
||||
unsigned int tc)
|
||||
{
|
||||
struct pci_dev *rp = pcie_find_root_port(pdev);
|
||||
struct pci_ide *ide1;
|
||||
|
||||
if (ide[tc]) {
|
||||
pci_err(pdev, "Stream for class=%d already registered", tc);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
/* FIXME: find a better way */
|
||||
if (nr_ide_streams != TIO_DEFAULT_NR_IDE_STREAMS)
|
||||
pci_notice(pdev, "Enable non-default %d streams", nr_ide_streams);
|
||||
pci_ide_set_nr_streams(to_pci_host_bridge(rp->bus->bridge), nr_ide_streams);
|
||||
|
||||
ide1 = pci_ide_stream_alloc(pdev);
|
||||
if (!ide1)
|
||||
return -ENOMEM;
|
||||
|
||||
/* Blindly assign streamid=0 to TC=0, and so on */
|
||||
ide1->stream_id = tc;
|
||||
|
||||
ide[tc] = ide1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct pci_tsm *tio_pf0_probe(struct pci_dev *pdev, struct sev_device *sev)
|
||||
{
|
||||
struct tio_dsm *dsm __free(kfree) = kzalloc(sizeof(*dsm), GFP_KERNEL);
|
||||
int rc;
|
||||
|
||||
if (!dsm)
|
||||
return NULL;
|
||||
|
||||
rc = pci_tsm_pf0_constructor(pdev, &dsm->tsm, sev->tsmdev);
|
||||
if (rc)
|
||||
return NULL;
|
||||
|
||||
pci_dbg(pdev, "TSM enabled\n");
|
||||
dsm->sev = sev;
|
||||
return &no_free_ptr(dsm)->tsm.base_tsm;
|
||||
}
|
||||
|
||||
static struct pci_tsm *dsm_probe(struct tsm_dev *tsmdev, struct pci_dev *pdev)
|
||||
{
|
||||
struct sev_device *sev = tsm_dev_to_sev(tsmdev);
|
||||
|
||||
if (is_pci_tsm_pf0(pdev))
|
||||
return tio_pf0_probe(pdev, sev);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void dsm_remove(struct pci_tsm *tsm)
|
||||
{
|
||||
struct pci_dev *pdev = tsm->pdev;
|
||||
|
||||
pci_dbg(pdev, "TSM disabled\n");
|
||||
|
||||
if (is_pci_tsm_pf0(pdev)) {
|
||||
struct tio_dsm *dsm = container_of(tsm, struct tio_dsm, tsm.base_tsm);
|
||||
|
||||
pci_tsm_pf0_destructor(&dsm->tsm);
|
||||
kfree(dsm);
|
||||
}
|
||||
}
|
||||
|
||||
static int dsm_create(struct tio_dsm *dsm)
|
||||
{
|
||||
struct pci_dev *pdev = dsm->tsm.base_tsm.pdev;
|
||||
u8 segment_id = pdev->bus ? pci_domain_nr(pdev->bus) : 0;
|
||||
struct pci_dev *rootport = pcie_find_root_port(pdev);
|
||||
u16 device_id = pci_dev_id(pdev);
|
||||
u16 root_port_id;
|
||||
u32 lnkcap = 0;
|
||||
|
||||
if (pci_read_config_dword(rootport, pci_pcie_cap(rootport) + PCI_EXP_LNKCAP,
|
||||
&lnkcap))
|
||||
return -ENODEV;
|
||||
|
||||
root_port_id = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
|
||||
|
||||
return sev_tio_dev_create(&dsm->data, device_id, root_port_id, segment_id);
|
||||
}
|
||||
|
||||
static int dsm_connect(struct pci_dev *pdev)
|
||||
{
|
||||
struct tio_dsm *dsm = pdev_to_tio_dsm(pdev);
|
||||
struct tsm_dsm_tio *dev_data = &dsm->data;
|
||||
u8 ids[TIO_IDE_MAX_TC];
|
||||
u8 tc_mask;
|
||||
int ret;
|
||||
|
||||
if (pci_find_doe_mailbox(pdev, PCI_VENDOR_ID_PCI_SIG,
|
||||
PCI_DOE_FEATURE_SSESSION) != dsm->tsm.doe_mb) {
|
||||
pci_err(pdev, "CMA DOE MB must support SSESSION\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
ret = stream_alloc(pdev, dev_data->ide, 0);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = dsm_create(dsm);
|
||||
if (ret)
|
||||
goto ide_free_exit;
|
||||
|
||||
tc_mask = streams_setup(dev_data->ide, ids);
|
||||
|
||||
ret = sev_tio_dev_connect(dev_data, tc_mask, ids, dev_data->cert_slot);
|
||||
ret = sev_tio_spdm_cmd(dsm, ret);
|
||||
if (ret)
|
||||
goto free_exit;
|
||||
|
||||
streams_enable(dev_data->ide);
|
||||
|
||||
ret = streams_register(dev_data->ide);
|
||||
if (ret)
|
||||
goto free_exit;
|
||||
|
||||
return 0;
|
||||
|
||||
free_exit:
|
||||
sev_tio_dev_reclaim(dev_data);
|
||||
|
||||
streams_disable(dev_data->ide);
|
||||
ide_free_exit:
|
||||
|
||||
streams_teardown(dev_data->ide);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void dsm_disconnect(struct pci_dev *pdev)
|
||||
{
|
||||
bool force = SYSTEM_HALT <= system_state && system_state <= SYSTEM_RESTART;
|
||||
struct tio_dsm *dsm = pdev_to_tio_dsm(pdev);
|
||||
struct tsm_dsm_tio *dev_data = &dsm->data;
|
||||
int ret;
|
||||
|
||||
ret = sev_tio_dev_disconnect(dev_data, force);
|
||||
ret = sev_tio_spdm_cmd(dsm, ret);
|
||||
if (ret && !force) {
|
||||
ret = sev_tio_dev_disconnect(dev_data, true);
|
||||
sev_tio_spdm_cmd(dsm, ret);
|
||||
}
|
||||
|
||||
sev_tio_dev_reclaim(dev_data);
|
||||
|
||||
streams_disable(dev_data->ide);
|
||||
streams_unregister(dev_data->ide);
|
||||
streams_teardown(dev_data->ide);
|
||||
}
|
||||
|
||||
static struct pci_tsm_ops sev_tsm_ops = {
|
||||
.probe = dsm_probe,
|
||||
.remove = dsm_remove,
|
||||
.connect = dsm_connect,
|
||||
.disconnect = dsm_disconnect,
|
||||
};
|
||||
|
||||
void sev_tsm_init_locked(struct sev_device *sev, void *tio_status_page)
|
||||
{
|
||||
struct sev_tio_status *t = kzalloc(sizeof(*t), GFP_KERNEL);
|
||||
struct tsm_dev *tsmdev;
|
||||
int ret;
|
||||
|
||||
WARN_ON(sev->tio_status);
|
||||
|
||||
if (!t)
|
||||
return;
|
||||
|
||||
ret = sev_tio_init_locked(tio_status_page);
|
||||
if (ret) {
|
||||
pr_warn("SEV-TIO STATUS failed with %d\n", ret);
|
||||
goto error_exit;
|
||||
}
|
||||
|
||||
tsmdev = tsm_register(sev->dev, &sev_tsm_ops);
|
||||
	if (IS_ERR(tsmdev)) {
		ret = PTR_ERR(tsmdev);
		goto error_exit;
	}
|
||||
|
||||
memcpy(t, tio_status_page, sizeof(*t));
|
||||
|
||||
pr_notice("SEV-TIO status: EN=%d INIT_DONE=%d rq=%d..%d rs=%d..%d "
|
||||
"scr=%d..%d out=%d..%d dev=%d tdi=%d algos=%x\n",
|
||||
t->tio_en, t->tio_init_done,
|
||||
t->spdm_req_size_min, t->spdm_req_size_max,
|
||||
t->spdm_rsp_size_min, t->spdm_rsp_size_max,
|
||||
t->spdm_scratch_size_min, t->spdm_scratch_size_max,
|
||||
t->spdm_out_size_min, t->spdm_out_size_max,
|
||||
t->devctx_size, t->tdictx_size,
|
||||
t->tio_crypto_alg);
|
||||
|
||||
sev->tsmdev = tsmdev;
|
||||
sev->tio_status = t;
|
||||
|
||||
return;
|
||||
|
||||
error_exit:
	pr_err("Failed to enable SEV-TIO: ret=%d en=%d initdone=%d SEV=%d\n",
	       ret, t->tio_en, t->tio_init_done, boot_cpu_has(X86_FEATURE_SEV));
	kfree(t);
|
||||
}
|
||||
|
||||
void sev_tsm_uninit(struct sev_device *sev)
|
||||
{
|
||||
if (sev->tsmdev)
|
||||
tsm_unregister(sev->tsmdev);
|
||||
|
||||
sev->tsmdev = NULL;
|
||||
}
|
||||
@@ -75,6 +75,14 @@ static bool psp_init_on_probe = true;
|
||||
module_param(psp_init_on_probe, bool, 0444);
|
||||
MODULE_PARM_DESC(psp_init_on_probe, " if true, the PSP will be initialized on module init. Else the PSP will be initialized on the first command requiring it");
|
||||
|
||||
#if IS_ENABLED(CONFIG_PCI_TSM)
|
||||
static bool sev_tio_enabled = true;
|
||||
module_param_named(tio, sev_tio_enabled, bool, 0444);
|
||||
MODULE_PARM_DESC(tio, "Enables TIO in SNP_INIT_EX");
|
||||
#else
|
||||
static const bool sev_tio_enabled = false;
|
||||
#endif
|
||||
|
||||
MODULE_FIRMWARE("amd/amd_sev_fam17h_model0xh.sbin"); /* 1st gen EPYC */
|
||||
MODULE_FIRMWARE("amd/amd_sev_fam17h_model3xh.sbin"); /* 2nd gen EPYC */
|
||||
MODULE_FIRMWARE("amd/amd_sev_fam19h_model0xh.sbin"); /* 3rd gen EPYC */
|
||||
@@ -251,7 +259,7 @@ static int sev_cmd_buffer_len(int cmd)
|
||||
case SEV_CMD_SNP_COMMIT: return sizeof(struct sev_data_snp_commit);
|
||||
case SEV_CMD_SNP_FEATURE_INFO: return sizeof(struct sev_data_snp_feature_info);
|
||||
case SEV_CMD_SNP_VLEK_LOAD: return sizeof(struct sev_user_data_snp_vlek_load);
|
||||
default: return 0;
|
||||
default: return sev_tio_cmd_buffer_len(cmd);
|
||||
}
|
||||
|
||||
return 0;
|
||||
@@ -380,13 +388,7 @@ static int sev_write_init_ex_file_if_required(int cmd_id)
|
||||
return sev_write_init_ex_file();
|
||||
}
|
||||
|
||||
/*
|
||||
* snp_reclaim_pages() needs __sev_do_cmd_locked(), and __sev_do_cmd_locked()
|
||||
* needs snp_reclaim_pages(), so a forward declaration is needed.
|
||||
*/
|
||||
static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret);
|
||||
|
||||
static int snp_reclaim_pages(unsigned long paddr, unsigned int npages, bool locked)
|
||||
int snp_reclaim_pages(unsigned long paddr, unsigned int npages, bool locked)
|
||||
{
|
||||
int ret, err, i;
|
||||
|
||||
@@ -420,6 +422,7 @@ cleanup:
|
||||
snp_leak_pages(__phys_to_pfn(paddr), npages - i);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(snp_reclaim_pages);
|
||||
|
||||
static int rmp_mark_pages_firmware(unsigned long paddr, unsigned int npages, bool locked)
|
||||
{
|
||||
@@ -850,7 +853,7 @@ static int snp_reclaim_cmd_buf(int cmd, void *cmd_buf)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
|
||||
int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
|
||||
{
|
||||
struct cmd_buf_desc desc_list[CMD_BUF_DESC_MAX] = {0};
|
||||
struct psp_device *psp = psp_master;
|
||||
@@ -1392,6 +1395,8 @@ static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid)
|
||||
*
|
||||
*/
|
||||
if (sev_version_greater_or_equal(SNP_MIN_API_MAJOR, 52)) {
|
||||
bool tio_supp = !!(sev->snp_feat_info_0.ebx & SNP_SEV_TIO_SUPPORTED);
|
||||
|
||||
/*
|
||||
* Firmware checks that the pages containing the ranges enumerated
|
||||
* in the RANGES structure are either in the default page state or in the
|
||||
@@ -1432,6 +1437,17 @@ static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid)
|
||||
data.init_rmp = 1;
|
||||
data.list_paddr_en = 1;
|
||||
data.list_paddr = __psp_pa(snp_range_list);
|
||||
|
||||
data.tio_en = tio_supp && sev_tio_enabled && amd_iommu_sev_tio_supported();
|
||||
|
||||
/*
|
||||
* When psp_init_on_probe is disabled, the userspace calling
|
||||
* SEV ioctl can inadvertently shut down SNP and SEV-TIO causing
|
||||
* unexpected state loss.
|
||||
*/
|
||||
if (data.tio_en && !psp_init_on_probe)
|
||||
dev_warn(sev->dev, "SEV-TIO as incompatible with psp_init_on_probe=0\n");
|
||||
|
||||
cmd = SEV_CMD_SNP_INIT_EX;
|
||||
} else {
|
||||
cmd = SEV_CMD_SNP_INIT;
|
||||
@@ -1469,7 +1485,8 @@ static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid)
|
||||
|
||||
snp_hv_fixed_pages_state_update(sev, HV_FIXED);
|
||||
sev->snp_initialized = true;
|
||||
dev_dbg(sev->dev, "SEV-SNP firmware initialized\n");
|
||||
dev_dbg(sev->dev, "SEV-SNP firmware initialized, SEV-TIO is %s\n",
|
||||
data.tio_en ? "enabled" : "disabled");
|
||||
|
||||
dev_info(sev->dev, "SEV-SNP API:%d.%d build:%d\n", sev->api_major,
|
||||
sev->api_minor, sev->build);
|
||||
@@ -1477,6 +1494,23 @@ static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid)
|
||||
atomic_notifier_chain_register(&panic_notifier_list,
|
||||
&snp_panic_notifier);
|
||||
|
||||
if (data.tio_en) {
|
||||
/*
|
||||
* This executes with the sev_cmd_mutex held, so further down the stack
* snp_reclaim_pages(locked=false) might be needed (which is extremely
* unlikely) and would deadlock.
|
||||
* Instead of exporting __snp_alloc_firmware_pages(), allocate a page
|
||||
* for this one call here.
|
||||
*/
|
||||
void *tio_status = page_address(__snp_alloc_firmware_pages(
|
||||
GFP_KERNEL_ACCOUNT | __GFP_ZERO, 0, true));
|
||||
|
||||
if (tio_status) {
|
||||
sev_tsm_init_locked(sev, tio_status);
|
||||
__snp_free_firmware_pages(virt_to_page(tio_status), 0, true);
|
||||
}
|
||||
}
|
||||
|
||||
sev_es_tmr_size = SNP_TMR_SIZE;
|
||||
|
||||
return 0;
|
||||
@@ -2756,8 +2790,20 @@ static void __sev_firmware_shutdown(struct sev_device *sev, bool panic)
|
||||
|
||||
static void sev_firmware_shutdown(struct sev_device *sev)
|
||||
{
|
||||
/*
|
||||
* Calling without sev_cmd_mutex held as TSM will likely try disconnecting
|
||||
* IDE and this ends up calling sev_do_cmd() which locks sev_cmd_mutex.
|
||||
*/
|
||||
if (sev->tio_status)
|
||||
sev_tsm_uninit(sev);
|
||||
|
||||
mutex_lock(&sev_cmd_mutex);
|
||||
|
||||
__sev_firmware_shutdown(sev, false);
|
||||
|
||||
kfree(sev->tio_status);
|
||||
sev->tio_status = NULL;
|
||||
|
||||
mutex_unlock(&sev_cmd_mutex);
|
||||
}
|
||||
|
||||
|
||||
@@ -34,6 +34,8 @@ struct sev_misc_dev {
|
||||
struct miscdevice misc;
|
||||
};
|
||||
|
||||
struct sev_tio_status;
|
||||
|
||||
struct sev_device {
|
||||
struct device *dev;
|
||||
struct psp_device *psp;
|
||||
@@ -61,15 +63,24 @@ struct sev_device {
|
||||
|
||||
struct sev_user_data_snp_status snp_plat_status;
|
||||
struct snp_feature_info snp_feat_info_0;
|
||||
|
||||
struct tsm_dev *tsmdev;
|
||||
struct sev_tio_status *tio_status;
|
||||
};
|
||||
|
||||
int sev_dev_init(struct psp_device *psp);
|
||||
void sev_dev_destroy(struct psp_device *psp);
|
||||
|
||||
int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret);
|
||||
|
||||
void sev_pci_init(void);
|
||||
void sev_pci_exit(void);
|
||||
|
||||
struct page *snp_alloc_hv_fixed_pages(unsigned int num_2mb_pages);
|
||||
void snp_free_hv_fixed_pages(struct page *page);
|
||||
|
||||
void sev_tsm_init_locked(struct sev_device *sev, void *tio_status_page);
|
||||
void sev_tsm_uninit(struct sev_device *sev);
|
||||
int sev_tio_cmd_buffer_len(int cmd);
|
||||
|
||||
#endif /* __SEV_DEV_H */
|
||||
|
||||
@@ -1,18 +1,12 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/* Copyright(c) 2025 Intel Corporation */
|
||||
#include <linux/bitfield.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/sprintf.h>
|
||||
#include <linux/string_helpers.h>
|
||||
|
||||
#include "adf_pm_dbgfs_utils.h"
|
||||
|
||||
/*
|
||||
* This is needed because a variable is used to index the mask at
|
||||
* pm_scnprint_table(), making it not compile time constant, so the compile
|
||||
* asserts from FIELD_GET() or u32_get_bits() won't be fulfilled.
|
||||
*/
|
||||
#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
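The comment above motivates an accessor whose mask is only known at run time; a brief sketch of such a call site, with hypothetical table and register names:

/*
 * Sketch: masks[idx] is not a compile-time constant, so FIELD_GET()
 * would trip its compile-time assert; field_get() has no such check.
 */
static u32 pm_read_field(const u32 *masks, int idx, u32 reg)
{
	return field_get(masks[idx], reg);
}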
|
||||
|
||||
#define PM_INFO_MAX_KEY_LEN 21
|
||||
|
||||
static int pm_scnprint_table(char *buff, const struct pm_status_row *table,
|
||||
|
||||
@@ -44,6 +44,7 @@
|
||||
* but lo_hi_readq() ensures that we are safe across all e3-1200 processors.
|
||||
*/
|
||||
|
||||
#include <linux/bitfield.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/pci.h>
|
||||
@@ -139,9 +140,6 @@
|
||||
#define IE31200_CAPID0_DDPCD BIT(6)
|
||||
#define IE31200_CAPID0_ECC BIT(1)
|
||||
|
||||
/* Non-constant mask variant of FIELD_GET() */
|
||||
#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
|
||||
|
||||
static int nr_channels;
|
||||
static struct pci_dev *mci_pdev;
|
||||
static int ie31200_registered = 1;
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
* Joel Stanley <joel@jms.id.au>
|
||||
*/
|
||||
|
||||
#include <linux/bitfield.h>
|
||||
#include <linux/cleanup.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/gpio/aspeed.h>
|
||||
@@ -30,10 +31,6 @@
|
||||
*/
|
||||
#include <linux/gpio/consumer.h>
|
||||
|
||||
/* Non-constant mask variant of FIELD_GET() and FIELD_PREP() */
|
||||
#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
|
||||
#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
|
||||
|
||||
#define GPIO_G7_IRQ_STS_BASE 0x100
|
||||
#define GPIO_G7_IRQ_STS_OFFSET(x) (GPIO_G7_IRQ_STS_BASE + (x) * 0x4)
|
||||
#define GPIO_G7_CTRL_REG_BASE 0x180
|
||||
|
||||
@@ -53,9 +53,6 @@
|
||||
#define AD3530R_MAX_CHANNELS 8
|
||||
#define AD3531R_MAX_CHANNELS 4
|
||||
|
||||
/* Non-constant mask variant of FIELD_PREP() */
|
||||
#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
|
||||
|
||||
enum ad3530r_mode {
|
||||
AD3530R_NORMAL_OP,
|
||||
AD3530R_POWERDOWN_1K,
|
||||
|
||||
@@ -22,6 +22,7 @@
|
||||
* the "wakeup" GPIO is not given, power management will be disabled.
|
||||
*/
|
||||
|
||||
#include <linux/bitfield.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/gpio/consumer.h>
|
||||
@@ -68,10 +69,6 @@
|
||||
#define MLX90614_CONST_SCALE 20 /* Scale in milliKelvin (0.02 * 1000) */
|
||||
#define MLX90614_CONST_FIR 0x7 /* Fixed value for FIR part of low pass filter */
|
||||
|
||||
/* Non-constant mask variant of FIELD_GET() and FIELD_PREP() */
|
||||
#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
|
||||
#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
|
||||
|
||||
struct mlx_chip_info {
|
||||
/* EEPROM offsets with 16-bit data, MSB first */
|
||||
/* emissivity correction coefficient */
|
||||
|
||||
@@ -107,6 +107,7 @@
|
||||
|
||||
|
||||
/* Extended Feature 2 Bits */
|
||||
#define FEATURE_SEVSNPIO_SUP BIT_ULL(1)
|
||||
#define FEATURE_SNPAVICSUP GENMASK_ULL(7, 5)
|
||||
#define FEATURE_SNPAVICSUP_GAM(x) \
|
||||
(FIELD_GET(FEATURE_SNPAVICSUP, x) == 0x1)
|
||||
|
||||
@@ -2261,6 +2261,9 @@ static void print_iommu_info(void)
|
||||
if (check_feature(FEATURE_SNP))
|
||||
pr_cont(" SNP");
|
||||
|
||||
if (check_feature2(FEATURE_SEVSNPIO_SUP))
|
||||
pr_cont(" SEV-TIO");
|
||||
|
||||
pr_cont("\n");
|
||||
}
|
||||
|
||||
@@ -4028,4 +4031,10 @@ int amd_iommu_snp_disable(void)
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(amd_iommu_snp_disable);
|
||||
|
||||
bool amd_iommu_sev_tio_supported(void)
|
||||
{
|
||||
return check_feature2(FEATURE_SEVSNPIO_SUP);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(amd_iommu_sev_tio_supported);
|
||||
#endif
|
||||
|
||||
@@ -10,7 +10,7 @@
|
||||
|
||||
|
||||
/* driver definitions */
|
||||
#define DRIVER_AUTHOR "Joonyoung Shim <jy0922.shim@samsung.com>";
|
||||
#define DRIVER_AUTHOR "Joonyoung Shim <jy0922.shim@samsung.com>"
|
||||
#define DRIVER_CARD "Silicon Labs Si470x FM Radio"
|
||||
#define DRIVER_DESC "I2C radio driver for Si470x FM Radio Receivers"
|
||||
#define DRIVER_VERSION "1.0.2"
|
||||
|
||||
@@ -70,12 +70,12 @@
|
||||
#include "ts2020.h"
|
||||
|
||||
|
||||
#define LME2510_C_S7395 "dvb-usb-lme2510c-s7395.fw";
|
||||
#define LME2510_C_LG "dvb-usb-lme2510c-lg.fw";
|
||||
#define LME2510_C_S0194 "dvb-usb-lme2510c-s0194.fw";
|
||||
#define LME2510_C_RS2000 "dvb-usb-lme2510c-rs2000.fw";
|
||||
#define LME2510_LG "dvb-usb-lme2510-lg.fw";
|
||||
#define LME2510_S0194 "dvb-usb-lme2510-s0194.fw";
|
||||
#define LME2510_C_S7395 "dvb-usb-lme2510c-s7395.fw"
|
||||
#define LME2510_C_LG "dvb-usb-lme2510c-lg.fw"
|
||||
#define LME2510_C_S0194 "dvb-usb-lme2510c-s0194.fw"
|
||||
#define LME2510_C_RS2000 "dvb-usb-lme2510c-rs2000.fw"
|
||||
#define LME2510_LG "dvb-usb-lme2510-lg.fw"
|
||||
#define LME2510_S0194 "dvb-usb-lme2510-s0194.fw"
|
||||
|
||||
/* debug */
|
||||
static int dvb_usb_lme2510_debug;
|
||||
|
||||
@@ -97,6 +97,25 @@ config OF_PMEM
|
||||
|
||||
Select Y if unsure.
|
||||
|
||||
config RAMDAX
|
||||
tristate "Support persistent memory interfaces on RAM carveouts"
|
||||
depends on X86_PMEM_LEGACY || OF || COMPILE_TEST
|
||||
default LIBNVDIMM
|
||||
help
|
||||
Allows creation of DAX devices on RAM carveouts.
|
||||
|
||||
Memory ranges that are manually specified with the
'memmap=nn[KMG]!ss[KMG]' kernel command line option or defined by dummy
pmem-region device tree nodes are managed by this driver as DIMM
devices with support for dynamic layout of namespaces.
The driver reserves 128K at the end of the range for
namespace management, which allows supporting up to 509 namespaces
(see 'ndctl create-namespace --help').
The driver should be force-bound to e820_pmem or pmem-region platform
devices using the 'driver_override' device attribute.
|
||||
|
||||
Select N if unsure.
|
||||
|
||||
config NVDIMM_KEYS
|
||||
def_bool y
|
||||
depends on ENCRYPTED_KEYS
|
||||
|
||||
@@ -5,6 +5,7 @@ obj-$(CONFIG_ND_BTT) += nd_btt.o
|
||||
obj-$(CONFIG_X86_PMEM_LEGACY) += nd_e820.o
|
||||
obj-$(CONFIG_OF_PMEM) += of_pmem.o
|
||||
obj-$(CONFIG_VIRTIO_PMEM) += virtio_pmem.o nd_virtio.o
|
||||
obj-$(CONFIG_RAMDAX) += ramdax.o
|
||||
|
||||
nd_pmem-y := pmem.o
|
||||
|
||||
|
||||
drivers/nvdimm/ramdax.c (new file, +282)
@@ -0,0 +1,282 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2025, Mike Rapoport, Microsoft
|
||||
*
|
||||
* Based on e820 pmem driver:
|
||||
* Copyright (c) 2015, Christoph Hellwig.
|
||||
* Copyright (c) 2015, Intel Corporation.
|
||||
*/
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/memory_hotplug.h>
|
||||
#include <linux/libnvdimm.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/numa.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/of.h>
|
||||
|
||||
#include <uapi/linux/ndctl.h>
|
||||
|
||||
#define LABEL_AREA_SIZE SZ_128K
|
||||
|
||||
struct ramdax_dimm {
|
||||
struct nvdimm *nvdimm;
|
||||
void *label_area;
|
||||
};
|
||||
|
||||
static void ramdax_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct nvdimm_bus *nvdimm_bus = platform_get_drvdata(pdev);
|
||||
|
||||
nvdimm_bus_unregister(nvdimm_bus);
|
||||
}
|
||||
|
||||
static int ramdax_register_region(struct resource *res,
|
||||
struct nvdimm *nvdimm,
|
||||
struct nvdimm_bus *nvdimm_bus)
|
||||
{
|
||||
struct nd_mapping_desc mapping;
|
||||
struct nd_region_desc ndr_desc;
|
||||
struct nd_interleave_set *nd_set;
|
||||
int nid = phys_to_target_node(res->start);
|
||||
|
||||
nd_set = kzalloc(sizeof(*nd_set), GFP_KERNEL);
|
||||
if (!nd_set)
|
||||
return -ENOMEM;
|
||||
|
||||
nd_set->cookie1 = 0xcafebeefcafebeef;
|
||||
nd_set->cookie2 = nd_set->cookie1;
|
||||
nd_set->altcookie = nd_set->cookie1;
|
||||
|
||||
memset(&mapping, 0, sizeof(mapping));
|
||||
mapping.nvdimm = nvdimm;
|
||||
mapping.start = 0;
|
||||
mapping.size = resource_size(res) - LABEL_AREA_SIZE;
|
||||
|
||||
memset(&ndr_desc, 0, sizeof(ndr_desc));
|
||||
ndr_desc.res = res;
|
||||
ndr_desc.numa_node = numa_map_to_online_node(nid);
|
||||
ndr_desc.target_node = nid;
|
||||
ndr_desc.num_mappings = 1;
|
||||
ndr_desc.mapping = &mapping;
|
||||
ndr_desc.nd_set = nd_set;
|
||||
|
||||
if (!nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc))
|
||||
goto err_free_nd_set;
|
||||
|
||||
return 0;
|
||||
|
||||
err_free_nd_set:
|
||||
kfree(nd_set);
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
static int ramdax_register_dimm(struct resource *res, void *data)
|
||||
{
|
||||
resource_size_t start = res->start;
|
||||
resource_size_t size = resource_size(res);
|
||||
unsigned long flags = 0, cmd_mask = 0;
|
||||
struct nvdimm_bus *nvdimm_bus = data;
|
||||
struct ramdax_dimm *dimm;
|
||||
int err;
|
||||
|
||||
dimm = kzalloc(sizeof(*dimm), GFP_KERNEL);
|
||||
if (!dimm)
|
||||
return -ENOMEM;
|
||||
|
||||
dimm->label_area = memremap(start + size - LABEL_AREA_SIZE,
|
||||
LABEL_AREA_SIZE, MEMREMAP_WB);
|
||||
if (!dimm->label_area) {
|
||||
err = -ENOMEM;
|
||||
goto err_free_dimm;
|
||||
}
|
||||
|
||||
set_bit(NDD_LABELING, &flags);
|
||||
set_bit(NDD_REGISTER_SYNC, &flags);
|
||||
set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
|
||||
set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
|
||||
set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
|
||||
dimm->nvdimm = nvdimm_create(nvdimm_bus, dimm,
|
||||
/* dimm_attribute_groups */ NULL,
|
||||
flags, cmd_mask, 0, NULL);
|
||||
if (!dimm->nvdimm) {
|
||||
err = -ENOMEM;
|
||||
goto err_unmap_label;
|
||||
}
|
||||
|
||||
err = ramdax_register_region(res, dimm->nvdimm, nvdimm_bus);
|
||||
if (err)
|
||||
goto err_remove_nvdimm;
|
||||
|
||||
return 0;
|
||||
|
||||
err_remove_nvdimm:
|
||||
nvdimm_delete(dimm->nvdimm);
|
||||
err_unmap_label:
|
||||
memunmap(dimm->label_area);
|
||||
err_free_dimm:
|
||||
kfree(dimm);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int ramdax_get_config_size(struct nvdimm *nvdimm, int buf_len,
|
||||
struct nd_cmd_get_config_size *cmd)
|
||||
{
|
||||
if (sizeof(*cmd) > buf_len)
|
||||
return -EINVAL;
|
||||
|
||||
*cmd = (struct nd_cmd_get_config_size){
|
||||
.status = 0,
|
||||
.config_size = LABEL_AREA_SIZE,
|
||||
.max_xfer = 8,
|
||||
};
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ramdax_get_config_data(struct nvdimm *nvdimm, int buf_len,
|
||||
struct nd_cmd_get_config_data_hdr *cmd)
|
||||
{
|
||||
struct ramdax_dimm *dimm = nvdimm_provider_data(nvdimm);
|
||||
|
||||
if (sizeof(*cmd) > buf_len)
|
||||
return -EINVAL;
|
||||
if (struct_size(cmd, out_buf, cmd->in_length) > buf_len)
|
||||
return -EINVAL;
|
||||
if (size_add(cmd->in_offset, cmd->in_length) > LABEL_AREA_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
memcpy(cmd->out_buf, dimm->label_area + cmd->in_offset, cmd->in_length);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ramdax_set_config_data(struct nvdimm *nvdimm, int buf_len,
|
||||
struct nd_cmd_set_config_hdr *cmd)
|
||||
{
|
||||
struct ramdax_dimm *dimm = nvdimm_provider_data(nvdimm);
|
||||
|
||||
if (sizeof(*cmd) > buf_len)
|
||||
return -EINVAL;
|
||||
if (struct_size(cmd, in_buf, cmd->in_length) > buf_len)
|
||||
return -EINVAL;
|
||||
if (size_add(cmd->in_offset, cmd->in_length) > LABEL_AREA_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
memcpy(dimm->label_area + cmd->in_offset, cmd->in_buf, cmd->in_length);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ramdax_nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd,
|
||||
void *buf, unsigned int buf_len)
|
||||
{
|
||||
unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
|
||||
|
||||
if (!test_bit(cmd, &cmd_mask))
|
||||
return -ENOTTY;
|
||||
|
||||
switch (cmd) {
|
||||
case ND_CMD_GET_CONFIG_SIZE:
|
||||
return ramdax_get_config_size(nvdimm, buf_len, buf);
|
||||
case ND_CMD_GET_CONFIG_DATA:
|
||||
return ramdax_get_config_data(nvdimm, buf_len, buf);
|
||||
case ND_CMD_SET_CONFIG_DATA:
|
||||
return ramdax_set_config_data(nvdimm, buf_len, buf);
|
||||
default:
|
||||
return -ENOTTY;
|
||||
}
|
||||
}
|
||||
|
||||
static int ramdax_ctl(struct nvdimm_bus_descriptor *nd_desc,
|
||||
struct nvdimm *nvdimm, unsigned int cmd, void *buf,
|
||||
unsigned int buf_len, int *cmd_rc)
|
||||
{
|
||||
/*
|
||||
* No firmware response to translate, let the transport error
|
||||
* code take precedence.
|
||||
*/
|
||||
*cmd_rc = 0;
|
||||
|
||||
if (!nvdimm)
|
||||
return -ENOTTY;
|
||||
return ramdax_nvdimm_ctl(nvdimm, cmd, buf, buf_len);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_OF
|
||||
static const struct of_device_id ramdax_of_matches[] = {
|
||||
{ .compatible = "pmem-region", },
|
||||
{ },
|
||||
};
|
||||
#endif
|
||||
|
||||
static int ramdax_probe_of(struct platform_device *pdev,
|
||||
struct nvdimm_bus *bus, struct device_node *np)
|
||||
{
|
||||
int err;
|
||||
|
||||
if (!of_match_node(ramdax_of_matches, np))
|
||||
return -ENODEV;
|
||||
|
||||
for (int i = 0; i < pdev->num_resources; i++) {
|
||||
err = ramdax_register_dimm(&pdev->resource[i], bus);
|
||||
if (err)
|
||||
goto err_unregister;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err_unregister:
|
||||
/*
|
||||
* FIXME: should we unregister the dimms that were registered
|
||||
* successfully?
|
||||
*/
|
||||
return err;
|
||||
}
|
||||
|
||||
static int ramdax_probe(struct platform_device *pdev)
|
||||
{
|
||||
static struct nvdimm_bus_descriptor nd_desc;
|
||||
struct device *dev = &pdev->dev;
|
||||
struct nvdimm_bus *nvdimm_bus;
|
||||
struct device_node *np;
|
||||
int rc = -ENXIO;
|
||||
|
||||
nd_desc.provider_name = "ramdax";
|
||||
nd_desc.module = THIS_MODULE;
|
||||
nd_desc.ndctl = ramdax_ctl;
|
||||
nvdimm_bus = nvdimm_bus_register(dev, &nd_desc);
|
||||
if (!nvdimm_bus)
|
||||
goto err;
|
||||
|
||||
np = dev_of_node(&pdev->dev);
|
||||
if (np)
|
||||
rc = ramdax_probe_of(pdev, nvdimm_bus, np);
|
||||
else
|
||||
rc = walk_iomem_res_desc(IORES_DESC_PERSISTENT_MEMORY_LEGACY,
|
||||
IORESOURCE_MEM, 0, -1, nvdimm_bus,
|
||||
ramdax_register_dimm);
|
||||
if (rc)
|
||||
goto err;
|
||||
|
||||
platform_set_drvdata(pdev, nvdimm_bus);
|
||||
|
||||
return 0;
|
||||
err:
|
||||
nvdimm_bus_unregister(nvdimm_bus);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static struct platform_driver ramdax_driver = {
|
||||
.probe = ramdax_probe,
|
||||
.remove = ramdax_remove,
|
||||
.driver = {
|
||||
.name = "ramdax",
|
||||
},
|
||||
};
|
||||
|
||||
module_platform_driver(ramdax_driver);
|
||||
|
||||
MODULE_DESCRIPTION("NVDIMM support for e820 type-12 memory and OF pmem-region");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Microsoft Corporation");
|
||||
@@ -424,7 +424,7 @@ static int security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
|
||||
* query.
|
||||
*/
|
||||
get_device(dev);
|
||||
queue_delayed_work(system_wq, &nvdimm->dwork, 0);
|
||||
queue_delayed_work(system_percpu_wq, &nvdimm->dwork, 0);
|
||||
}
|
||||
|
||||
return rc;
|
||||
@@ -457,7 +457,7 @@ static void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm)
|
||||
|
||||
/* setup delayed work again */
|
||||
tmo += 10;
|
||||
queue_delayed_work(system_wq, &nvdimm->dwork, tmo * HZ);
|
||||
queue_delayed_work(system_percpu_wq, &nvdimm->dwork, tmo * HZ);
|
||||
nvdimm->sec.overwrite_tmo = min(15U * 60U, tmo);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -517,10 +517,10 @@ static u32 hint_lookup[] = {
|
||||
* ccio_io_pdir_entry - Initialize an I/O Pdir.
|
||||
* @pdir_ptr: A pointer into I/O Pdir.
|
||||
* @sid: The Space Identifier.
|
||||
* @vba: The virtual address.
|
||||
* @pba: The physical address.
|
||||
* @hints: The DMA Hint.
|
||||
*
|
||||
* Given a virtual address (vba, arg2) and space id, (sid, arg1),
|
||||
* Given a physical address (pba, arg2) and space id, (sid, arg1),
|
||||
* load the I/O PDIR entry pointed to by pdir_ptr (arg0). Each IO Pdir
|
||||
* entry consists of 8 bytes as shown below (MSB == bit 0):
|
||||
*
|
||||
@@ -543,7 +543,7 @@ static u32 hint_lookup[] = {
|
||||
* index are bits 12:19 of the value returned by LCI.
|
||||
*/
|
||||
static void
|
||||
ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
|
||||
ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, phys_addr_t pba,
|
||||
unsigned long hints)
|
||||
{
|
||||
register unsigned long pa;
|
||||
@@ -557,7 +557,7 @@ ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
|
||||
** "hints" parm includes the VALID bit!
|
||||
** "dep" clobbers the physical address offset bits as well.
|
||||
*/
|
||||
pa = lpa(vba);
|
||||
pa = pba;
|
||||
asm volatile("depw %1,31,12,%0" : "+r" (pa) : "r" (hints));
|
||||
((u32 *)pdir_ptr)[1] = (u32) pa;
|
||||
|
||||
@@ -582,7 +582,7 @@ ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
|
||||
** Grab virtual index [0:11]
|
||||
** Deposit virt_idx bits into I/O PDIR word
|
||||
*/
|
||||
asm volatile ("lci %%r0(%1), %0" : "=r" (ci) : "r" (vba));
|
||||
asm volatile ("lci %%r0(%1), %0" : "=r" (ci) : "r" (phys_to_virt(pba)));
|
||||
asm volatile ("extru %1,19,12,%0" : "+r" (ci) : "r" (ci));
|
||||
asm volatile ("depw %1,15,12,%0" : "+r" (pa) : "r" (ci));
|
||||
|
||||
@@ -704,14 +704,14 @@ ccio_dma_supported(struct device *dev, u64 mask)
|
||||
/**
|
||||
* ccio_map_single - Map an address range into the IOMMU.
|
||||
* @dev: The PCI device.
|
||||
* @addr: The start address of the DMA region.
|
||||
* @addr: The physical address of the DMA region.
|
||||
* @size: The length of the DMA region.
|
||||
* @direction: The direction of the DMA transaction (to/from device).
|
||||
*
|
||||
* This function implements the pci_map_single function.
|
||||
*/
|
||||
static dma_addr_t
|
||||
ccio_map_single(struct device *dev, void *addr, size_t size,
|
||||
ccio_map_single(struct device *dev, phys_addr_t addr, size_t size,
|
||||
enum dma_data_direction direction)
|
||||
{
|
||||
int idx;
|
||||
@@ -730,7 +730,7 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
|
||||
BUG_ON(size <= 0);
|
||||
|
||||
/* save offset bits */
|
||||
offset = ((unsigned long) addr) & ~IOVP_MASK;
|
||||
offset = offset_in_page(addr);
|
||||
|
||||
/* round up to nearest IOVP_SIZE */
|
||||
size = ALIGN(size + offset, IOVP_SIZE);
|
||||
@@ -746,15 +746,15 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
|
||||
|
||||
pdir_start = &(ioc->pdir_base[idx]);
|
||||
|
||||
DBG_RUN("%s() %px -> %#lx size: %zu\n",
|
||||
__func__, addr, (long)(iovp | offset), size);
|
||||
DBG_RUN("%s() %pa -> %#lx size: %zu\n",
|
||||
__func__, &addr, (long)(iovp | offset), size);
|
||||
|
||||
/* If not cacheline aligned, force SAFE_DMA on the whole mess */
|
||||
if((size % L1_CACHE_BYTES) || ((unsigned long)addr % L1_CACHE_BYTES))
|
||||
if ((size % L1_CACHE_BYTES) || (addr % L1_CACHE_BYTES))
|
||||
hint |= HINT_SAFE_DMA;
|
||||
|
||||
while(size > 0) {
|
||||
ccio_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long)addr, hint);
|
||||
ccio_io_pdir_entry(pdir_start, KERNEL_SPACE, addr, hint);
|
||||
|
||||
DBG_RUN(" pdir %p %08x%08x\n",
|
||||
pdir_start,
|
||||
@@ -773,17 +773,18 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
|
||||
|
||||
|
||||
static dma_addr_t
|
||||
ccio_map_page(struct device *dev, struct page *page, unsigned long offset,
|
||||
size_t size, enum dma_data_direction direction,
|
||||
unsigned long attrs)
|
||||
ccio_map_phys(struct device *dev, phys_addr_t phys, size_t size,
|
||||
enum dma_data_direction direction, unsigned long attrs)
|
||||
{
|
||||
return ccio_map_single(dev, page_address(page) + offset, size,
|
||||
direction);
|
||||
if (unlikely(attrs & DMA_ATTR_MMIO))
|
||||
return DMA_MAPPING_ERROR;
|
||||
|
||||
return ccio_map_single(dev, phys, size, direction);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* ccio_unmap_page - Unmap an address range from the IOMMU.
|
||||
* ccio_unmap_phys - Unmap an address range from the IOMMU.
|
||||
* @dev: The PCI device.
|
||||
* @iova: The start address of the DMA region.
|
||||
* @size: The length of the DMA region.
|
||||
@@ -791,7 +792,7 @@ ccio_map_page(struct device *dev, struct page *page, unsigned long offset,
|
||||
* @attrs: attributes
|
||||
*/
|
||||
static void
|
||||
ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
|
||||
ccio_unmap_phys(struct device *dev, dma_addr_t iova, size_t size,
|
||||
enum dma_data_direction direction, unsigned long attrs)
|
||||
{
|
||||
struct ioc *ioc;
|
||||
@@ -853,7 +854,8 @@ ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag,
|
||||
|
||||
if (ret) {
|
||||
memset(ret, 0, size);
|
||||
*dma_handle = ccio_map_single(dev, ret, size, DMA_BIDIRECTIONAL);
|
||||
*dma_handle = ccio_map_single(dev, virt_to_phys(ret), size,
|
||||
DMA_BIDIRECTIONAL);
|
||||
}
|
||||
|
||||
return ret;
|
||||
@@ -873,7 +875,7 @@ static void
|
||||
ccio_free(struct device *dev, size_t size, void *cpu_addr,
|
||||
dma_addr_t dma_handle, unsigned long attrs)
|
||||
{
|
||||
ccio_unmap_page(dev, dma_handle, size, 0, 0);
|
||||
ccio_unmap_phys(dev, dma_handle, size, 0, 0);
|
||||
free_pages((unsigned long)cpu_addr, get_order(size));
|
||||
}
|
||||
|
||||
@@ -920,7 +922,7 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
|
||||
/* Fast path single entry scatterlists. */
|
||||
if (nents == 1) {
|
||||
sg_dma_address(sglist) = ccio_map_single(dev,
|
||||
sg_virt(sglist), sglist->length,
|
||||
sg_phys(sglist), sglist->length,
|
||||
direction);
|
||||
sg_dma_len(sglist) = sglist->length;
|
||||
return 1;
|
||||
@@ -1004,7 +1006,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
|
||||
#ifdef CCIO_COLLECT_STATS
|
||||
ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
|
||||
#endif
|
||||
ccio_unmap_page(dev, sg_dma_address(sglist),
|
||||
ccio_unmap_phys(dev, sg_dma_address(sglist),
|
||||
sg_dma_len(sglist), direction, 0);
|
||||
++sglist;
|
||||
nents--;
|
||||
@@ -1017,8 +1019,8 @@ static const struct dma_map_ops ccio_ops = {
|
||||
.dma_supported = ccio_dma_supported,
|
||||
.alloc = ccio_alloc,
|
||||
.free = ccio_free,
|
||||
.map_page = ccio_map_page,
|
||||
.unmap_page = ccio_unmap_page,
|
||||
.map_phys = ccio_map_phys,
|
||||
.unmap_phys = ccio_unmap_phys,
|
||||
.map_sg = ccio_map_sg,
|
||||
.unmap_sg = ccio_unmap_sg,
|
||||
.get_sgtable = dma_common_get_sgtable,
|
||||
@@ -1072,7 +1074,7 @@ static int ccio_proc_info(struct seq_file *m, void *p)
|
||||
ioc->msingle_calls, ioc->msingle_pages,
|
||||
(int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));
|
||||
|
||||
/* KLUGE - unmap_sg calls unmap_page for each mapped page */
|
||||
/* KLUGE - unmap_sg calls unmap_phys for each mapped page */
|
||||
min = ioc->usingle_calls - ioc->usg_calls;
|
||||
max = ioc->usingle_pages - ioc->usg_pages;
|
||||
seq_printf(m, "pci_unmap_single: %8ld calls %8ld pages (avg %d/1000)\n",
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
static inline unsigned int
|
||||
iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
|
||||
unsigned long hint,
|
||||
void (*iommu_io_pdir_entry)(__le64 *, space_t, unsigned long,
|
||||
void (*iommu_io_pdir_entry)(__le64 *, space_t, phys_addr_t,
|
||||
unsigned long))
|
||||
{
|
||||
struct scatterlist *dma_sg = startsg; /* pointer to current DMA */
|
||||
@@ -28,7 +28,7 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
|
||||
dma_sg--;
|
||||
|
||||
while (nents-- > 0) {
|
||||
unsigned long vaddr;
|
||||
phys_addr_t paddr;
|
||||
long size;
|
||||
|
||||
DBG_RUN_SG(" %d : %08lx %p/%05x\n", nents,
|
||||
@@ -67,7 +67,7 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
|
||||
|
||||
BUG_ON(pdirp == NULL);
|
||||
|
||||
vaddr = (unsigned long)sg_virt(startsg);
|
||||
paddr = sg_phys(startsg);
|
||||
sg_dma_len(dma_sg) += startsg->length;
|
||||
size = startsg->length + dma_offset;
|
||||
dma_offset = 0;
|
||||
@@ -76,8 +76,8 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
|
||||
#endif
|
||||
do {
|
||||
iommu_io_pdir_entry(pdirp, KERNEL_SPACE,
|
||||
vaddr, hint);
|
||||
vaddr += IOVP_SIZE;
|
||||
paddr, hint);
|
||||
paddr += IOVP_SIZE;
|
||||
size -= IOVP_SIZE;
|
||||
pdirp++;
|
||||
} while(unlikely(size > 0));
|
||||
|
||||
@@ -532,7 +532,7 @@ typedef unsigned long space_t;
|
||||
* sba_io_pdir_entry - fill in one IO PDIR entry
|
||||
* @pdir_ptr: pointer to IO PDIR entry
|
||||
* @sid: process Space ID - currently only support KERNEL_SPACE
|
||||
* @vba: Virtual CPU address of buffer to map
|
||||
* @pba: Physical address of buffer to map
|
||||
* @hint: DMA hint set to use for this mapping
|
||||
*
|
||||
* SBA Mapping Routine
|
||||
@@ -569,20 +569,17 @@ typedef unsigned long space_t;
|
||||
*/
|
||||
|
||||
static void
|
||||
sba_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
|
||||
sba_io_pdir_entry(__le64 *pdir_ptr, space_t sid, phys_addr_t pba,
|
||||
unsigned long hint)
|
||||
{
|
||||
u64 pa; /* physical address */
|
||||
register unsigned ci; /* coherent index */
|
||||
|
||||
pa = lpa(vba);
|
||||
pa &= IOVP_MASK;
|
||||
asm("lci 0(%1), %0" : "=r" (ci) : "r" (phys_to_virt(pba)));
|
||||
pba &= IOVP_MASK;
|
||||
pba |= (ci >> PAGE_SHIFT) & 0xff; /* move CI (8 bits) into lowest byte */
|
||||
|
||||
asm("lci 0(%1), %0" : "=r" (ci) : "r" (vba));
|
||||
pa |= (ci >> PAGE_SHIFT) & 0xff; /* move CI (8 bits) into lowest byte */
|
||||
|
||||
pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */
|
||||
*pdir_ptr = cpu_to_le64(pa); /* swap and store into I/O Pdir */
|
||||
pba |= SBA_PDIR_VALID_BIT; /* set "valid" bit */
|
||||
*pdir_ptr = cpu_to_le64(pba); /* swap and store into I/O Pdir */
|
||||
|
||||
/*
|
||||
* If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set
|
||||
@@ -707,7 +704,7 @@ static int sba_dma_supported( struct device *dev, u64 mask)
|
||||
* See Documentation/core-api/dma-api-howto.rst
|
||||
*/
|
||||
static dma_addr_t
|
||||
sba_map_single(struct device *dev, void *addr, size_t size,
|
||||
sba_map_single(struct device *dev, phys_addr_t addr, size_t size,
|
||||
enum dma_data_direction direction)
|
||||
{
|
||||
struct ioc *ioc;
|
||||
@@ -722,7 +719,7 @@ sba_map_single(struct device *dev, void *addr, size_t size,
|
||||
return DMA_MAPPING_ERROR;
|
||||
|
||||
/* save offset bits */
|
||||
offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;
|
||||
offset = offset_in_page(addr);
|
||||
|
||||
/* round up to nearest IOVP_SIZE */
|
||||
size = (size + offset + ~IOVP_MASK) & IOVP_MASK;
|
||||
@@ -739,13 +736,13 @@ sba_map_single(struct device *dev, void *addr, size_t size,
|
||||
pide = sba_alloc_range(ioc, dev, size);
|
||||
iovp = (dma_addr_t) pide << IOVP_SHIFT;
|
||||
|
||||
DBG_RUN("%s() 0x%p -> 0x%lx\n",
|
||||
__func__, addr, (long) iovp | offset);
|
||||
DBG_RUN("%s() 0x%pa -> 0x%lx\n",
|
||||
__func__, &addr, (long) iovp | offset);
|
||||
|
||||
pdir_start = &(ioc->pdir_base[pide]);
|
||||
|
||||
while (size > 0) {
|
||||
sba_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long) addr, 0);
|
||||
sba_io_pdir_entry(pdir_start, KERNEL_SPACE, addr, 0);
|
||||
|
||||
DBG_RUN(" pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n",
|
||||
pdir_start,
|
||||
@@ -778,17 +775,18 @@ sba_map_single(struct device *dev, void *addr, size_t size,
|
||||
|
||||
|
||||
static dma_addr_t
|
||||
sba_map_page(struct device *dev, struct page *page, unsigned long offset,
|
||||
size_t size, enum dma_data_direction direction,
|
||||
unsigned long attrs)
|
||||
sba_map_phys(struct device *dev, phys_addr_t phys, size_t size,
|
||||
enum dma_data_direction direction, unsigned long attrs)
|
||||
{
|
||||
return sba_map_single(dev, page_address(page) + offset, size,
|
||||
direction);
|
||||
if (unlikely(attrs & DMA_ATTR_MMIO))
|
||||
return DMA_MAPPING_ERROR;
|
||||
|
||||
return sba_map_single(dev, phys, size, direction);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* sba_unmap_page - unmap one IOVA and free resources
|
||||
* sba_unmap_phys - unmap one IOVA and free resources
|
||||
* @dev: instance of PCI owned by the driver that's asking.
|
||||
* @iova: IOVA of driver buffer previously mapped.
|
||||
* @size: number of bytes mapped in driver buffer.
|
||||
@@ -798,7 +796,7 @@ sba_map_page(struct device *dev, struct page *page, unsigned long offset,
|
||||
* See Documentation/core-api/dma-api-howto.rst
|
||||
*/
|
||||
static void
|
||||
sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
|
||||
sba_unmap_phys(struct device *dev, dma_addr_t iova, size_t size,
|
||||
enum dma_data_direction direction, unsigned long attrs)
|
||||
{
|
||||
struct ioc *ioc;
|
||||
@@ -893,7 +891,7 @@ static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle
|
||||
|
||||
if (ret) {
|
||||
memset(ret, 0, size);
|
||||
*dma_handle = sba_map_single(hwdev, ret, size, 0);
|
||||
*dma_handle = sba_map_single(hwdev, virt_to_phys(ret), size, 0);
|
||||
}
|
||||
|
||||
return ret;
|
||||
@@ -914,7 +912,7 @@ static void
|
||||
sba_free(struct device *hwdev, size_t size, void *vaddr,
|
||||
dma_addr_t dma_handle, unsigned long attrs)
|
||||
{
|
||||
sba_unmap_page(hwdev, dma_handle, size, 0, 0);
|
||||
sba_unmap_phys(hwdev, dma_handle, size, 0, 0);
|
||||
free_pages((unsigned long) vaddr, get_order(size));
|
||||
}
|
||||
|
||||
@@ -962,7 +960,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
|
||||
|
||||
/* Fast path single entry scatterlists. */
|
||||
if (nents == 1) {
|
||||
sg_dma_address(sglist) = sba_map_single(dev, sg_virt(sglist),
|
||||
sg_dma_address(sglist) = sba_map_single(dev, sg_phys(sglist),
|
||||
sglist->length, direction);
|
||||
sg_dma_len(sglist) = sglist->length;
|
||||
return 1;
|
||||
@@ -1061,7 +1059,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
|
||||
|
||||
while (nents && sg_dma_len(sglist)) {
|
||||
|
||||
sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist),
|
||||
sba_unmap_phys(dev, sg_dma_address(sglist), sg_dma_len(sglist),
|
||||
direction, 0);
|
||||
#ifdef SBA_COLLECT_STATS
|
||||
ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
|
||||
@@ -1085,8 +1083,8 @@ static const struct dma_map_ops sba_ops = {
|
||||
.dma_supported = sba_dma_supported,
|
||||
.alloc = sba_alloc,
|
||||
.free = sba_free,
|
||||
.map_page = sba_map_page,
|
||||
.unmap_page = sba_unmap_page,
|
||||
.map_phys = sba_map_phys,
|
||||
.unmap_phys = sba_unmap_phys,
|
||||
.map_sg = sba_map_sg,
|
||||
.unmap_sg = sba_unmap_sg,
|
||||
.get_sgtable = dma_common_get_sgtable,
|
||||
|
||||
@@ -122,6 +122,24 @@ config XEN_PCIDEV_FRONTEND
|
||||
config PCI_ATS
|
||||
bool
|
||||
|
||||
config PCI_IDE
|
||||
bool
|
||||
|
||||
config PCI_TSM
|
||||
bool "PCI TSM: Device security protocol support"
|
||||
select PCI_IDE
|
||||
select PCI_DOE
|
||||
select TSM
|
||||
help
|
||||
The TEE (Trusted Execution Environment) Device Interface
|
||||
Security Protocol (TDISP) defines a "TSM" as a platform agent
|
||||
that manages device authentication, link encryption, link
|
||||
integrity protection, and assignment of PCI device functions
|
||||
(virtual or physical) to confidential computing VMs that can
|
||||
access (DMA) guest private memory.
|
||||
|
||||
Enable a platform TSM driver to use this capability.
|
||||
|
||||
config PCI_DOE
|
||||
bool "Enable PCI Data Object Exchange (DOE) support"
|
||||
help
|
||||
|
||||
@@ -34,6 +34,8 @@ obj-$(CONFIG_PCI_P2PDMA) += p2pdma.o
|
||||
obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
|
||||
obj-$(CONFIG_VGA_ARB) += vgaarb.o
|
||||
obj-$(CONFIG_PCI_DOE) += doe.o
|
||||
obj-$(CONFIG_PCI_IDE) += ide.o
|
||||
obj-$(CONFIG_PCI_TSM) += tsm.o
|
||||
obj-$(CONFIG_PCI_DYNAMIC_OF_NODES) += of_property.o
|
||||
obj-$(CONFIG_PCI_NPEM) += npem.o
|
||||
obj-$(CONFIG_PCIE_TPH) += tph.o
|
||||
|
||||
@@ -8,6 +8,7 @@
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cleanup.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/ioport.h>
@@ -435,6 +436,27 @@ static int __pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void
	return ret;
}

static int __pci_walk_bus_reverse(struct pci_bus *top,
				  int (*cb)(struct pci_dev *, void *),
				  void *userdata)
{
	struct pci_dev *dev;
	int ret = 0;

	list_for_each_entry_reverse(dev, &top->devices, bus_list) {
		if (dev->subordinate) {
			ret = __pci_walk_bus_reverse(dev->subordinate, cb,
						     userdata);
			if (ret)
				break;
		}
		ret = cb(dev, userdata);
		if (ret)
			break;
	}
	return ret;
}

/**
 * pci_walk_bus - walk devices on/under bus, calling callback.
 * @top: bus whose devices should be walked
@@ -456,6 +478,23 @@ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void
}
EXPORT_SYMBOL_GPL(pci_walk_bus);

/**
 * pci_walk_bus_reverse - walk devices on/under bus, calling callback.
 * @top: bus whose devices should be walked
 * @cb: callback to be called for each device found
 * @userdata: arbitrary pointer to be passed to callback
 *
 * Same semantics as pci_walk_bus(), but walks the bus in reverse order.
 */
void pci_walk_bus_reverse(struct pci_bus *top,
			  int (*cb)(struct pci_dev *, void *), void *userdata)
{
	down_read(&pci_bus_sem);
	__pci_walk_bus_reverse(top, cb, userdata);
	up_read(&pci_bus_sem);
}
EXPORT_SYMBOL_GPL(pci_walk_bus_reverse);
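For callers, the reverse walk mirrors pci_walk_bus(). A minimal sketch of a teardown-style traversal, with a hypothetical callback that is not part of this patch:

#include <linux/pci.h>

/* Hypothetical callback: quiesce one device; returning 0 keeps walking. */
static int quiesce_one(struct pci_dev *pdev, void *userdata)
{
	pci_dbg(pdev, "quiescing\n");
	return 0;
}

static void quiesce_subtree(struct pci_bus *bus)
{
	/*
	 * Children are visited before the bridges above them, which is
	 * the natural order for teardown (the forward walk is the
	 * natural order for bring-up).
	 */
	pci_walk_bus_reverse(bus, quiesce_one, NULL);
}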

void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
{
	lockdep_assert_held(&pci_bus_sem);

@@ -24,8 +24,6 @@

#include "pci.h"

#define PCI_DOE_FEATURE_DISCOVERY	0

/* Timeout of 1 second from 6.30.2 Operation, PCI Spec r6.0 */
#define PCI_DOE_TIMEOUT HZ
#define PCI_DOE_POLL_INTERVAL	(PCI_DOE_TIMEOUT / 128)

815	drivers/pci/ide.c	Normal file
@@ -0,0 +1,815 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2024-2025 Intel Corporation. All rights reserved. */

/* PCIe r7.0 section 6.33 Integrity & Data Encryption (IDE) */

#define dev_fmt(fmt) "PCI/IDE: " fmt
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/pci.h>
#include <linux/pci-ide.h>
#include <linux/pci_regs.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/tsm.h>

#include "pci.h"

static int __sel_ide_offset(u16 ide_cap, u8 nr_link_ide, u8 stream_index,
			    u8 nr_ide_mem)
{
	u32 offset = ide_cap + PCI_IDE_LINK_STREAM_0 +
		     nr_link_ide * PCI_IDE_LINK_BLOCK_SIZE;

	/*
	 * Assume a constant number of address association resources per
	 * stream index
	 */
	return offset + stream_index * PCI_IDE_SEL_BLOCK_SIZE(nr_ide_mem);
}

static int sel_ide_offset(struct pci_dev *pdev,
			  struct pci_ide_partner *settings)
{
	return __sel_ide_offset(pdev->ide_cap, pdev->nr_link_ide,
				settings->stream_index, pdev->nr_ide_mem);
}

static bool reserve_stream_index(struct pci_dev *pdev, u8 idx)
{
	int ret;

	ret = ida_alloc_range(&pdev->ide_stream_ida, idx, idx, GFP_KERNEL);
	return ret >= 0;
}

static bool reserve_stream_id(struct pci_host_bridge *hb, u8 id)
{
	int ret;

	ret = ida_alloc_range(&hb->ide_stream_ids_ida, id, id, GFP_KERNEL);
	return ret >= 0;
}

static bool claim_stream(struct pci_host_bridge *hb, u8 stream_id,
			 struct pci_dev *pdev, u8 stream_idx)
{
	dev_info(&hb->dev, "Stream ID %d active at init\n", stream_id);
	if (!reserve_stream_id(hb, stream_id)) {
		dev_info(&hb->dev, "Failed to claim %s Stream ID %d\n",
			 stream_id == PCI_IDE_RESERVED_STREAM_ID ? "reserved" :
								   "active",
			 stream_id);
		return false;
	}

	/* No stream index to reserve in the Link IDE case */
	if (!pdev)
		return true;

	if (!reserve_stream_index(pdev, stream_idx)) {
		pci_info(pdev, "Failed to claim active Selective Stream %d\n",
			 stream_idx);
		return false;
	}

	return true;
}

void pci_ide_init(struct pci_dev *pdev)
{
	struct pci_host_bridge *hb = pci_find_host_bridge(pdev->bus);
	u16 nr_link_ide, nr_ide_mem, nr_streams;
	u16 ide_cap;
	u32 val;

	/*
	 * Unconditionally init so that ida idle state is consistent with
	 * pdev->ide_cap.
	 */
	ida_init(&pdev->ide_stream_ida);

	if (!pci_is_pcie(pdev))
		return;

	ide_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_IDE);
	if (!ide_cap)
		return;

	pci_read_config_dword(pdev, ide_cap + PCI_IDE_CAP, &val);
	if ((val & PCI_IDE_CAP_SELECTIVE) == 0)
		return;

	/*
	 * Require endpoint IDE capability to be paired with Root Port IDE
	 * capability.
	 */
	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ENDPOINT) {
		struct pci_dev *rp = pcie_find_root_port(pdev);

		if (!rp->ide_cap)
			return;
	}

	pdev->ide_cfg = FIELD_GET(PCI_IDE_CAP_SEL_CFG, val);
	pdev->ide_tee_limit = FIELD_GET(PCI_IDE_CAP_TEE_LIMITED, val);

	if (val & PCI_IDE_CAP_LINK)
		nr_link_ide = 1 + FIELD_GET(PCI_IDE_CAP_LINK_TC_NUM, val);
	else
		nr_link_ide = 0;

	nr_ide_mem = 0;
	nr_streams = 1 + FIELD_GET(PCI_IDE_CAP_SEL_NUM, val);
	for (u16 i = 0; i < nr_streams; i++) {
		int pos = __sel_ide_offset(ide_cap, nr_link_ide, i, nr_ide_mem);
		int nr_assoc;
		u32 val;
		u8 id;

		pci_read_config_dword(pdev, pos + PCI_IDE_SEL_CAP, &val);

		/*
		 * Let's not entertain streams that do not have a constant
		 * number of address association blocks
		 */
		nr_assoc = FIELD_GET(PCI_IDE_SEL_CAP_ASSOC_NUM, val);
		if (i && (nr_assoc != nr_ide_mem)) {
			pci_info(pdev, "Unsupported Selective Stream %d capability, SKIP the rest\n", i);
			nr_streams = i;
			break;
		}

		nr_ide_mem = nr_assoc;

		/*
		 * Claim Stream IDs and Selective Stream blocks that are
		 * already active on the device
		 */
		pci_read_config_dword(pdev, pos + PCI_IDE_SEL_CTL, &val);
		id = FIELD_GET(PCI_IDE_SEL_CTL_ID, val);
		if ((val & PCI_IDE_SEL_CTL_EN) &&
		    !claim_stream(hb, id, pdev, i))
			return;
	}

	/* Reserve link stream-ids that are already active on the device */
	for (u16 i = 0; i < nr_link_ide; ++i) {
		int pos = ide_cap + PCI_IDE_LINK_STREAM_0 + i * PCI_IDE_LINK_BLOCK_SIZE;
		u8 id;

		pci_read_config_dword(pdev, pos + PCI_IDE_LINK_CTL_0, &val);
		id = FIELD_GET(PCI_IDE_LINK_CTL_ID, val);
		if ((val & PCI_IDE_LINK_CTL_EN) &&
		    !claim_stream(hb, id, NULL, -1))
			return;
	}

	for (u16 i = 0; i < nr_streams; i++) {
		int pos = __sel_ide_offset(ide_cap, nr_link_ide, i, nr_ide_mem);

		/* Read-modify-write the control register, not the capability */
		pci_read_config_dword(pdev, pos + PCI_IDE_SEL_CTL, &val);
		if (val & PCI_IDE_SEL_CTL_EN)
			continue;
		val &= ~PCI_IDE_SEL_CTL_ID;
		val |= FIELD_PREP(PCI_IDE_SEL_CTL_ID, PCI_IDE_RESERVED_STREAM_ID);
		pci_write_config_dword(pdev, pos + PCI_IDE_SEL_CTL, val);
	}

	for (u16 i = 0; i < nr_link_ide; ++i) {
		int pos = ide_cap + PCI_IDE_LINK_STREAM_0 +
			  i * PCI_IDE_LINK_BLOCK_SIZE;

		pci_read_config_dword(pdev, pos, &val);
		if (val & PCI_IDE_LINK_CTL_EN)
			continue;
		val &= ~PCI_IDE_LINK_CTL_ID;
		val |= FIELD_PREP(PCI_IDE_LINK_CTL_ID, PCI_IDE_RESERVED_STREAM_ID);
		pci_write_config_dword(pdev, pos, val);
	}

	pdev->ide_cap = ide_cap;
	pdev->nr_link_ide = nr_link_ide;
	pdev->nr_sel_ide = nr_streams;
	pdev->nr_ide_mem = nr_ide_mem;
}

struct stream_index {
	struct ida *ida;
	u8 stream_index;
};

static void free_stream_index(struct stream_index *stream)
{
	ida_free(stream->ida, stream->stream_index);
}

DEFINE_FREE(free_stream, struct stream_index *, if (_T) free_stream_index(_T))
static struct stream_index *alloc_stream_index(struct ida *ida, u16 max,
					       struct stream_index *stream)
{
	int id;

	if (!max)
		return NULL;

	id = ida_alloc_max(ida, max - 1, GFP_KERNEL);
	if (id < 0)
		return NULL;

	*stream = (struct stream_index) {
		.ida = ida,
		.stream_index = id,
	};
	return stream;
}

/**
 * pci_ide_stream_alloc() - Reserve stream indices and probe for settings
 * @pdev: IDE capable PCIe Endpoint Physical Function
 *
 * Retrieve the Requester ID range of @pdev for programming its Root
 * Port IDE RID Association registers, and conversely retrieve the
 * Requester ID of the Root Port for programming @pdev's IDE RID
 * Association registers.
 *
 * Allocate a Selective IDE Stream Register Block instance per port.
 *
 * Allocate a platform stream resource from the associated host bridge.
 * Retrieve stream association parameters for Requester ID range and
 * address range restrictions for the stream.
 */
struct pci_ide *pci_ide_stream_alloc(struct pci_dev *pdev)
{
	/* EP, RP, + HB Stream allocation */
	struct stream_index __stream[PCI_IDE_HB + 1];
	struct pci_bus_region pref_assoc = { 0, -1 };
	struct pci_bus_region mem_assoc = { 0, -1 };
	struct resource *mem, *pref;
	struct pci_host_bridge *hb;
	struct pci_dev *rp, *br;
	int num_vf, rid_end;

	if (!pci_is_pcie(pdev))
		return NULL;

	if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ENDPOINT)
		return NULL;

	if (!pdev->ide_cap)
		return NULL;

	struct pci_ide *ide __free(kfree) = kzalloc(sizeof(*ide), GFP_KERNEL);
	if (!ide)
		return NULL;

	hb = pci_find_host_bridge(pdev->bus);
	struct stream_index *hb_stream __free(free_stream) = alloc_stream_index(
		&hb->ide_stream_ida, hb->nr_ide_streams, &__stream[PCI_IDE_HB]);
	if (!hb_stream)
		return NULL;

	rp = pcie_find_root_port(pdev);
	struct stream_index *rp_stream __free(free_stream) = alloc_stream_index(
		&rp->ide_stream_ida, rp->nr_sel_ide, &__stream[PCI_IDE_RP]);
	if (!rp_stream)
		return NULL;

	struct stream_index *ep_stream __free(free_stream) = alloc_stream_index(
		&pdev->ide_stream_ida, pdev->nr_sel_ide, &__stream[PCI_IDE_EP]);
	if (!ep_stream)
		return NULL;

	/* for SR-IOV case, cover all VFs */
	num_vf = pci_num_vf(pdev);
	if (num_vf)
		rid_end = PCI_DEVID(pci_iov_virtfn_bus(pdev, num_vf),
				    pci_iov_virtfn_devfn(pdev, num_vf));
	else
		rid_end = pci_dev_id(pdev);

	br = pci_upstream_bridge(pdev);
	if (!br)
		return NULL;

	/*
	 * Check if the device consumes memory and/or prefetch-memory. Setup
	 * downstream address association ranges for each.
	 */
	mem = pci_resource_n(br, PCI_BRIDGE_MEM_WINDOW);
	pref = pci_resource_n(br, PCI_BRIDGE_PREF_MEM_WINDOW);
	if (resource_assigned(mem))
		pcibios_resource_to_bus(br->bus, &mem_assoc, mem);
	if (resource_assigned(pref))
		pcibios_resource_to_bus(br->bus, &pref_assoc, pref);

	*ide = (struct pci_ide) {
		.pdev = pdev,
		.partner = {
			[PCI_IDE_EP] = {
				.rid_start = pci_dev_id(rp),
				.rid_end = pci_dev_id(rp),
				.stream_index = no_free_ptr(ep_stream)->stream_index,
				/* Disable upstream address association */
				.mem_assoc = { 0, -1 },
				.pref_assoc = { 0, -1 },
			},
			[PCI_IDE_RP] = {
				.rid_start = pci_dev_id(pdev),
				.rid_end = rid_end,
				.stream_index = no_free_ptr(rp_stream)->stream_index,
				.mem_assoc = mem_assoc,
				.pref_assoc = pref_assoc,
			},
		},
		.host_bridge_stream = no_free_ptr(hb_stream)->stream_index,
		.stream_id = -1,
	};

	return_ptr(ide);
}
EXPORT_SYMBOL_GPL(pci_ide_stream_alloc);

/**
 * pci_ide_stream_free() - unwind pci_ide_stream_alloc()
 * @ide: idle IDE settings descriptor
 *
 * Free all of the stream index (register block) allocations acquired by
 * pci_ide_stream_alloc(). The stream represented by @ide is assumed to
 * be unregistered and not instantiated in any device.
 */
void pci_ide_stream_free(struct pci_ide *ide)
{
	struct pci_dev *pdev = ide->pdev;
	struct pci_dev *rp = pcie_find_root_port(pdev);
	struct pci_host_bridge *hb = pci_find_host_bridge(pdev->bus);

	ida_free(&pdev->ide_stream_ida, ide->partner[PCI_IDE_EP].stream_index);
	ida_free(&rp->ide_stream_ida, ide->partner[PCI_IDE_RP].stream_index);
	ida_free(&hb->ide_stream_ida, ide->host_bridge_stream);
	kfree(ide);
}
EXPORT_SYMBOL_GPL(pci_ide_stream_free);

/**
 * pci_ide_stream_release() - unwind and release an @ide context
 * @ide: partially or fully registered IDE settings descriptor
 *
 * In support of automatic cleanup of IDE setup routines perform IDE
 * teardown in expected reverse order of setup and with respect to which
 * aspects of IDE setup have successfully completed.
 *
 * Be careful that setup order mirrors this shutdown order. Otherwise,
 * open code releasing the IDE context.
 */
void pci_ide_stream_release(struct pci_ide *ide)
{
	struct pci_dev *pdev = ide->pdev;
	struct pci_dev *rp = pcie_find_root_port(pdev);

	if (ide->partner[PCI_IDE_RP].enable)
		pci_ide_stream_disable(rp, ide);

	if (ide->partner[PCI_IDE_EP].enable)
		pci_ide_stream_disable(pdev, ide);

	if (ide->tsm_dev)
		tsm_ide_stream_unregister(ide);

	if (ide->partner[PCI_IDE_RP].setup)
		pci_ide_stream_teardown(rp, ide);

	if (ide->partner[PCI_IDE_EP].setup)
		pci_ide_stream_teardown(pdev, ide);

	if (ide->name)
		pci_ide_stream_unregister(ide);

	pci_ide_stream_free(ide);
}
EXPORT_SYMBOL_GPL(pci_ide_stream_release);
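Read in reverse, pci_ide_stream_release() documents the expected setup order. A minimal sketch of that lifecycle from a hypothetical caller's point of view (error unwinding is delegated to release; the stream_id assignment here is an assumption, the real value comes from the platform TSM):

static int example_establish_stream(struct pci_dev *ep)
{
	struct pci_dev *rp = pcie_find_root_port(ep);
	struct pci_ide *ide;
	int rc;

	ide = pci_ide_stream_alloc(ep);		/* reserve stream indices */
	if (!ide)
		return -ENXIO;

	ide->stream_id = 0;			/* assumption: TSM-chosen ID */
	rc = pci_ide_stream_register(ide);	/* claim ID, publish in sysfs */
	if (rc)
		goto err;

	pci_ide_stream_setup(ep, ide);		/* program EP registers */
	pci_ide_stream_setup(rp, ide);		/* program RP registers */

	/* ... TSM key programming would happen here ... */

	rc = pci_ide_stream_enable(ep, ide);
	if (rc)
		goto err;
	rc = pci_ide_stream_enable(rp, ide);
	if (rc)
		goto err;
	return 0;
err:
	pci_ide_stream_release(ide);		/* unwinds whatever completed */
	return rc;
}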

struct pci_ide_stream_id {
	struct pci_host_bridge *hb;
	u8 stream_id;
};

static struct pci_ide_stream_id *
request_stream_id(struct pci_host_bridge *hb, u8 stream_id,
		  struct pci_ide_stream_id *sid)
{
	if (!reserve_stream_id(hb, stream_id))
		return NULL;

	*sid = (struct pci_ide_stream_id) {
		.hb = hb,
		.stream_id = stream_id,
	};

	return sid;
}
DEFINE_FREE(free_stream_id, struct pci_ide_stream_id *,
	    if (_T) ida_free(&_T->hb->ide_stream_ids_ida, _T->stream_id))

/**
 * pci_ide_stream_register() - Prepare to activate an IDE Stream
 * @ide: IDE settings descriptor
 *
 * After a Stream ID has been acquired for @ide, record the presence of
 * the stream in sysfs. The expectation is that @ide is immutable while
 * registered.
 */
int pci_ide_stream_register(struct pci_ide *ide)
{
	struct pci_dev *pdev = ide->pdev;
	struct pci_host_bridge *hb = pci_find_host_bridge(pdev->bus);
	struct pci_ide_stream_id __sid;
	u8 ep_stream, rp_stream;
	int rc;

	if (ide->stream_id < 0 || ide->stream_id > U8_MAX) {
		pci_err(pdev, "Setup fail: Invalid Stream ID: %d\n", ide->stream_id);
		return -ENXIO;
	}

	struct pci_ide_stream_id *sid __free(free_stream_id) =
		request_stream_id(hb, ide->stream_id, &__sid);
	if (!sid) {
		pci_err(pdev, "Setup fail: Stream ID %d in use\n", ide->stream_id);
		return -EBUSY;
	}

	ep_stream = ide->partner[PCI_IDE_EP].stream_index;
	rp_stream = ide->partner[PCI_IDE_RP].stream_index;
	const char *name __free(kfree) = kasprintf(GFP_KERNEL, "stream%d.%d.%d",
						   ide->host_bridge_stream,
						   rp_stream, ep_stream);
	if (!name)
		return -ENOMEM;

	rc = sysfs_create_link(&hb->dev.kobj, &pdev->dev.kobj, name);
	if (rc)
		return rc;

	ide->name = no_free_ptr(name);

	/* Stream ID reservation recorded in @ide is now successfully registered */
	retain_and_null_ptr(sid);

	return 0;
}
EXPORT_SYMBOL_GPL(pci_ide_stream_register);

/**
 * pci_ide_stream_unregister() - unwind pci_ide_stream_register()
 * @ide: idle IDE settings descriptor
 *
 * In preparation for freeing @ide, remove sysfs enumeration for the
 * stream.
 */
void pci_ide_stream_unregister(struct pci_ide *ide)
{
	struct pci_dev *pdev = ide->pdev;
	struct pci_host_bridge *hb = pci_find_host_bridge(pdev->bus);

	sysfs_remove_link(&hb->dev.kobj, ide->name);
	kfree(ide->name);
	ida_free(&hb->ide_stream_ids_ida, ide->stream_id);
	ide->name = NULL;
}
EXPORT_SYMBOL_GPL(pci_ide_stream_unregister);

static int pci_ide_domain(struct pci_dev *pdev)
{
	if (pdev->fm_enabled)
		return pci_domain_nr(pdev->bus);
	return 0;
}

struct pci_ide_partner *pci_ide_to_settings(struct pci_dev *pdev, struct pci_ide *ide)
{
	if (!pci_is_pcie(pdev)) {
		pci_warn_once(pdev, "not a PCIe device\n");
		return NULL;
	}

	switch (pci_pcie_type(pdev)) {
	case PCI_EXP_TYPE_ENDPOINT:
		if (pdev != ide->pdev) {
			pci_warn_once(pdev, "setup expected Endpoint: %s\n", pci_name(ide->pdev));
			return NULL;
		}
		return &ide->partner[PCI_IDE_EP];
	case PCI_EXP_TYPE_ROOT_PORT: {
		struct pci_dev *rp = pcie_find_root_port(ide->pdev);

		if (pdev != rp) {
			pci_warn_once(pdev, "setup expected Root Port: %s\n",
				      pci_name(rp));
			return NULL;
		}
		return &ide->partner[PCI_IDE_RP];
	}
	default:
		pci_warn_once(pdev, "invalid device type\n");
		return NULL;
	}
}
EXPORT_SYMBOL_GPL(pci_ide_to_settings);

static void set_ide_sel_ctl(struct pci_dev *pdev, struct pci_ide *ide,
			    struct pci_ide_partner *settings, int pos,
			    bool enable)
{
	u32 val = FIELD_PREP(PCI_IDE_SEL_CTL_ID, ide->stream_id) |
		  FIELD_PREP(PCI_IDE_SEL_CTL_DEFAULT, settings->default_stream) |
		  FIELD_PREP(PCI_IDE_SEL_CTL_CFG_EN, pdev->ide_cfg) |
		  FIELD_PREP(PCI_IDE_SEL_CTL_TEE_LIMITED, pdev->ide_tee_limit) |
		  FIELD_PREP(PCI_IDE_SEL_CTL_EN, enable);

	pci_write_config_dword(pdev, pos + PCI_IDE_SEL_CTL, val);
}

#define SEL_ADDR1_LOWER GENMASK(31, 20)
#define SEL_ADDR_UPPER GENMASK_ULL(63, 32)
#define PREP_PCI_IDE_SEL_ADDR1(base, limit)                  \
	(FIELD_PREP(PCI_IDE_SEL_ADDR_1_VALID, 1) |           \
	 FIELD_PREP(PCI_IDE_SEL_ADDR_1_BASE_LOW,             \
		    FIELD_GET(SEL_ADDR1_LOWER, (base))) |    \
	 FIELD_PREP(PCI_IDE_SEL_ADDR_1_LIMIT_LOW,            \
		    FIELD_GET(SEL_ADDR1_LOWER, (limit))))

static void mem_assoc_to_regs(struct pci_bus_region *region,
			      struct pci_ide_regs *regs, int idx)
{
	/* convert to u64 range for bitfield size checks */
	struct range r = { region->start, region->end };

	regs->addr[idx].assoc1 = PREP_PCI_IDE_SEL_ADDR1(r.start, r.end);
	regs->addr[idx].assoc2 = FIELD_GET(SEL_ADDR_UPPER, r.end);
	regs->addr[idx].assoc3 = FIELD_GET(SEL_ADDR_UPPER, r.start);
}

/**
 * pci_ide_stream_to_regs() - convert IDE settings to association register values
 * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
 * @ide: registered IDE settings descriptor
 * @regs: output register values
 */
static void pci_ide_stream_to_regs(struct pci_dev *pdev, struct pci_ide *ide,
				   struct pci_ide_regs *regs)
{
	struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
	int assoc_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (!settings)
		return;

	regs->rid1 = FIELD_PREP(PCI_IDE_SEL_RID_1_LIMIT, settings->rid_end);

	regs->rid2 = FIELD_PREP(PCI_IDE_SEL_RID_2_VALID, 1) |
		     FIELD_PREP(PCI_IDE_SEL_RID_2_BASE, settings->rid_start) |
		     FIELD_PREP(PCI_IDE_SEL_RID_2_SEG, pci_ide_domain(pdev));

	if (pdev->nr_ide_mem && pci_bus_region_size(&settings->mem_assoc)) {
		mem_assoc_to_regs(&settings->mem_assoc, regs, assoc_idx);
		assoc_idx++;
	}

	if (pdev->nr_ide_mem > assoc_idx &&
	    pci_bus_region_size(&settings->pref_assoc)) {
		mem_assoc_to_regs(&settings->pref_assoc, regs, assoc_idx);
		assoc_idx++;
	}

	regs->nr_addr = assoc_idx;
}

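Concretely, mem_assoc_to_regs() splits a 64-bit bus region across the three address association registers: bits 31:20 of base and limit land in the ADDR_1 fields, while the upper 32 bits of limit and base fill ADDR_2 and ADDR_3. A hedged worked example with made-up addresses (register field positions are defined by the PCI_IDE_SEL_ADDR_* masks and omitted here):

/* Illustrative only: decomposing a region { 0x80000000, 0x9fffffff } */
static void example_addr_split(void)
{
	u64 base = 0x80000000ULL, limit = 0x9fffffffULL;

	u32 base_low  = (base  >> 20) & 0xfff; /* 0x800 -> ADDR_1 base field  */
	u32 limit_low = (limit >> 20) & 0xfff; /* 0x9ff -> ADDR_1 limit field */
	u32 assoc2    = upper_32_bits(limit);  /* 0x0   -> ADDR_2 */
	u32 assoc3    = upper_32_bits(base);   /* 0x0   -> ADDR_3 */

	(void)base_low; (void)limit_low; (void)assoc2; (void)assoc3;
}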
/**
 * pci_ide_stream_setup() - program settings to Selective IDE Stream registers
 * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
 * @ide: registered IDE settings descriptor
 *
 * When @pdev is a PCI_EXP_TYPE_ENDPOINT then the PCI_IDE_EP partner
 * settings are written to @pdev's Selective IDE Stream register block,
 * and when @pdev is a PCI_EXP_TYPE_ROOT_PORT, the PCI_IDE_RP settings
 * are selected.
 */
void pci_ide_stream_setup(struct pci_dev *pdev, struct pci_ide *ide)
{
	struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
	struct pci_ide_regs regs;
	int pos;

	if (!settings)
		return;

	pci_ide_stream_to_regs(pdev, ide, &regs);

	pos = sel_ide_offset(pdev, settings);

	pci_write_config_dword(pdev, pos + PCI_IDE_SEL_RID_1, regs.rid1);
	pci_write_config_dword(pdev, pos + PCI_IDE_SEL_RID_2, regs.rid2);

	for (int i = 0; i < regs.nr_addr; i++) {
		pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_1(i),
				       regs.addr[i].assoc1);
		pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_2(i),
				       regs.addr[i].assoc2);
		pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_3(i),
				       regs.addr[i].assoc3);
	}

	/* clear extra unused address association blocks */
	for (int i = regs.nr_addr; i < pdev->nr_ide_mem; i++) {
		pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_1(i), 0);
		pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_2(i), 0);
		pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_3(i), 0);
	}

	/*
	 * Set up the control register early for devices that expect
	 * stream_id to be set during key programming.
	 */
	set_ide_sel_ctl(pdev, ide, settings, pos, false);
	settings->setup = 1;
}
EXPORT_SYMBOL_GPL(pci_ide_stream_setup);

/**
 * pci_ide_stream_teardown() - disable the stream and clear all settings
 * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
 * @ide: registered IDE settings descriptor
 *
 * For stream destruction, zero all registers that may have been written
 * by pci_ide_stream_setup(). Consider pci_ide_stream_disable() to leave
 * settings in place while temporarily disabling the stream.
 */
void pci_ide_stream_teardown(struct pci_dev *pdev, struct pci_ide *ide)
{
	struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
	int pos, i;

	if (!settings)
		return;

	pos = sel_ide_offset(pdev, settings);

	pci_write_config_dword(pdev, pos + PCI_IDE_SEL_CTL, 0);

	for (i = 0; i < pdev->nr_ide_mem; i++) {
		pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_1(i), 0);
		pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_2(i), 0);
		pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_3(i), 0);
	}

	pci_write_config_dword(pdev, pos + PCI_IDE_SEL_RID_2, 0);
	pci_write_config_dword(pdev, pos + PCI_IDE_SEL_RID_1, 0);
	settings->setup = 0;
}
EXPORT_SYMBOL_GPL(pci_ide_stream_teardown);

/**
 * pci_ide_stream_enable() - enable a Selective IDE Stream
 * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
 * @ide: registered and setup IDE settings descriptor
 *
 * Activate the stream by writing to the Selective IDE Stream Control
 * Register.
 *
 * Return: 0 if the stream successfully entered the "secure" state, -EINVAL
 * if @ide is invalid, and -ENXIO if the stream fails to enter the secure
 * state.
 *
 * Note that the state may go "insecure" at any point after returning 0, but
 * those events are equivalent to a "link down" event and handled via
 * asynchronous error reporting.
 *
 * The caller is responsible for clearing the enable bit in the -ENXIO case.
 */
int pci_ide_stream_enable(struct pci_dev *pdev, struct pci_ide *ide)
{
	struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
	int pos;
	u32 val;

	if (!settings)
		return -EINVAL;

	pos = sel_ide_offset(pdev, settings);

	set_ide_sel_ctl(pdev, ide, settings, pos, true);
	settings->enable = 1;

	pci_read_config_dword(pdev, pos + PCI_IDE_SEL_STS, &val);
	if (FIELD_GET(PCI_IDE_SEL_STS_STATE, val) !=
	    PCI_IDE_SEL_STS_STATE_SECURE)
		return -ENXIO;

	return 0;
}
EXPORT_SYMBOL_GPL(pci_ide_stream_enable);
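Per the Return documentation above, a caller that sees -ENXIO still owns a set enable bit. A minimal sketch of the expected error handling (illustrative, not from the patch):

static int example_enable(struct pci_dev *pdev, struct pci_ide *ide)
{
	int rc = pci_ide_stream_enable(pdev, ide);

	if (rc == -ENXIO) {
		/* stream never reached "secure": clear the enable bit */
		pci_ide_stream_disable(pdev, ide);
	}
	return rc;
}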

/**
 * pci_ide_stream_disable() - disable a Selective IDE Stream
 * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
 * @ide: registered and setup IDE settings descriptor
 *
 * Clear the Selective IDE Stream Control Register, but leave all other
 * registers untouched.
 */
void pci_ide_stream_disable(struct pci_dev *pdev, struct pci_ide *ide)
{
	struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
	int pos;

	if (!settings)
		return;

	pos = sel_ide_offset(pdev, settings);

	pci_write_config_dword(pdev, pos + PCI_IDE_SEL_CTL, 0);
	settings->enable = 0;
}
EXPORT_SYMBOL_GPL(pci_ide_stream_disable);

void pci_ide_init_host_bridge(struct pci_host_bridge *hb)
{
	hb->nr_ide_streams = 256;
	ida_init(&hb->ide_stream_ida);
	ida_init(&hb->ide_stream_ids_ida);
	reserve_stream_id(hb, PCI_IDE_RESERVED_STREAM_ID);
}

static ssize_t available_secure_streams_show(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct pci_host_bridge *hb = to_pci_host_bridge(dev);
	int nr = READ_ONCE(hb->nr_ide_streams);
	int avail = nr;

	if (!nr)
		return -ENXIO;

	/*
	 * Yes, this is inefficient and racy, but it is only for occasional
	 * platform resource surveys. Worst case is bounded to 256 streams.
	 */
	for (int i = 0; i < nr; i++)
		if (ida_exists(&hb->ide_stream_ida, i))
			avail--;
	return sysfs_emit(buf, "%d\n", avail);
}
static DEVICE_ATTR_RO(available_secure_streams);

static struct attribute *pci_ide_attrs[] = {
	&dev_attr_available_secure_streams.attr,
	NULL
};

static umode_t pci_ide_attr_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_host_bridge *hb = to_pci_host_bridge(dev);

	if (a == &dev_attr_available_secure_streams.attr)
		if (!hb->nr_ide_streams)
			return 0;

	return a->mode;
}

const struct attribute_group pci_ide_attr_group = {
	.attrs = pci_ide_attrs,
	.is_visible = pci_ide_attr_visible,
};

/**
 * pci_ide_set_nr_streams() - set the size of the pool of IDE Stream resources
 * @hb: host bridge boundary for the stream pool
 * @nr: number of streams
 *
 * Platform PCI init and/or expert test module use only. Limit IDE
 * Stream establishment by setting the number of stream resources
 * available at the host bridge. Platform init code must set this before
 * the first pci_ide_stream_alloc() call if the platform has fewer than
 * the default of 256 streams per host bridge.
 *
 * The "PCI_IDE" symbol namespace is required because this is typically
 * a detail that is settled in early PCI init, i.e. this export is not
 * for endpoint drivers.
 */
void pci_ide_set_nr_streams(struct pci_host_bridge *hb, u16 nr)
{
	hb->nr_ide_streams = min(nr, 256);
	WARN_ON_ONCE(!ida_is_empty(&hb->ide_stream_ida));
	sysfs_update_group(&hb->dev.kobj, &pci_ide_attr_group);
}
EXPORT_SYMBOL_NS_GPL(pci_ide_set_nr_streams, "PCI_IDE");

void pci_ide_destroy(struct pci_dev *pdev)
{
	ida_destroy(&pdev->ide_stream_ida);
}
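A hedged sketch of how early platform code might cap the pool, per the rule above that this must happen before the first pci_ide_stream_alloc() call (the 8-stream limit and the hook itself are hypothetical):

/* Hypothetical platform init fragment, not part of this patch. */
static void example_platform_limit_streams(struct pci_host_bridge *hb)
{
	/*
	 * Assumption: this platform's TSM can only key 8 concurrent
	 * Selective IDE Streams per host bridge.
	 */
	pci_ide_set_nr_streams(hb, 8);
}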
@@ -1855,6 +1855,10 @@ const struct attribute_group *pci_dev_attr_groups[] = {
#endif
#ifdef CONFIG_PCI_DOE
	&pci_doe_sysfs_group,
#endif
#ifdef CONFIG_PCI_TSM
	&pci_tsm_auth_attr_group,
	&pci_tsm_attr_group,
#endif
	NULL,
};

@@ -615,6 +615,27 @@ static inline void pci_doe_sysfs_init(struct pci_dev *pdev) { }
static inline void pci_doe_sysfs_teardown(struct pci_dev *pdev) { }
#endif

#ifdef CONFIG_PCI_IDE
void pci_ide_init(struct pci_dev *dev);
void pci_ide_init_host_bridge(struct pci_host_bridge *hb);
void pci_ide_destroy(struct pci_dev *dev);
extern const struct attribute_group pci_ide_attr_group;
#else
static inline void pci_ide_init(struct pci_dev *dev) { }
static inline void pci_ide_init_host_bridge(struct pci_host_bridge *hb) { }
static inline void pci_ide_destroy(struct pci_dev *dev) { }
#endif

#ifdef CONFIG_PCI_TSM
void pci_tsm_init(struct pci_dev *pdev);
void pci_tsm_destroy(struct pci_dev *pdev);
extern const struct attribute_group pci_tsm_attr_group;
extern const struct attribute_group pci_tsm_auth_attr_group;
#else
static inline void pci_tsm_init(struct pci_dev *pdev) { }
static inline void pci_tsm_destroy(struct pci_dev *pdev) { }
#endif

/**
 * pci_dev_set_io_state - Set the new error state if possible.
 *

@@ -658,6 +658,18 @@ static void pci_release_host_bridge_dev(struct device *dev)
	kfree(bridge);
}

static const struct attribute_group *pci_host_bridge_groups[] = {
#ifdef CONFIG_PCI_IDE
	&pci_ide_attr_group,
#endif
	NULL
};

static const struct device_type pci_host_bridge_type = {
	.groups = pci_host_bridge_groups,
	.release = pci_release_host_bridge_dev,
};

static void pci_init_host_bridge(struct pci_host_bridge *bridge)
{
	INIT_LIST_HEAD(&bridge->windows);
@@ -677,6 +689,8 @@ static void pci_init_host_bridge(struct pci_host_bridge *bridge)
	bridge->native_dpc = 1;
	bridge->domain_nr = PCI_DOMAIN_NR_NOT_SET;
	bridge->native_cxl_error = 1;
	bridge->dev.type = &pci_host_bridge_type;
	pci_ide_init_host_bridge(bridge);

	device_initialize(&bridge->dev);
}
@@ -690,7 +704,6 @@ struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
		return NULL;

	pci_init_host_bridge(bridge);
-	bridge->dev.release = pci_release_host_bridge_dev;

	return bridge;
}
@@ -2296,6 +2309,17 @@ int pci_configure_extended_tags(struct pci_dev *dev, void *ign)
	return 0;
}

static void pci_dev3_init(struct pci_dev *pdev)
{
	u16 cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DEV3);
	u32 val = 0;

	if (!cap)
		return;
	pci_read_config_dword(pdev, cap + PCI_DEV3_STA, &val);
	pdev->fm_enabled = !!(val & PCI_DEV3_STA_SEGMENT);
}

/**
 * pcie_relaxed_ordering_enabled - Probe for PCIe relaxed ordering enable
 * @dev: PCI device to query
@@ -2680,6 +2704,8 @@ static void pci_init_capabilities(struct pci_dev *dev)
	pci_doe_init(dev);		/* Data Object Exchange */
	pci_tph_init(dev);		/* TLP Processing Hints */
	pci_rebar_init(dev);		/* Resizable BAR */
	pci_dev3_init(dev);		/* Device 3 capabilities */
	pci_ide_init(dev);		/* Link Integrity and Data Encryption */

	pcie_report_downtraining(dev);
	pci_init_reset_methods(dev);
@@ -2773,6 +2799,9 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
	ret = device_add(&dev->dev);
	WARN_ON(ret < 0);

	/* Establish pdev->tsm for newly added (e.g. new SR-IOV VFs) */
	pci_tsm_init(dev);

	pci_npem_create(dev);

	pci_doe_sysfs_init(dev);
@@ -57,6 +57,12 @@ static void pci_destroy_dev(struct pci_dev *dev)
	pci_doe_sysfs_teardown(dev);
	pci_npem_remove(dev);

	/*
	 * While the device is in D0, drop it from TSM link operations,
	 * including unbind and disconnect (IDE + SPDM teardown).
	 */
	pci_tsm_destroy(dev);

	device_del(&dev->dev);

	down_write(&pci_bus_sem);
@@ -64,6 +70,7 @@ static void pci_destroy_dev(struct pci_dev *dev)
	up_write(&pci_bus_sem);

	pci_doe_destroy(dev);
	pci_ide_destroy(dev);
	pcie_aspm_exit_link_state(dev);
	pci_bridge_d3_update(dev);
	pci_pwrctrl_unregister(&dev->dev);

@@ -282,6 +282,45 @@ static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id,
	return pdev;
}

static struct pci_dev *pci_get_dev_by_id_reverse(const struct pci_device_id *id,
						 struct pci_dev *from)
{
	struct device *dev;
	struct device *dev_start = NULL;
	struct pci_dev *pdev = NULL;

	if (from)
		dev_start = &from->dev;
	dev = bus_find_device_reverse(&pci_bus_type, dev_start, (void *)id,
				      match_pci_dev_by_id);
	if (dev)
		pdev = to_pci_dev(dev);
	pci_dev_put(from);
	return pdev;
}

enum pci_search_direction {
	PCI_SEARCH_FORWARD,
	PCI_SEARCH_REVERSE,
};

static struct pci_dev *__pci_get_subsys(unsigned int vendor, unsigned int device,
					unsigned int ss_vendor, unsigned int ss_device,
					struct pci_dev *from, enum pci_search_direction dir)
{
	struct pci_device_id id = {
		.vendor = vendor,
		.device = device,
		.subvendor = ss_vendor,
		.subdevice = ss_device,
	};

	if (dir == PCI_SEARCH_FORWARD)
		return pci_get_dev_by_id(&id, from);
	else
		return pci_get_dev_by_id_reverse(&id, from);
}

/**
 * pci_get_subsys - begin or continue searching for a PCI device by vendor/subvendor/device/subdevice id
 * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
@@ -302,14 +341,8 @@ struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
			       unsigned int ss_vendor, unsigned int ss_device,
			       struct pci_dev *from)
{
-	struct pci_device_id id = {
-		.vendor = vendor,
-		.device = device,
-		.subvendor = ss_vendor,
-		.subdevice = ss_device,
-	};
-
-	return pci_get_dev_by_id(&id, from);
+	return __pci_get_subsys(vendor, device, ss_vendor, ss_device, from,
+				PCI_SEARCH_FORWARD);
}
EXPORT_SYMBOL(pci_get_subsys);

@@ -334,6 +367,19 @@ struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
}
EXPORT_SYMBOL(pci_get_device);

/*
 * Same semantics as pci_get_device(), except walks the PCI device list
 * in reverse discovery order.
 */
struct pci_dev *pci_get_device_reverse(unsigned int vendor,
				       unsigned int device,
				       struct pci_dev *from)
{
	return __pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from,
				PCI_SEARCH_REVERSE);
}
EXPORT_SYMBOL(pci_get_device_reverse);
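A short usage sketch of the new reverse iterator, following the pci_get_device() reference-counting convention (the vendor/device match values are placeholders):

#include <linux/pci.h>

static void example_reverse_scan(void)
{
	struct pci_dev *pdev = NULL;

	/* 0x8086 / PCI_ANY_ID are illustrative match values */
	while ((pdev = pci_get_device_reverse(0x8086, PCI_ANY_ID, pdev))) {
		pci_info(pdev, "visited in reverse discovery order\n");
		/* the iterator drops the previous device's reference */
	}
}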

/**
 * pci_get_class - begin or continue searching for a PCI device by class
 * @class: search for a PCI device with this class designation

900	drivers/pci/tsm.c	Normal file
@@ -0,0 +1,900 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Interface with platform TEE Security Manager (TSM) objects as defined by
 * PCIe r7.0 section 11 TEE Device Interface Security Protocol (TDISP)
 *
 * Copyright(c) 2024-2025 Intel Corporation. All rights reserved.
 */

#define dev_fmt(fmt) "PCI/TSM: " fmt

#include <linux/bitfield.h>
#include <linux/pci.h>
#include <linux/pci-doe.h>
#include <linux/pci-tsm.h>
#include <linux/sysfs.h>
#include <linux/tsm.h>
#include <linux/xarray.h>
#include "pci.h"

/*
 * Provide a read/write lock against the init / exit of pdev tsm
 * capabilities and arrival/departure of a TSM instance
 */
static DECLARE_RWSEM(pci_tsm_rwsem);

/*
 * Count of TSMs registered that support physical link operations vs device
 * security state management.
 */
static int pci_tsm_link_count;
static int pci_tsm_devsec_count;

static const struct pci_tsm_ops *to_pci_tsm_ops(struct pci_tsm *tsm)
{
	return tsm->tsm_dev->pci_ops;
}

static inline bool is_dsm(struct pci_dev *pdev)
{
	return pdev->tsm && pdev->tsm->dsm_dev == pdev;
}

static inline bool has_tee(struct pci_dev *pdev)
{
	return pdev->devcap & PCI_EXP_DEVCAP_TEE;
}

/* 'struct pci_tsm_pf0' wraps 'struct pci_tsm' when ->dsm_dev == ->pdev (self) */
static struct pci_tsm_pf0 *to_pci_tsm_pf0(struct pci_tsm *tsm)
{
	/*
	 * All "link" TSM contexts reference the device that hosts the DSM
	 * interface for a set of devices. Walk to the DSM device and cast its
	 * ->tsm context to a 'struct pci_tsm_pf0 *'.
	 */
	struct pci_dev *pf0 = tsm->dsm_dev;

	if (!is_pci_tsm_pf0(pf0) || !is_dsm(pf0)) {
		pci_WARN_ONCE(tsm->pdev, 1, "invalid context object\n");
		return NULL;
	}

	return container_of(pf0->tsm, struct pci_tsm_pf0, base_tsm);
}

static void tsm_remove(struct pci_tsm *tsm)
{
	struct pci_dev *pdev;

	if (!tsm)
		return;

	pdev = tsm->pdev;
	to_pci_tsm_ops(tsm)->remove(tsm);
	pdev->tsm = NULL;
}
DEFINE_FREE(tsm_remove, struct pci_tsm *, if (_T) tsm_remove(_T))

static void pci_tsm_walk_fns(struct pci_dev *pdev,
			     int (*cb)(struct pci_dev *pdev, void *data),
			     void *data)
{
	/* Walk subordinate physical functions */
	for (int i = 0; i < 8; i++) {
		struct pci_dev *pf __free(pci_dev_put) = pci_get_slot(
			pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), i));

		if (!pf)
			continue;

		/* on entry function 0 has already run @cb */
		if (i > 0)
			cb(pf, data);

		/* walk virtual functions of each pf */
		for (int j = 0; j < pci_num_vf(pf); j++) {
			struct pci_dev *vf __free(pci_dev_put) =
				pci_get_domain_bus_and_slot(
					pci_domain_nr(pf->bus),
					pci_iov_virtfn_bus(pf, j),
					pci_iov_virtfn_devfn(pf, j));

			if (!vf)
				continue;

			cb(vf, data);
		}
	}

	/*
	 * Walk downstream devices, assuming that an upstream DSM is
	 * limited to downstream physical functions
	 */
	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_UPSTREAM && is_dsm(pdev))
		pci_walk_bus(pdev->subordinate, cb, data);
}

static void pci_tsm_walk_fns_reverse(struct pci_dev *pdev,
				     int (*cb)(struct pci_dev *pdev,
					       void *data),
				     void *data)
{
	/* Reverse walk downstream devices */
	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_UPSTREAM && is_dsm(pdev))
		pci_walk_bus_reverse(pdev->subordinate, cb, data);

	/* Reverse walk subordinate physical functions */
	for (int i = 7; i >= 0; i--) {
		struct pci_dev *pf __free(pci_dev_put) = pci_get_slot(
			pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), i));

		if (!pf)
			continue;

		/* reverse walk virtual functions */
		for (int j = pci_num_vf(pf) - 1; j >= 0; j--) {
			struct pci_dev *vf __free(pci_dev_put) =
				pci_get_domain_bus_and_slot(
					pci_domain_nr(pf->bus),
					pci_iov_virtfn_bus(pf, j),
					pci_iov_virtfn_devfn(pf, j));

			if (!vf)
				continue;
			cb(vf, data);
		}

		/* on exit, caller will run @cb on function 0 */
		if (i > 0)
			cb(pf, data);
	}
}

static void link_sysfs_disable(struct pci_dev *pdev)
{
	sysfs_update_group(&pdev->dev.kobj, &pci_tsm_auth_attr_group);
	sysfs_update_group(&pdev->dev.kobj, &pci_tsm_attr_group);
}

static void link_sysfs_enable(struct pci_dev *pdev)
{
	bool tee = has_tee(pdev);

	pci_dbg(pdev, "%s Security Manager detected (%s%s%s)\n",
		pdev->tsm ? "Device" : "Platform TEE",
		pdev->ide_cap ? "IDE" : "", pdev->ide_cap && tee ? " " : "",
		tee ? "TEE" : "");

	sysfs_update_group(&pdev->dev.kobj, &pci_tsm_auth_attr_group);
	sysfs_update_group(&pdev->dev.kobj, &pci_tsm_attr_group);
}

static int probe_fn(struct pci_dev *pdev, void *dsm)
{
	struct pci_dev *dsm_dev = dsm;
	const struct pci_tsm_ops *ops = to_pci_tsm_ops(dsm_dev->tsm);

	pdev->tsm = ops->probe(dsm_dev->tsm->tsm_dev, pdev);
	pci_dbg(pdev, "setup TSM context: DSM: %s status: %s\n",
		pci_name(dsm_dev), pdev->tsm ? "success" : "failed");
	if (pdev->tsm)
		link_sysfs_enable(pdev);
	return 0;
}

static int pci_tsm_connect(struct pci_dev *pdev, struct tsm_dev *tsm_dev)
{
	int rc;
	struct pci_tsm_pf0 *tsm_pf0;
	const struct pci_tsm_ops *ops = tsm_dev->pci_ops;
	struct pci_tsm *pci_tsm __free(tsm_remove) = ops->probe(tsm_dev, pdev);

	/* connect() mutually exclusive with subfunction pci_tsm_init() */
	lockdep_assert_held_write(&pci_tsm_rwsem);

	if (!pci_tsm)
		return -ENXIO;

	pdev->tsm = pci_tsm;
	tsm_pf0 = to_pci_tsm_pf0(pdev->tsm);

	/* mutex_intr assumes connect() is always sysfs/user driven */
	ACQUIRE(mutex_intr, lock)(&tsm_pf0->lock);
	if ((rc = ACQUIRE_ERR(mutex_intr, &lock)))
		return rc;

	rc = ops->connect(pdev);
	if (rc)
		return rc;

	pdev->tsm = no_free_ptr(pci_tsm);

	/*
	 * Now that the DSM is established, probe() all the potential
	 * dependent functions. Failure to probe a function is not fatal
	 * to connect(), it just disables subsequent security operations
	 * for that function.
	 *
	 * Note this is done unconditionally, without regard to finding
	 * PCI_EXP_DEVCAP_TEE on the dependent function, for robustness. The DSM
	 * is the ultimate arbiter of security state relative to a given
	 * interface id, and if it says it can manage TDISP state of a function,
	 * let it.
	 */
	if (has_tee(pdev))
		pci_tsm_walk_fns(pdev, probe_fn, pdev);
	return 0;
}

static ssize_t connect_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tsm_dev *tsm_dev;
	int rc;

	ACQUIRE(rwsem_read_intr, lock)(&pci_tsm_rwsem);
	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &lock)))
		return rc;

	if (!pdev->tsm)
		return sysfs_emit(buf, "\n");

	tsm_dev = pdev->tsm->tsm_dev;
	return sysfs_emit(buf, "%s\n", dev_name(&tsm_dev->dev));
}

/* Is @tsm_dev managing physical link / session properties... */
static bool is_link_tsm(struct tsm_dev *tsm_dev)
{
	return tsm_dev && tsm_dev->pci_ops && tsm_dev->pci_ops->link_ops.probe;
}

/* ...or is @tsm_dev managing device security state? */
static bool is_devsec_tsm(struct tsm_dev *tsm_dev)
{
	return tsm_dev && tsm_dev->pci_ops && tsm_dev->pci_ops->devsec_ops.lock;
}

static ssize_t connect_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int rc, id;

	rc = sscanf(buf, "tsm%d\n", &id);
	if (rc != 1)
		return -EINVAL;

	ACQUIRE(rwsem_write_kill, lock)(&pci_tsm_rwsem);
	if ((rc = ACQUIRE_ERR(rwsem_write_kill, &lock)))
		return rc;

	if (pdev->tsm)
		return -EBUSY;

	struct tsm_dev *tsm_dev __free(put_tsm_dev) = find_tsm_dev(id);
	if (!is_link_tsm(tsm_dev))
		return -ENXIO;

	rc = pci_tsm_connect(pdev, tsm_dev);
	if (rc)
		return rc;
	return len;
}
static DEVICE_ATTR_RW(connect);

static int remove_fn(struct pci_dev *pdev, void *data)
{
	tsm_remove(pdev->tsm);
	link_sysfs_disable(pdev);
	return 0;
}

/*
 * Note: this helper returns an error code and takes a @data argument only
 * for compatibility with the pci_walk_bus() callback prototype;
 * pci_tsm_unbind() always succeeds.
 */
static int __pci_tsm_unbind(struct pci_dev *pdev, void *data)
{
	struct pci_tdi *tdi;
	struct pci_tsm_pf0 *tsm_pf0;

	lockdep_assert_held(&pci_tsm_rwsem);

	if (!pdev->tsm)
		return 0;

	tsm_pf0 = to_pci_tsm_pf0(pdev->tsm);
	guard(mutex)(&tsm_pf0->lock);

	tdi = pdev->tsm->tdi;
	if (!tdi)
		return 0;

	to_pci_tsm_ops(pdev->tsm)->unbind(tdi);
	pdev->tsm->tdi = NULL;

	return 0;
}

void pci_tsm_unbind(struct pci_dev *pdev)
{
	guard(rwsem_read)(&pci_tsm_rwsem);
	__pci_tsm_unbind(pdev, NULL);
}
EXPORT_SYMBOL_GPL(pci_tsm_unbind);

/**
 * pci_tsm_bind() - Bind @pdev as a TDI for @kvm
 * @pdev: PCI device function to bind
 * @kvm: Private memory attach context
 * @tdi_id: Identifier (virtual BDF) for the TDI as referenced by the TSM and DSM
 *
 * Returns 0 on success, or a negative error code on failure.
 *
 * Context: Caller is responsible for constraining the bind lifetime to the
 * registered state of the device. For example, pci_tsm_bind() /
 * pci_tsm_unbind() limited to the VFIO driver bound state of the device.
 */
int pci_tsm_bind(struct pci_dev *pdev, struct kvm *kvm, u32 tdi_id)
{
	struct pci_tsm_pf0 *tsm_pf0;
	struct pci_tdi *tdi;

	if (!kvm)
		return -EINVAL;

	guard(rwsem_read)(&pci_tsm_rwsem);

	if (!pdev->tsm)
		return -EINVAL;

	if (!is_link_tsm(pdev->tsm->tsm_dev))
		return -ENXIO;

	tsm_pf0 = to_pci_tsm_pf0(pdev->tsm);
	guard(mutex)(&tsm_pf0->lock);

	/* Resolve races to bind a TDI */
	if (pdev->tsm->tdi) {
		if (pdev->tsm->tdi->kvm != kvm)
			return -EBUSY;
		return 0;
	}

	tdi = to_pci_tsm_ops(pdev->tsm)->bind(pdev, kvm, tdi_id);
	if (IS_ERR(tdi))
		return PTR_ERR(tdi);

	pdev->tsm->tdi = tdi;

	return 0;
}
EXPORT_SYMBOL_GPL(pci_tsm_bind);
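Per the Context note, bind/unbind brackets the driver-bound state of the function. A hedged sketch of how a VFIO-style driver might drive this (the tdi_id source and helper names are assumptions):

/* Hypothetical caller, not part of this patch. */
static int example_attach_tdi(struct pci_dev *pdev, struct kvm *kvm)
{
	u32 tdi_id = pci_dev_id(pdev);	/* assumption: guest BDF == host BDF */

	return pci_tsm_bind(pdev, kvm, tdi_id);
}

static void example_detach_tdi(struct pci_dev *pdev)
{
	pci_tsm_unbind(pdev);		/* safe even if no TDI is bound */
}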

/**
 * pci_tsm_guest_req() - helper to marshal guest requests to the TSM driver
 * @pdev: device function representing a bound TDI
 * @scope: caller asserts this passthrough request is limited to TDISP operations
 * @req_in: Input payload forwarded from the guest
 * @in_len: Length of @req_in
 * @req_out: Output payload buffer response to the guest
 * @out_len: Length of @req_out on input, bytes filled in @req_out on output
 * @tsm_code: Optional TSM arch specific result code for the guest TSM
 *
 * This is a common entry point for requests triggered by userspace KVM-exit
 * service handlers responding to TDI information or state change requests. The
 * scope parameter limits requests to TDISP state management, or limited debug.
 * This path is only suitable for commands and results that the host kernel has
 * no use for; the host is only facilitating guest to TSM communication.
 *
 * Returns 0 on success, -error on failure, and a positive "residue" count on
 * success when @req_out is filled with less than @out_len bytes, or when
 * @req_out is NULL and a residue number of bytes were not consumed from
 * @req_in. On success or failure @tsm_code may be populated with a TSM
 * implementation specific result code for the guest to consume.
 *
 * Context: Caller is responsible for calling this within the pci_tsm_bind()
 * state of the TDI.
 */
ssize_t pci_tsm_guest_req(struct pci_dev *pdev, enum pci_tsm_req_scope scope,
			  sockptr_t req_in, size_t in_len, sockptr_t req_out,
			  size_t out_len, u64 *tsm_code)
{
	struct pci_tsm_pf0 *tsm_pf0;
	struct pci_tdi *tdi;
	int rc;

	/* Forbid requests that are not directly related to TDISP operations */
	if (scope > PCI_TSM_REQ_STATE_CHANGE)
		return -EINVAL;

	ACQUIRE(rwsem_read_intr, lock)(&pci_tsm_rwsem);
	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &lock)))
		return rc;

	if (!pdev->tsm)
		return -ENXIO;

	if (!is_link_tsm(pdev->tsm->tsm_dev))
		return -ENXIO;

	tsm_pf0 = to_pci_tsm_pf0(pdev->tsm);
	ACQUIRE(mutex_intr, ops_lock)(&tsm_pf0->lock);
	if ((rc = ACQUIRE_ERR(mutex_intr, &ops_lock)))
		return rc;

	tdi = pdev->tsm->tdi;
	if (!tdi)
		return -ENXIO;
	return to_pci_tsm_ops(pdev->tsm)->guest_req(tdi, scope, req_in, in_len,
						    req_out, out_len, tsm_code);
}
EXPORT_SYMBOL_GPL(pci_tsm_guest_req);

static void pci_tsm_unbind_all(struct pci_dev *pdev)
{
	pci_tsm_walk_fns_reverse(pdev, __pci_tsm_unbind, NULL);
	__pci_tsm_unbind(pdev, NULL);
}

static void __pci_tsm_disconnect(struct pci_dev *pdev)
{
	struct pci_tsm_pf0 *tsm_pf0 = to_pci_tsm_pf0(pdev->tsm);
	const struct pci_tsm_ops *ops = to_pci_tsm_ops(pdev->tsm);

	/* disconnect() mutually exclusive with subfunction pci_tsm_init() */
	lockdep_assert_held_write(&pci_tsm_rwsem);

	pci_tsm_unbind_all(pdev);

	/*
	 * disconnect() is uninterruptible as it may be called for device
	 * teardown
	 */
	guard(mutex)(&tsm_pf0->lock);
	pci_tsm_walk_fns_reverse(pdev, remove_fn, NULL);
	ops->disconnect(pdev);
}

static void pci_tsm_disconnect(struct pci_dev *pdev)
{
	__pci_tsm_disconnect(pdev);
	tsm_remove(pdev->tsm);
}

static ssize_t disconnect_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t len)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tsm_dev *tsm_dev;
	int rc;

	ACQUIRE(rwsem_write_kill, lock)(&pci_tsm_rwsem);
	if ((rc = ACQUIRE_ERR(rwsem_write_kill, &lock)))
		return rc;

	if (!pdev->tsm)
		return -ENXIO;

	tsm_dev = pdev->tsm->tsm_dev;
	if (!sysfs_streq(buf, dev_name(&tsm_dev->dev)))
		return -EINVAL;

	pci_tsm_disconnect(pdev);
	return len;
}
static DEVICE_ATTR_WO(disconnect);

static ssize_t bound_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_tsm_pf0 *tsm_pf0;
	struct pci_tsm *tsm;
	int rc;

	ACQUIRE(rwsem_read_intr, lock)(&pci_tsm_rwsem);
	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &lock)))
		return rc;

	tsm = pdev->tsm;
	if (!tsm)
		return sysfs_emit(buf, "\n");
	tsm_pf0 = to_pci_tsm_pf0(tsm);

	ACQUIRE(mutex_intr, ops_lock)(&tsm_pf0->lock);
	if ((rc = ACQUIRE_ERR(mutex_intr, &ops_lock)))
		return rc;

	if (!tsm->tdi)
		return sysfs_emit(buf, "\n");
	return sysfs_emit(buf, "%s\n", dev_name(&tsm->tsm_dev->dev));
}
static DEVICE_ATTR_RO(bound);

static ssize_t dsm_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_tsm *tsm;
	int rc;

	ACQUIRE(rwsem_read_intr, lock)(&pci_tsm_rwsem);
	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &lock)))
		return rc;

	tsm = pdev->tsm;
	if (!tsm)
		return sysfs_emit(buf, "\n");

	return sysfs_emit(buf, "%s\n", pci_name(tsm->dsm_dev));
}
static DEVICE_ATTR_RO(dsm);

/* The 'authenticated' attribute is exclusive to the presence of a 'link' TSM */
static bool pci_tsm_link_group_visible(struct kobject *kobj)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));

	if (!pci_tsm_link_count)
		return false;

	if (!pci_is_pcie(pdev))
		return false;

	if (is_pci_tsm_pf0(pdev))
		return true;

	/*
	 * Show 'authenticated' and other attributes for the managed
	 * sub-functions of a DSM.
	 */
	if (pdev->tsm)
		return true;

	return false;
}
DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(pci_tsm_link);

/*
 * 'link' and 'devsec' TSMs share the same 'tsm/' sysfs group, so the TSM type
 * specific attributes need individual visibility checks.
 */
static umode_t pci_tsm_attr_visible(struct kobject *kobj,
				    struct attribute *attr, int n)
{
	if (pci_tsm_link_group_visible(kobj)) {
		struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));

		if (attr == &dev_attr_bound.attr) {
			if (is_pci_tsm_pf0(pdev) && has_tee(pdev))
				return attr->mode;
			if (pdev->tsm && has_tee(pdev->tsm->dsm_dev))
				return attr->mode;
		}

		if (attr == &dev_attr_dsm.attr) {
			if (is_pci_tsm_pf0(pdev))
				return attr->mode;
			if (pdev->tsm && has_tee(pdev->tsm->dsm_dev))
				return attr->mode;
		}

		if (attr == &dev_attr_connect.attr ||
		    attr == &dev_attr_disconnect.attr) {
			if (is_pci_tsm_pf0(pdev))
				return attr->mode;
		}
	}

	return 0;
}

static bool pci_tsm_group_visible(struct kobject *kobj)
{
	return pci_tsm_link_group_visible(kobj);
}
DEFINE_SYSFS_GROUP_VISIBLE(pci_tsm);
|
||||
static struct attribute *pci_tsm_attrs[] = {
|
||||
&dev_attr_connect.attr,
|
||||
&dev_attr_disconnect.attr,
|
||||
&dev_attr_bound.attr,
|
||||
&dev_attr_dsm.attr,
|
||||
NULL
|
||||
};
|
||||
|
||||
const struct attribute_group pci_tsm_attr_group = {
|
||||
.name = "tsm",
|
||||
.attrs = pci_tsm_attrs,
|
||||
.is_visible = SYSFS_GROUP_VISIBLE(pci_tsm),
|
||||
};
|
||||
|
||||
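For orientation, a minimal sketch of what the group definitions above produce in sysfs, assuming a link TSM is registered and a hypothetical PF0 endpoint at 0000:3a:00.0 (the BDF is illustrative, not from this file):

/sys/bus/pci/devices/0000:3a:00.0/tsm/connect
/sys/bus/pci/devices/0000:3a:00.0/tsm/disconnect
/sys/bus/pci/devices/0000:3a:00.0/tsm/bound
/sys/bus/pci/devices/0000:3a:00.0/tsm/dsm

Per-attribute visibility within the 'tsm/' directory follows pci_tsm_attr_visible() above.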
static ssize_t authenticated_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	/*
	 * When the SPDM session is established via the TSM, the
	 * 'authenticated' state of the device is identical to the connect
	 * state.
	 */
	return connect_show(dev, attr, buf);
}
static DEVICE_ATTR_RO(authenticated);

static struct attribute *pci_tsm_auth_attrs[] = {
	&dev_attr_authenticated.attr,
	NULL
};

const struct attribute_group pci_tsm_auth_attr_group = {
	.attrs = pci_tsm_auth_attrs,
	.is_visible = SYSFS_GROUP_VISIBLE(pci_tsm_link),
};

/*
 * Retrieve the physical function 0 device, whether or not it has TEE
 * capability.
 */
static struct pci_dev *pf0_dev_get(struct pci_dev *pdev)
{
	struct pci_dev *pf_dev = pci_physfn(pdev);

	if (PCI_FUNC(pf_dev->devfn) == 0)
		return pci_dev_get(pf_dev);

	return pci_get_slot(pf_dev->bus,
			    pf_dev->devfn - PCI_FUNC(pf_dev->devfn));
}
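A quick worked example of the devfn arithmetic above: PCI encodes devfn as (slot << 3) | function, and PCI_FUNC() extracts the low three bits. For a hypothetical pf_dev->devfn of 0x51 (slot 0xa, function 1), pf_dev->devfn - PCI_FUNC(pf_dev->devfn) is 0x50, i.e. function 0 of the same slot, which is what pci_get_slot() is asked to look up.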
/*
 * Find the PCI device instance that serves as the Device Security Manager
 * (DSM) for @pdev. Note that no additional reference is held for the
 * resulting device because that object always has a registered lifetime
 * greater than or equal to that of the @pdev argument. This is by virtue of
 * @pdev being a descendant of, or identical to, the returned DSM device.
 */
static struct pci_dev *find_dsm_dev(struct pci_dev *pdev)
{
	struct device *grandparent;
	struct pci_dev *uport;

	if (is_pci_tsm_pf0(pdev))
		return pdev;

	struct pci_dev *pf0 __free(pci_dev_put) = pf0_dev_get(pdev);
	if (!pf0)
		return NULL;

	if (is_dsm(pf0))
		return pf0;

	/*
	 * For cases where a switch may be hosting TDISP services on behalf of
	 * downstream devices, check the first upstream port relative to this
	 * endpoint.
	 */
	if (!pdev->dev.parent)
		return NULL;
	grandparent = pdev->dev.parent->parent;
	if (!grandparent)
		return NULL;
	if (!dev_is_pci(grandparent))
		return NULL;
	uport = to_pci_dev(grandparent);
	if (!pci_is_pcie(uport) ||
	    pci_pcie_type(uport) != PCI_EXP_TYPE_UPSTREAM)
		return NULL;

	if (is_dsm(uport))
		return uport;
	return NULL;
}

/**
 * pci_tsm_tdi_constructor() - base 'struct pci_tdi' initialization for link TSMs
 * @pdev: PCI device function representing the TDI
 * @tdi: context to initialize
 * @kvm: Private memory attach context
 * @tdi_id: Identifier (virtual BDF) for the TDI as referenced by the TSM and DSM
 */
void pci_tsm_tdi_constructor(struct pci_dev *pdev, struct pci_tdi *tdi,
			     struct kvm *kvm, u32 tdi_id)
{
	tdi->pdev = pdev;
	tdi->kvm = kvm;
	tdi->tdi_id = tdi_id;
}
EXPORT_SYMBOL_GPL(pci_tsm_tdi_constructor);

/**
 * pci_tsm_link_constructor() - base 'struct pci_tsm' initialization for link TSMs
 * @pdev: The PCI device
 * @tsm: context to initialize
 * @tsm_dev: Platform TEE Security Manager, initiator of security operations
 */
int pci_tsm_link_constructor(struct pci_dev *pdev, struct pci_tsm *tsm,
			     struct tsm_dev *tsm_dev)
{
	if (!is_link_tsm(tsm_dev))
		return -EINVAL;

	tsm->dsm_dev = find_dsm_dev(pdev);
	if (!tsm->dsm_dev) {
		pci_warn(pdev, "failed to find Device Security Manager\n");
		return -ENXIO;
	}
	tsm->pdev = pdev;
	tsm->tsm_dev = tsm_dev;

	return 0;
}
EXPORT_SYMBOL_GPL(pci_tsm_link_constructor);
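A minimal sketch of how a low-level link TSM driver might use the constructor exported above. The context structure and probe hook are hypothetical names for illustration; only pci_tsm_link_constructor() itself comes from this file:

struct my_tsm_ctx {
	struct pci_tsm base;
	/* driver-private state would follow */
};

static struct pci_tsm *my_tsm_probe(struct tsm_dev *tsm_dev, struct pci_dev *pdev)
{
	struct my_tsm_ctx *ctx;
	int rc;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	/* run the base initialization, then layer driver state on top */
	rc = pci_tsm_link_constructor(pdev, &ctx->base, tsm_dev);
	if (rc) {
		kfree(ctx);
		return NULL;
	}

	return &ctx->base;
}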
/**
 * pci_tsm_pf0_constructor() - common 'struct pci_tsm_pf0' (DSM) initialization
 * @pdev: Physical Function 0 PCI device (as indicated by is_pci_tsm_pf0())
 * @tsm: context to initialize
 * @tsm_dev: Platform TEE Security Manager, initiator of security operations
 */
int pci_tsm_pf0_constructor(struct pci_dev *pdev, struct pci_tsm_pf0 *tsm,
			    struct tsm_dev *tsm_dev)
{
	mutex_init(&tsm->lock);
	tsm->doe_mb = pci_find_doe_mailbox(pdev, PCI_VENDOR_ID_PCI_SIG,
					   PCI_DOE_FEATURE_CMA);
	if (!tsm->doe_mb) {
		pci_warn(pdev, "TSM init failure, no CMA mailbox\n");
		return -ENODEV;
	}

	return pci_tsm_link_constructor(pdev, &tsm->base_tsm, tsm_dev);
}
EXPORT_SYMBOL_GPL(pci_tsm_pf0_constructor);

void pci_tsm_pf0_destructor(struct pci_tsm_pf0 *pf0_tsm)
{
	mutex_destroy(&pf0_tsm->lock);
}
EXPORT_SYMBOL_GPL(pci_tsm_pf0_destructor);

int pci_tsm_register(struct tsm_dev *tsm_dev)
{
	struct pci_dev *pdev = NULL;

	if (!tsm_dev)
		return -EINVAL;

	/* The TSM device must only implement one of link_ops or devsec_ops */
	if (!is_link_tsm(tsm_dev) && !is_devsec_tsm(tsm_dev))
		return -EINVAL;

	if (is_link_tsm(tsm_dev) && is_devsec_tsm(tsm_dev))
		return -EINVAL;

	guard(rwsem_write)(&pci_tsm_rwsem);

	/* On first enable, update sysfs groups */
	if (is_link_tsm(tsm_dev) && pci_tsm_link_count++ == 0) {
		for_each_pci_dev(pdev)
			if (is_pci_tsm_pf0(pdev))
				link_sysfs_enable(pdev);
	} else if (is_devsec_tsm(tsm_dev)) {
		pci_tsm_devsec_count++;
	}

	return 0;
}

static void pci_tsm_fn_exit(struct pci_dev *pdev)
{
	__pci_tsm_unbind(pdev, NULL);
	tsm_remove(pdev->tsm);
}

/**
 * __pci_tsm_destroy() - destroy the TSM context for @pdev
 * @pdev: device to clean up
 * @tsm_dev: the TSM device being removed, or NULL if @pdev is being removed.
 *
 * At device removal or TSM unregistration, all established context
 * with the TSM is torn down. Additionally, if there are no more TSMs
 * registered, the PCI tsm/ sysfs attributes are hidden.
 */
static void __pci_tsm_destroy(struct pci_dev *pdev, struct tsm_dev *tsm_dev)
{
	struct pci_tsm *tsm = pdev->tsm;

	lockdep_assert_held_write(&pci_tsm_rwsem);

	/*
	 * First, handle the TSM removal case to shut down @pdev sysfs. This is
	 * skipped if the device itself is being removed, since sysfs goes away
	 * naturally at that point.
	 */
	if (is_link_tsm(tsm_dev) && is_pci_tsm_pf0(pdev) && !pci_tsm_link_count)
		link_sysfs_disable(pdev);

	/* Nothing else to do if this device never attached to the departing TSM */
	if (!tsm)
		return;

	/* Now look up the tsm_dev to destroy TSM context */
	if (!tsm_dev)
		tsm_dev = tsm->tsm_dev;
	else if (tsm_dev != tsm->tsm_dev)
		return;

	if (is_link_tsm(tsm_dev) && is_pci_tsm_pf0(pdev))
		pci_tsm_disconnect(pdev);
	else
		pci_tsm_fn_exit(pdev);
}

void pci_tsm_destroy(struct pci_dev *pdev)
{
	guard(rwsem_write)(&pci_tsm_rwsem);
	__pci_tsm_destroy(pdev, NULL);
}

void pci_tsm_init(struct pci_dev *pdev)
{
	guard(rwsem_read)(&pci_tsm_rwsem);

	/*
	 * Subfunctions are either probed synchronously with connect(), or
	 * later when the SR-IOV configuration is changed, or, in the unlikely
	 * case, when connect() raced initial bus scanning.
	 */
	if (pdev->tsm)
		return;

	if (pci_tsm_link_count) {
		struct pci_dev *dsm = find_dsm_dev(pdev);

		if (!dsm)
			return;

		/*
		 * The only path to init a Device Security Manager capable
		 * device is via connect().
		 */
		if (!dsm->tsm)
			return;

		probe_fn(pdev, dsm);
	}
}

void pci_tsm_unregister(struct tsm_dev *tsm_dev)
{
	struct pci_dev *pdev = NULL;

	guard(rwsem_write)(&pci_tsm_rwsem);
	if (is_link_tsm(tsm_dev))
		pci_tsm_link_count--;
	if (is_devsec_tsm(tsm_dev))
		pci_tsm_devsec_count--;
	for_each_pci_dev_reverse(pdev)
		__pci_tsm_destroy(pdev, tsm_dev);
}

int pci_tsm_doe_transfer(struct pci_dev *pdev, u8 type, const void *req,
			 size_t req_sz, void *resp, size_t resp_sz)
{
	struct pci_tsm_pf0 *tsm;

	if (!pdev->tsm || !is_pci_tsm_pf0(pdev))
		return -ENXIO;

	tsm = to_pci_tsm_pf0(pdev->tsm);
	if (!tsm->doe_mb)
		return -ENXIO;

	return pci_doe(tsm->doe_mb, PCI_VENDOR_ID_PCI_SIG, type, req, req_sz,
		       resp, resp_sz);
}
EXPORT_SYMBOL_GPL(pci_tsm_doe_transfer);
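An illustrative (not in-tree) use of the DOE relay exported above. The wrapper name and the literal DOE object type are assumptions: CMA/SPDM is object type 1 in the PCI-SIG DOE registry, but a real caller should use the proper constant for its protocol:

/* Hypothetical wrapper: forward one SPDM message to the DSM's CMA mailbox */
static int my_spdm_exchange(struct pci_dev *pdev, const void *req,
			    size_t req_sz, void *rsp, size_t rsp_sz)
{
	return pci_tsm_doe_transfer(pdev, 1 /* CMA/SPDM, assumed */, req,
				    req_sz, rsp, rsp_sz);
}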
@@ -81,10 +81,6 @@
#define MVOLT_1800	0
#define MVOLT_3300	1

/* Non-constant mask variant of FIELD_GET() and FIELD_PREP() */
#define field_get(_mask, _reg)	(((_reg) & (_mask)) >> (ffs(_mask) - 1))
#define field_prep(_mask, _val)	(((_val) << (ffs(_mask) - 1)) & (_mask))

static const char * const gpio_group_name[] = {
	"gpioa", "gpiob", "gpioc", "gpiod", "gpioe", "gpiof", "gpiog",
	"gpioh", "gpioi", "gpioj", "gpiok", "gpiol", "gpiom", "gpion",
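For reference, a worked example of the non-constant-mask helpers in the hunk above (values chosen purely for illustration): with mask 0x70, ffs(0x70) is 5, so the shift is 4.

/* field_get(0x70, 0x50) == (0x50 & 0x70) >> 4 == 0x5  */
/* field_prep(0x70, 0x5) == (0x5 << 4) & 0x70 == 0x50 */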
@@ -261,56 +261,6 @@ static int imx8ulp_dsp_reset(struct imx_dsp_rproc *priv)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Specific configuration for i.MX8MP */
|
||||
static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8mp = {
|
||||
.att = imx_dsp_rproc_att_imx8mp,
|
||||
.att_size = ARRAY_SIZE(imx_dsp_rproc_att_imx8mp),
|
||||
.method = IMX_RPROC_RESET_CONTROLLER,
|
||||
};
|
||||
|
||||
static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8mp = {
|
||||
.dcfg = &dsp_rproc_cfg_imx8mp,
|
||||
.reset = imx8mp_dsp_reset,
|
||||
};
|
||||
|
||||
/* Specific configuration for i.MX8ULP */
|
||||
static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8ulp = {
|
||||
.src_reg = IMX8ULP_SIM_LPAV_REG_SYSCTRL0,
|
||||
.src_mask = IMX8ULP_SYSCTRL0_DSP_STALL,
|
||||
.src_start = 0,
|
||||
.src_stop = IMX8ULP_SYSCTRL0_DSP_STALL,
|
||||
.att = imx_dsp_rproc_att_imx8ulp,
|
||||
.att_size = ARRAY_SIZE(imx_dsp_rproc_att_imx8ulp),
|
||||
.method = IMX_RPROC_MMIO,
|
||||
};
|
||||
|
||||
static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8ulp = {
|
||||
.dcfg = &dsp_rproc_cfg_imx8ulp,
|
||||
.reset = imx8ulp_dsp_reset,
|
||||
};
|
||||
|
||||
/* Specific configuration for i.MX8QXP */
|
||||
static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8qxp = {
|
||||
.att = imx_dsp_rproc_att_imx8qxp,
|
||||
.att_size = ARRAY_SIZE(imx_dsp_rproc_att_imx8qxp),
|
||||
.method = IMX_RPROC_SCU_API,
|
||||
};
|
||||
|
||||
static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8qxp = {
|
||||
.dcfg = &dsp_rproc_cfg_imx8qxp,
|
||||
};
|
||||
|
||||
/* Specific configuration for i.MX8QM */
|
||||
static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8qm = {
|
||||
.att = imx_dsp_rproc_att_imx8qm,
|
||||
.att_size = ARRAY_SIZE(imx_dsp_rproc_att_imx8qm),
|
||||
.method = IMX_RPROC_SCU_API,
|
||||
};
|
||||
|
||||
static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8qm = {
|
||||
.dcfg = &dsp_rproc_cfg_imx8qm,
|
||||
};
|
||||
|
||||
static int imx_dsp_rproc_ready(struct rproc *rproc)
|
||||
{
|
||||
struct imx_dsp_rproc *priv = rproc->priv;
|
||||
@@ -388,6 +338,28 @@ static int imx_dsp_rproc_handle_rsc(struct rproc *rproc, u32 rsc_type,
|
||||
return RSC_HANDLED;
|
||||
}
|
||||
|
||||
static int imx_dsp_rproc_mmio_start(struct rproc *rproc)
|
||||
{
|
||||
struct imx_dsp_rproc *priv = rproc->priv;
|
||||
const struct imx_rproc_dcfg *dcfg = priv->dsp_dcfg->dcfg;
|
||||
|
||||
return regmap_update_bits(priv->regmap, dcfg->src_reg, dcfg->src_mask, dcfg->src_start);
|
||||
}
|
||||
|
||||
static int imx_dsp_rproc_reset_ctrl_start(struct rproc *rproc)
|
||||
{
|
||||
struct imx_dsp_rproc *priv = rproc->priv;
|
||||
|
||||
return reset_control_deassert(priv->run_stall);
|
||||
}
|
||||
|
||||
static int imx_dsp_rproc_scu_api_start(struct rproc *rproc)
|
||||
{
|
||||
struct imx_dsp_rproc *priv = rproc->priv;
|
||||
|
||||
return imx_sc_pm_cpu_start(priv->ipc_handle, IMX_SC_R_DSP, true, rproc->bootaddr);
|
||||
}
|
||||
|
||||
/*
|
||||
* Start function for rproc_ops
|
||||
*
|
||||
@@ -404,32 +376,41 @@ static int imx_dsp_rproc_start(struct rproc *rproc)
|
||||
struct device *dev = rproc->dev.parent;
|
||||
int ret;
|
||||
|
||||
switch (dcfg->method) {
|
||||
case IMX_RPROC_MMIO:
|
||||
ret = regmap_update_bits(priv->regmap,
|
||||
dcfg->src_reg,
|
||||
dcfg->src_mask,
|
||||
dcfg->src_start);
|
||||
break;
|
||||
case IMX_RPROC_SCU_API:
|
||||
ret = imx_sc_pm_cpu_start(priv->ipc_handle,
|
||||
IMX_SC_R_DSP,
|
||||
true,
|
||||
rproc->bootaddr);
|
||||
break;
|
||||
case IMX_RPROC_RESET_CONTROLLER:
|
||||
ret = reset_control_deassert(priv->run_stall);
|
||||
break;
|
||||
default:
|
||||
if (!dcfg->ops || !dcfg->ops->start)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
ret = dcfg->ops->start(rproc);
|
||||
if (ret) {
|
||||
dev_err(dev, "Failed to enable remote core!\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (ret)
|
||||
dev_err(dev, "Failed to enable remote core!\n");
|
||||
else if (priv->flags & WAIT_FW_READY)
|
||||
if (priv->flags & WAIT_FW_READY)
|
||||
return imx_dsp_rproc_ready(rproc);
|
||||
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int imx_dsp_rproc_mmio_stop(struct rproc *rproc)
|
||||
{
|
||||
struct imx_dsp_rproc *priv = rproc->priv;
|
||||
const struct imx_rproc_dcfg *dcfg = priv->dsp_dcfg->dcfg;
|
||||
|
||||
return regmap_update_bits(priv->regmap, dcfg->src_reg, dcfg->src_mask, dcfg->src_stop);
|
||||
}
|
||||
|
||||
static int imx_dsp_rproc_reset_ctrl_stop(struct rproc *rproc)
|
||||
{
|
||||
struct imx_dsp_rproc *priv = rproc->priv;
|
||||
|
||||
return reset_control_assert(priv->run_stall);
|
||||
}
|
||||
|
||||
static int imx_dsp_rproc_scu_api_stop(struct rproc *rproc)
|
||||
{
|
||||
struct imx_dsp_rproc *priv = rproc->priv;
|
||||
|
||||
return imx_sc_pm_cpu_start(priv->ipc_handle, IMX_SC_R_DSP, false, rproc->bootaddr);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -449,30 +430,18 @@ static int imx_dsp_rproc_stop(struct rproc *rproc)
|
||||
return 0;
|
||||
}
|
||||
|
||||
switch (dcfg->method) {
|
||||
case IMX_RPROC_MMIO:
|
||||
ret = regmap_update_bits(priv->regmap, dcfg->src_reg, dcfg->src_mask,
|
||||
dcfg->src_stop);
|
||||
break;
|
||||
case IMX_RPROC_SCU_API:
|
||||
ret = imx_sc_pm_cpu_start(priv->ipc_handle,
|
||||
IMX_SC_R_DSP,
|
||||
false,
|
||||
rproc->bootaddr);
|
||||
break;
|
||||
case IMX_RPROC_RESET_CONTROLLER:
|
||||
ret = reset_control_assert(priv->run_stall);
|
||||
break;
|
||||
default:
|
||||
if (!dcfg->ops || !dcfg->ops->stop)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
ret = dcfg->ops->stop(rproc);
|
||||
if (ret) {
|
||||
dev_err(dev, "Failed to stop remote core\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (ret)
|
||||
dev_err(dev, "Failed to stop remote core\n");
|
||||
else
|
||||
priv->flags &= ~REMOTE_IS_READY;
|
||||
priv->flags &= ~REMOTE_IS_READY;
|
||||
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -689,11 +658,9 @@ static int imx_dsp_rproc_add_carveout(struct imx_dsp_rproc *priv)
|
||||
struct rproc *rproc = priv->rproc;
|
||||
struct device *dev = rproc->dev.parent;
|
||||
struct device_node *np = dev->of_node;
|
||||
struct of_phandle_iterator it;
|
||||
struct rproc_mem_entry *mem;
|
||||
struct reserved_mem *rmem;
|
||||
void __iomem *cpu_addr;
|
||||
int a;
|
||||
int a, i = 0;
|
||||
u64 da;
|
||||
|
||||
/* Remap required addresses */
|
||||
@@ -724,49 +691,40 @@ static int imx_dsp_rproc_add_carveout(struct imx_dsp_rproc *priv)
|
||||
rproc_add_carveout(rproc, mem);
|
||||
}
|
||||
|
||||
of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
|
||||
while (of_phandle_iterator_next(&it) == 0) {
|
||||
while (1) {
|
||||
int err;
|
||||
struct resource res;
|
||||
|
||||
err = of_reserved_mem_region_to_resource(np, i++, &res);
|
||||
if (err)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Ignore the first memory region which will be used vdev buffer.
|
||||
* No need to do extra handlings, rproc_add_virtio_dev will handle it.
|
||||
*/
|
||||
if (!strcmp(it.node->name, "vdev0buffer"))
|
||||
if (strstarts(res.name, "vdev0buffer"))
|
||||
continue;
|
||||
|
||||
rmem = of_reserved_mem_lookup(it.node);
|
||||
if (!rmem) {
|
||||
of_node_put(it.node);
|
||||
dev_err(dev, "unable to acquire memory-region\n");
|
||||
if (imx_dsp_rproc_sys_to_da(priv, res.start, resource_size(&res), &da))
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (imx_dsp_rproc_sys_to_da(priv, rmem->base, rmem->size, &da)) {
|
||||
of_node_put(it.node);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
|
||||
if (!cpu_addr) {
|
||||
of_node_put(it.node);
|
||||
dev_err(dev, "failed to map memory %p\n", &rmem->base);
|
||||
return -ENOMEM;
|
||||
cpu_addr = devm_ioremap_resource_wc(dev, &res);
|
||||
if (IS_ERR(cpu_addr)) {
|
||||
dev_err(dev, "failed to map memory %pR\n", &res);
|
||||
return PTR_ERR(cpu_addr);
|
||||
}
|
||||
|
||||
/* Register memory region */
|
||||
mem = rproc_mem_entry_init(dev, (void __force *)cpu_addr, (dma_addr_t)rmem->base,
|
||||
rmem->size, da, NULL, NULL, it.node->name);
|
||||
|
||||
if (mem) {
|
||||
rproc_coredump_add_segment(rproc, da, rmem->size);
|
||||
} else {
|
||||
of_node_put(it.node);
|
||||
mem = rproc_mem_entry_init(dev, (void __force *)cpu_addr, (dma_addr_t)res.start,
|
||||
resource_size(&res), da, NULL, NULL,
|
||||
"%.*s", strchrnul(res.name, '@') - res.name, res.name);
|
||||
if (!mem)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
rproc_coredump_add_segment(rproc, da, resource_size(&res));
|
||||
rproc_add_carveout(rproc, mem);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
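One detail worth unpacking in the carveout loop above: the "%.*s" format trims the unit address from the reserved-memory region name. For a hypothetical node named "dsp_reserved@92400000", strchrnul(res.name, '@') - res.name is 12, so the carveout is registered as "dsp_reserved", matching the bare node name that the old it.node->name code produced.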
/* Prepare function for rproc_ops */
|
||||
@@ -784,7 +742,7 @@ static int imx_dsp_rproc_prepare(struct rproc *rproc)
|
||||
|
||||
pm_runtime_get_sync(dev);
|
||||
|
||||
return 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Unprepare function for rproc_ops */
|
||||
@@ -792,7 +750,7 @@ static int imx_dsp_rproc_unprepare(struct rproc *rproc)
|
||||
{
|
||||
pm_runtime_put_sync(rproc->dev.parent);
|
||||
|
||||
return 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Kick function for rproc_ops */
|
||||
@@ -1062,14 +1020,50 @@ static const struct rproc_ops imx_dsp_rproc_ops = {
|
||||
static int imx_dsp_attach_pm_domains(struct imx_dsp_rproc *priv)
|
||||
{
|
||||
struct device *dev = priv->rproc->dev.parent;
|
||||
int ret;
|
||||
|
||||
/* A single PM domain is already attached. */
|
||||
if (dev->pm_domain)
|
||||
return 0;
|
||||
|
||||
ret = dev_pm_domain_attach_list(dev, NULL, &priv->pd_list);
|
||||
return ret < 0 ? ret : 0;
|
||||
return devm_pm_domain_attach_list(dev, NULL, &priv->pd_list);
|
||||
}
|
||||
|
||||
static int imx_dsp_rproc_mmio_detect_mode(struct rproc *rproc)
|
||||
{
|
||||
struct imx_dsp_rproc *priv = rproc->priv;
|
||||
struct device *dev = rproc->dev.parent;
|
||||
struct regmap *regmap;
|
||||
|
||||
regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "fsl,dsp-ctrl");
|
||||
if (IS_ERR(regmap)) {
|
||||
dev_err(dev, "failed to find syscon\n");
|
||||
return PTR_ERR(regmap);
|
||||
}
|
||||
|
||||
priv->regmap = regmap;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int imx_dsp_rproc_reset_ctrl_detect_mode(struct rproc *rproc)
|
||||
{
|
||||
struct imx_dsp_rproc *priv = rproc->priv;
|
||||
struct device *dev = rproc->dev.parent;
|
||||
|
||||
priv->run_stall = devm_reset_control_get_exclusive(dev, "runstall");
|
||||
if (IS_ERR(priv->run_stall)) {
|
||||
dev_err(dev, "Failed to get DSP runstall reset control\n");
|
||||
return PTR_ERR(priv->run_stall);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int imx_dsp_rproc_scu_api_detect_mode(struct rproc *rproc)
|
||||
{
|
||||
struct imx_dsp_rproc *priv = rproc->priv;
|
||||
|
||||
return imx_scu_get_handle(&priv->ipc_handle);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -1087,38 +1081,12 @@ static int imx_dsp_attach_pm_domains(struct imx_dsp_rproc *priv)
|
||||
static int imx_dsp_rproc_detect_mode(struct imx_dsp_rproc *priv)
|
||||
{
|
||||
const struct imx_dsp_rproc_dcfg *dsp_dcfg = priv->dsp_dcfg;
|
||||
struct device *dev = priv->rproc->dev.parent;
|
||||
struct regmap *regmap;
|
||||
int ret = 0;
|
||||
const struct imx_rproc_dcfg *dcfg = dsp_dcfg->dcfg;
|
||||
|
||||
switch (dsp_dcfg->dcfg->method) {
|
||||
case IMX_RPROC_SCU_API:
|
||||
ret = imx_scu_get_handle(&priv->ipc_handle);
|
||||
if (ret)
|
||||
return ret;
|
||||
break;
|
||||
case IMX_RPROC_MMIO:
|
||||
regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "fsl,dsp-ctrl");
|
||||
if (IS_ERR(regmap)) {
|
||||
dev_err(dev, "failed to find syscon\n");
|
||||
return PTR_ERR(regmap);
|
||||
}
|
||||
if (dcfg->ops && dcfg->ops->detect_mode)
|
||||
return dcfg->ops->detect_mode(priv->rproc);
|
||||
|
||||
priv->regmap = regmap;
|
||||
break;
|
||||
case IMX_RPROC_RESET_CONTROLLER:
|
||||
priv->run_stall = devm_reset_control_get_exclusive(dev, "runstall");
|
||||
if (IS_ERR(priv->run_stall)) {
|
||||
dev_err(dev, "Failed to get DSP runstall reset control\n");
|
||||
return PTR_ERR(priv->run_stall);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
ret = -EOPNOTSUPP;
|
||||
break;
|
||||
}
|
||||
|
||||
return ret;
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static const char *imx_dsp_clks_names[DSP_RPROC_CLK_MAX] = {
|
||||
@@ -1152,11 +1120,8 @@ static int imx_dsp_rproc_probe(struct platform_device *pdev)
|
||||
return -ENODEV;
|
||||
|
||||
ret = rproc_of_parse_firmware(dev, 0, &fw_name);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to parse firmware-name property, ret = %d\n",
|
||||
ret);
|
||||
return ret;
|
||||
}
|
||||
if (ret)
|
||||
return dev_err_probe(dev, ret, "failed to parse firmware-name property\n");
|
||||
|
||||
rproc = devm_rproc_alloc(dev, "imx-dsp-rproc", &imx_dsp_rproc_ops,
|
||||
fw_name, sizeof(*priv));
|
||||
@@ -1179,52 +1144,28 @@ static int imx_dsp_rproc_probe(struct platform_device *pdev)
|
||||
INIT_WORK(&priv->rproc_work, imx_dsp_rproc_vq_work);
|
||||
|
||||
ret = imx_dsp_rproc_detect_mode(priv);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed on imx_dsp_rproc_detect_mode\n");
|
||||
return ret;
|
||||
}
|
||||
if (ret)
|
||||
return dev_err_probe(dev, ret, "failed on imx_dsp_rproc_detect_mode\n");
|
||||
|
||||
/* There are multiple power domains required by DSP on some platform */
|
||||
ret = imx_dsp_attach_pm_domains(priv);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed on imx_dsp_attach_pm_domains\n");
|
||||
return ret;
|
||||
}
|
||||
if (ret < 0)
|
||||
return dev_err_probe(dev, ret, "failed on imx_dsp_attach_pm_domains\n");
|
||||
|
||||
/* Get clocks */
|
||||
ret = imx_dsp_rproc_clk_get(priv);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed on imx_dsp_rproc_clk_get\n");
|
||||
goto err_detach_domains;
|
||||
}
|
||||
if (ret)
|
||||
return dev_err_probe(dev, ret, "failed on imx_dsp_rproc_clk_get\n");
|
||||
|
||||
init_completion(&priv->pm_comp);
|
||||
rproc->auto_boot = false;
|
||||
ret = rproc_add(rproc);
|
||||
if (ret) {
|
||||
dev_err(dev, "rproc_add failed\n");
|
||||
goto err_detach_domains;
|
||||
}
|
||||
ret = devm_rproc_add(dev, rproc);
|
||||
if (ret)
|
||||
return dev_err_probe(dev, ret, "rproc_add failed\n");
|
||||
|
||||
rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_XTENSA);
|
||||
|
||||
pm_runtime_enable(dev);
|
||||
|
||||
return 0;
|
||||
|
||||
err_detach_domains:
|
||||
dev_pm_domain_detach_list(priv->pd_list);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void imx_dsp_rproc_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct rproc *rproc = platform_get_drvdata(pdev);
|
||||
struct imx_dsp_rproc *priv = rproc->priv;
|
||||
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
rproc_del(rproc);
|
||||
dev_pm_domain_detach_list(priv->pd_list);
|
||||
return devm_pm_runtime_enable(dev);
|
||||
}
|
||||
|
||||
/* pm runtime functions */
|
||||
@@ -1364,6 +1305,74 @@ static const struct dev_pm_ops imx_dsp_rproc_pm_ops = {
	RUNTIME_PM_OPS(imx_dsp_runtime_suspend, imx_dsp_runtime_resume, NULL)
};

static const struct imx_rproc_plat_ops imx_dsp_rproc_ops_mmio = {
	.start		= imx_dsp_rproc_mmio_start,
	.stop		= imx_dsp_rproc_mmio_stop,
	.detect_mode	= imx_dsp_rproc_mmio_detect_mode,
};

static const struct imx_rproc_plat_ops imx_dsp_rproc_ops_reset_ctrl = {
	.start		= imx_dsp_rproc_reset_ctrl_start,
	.stop		= imx_dsp_rproc_reset_ctrl_stop,
	.detect_mode	= imx_dsp_rproc_reset_ctrl_detect_mode,
};

static const struct imx_rproc_plat_ops imx_dsp_rproc_ops_scu_api = {
	.start		= imx_dsp_rproc_scu_api_start,
	.stop		= imx_dsp_rproc_scu_api_stop,
	.detect_mode	= imx_dsp_rproc_scu_api_detect_mode,
};

/* Specific configuration for i.MX8MP */
static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8mp = {
	.att		= imx_dsp_rproc_att_imx8mp,
	.att_size	= ARRAY_SIZE(imx_dsp_rproc_att_imx8mp),
	.ops		= &imx_dsp_rproc_ops_reset_ctrl,
};

static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8mp = {
	.dcfg		= &dsp_rproc_cfg_imx8mp,
	.reset		= imx8mp_dsp_reset,
};

/* Specific configuration for i.MX8ULP */
static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8ulp = {
	.src_reg	= IMX8ULP_SIM_LPAV_REG_SYSCTRL0,
	.src_mask	= IMX8ULP_SYSCTRL0_DSP_STALL,
	.src_start	= 0,
	.src_stop	= IMX8ULP_SYSCTRL0_DSP_STALL,
	.att		= imx_dsp_rproc_att_imx8ulp,
	.att_size	= ARRAY_SIZE(imx_dsp_rproc_att_imx8ulp),
	.ops		= &imx_dsp_rproc_ops_mmio,
};

static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8ulp = {
	.dcfg		= &dsp_rproc_cfg_imx8ulp,
	.reset		= imx8ulp_dsp_reset,
};

/* Specific configuration for i.MX8QXP */
static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8qxp = {
	.att		= imx_dsp_rproc_att_imx8qxp,
	.att_size	= ARRAY_SIZE(imx_dsp_rproc_att_imx8qxp),
	.ops		= &imx_dsp_rproc_ops_scu_api,
};

static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8qxp = {
	.dcfg		= &dsp_rproc_cfg_imx8qxp,
};

/* Specific configuration for i.MX8QM */
static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8qm = {
	.att		= imx_dsp_rproc_att_imx8qm,
	.att_size	= ARRAY_SIZE(imx_dsp_rproc_att_imx8qm),
	.ops		= &imx_dsp_rproc_ops_scu_api,
};

static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8qm = {
	.dcfg		= &dsp_rproc_cfg_imx8qm,
};

static const struct of_device_id imx_dsp_rproc_of_match[] = {
	{ .compatible = "fsl,imx8qxp-hifi4", .data = &imx_dsp_rproc_cfg_imx8qxp },
	{ .compatible = "fsl,imx8qm-hifi4", .data = &imx_dsp_rproc_cfg_imx8qm },
@@ -1375,7 +1384,6 @@ MODULE_DEVICE_TABLE(of, imx_dsp_rproc_of_match);

static struct platform_driver imx_dsp_rproc_driver = {
	.probe = imx_dsp_rproc_probe,
	.remove = imx_dsp_rproc_remove,
	.driver = {
		.name = "imx-dsp-rproc",
		.of_match_table = imx_dsp_rproc_of_match,
@@ -93,7 +93,7 @@ struct imx_rproc_mem {
#define ATT_CORE(I)	BIT((I))

static int imx_rproc_xtr_mbox_init(struct rproc *rproc, bool tx_block);
static void imx_rproc_free_mbox(struct rproc *rproc);
static void imx_rproc_free_mbox(void *data);

struct imx_rproc {
	struct device *dev;
@@ -490,50 +490,44 @@ static int imx_rproc_prepare(struct rproc *rproc)
{
	struct imx_rproc *priv = rproc->priv;
	struct device_node *np = priv->dev->of_node;
	struct of_phandle_iterator it;
	struct rproc_mem_entry *mem;
	struct reserved_mem *rmem;
	int i = 0;
	u32 da;

	/* Register associated reserved memory regions */
	of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
	while (of_phandle_iterator_next(&it) == 0) {
	while (1) {
		int err;
		struct resource res;

		err = of_reserved_mem_region_to_resource(np, i++, &res);
		if (err)
			return 0;

		/*
		 * Ignore the first memory region which will be used vdev buffer.
		 * No need to do extra handlings, rproc_add_virtio_dev will handle it.
		 */
		if (!strcmp(it.node->name, "vdev0buffer"))
		if (strstarts(res.name, "vdev0buffer"))
			continue;

		if (!strcmp(it.node->name, "rsc-table"))
		if (strstarts(res.name, "rsc-table"))
			continue;

		rmem = of_reserved_mem_lookup(it.node);
		if (!rmem) {
			of_node_put(it.node);
			dev_err(priv->dev, "unable to acquire memory-region\n");
			return -EINVAL;
		}

		/* No need to translate pa to da, i.MX use same map */
		da = rmem->base;
		da = res.start;

		/* Register memory region */
		mem = rproc_mem_entry_init(priv->dev, NULL, (dma_addr_t)rmem->base, rmem->size, da,
		mem = rproc_mem_entry_init(priv->dev, NULL, (dma_addr_t)res.start,
					   resource_size(&res), da,
					   imx_rproc_mem_alloc, imx_rproc_mem_release,
					   it.node->name);

		if (mem) {
			rproc_coredump_add_segment(rproc, da, rmem->size);
		} else {
			of_node_put(it.node);
					   "%.*s", strchrnul(res.name, '@') - res.name,
					   res.name);
		if (!mem)
			return -ENOMEM;
		}

		rproc_coredump_add_segment(rproc, da, resource_size(&res));
		rproc_add_carveout(rproc, mem);
	}

	return 0;
}

static int imx_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
@@ -575,13 +569,9 @@ static int imx_rproc_attach(struct rproc *rproc)
	return imx_rproc_xtr_mbox_init(rproc, true);
}

static int imx_rproc_detach(struct rproc *rproc)
static int imx_rproc_scu_api_detach(struct rproc *rproc)
{
	struct imx_rproc *priv = rproc->priv;
	const struct imx_rproc_dcfg *dcfg = priv->dcfg;

	if (dcfg->method != IMX_RPROC_SCU_API)
		return -EOPNOTSUPP;

	if (imx_sc_rm_is_resource_owned(priv->ipc_handle, priv->rsrc_id))
		return -EOPNOTSUPP;
@@ -591,6 +581,17 @@ static int imx_rproc_detach(struct rproc *rproc)
	return 0;
}

static int imx_rproc_detach(struct rproc *rproc)
{
	struct imx_rproc *priv = rproc->priv;
	const struct imx_rproc_dcfg *dcfg = priv->dcfg;

	if (!dcfg->ops || !dcfg->ops->detach)
		return -EOPNOTSUPP;

	return dcfg->ops->detach(rproc);
}

static struct resource_table *imx_rproc_get_loaded_rsc_table(struct rproc *rproc, size_t *table_sz)
{
	struct imx_rproc *priv = rproc->priv;
@@ -664,47 +665,37 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
	}

	/* memory-region is optional property */
	nph = of_count_phandle_with_args(np, "memory-region", NULL);
	nph = of_reserved_mem_region_count(np);
	if (nph <= 0)
		return 0;

	/* remap optional addresses */
	for (a = 0; a < nph; a++) {
		struct device_node *node;
		struct resource res;

		node = of_parse_phandle(np, "memory-region", a);
		if (!node)
			continue;
		/* Not map vdevbuffer, vdevring region */
		if (!strncmp(node->name, "vdev", strlen("vdev"))) {
			of_node_put(node);
			continue;
		}
		err = of_address_to_resource(node, 0, &res);
		err = of_reserved_mem_region_to_resource(np, a, &res);
		if (err) {
			dev_err(dev, "unable to resolve memory region\n");
			of_node_put(node);
			return err;
		}

		if (b >= IMX_RPROC_MEM_MAX) {
			of_node_put(node);
		/* Not map vdevbuffer, vdevring region */
		if (strstarts(res.name, "vdev"))
			continue;

		if (b >= IMX_RPROC_MEM_MAX)
			break;
		}

		/* Not use resource version, because we might share region */
		priv->mem[b].cpu_addr = devm_ioremap_wc(&pdev->dev, res.start, resource_size(&res));
		priv->mem[b].cpu_addr = devm_ioremap_resource_wc(&pdev->dev, &res);
		if (!priv->mem[b].cpu_addr) {
			dev_err(dev, "failed to remap %pr\n", &res);
			of_node_put(node);
			return -ENOMEM;
		}
		priv->mem[b].sys_addr = res.start;
		priv->mem[b].size = resource_size(&res);
		if (!strcmp(node->name, "rsc-table"))
		if (!strcmp(res.name, "rsc-table"))
			priv->rsc_table = priv->mem[b].cpu_addr;
		of_node_put(node);
		b++;
	}

@@ -780,8 +771,9 @@ static int imx_rproc_xtr_mbox_init(struct rproc *rproc, bool tx_block)
	return 0;
}

static void imx_rproc_free_mbox(struct rproc *rproc)
static void imx_rproc_free_mbox(void *data)
{
	struct rproc *rproc = data;
	struct imx_rproc *priv = rproc->priv;

	if (priv->tx_ch) {
@@ -795,13 +787,9 @@ static void imx_rproc_free_mbox(struct rproc *rproc)
	}
}

static void imx_rproc_put_scu(struct rproc *rproc)
static void imx_rproc_put_scu(void *data)
{
	struct imx_rproc *priv = rproc->priv;
	const struct imx_rproc_dcfg *dcfg = priv->dcfg;

	if (dcfg->method != IMX_RPROC_SCU_API)
		return;
	struct imx_rproc *priv = data;

	if (imx_sc_rm_is_resource_owned(priv->ipc_handle, priv->rsrc_id)) {
		dev_pm_domain_detach_list(priv->pd_list);
@@ -943,6 +931,10 @@ static int imx_rproc_scu_api_detect_mode(struct rproc *rproc)
	else
		priv->core_index = 0;

	ret = devm_add_action_or_reset(dev, imx_rproc_put_scu, priv);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to add action for put scu\n");

	/*
	 * If Mcore resource is not owned by Acore partition, It is kicked by ROM,
	 * and Linux could only do IPC with Mcore and nothing else.
@@ -1001,35 +993,6 @@ static int imx_rproc_detect_mode(struct imx_rproc *priv)
	return dcfg->ops->detect_mode(priv->rproc);
}

static int imx_rproc_clk_enable(struct imx_rproc *priv)
{
	const struct imx_rproc_dcfg *dcfg = priv->dcfg;
	struct device *dev = priv->dev;
	int ret;

	/* Remote core is not under control of Linux or it is managed by SCU API */
	if (dcfg->method == IMX_RPROC_NONE || dcfg->method == IMX_RPROC_SCU_API)
		return 0;

	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(dev, "Failed to get clock\n");
		return PTR_ERR(priv->clk);
	}

	/*
	 * clk for M4 block including memory. Should be
	 * enabled before .start for FW transfer.
	 */
	ret = clk_prepare_enable(priv->clk);
	if (ret) {
		dev_err(dev, "Failed to enable clock\n");
		return ret;
	}

	return 0;
}

static int imx_rproc_sys_off_handler(struct sys_off_data *data)
{
	struct rproc *rproc = data->cb_data;
@@ -1046,6 +1009,13 @@ static int imx_rproc_sys_off_handler(struct sys_off_data *data)
	return NOTIFY_DONE;
}

static void imx_rproc_destroy_workqueue(void *data)
{
	struct workqueue_struct *workqueue = data;

	destroy_workqueue(workqueue);
}

static int imx_rproc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
@@ -1077,25 +1047,38 @@ static int imx_rproc_probe(struct platform_device *pdev)
		return -ENOMEM;
	}

	ret = devm_add_action_or_reset(dev, imx_rproc_destroy_workqueue, priv->workqueue);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to add devm destroy workqueue action\n");

	INIT_WORK(&priv->rproc_work, imx_rproc_vq_work);

	ret = imx_rproc_xtr_mbox_init(rproc, true);
	if (ret)
		goto err_put_wkq;
		return ret;

	ret = devm_add_action_or_reset(dev, imx_rproc_free_mbox, rproc);
	if (ret)
		return dev_err_probe(dev, ret,
				     "Failed to add devm free mbox action: %d\n", ret);

	ret = imx_rproc_addr_init(priv, pdev);
	if (ret) {
		dev_err(dev, "failed on imx_rproc_addr_init\n");
		goto err_put_mbox;
	}
	if (ret)
		return dev_err_probe(dev, ret, "failed on imx_rproc_addr_init\n");

	ret = imx_rproc_detect_mode(priv);
	if (ret)
		goto err_put_mbox;
		return dev_err_probe(dev, ret, "failed on detect mode\n");

	ret = imx_rproc_clk_enable(priv);
	if (ret)
		goto err_put_scu;
	/*
	 * Handle clocks when remote core is under control of Linux AND the
	 * clocks are not managed by system firmware.
	 */
	if (dcfg->flags & IMX_RPROC_NEED_CLKS) {
		priv->clk = devm_clk_get_enabled(dev, NULL);
		if (IS_ERR(priv->clk))
			return dev_err_probe(dev, PTR_ERR(priv->clk), "Failed to enable clock\n");
	}

	if (rproc->state != RPROC_DETACHED)
		rproc->auto_boot = of_property_read_bool(np, "fsl,auto-boot");
@@ -1110,45 +1093,32 @@ static int imx_rproc_probe(struct platform_device *pdev)
		ret = devm_register_sys_off_handler(dev, SYS_OFF_MODE_POWER_OFF_PREPARE,
						    SYS_OFF_PRIO_DEFAULT,
						    imx_rproc_sys_off_handler, rproc);
		if (ret) {
			dev_err(dev, "register power off handler failure\n");
			goto err_put_clk;
		}
		if (ret)
			return dev_err_probe(dev, ret, "register power off handler failure\n");

		ret = devm_register_sys_off_handler(dev, SYS_OFF_MODE_RESTART_PREPARE,
						    SYS_OFF_PRIO_DEFAULT,
						    imx_rproc_sys_off_handler, rproc);
		if (ret) {
			dev_err(dev, "register restart handler failure\n");
			goto err_put_clk;
		}
		if (ret)
			return dev_err_probe(dev, ret, "register restart handler failure\n");
	}

	if (dcfg->method == IMX_RPROC_SCU_API) {
		pm_runtime_enable(dev);
		ret = pm_runtime_resume_and_get(dev);
		if (ret) {
			dev_err(dev, "pm_runtime get failed: %d\n", ret);
			goto err_put_clk;
		}
	}
	pm_runtime_enable(dev);
	ret = pm_runtime_resume_and_get(dev);
	if (ret)
		return dev_err_probe(dev, ret, "pm_runtime get failed\n");

	ret = rproc_add(rproc);
	ret = devm_rproc_add(dev, rproc);
	if (ret) {
		dev_err(dev, "rproc_add failed\n");
		goto err_put_clk;
		goto err_put_pm;
	}

	return 0;

err_put_clk:
	clk_disable_unprepare(priv->clk);
err_put_scu:
	imx_rproc_put_scu(rproc);
err_put_mbox:
	imx_rproc_free_mbox(rproc);
err_put_wkq:
	destroy_workqueue(priv->workqueue);
err_put_pm:
	pm_runtime_disable(dev);
	pm_runtime_put_noidle(dev);

	return ret;
}
@@ -1158,15 +1128,8 @@ static void imx_rproc_remove(struct platform_device *pdev)
	struct rproc *rproc = platform_get_drvdata(pdev);
	struct imx_rproc *priv = rproc->priv;

	if (priv->dcfg->method == IMX_RPROC_SCU_API) {
		pm_runtime_disable(priv->dev);
		pm_runtime_put(priv->dev);
	}
	clk_disable_unprepare(priv->clk);
	rproc_del(rproc);
	imx_rproc_put_scu(rproc);
	imx_rproc_free_mbox(rproc);
	destroy_workqueue(priv->workqueue);
	pm_runtime_disable(priv->dev);
	pm_runtime_put_noidle(priv->dev);
}

static const struct imx_rproc_plat_ops imx_rproc_ops_arm_smc = {
@@ -1184,6 +1147,7 @@ static const struct imx_rproc_plat_ops imx_rproc_ops_mmio = {
static const struct imx_rproc_plat_ops imx_rproc_ops_scu_api = {
	.start		= imx_rproc_scu_api_start,
	.stop		= imx_rproc_scu_api_stop,
	.detach		= imx_rproc_scu_api_detach,
	.detect_mode	= imx_rproc_scu_api_detect_mode,
};

@@ -1196,15 +1160,15 @@ static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mn_mmio = {
	.gpr_wait	= IMX8M_GPR22_CM7_CPUWAIT,
	.att		= imx_rproc_att_imx8mn,
	.att_size	= ARRAY_SIZE(imx_rproc_att_imx8mn),
	.method		= IMX_RPROC_MMIO,
	.ops		= &imx_rproc_ops_mmio,
	.flags		= IMX_RPROC_NEED_CLKS,
};

static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mn = {
	.att		= imx_rproc_att_imx8mn,
	.att_size	= ARRAY_SIZE(imx_rproc_att_imx8mn),
	.method		= IMX_RPROC_SMC,
	.ops		= &imx_rproc_ops_arm_smc,
	.flags		= IMX_RPROC_NEED_CLKS,
};

static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mq = {
@@ -1214,34 +1178,30 @@ static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mq = {
	.src_stop	= IMX7D_M4_STOP,
	.att		= imx_rproc_att_imx8mq,
	.att_size	= ARRAY_SIZE(imx_rproc_att_imx8mq),
	.method		= IMX_RPROC_MMIO,
	.ops		= &imx_rproc_ops_mmio,
	.flags		= IMX_RPROC_NEED_CLKS,
};

static const struct imx_rproc_dcfg imx_rproc_cfg_imx8qm = {
	.att		= imx_rproc_att_imx8qm,
	.att_size	= ARRAY_SIZE(imx_rproc_att_imx8qm),
	.method		= IMX_RPROC_SCU_API,
	.ops		= &imx_rproc_ops_scu_api,
};

static const struct imx_rproc_dcfg imx_rproc_cfg_imx8qxp = {
	.att		= imx_rproc_att_imx8qxp,
	.att_size	= ARRAY_SIZE(imx_rproc_att_imx8qxp),
	.method		= IMX_RPROC_SCU_API,
	.ops		= &imx_rproc_ops_scu_api,
};

static const struct imx_rproc_dcfg imx_rproc_cfg_imx8ulp = {
	.att		= imx_rproc_att_imx8ulp,
	.att_size	= ARRAY_SIZE(imx_rproc_att_imx8ulp),
	.method		= IMX_RPROC_NONE,
};

static const struct imx_rproc_dcfg imx_rproc_cfg_imx7ulp = {
	.att		= imx_rproc_att_imx7ulp,
	.att_size	= ARRAY_SIZE(imx_rproc_att_imx7ulp),
	.method		= IMX_RPROC_NONE,
	.flags		= IMX_RPROC_NEED_SYSTEM_OFF,
};

@@ -1252,8 +1212,8 @@ static const struct imx_rproc_dcfg imx_rproc_cfg_imx7d = {
	.src_stop	= IMX7D_M4_STOP,
	.att		= imx_rproc_att_imx7d,
	.att_size	= ARRAY_SIZE(imx_rproc_att_imx7d),
	.method		= IMX_RPROC_MMIO,
	.ops		= &imx_rproc_ops_mmio,
	.flags		= IMX_RPROC_NEED_CLKS,
};

static const struct imx_rproc_dcfg imx_rproc_cfg_imx6sx = {
@@ -1263,15 +1223,15 @@ static const struct imx_rproc_dcfg imx_rproc_cfg_imx6sx = {
	.src_stop	= IMX6SX_M4_STOP,
	.att		= imx_rproc_att_imx6sx,
	.att_size	= ARRAY_SIZE(imx_rproc_att_imx6sx),
	.method		= IMX_RPROC_MMIO,
	.ops		= &imx_rproc_ops_mmio,
	.flags		= IMX_RPROC_NEED_CLKS,
};

static const struct imx_rproc_dcfg imx_rproc_cfg_imx93 = {
	.att		= imx_rproc_att_imx93,
	.att_size	= ARRAY_SIZE(imx_rproc_att_imx93),
	.method		= IMX_RPROC_SMC,
	.ops		= &imx_rproc_ops_arm_smc,
	.flags		= IMX_RPROC_NEED_CLKS,
};

static const struct of_device_id imx_rproc_of_match[] = {

@@ -15,25 +15,14 @@ struct imx_rproc_att {
	int flags;
};

/* Remote core start/stop method */
enum imx_rproc_method {
	IMX_RPROC_NONE,
	/* Through syscon regmap */
	IMX_RPROC_MMIO,
	/* Through ARM SMCCC */
	IMX_RPROC_SMC,
	/* Through System Control Unit API */
	IMX_RPROC_SCU_API,
	/* Through Reset Controller API */
	IMX_RPROC_RESET_CONTROLLER,
};

/* dcfg flags */
#define IMX_RPROC_NEED_SYSTEM_OFF	BIT(0)
#define IMX_RPROC_NEED_CLKS		BIT(1)

struct imx_rproc_plat_ops {
	int (*start)(struct rproc *rproc);
	int (*stop)(struct rproc *rproc);
	int (*detach)(struct rproc *rproc);
	int (*detect_mode)(struct rproc *rproc);
};

@@ -46,7 +35,6 @@ struct imx_rproc_dcfg {
	u32 gpr_wait;
	const struct imx_rproc_att *att;
	size_t att_size;
	enum imx_rproc_method method;
	u32 flags;
	const struct imx_rproc_plat_ops *ops;
};
|
||||
#include <linux/remoteproc.h>
|
||||
#include <linux/remoteproc/mtk_scp.h>
|
||||
#include <linux/rpmsg/mtk_rpmsg.h>
|
||||
#include <linux/string.h>
|
||||
|
||||
#include "mtk_common.h"
|
||||
#include "remoteproc_internal.h"
|
||||
@@ -1093,22 +1094,74 @@ static void scp_remove_rpmsg_subdev(struct mtk_scp *scp)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* scp_get_default_fw_path() - Get default SCP firmware path
|
||||
* @dev: SCP Device
|
||||
* @core_id: SCP Core number
|
||||
*
|
||||
* This function generates a path based on the following format:
|
||||
* mediatek/(soc_model)/scp(_cX).img; for multi-core or
|
||||
* mediatek/(soc_model)/scp.img for single core SCP HW
|
||||
*
|
||||
* Return: A devm allocated string containing the full path to
|
||||
* a SCP firmware or an error pointer
|
||||
*/
|
||||
static const char *scp_get_default_fw_path(struct device *dev, int core_id)
|
||||
{
|
||||
struct device_node *np = core_id < 0 ? dev->of_node : dev->parent->of_node;
|
||||
const char *compatible, *soc;
|
||||
char scp_fw_file[7];
|
||||
int ret;
|
||||
|
||||
/* Use only the first compatible string */
|
||||
ret = of_property_read_string_index(np, "compatible", 0, &compatible);
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
|
||||
/* If the compatible string's length is implausible bail out early */
|
||||
if (strlen(compatible) < strlen("mediatek,mtXXXX-scp"))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
/* If the compatible string starts with "mediatek,mt" assume that it's ok */
|
||||
if (!str_has_prefix(compatible, "mediatek,mt"))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
if (core_id >= 0)
|
||||
ret = snprintf(scp_fw_file, sizeof(scp_fw_file), "scp_c%d", core_id);
|
||||
else
|
||||
ret = snprintf(scp_fw_file, sizeof(scp_fw_file), "scp");
|
||||
if (ret >= sizeof(scp_fw_file))
|
||||
return ERR_PTR(-ENAMETOOLONG);
|
||||
|
||||
/* Not using strchr here, as strlen of a const gets optimized by compiler */
|
||||
soc = &compatible[strlen("mediatek,")];
|
||||
|
||||
return devm_kasprintf(dev, GFP_KERNEL, "mediatek/%.*s/%s.img",
|
||||
(int)strlen("mtXXXX"), soc, scp_fw_file);
|
||||
}
|
||||
|
||||
static struct mtk_scp *scp_rproc_init(struct platform_device *pdev,
|
||||
struct mtk_scp_of_cluster *scp_cluster,
|
||||
const struct mtk_scp_of_data *of_data)
|
||||
const struct mtk_scp_of_data *of_data,
|
||||
int core_id)
|
||||
{
|
||||
struct device *dev = &pdev->dev;
|
||||
struct device_node *np = dev->of_node;
|
||||
struct mtk_scp *scp;
|
||||
struct rproc *rproc;
|
||||
struct resource *res;
|
||||
const char *fw_name = "scp.img";
|
||||
const char *fw_name;
|
||||
int ret, i;
|
||||
const struct mtk_scp_sizes_data *scp_sizes;
|
||||
|
||||
ret = rproc_of_parse_firmware(dev, 0, &fw_name);
|
||||
if (ret < 0 && ret != -EINVAL)
|
||||
return ERR_PTR(ret);
|
||||
if (ret) {
|
||||
fw_name = scp_get_default_fw_path(dev, core_id);
|
||||
if (IS_ERR(fw_name)) {
|
||||
dev_err(dev, "Cannot get firmware path: %ld\n", PTR_ERR(fw_name));
|
||||
return ERR_CAST(fw_name);
|
||||
}
|
||||
}
|
||||
|
||||
rproc = devm_rproc_alloc(dev, np->name, &scp_ops, fw_name, sizeof(*scp));
|
||||
if (!rproc) {
|
||||
@@ -1212,7 +1265,7 @@ static int scp_add_single_core(struct platform_device *pdev,
|
||||
struct mtk_scp *scp;
|
||||
int ret;
|
||||
|
||||
scp = scp_rproc_init(pdev, scp_cluster, of_device_get_match_data(dev));
|
||||
scp = scp_rproc_init(pdev, scp_cluster, of_device_get_match_data(dev), -1);
|
||||
if (IS_ERR(scp))
|
||||
return PTR_ERR(scp);
|
||||
|
||||
@@ -1259,7 +1312,7 @@ static int scp_add_multi_core(struct platform_device *pdev,
|
||||
goto init_fail;
|
||||
}
|
||||
|
||||
scp = scp_rproc_init(cpdev, scp_cluster, cluster_of_data[core_id]);
|
||||
scp = scp_rproc_init(cpdev, scp_cluster, cluster_of_data[core_id], core_id);
|
||||
put_device(&cpdev->dev);
|
||||
if (IS_ERR(scp)) {
|
||||
ret = PTR_ERR(scp);
|
||||
|
||||
@@ -555,7 +555,6 @@ static void omap_rproc_kick(struct rproc *rproc, int vqid)
|
||||
dev_err(dev, "failed to send mailbox message, status = %d\n",
|
||||
ret);
|
||||
|
||||
pm_runtime_mark_last_busy(dev);
|
||||
pm_runtime_put_autosuspend(dev);
|
||||
}
|
||||
|
||||
@@ -656,7 +655,6 @@ static int omap_rproc_start(struct rproc *rproc)
|
||||
pm_runtime_use_autosuspend(dev);
|
||||
pm_runtime_get_noresume(dev);
|
||||
pm_runtime_enable(dev);
|
||||
pm_runtime_mark_last_busy(dev);
|
||||
pm_runtime_put_autosuspend(dev);
|
||||
|
||||
return 0;
|
||||
@@ -714,7 +712,6 @@ enable_device:
|
||||
reset_control_deassert(oproc->reset);
|
||||
out:
|
||||
/* schedule the next auto-suspend */
|
||||
pm_runtime_mark_last_busy(dev);
|
||||
pm_runtime_put_autosuspend(dev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -625,27 +625,22 @@ static int adsp_init_mmio(struct qcom_adsp *adsp,
|
||||
|
||||
static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
|
||||
{
|
||||
struct reserved_mem *rmem = NULL;
|
||||
struct device_node *node;
|
||||
int ret;
|
||||
struct resource res;
|
||||
|
||||
node = of_parse_phandle(adsp->dev->of_node, "memory-region", 0);
|
||||
if (node)
|
||||
rmem = of_reserved_mem_lookup(node);
|
||||
of_node_put(node);
|
||||
|
||||
if (!rmem) {
|
||||
ret = of_reserved_mem_region_to_resource(adsp->dev->of_node, 0, &res);
|
||||
if (ret) {
|
||||
dev_err(adsp->dev, "unable to resolve memory-region\n");
|
||||
return -EINVAL;
|
||||
return ret;
|
||||
}
|
||||
|
||||
adsp->mem_phys = adsp->mem_reloc = rmem->base;
|
||||
adsp->mem_size = rmem->size;
|
||||
adsp->mem_region = devm_ioremap_wc(adsp->dev,
|
||||
adsp->mem_phys, adsp->mem_size);
|
||||
if (!adsp->mem_region) {
|
||||
dev_err(adsp->dev, "unable to map memory region: %pa+%zx\n",
|
||||
&rmem->base, adsp->mem_size);
|
||||
return -EBUSY;
|
||||
adsp->mem_phys = adsp->mem_reloc = res.start;
|
||||
adsp->mem_size = resource_size(&res);
|
||||
adsp->mem_region = devm_ioremap_resource_wc(adsp->dev, &res);
|
||||
if (IS_ERR(adsp->mem_region)) {
|
||||
dev_err(adsp->dev, "unable to map memory region: %pR\n", &res);
|
||||
return PTR_ERR(adsp->mem_region);
|
||||
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
@@ -1970,8 +1970,8 @@ static int q6v5_init_reset(struct q6v5 *qproc)
|
||||
static int q6v5_alloc_memory_region(struct q6v5 *qproc)
|
||||
{
|
||||
struct device_node *child;
|
||||
struct reserved_mem *rmem;
|
||||
struct device_node *node;
|
||||
struct resource res;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* In the absence of mba/mpss sub-child, extract the mba and mpss
|
||||
@@ -1979,71 +1979,49 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
|
||||
*/
|
||||
child = of_get_child_by_name(qproc->dev->of_node, "mba");
|
||||
if (!child) {
|
||||
node = of_parse_phandle(qproc->dev->of_node,
|
||||
"memory-region", 0);
|
||||
ret = of_reserved_mem_region_to_resource(qproc->dev->of_node, 0, &res);
|
||||
} else {
|
||||
node = of_parse_phandle(child, "memory-region", 0);
|
||||
ret = of_reserved_mem_region_to_resource(child, 0, &res);
|
||||
of_node_put(child);
|
||||
}
|
||||
|
||||
if (!node) {
|
||||
dev_err(qproc->dev, "no mba memory-region specified\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
rmem = of_reserved_mem_lookup(node);
|
||||
of_node_put(node);
|
||||
if (!rmem) {
|
||||
if (ret) {
|
||||
dev_err(qproc->dev, "unable to resolve mba region\n");
|
||||
return -EINVAL;
|
||||
return ret;
|
||||
}
|
||||
|
||||
qproc->mba_phys = rmem->base;
|
||||
qproc->mba_size = rmem->size;
|
||||
qproc->mba_phys = res.start;
|
||||
qproc->mba_size = resource_size(&res);
|
||||
|
||||
if (!child) {
|
||||
node = of_parse_phandle(qproc->dev->of_node,
|
||||
"memory-region", 1);
|
||||
ret = of_reserved_mem_region_to_resource(qproc->dev->of_node, 1, &res);
|
||||
} else {
|
||||
child = of_get_child_by_name(qproc->dev->of_node, "mpss");
|
||||
node = of_parse_phandle(child, "memory-region", 0);
|
||||
ret = of_reserved_mem_region_to_resource(child, 0, &res);
|
||||
of_node_put(child);
|
||||
}
|
||||
|
||||
if (!node) {
|
||||
dev_err(qproc->dev, "no mpss memory-region specified\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
rmem = of_reserved_mem_lookup(node);
|
||||
of_node_put(node);
|
||||
if (!rmem) {
|
||||
if (ret) {
|
||||
dev_err(qproc->dev, "unable to resolve mpss region\n");
|
||||
return -EINVAL;
|
||||
return ret;
|
||||
}
|
||||
|
||||
qproc->mpss_phys = qproc->mpss_reloc = rmem->base;
|
||||
qproc->mpss_size = rmem->size;
|
||||
qproc->mpss_phys = qproc->mpss_reloc = res.start;
|
||||
qproc->mpss_size = resource_size(&res);
|
||||
|
||||
if (!child) {
|
||||
node = of_parse_phandle(qproc->dev->of_node, "memory-region", 2);
|
||||
ret = of_reserved_mem_region_to_resource(qproc->dev->of_node, 2, &res);
|
||||
} else {
|
||||
child = of_get_child_by_name(qproc->dev->of_node, "metadata");
|
||||
node = of_parse_phandle(child, "memory-region", 0);
|
||||
ret = of_reserved_mem_region_to_resource(child, 0, &res);
|
||||
of_node_put(child);
|
||||
}
|
||||
|
||||
if (!node)
|
||||
if (ret)
|
||||
return 0;
|
||||
|
||||
rmem = of_reserved_mem_lookup(node);
|
||||
if (!rmem) {
|
||||
dev_err(qproc->dev, "unable to resolve metadata region\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
qproc->mdata_phys = rmem->base;
|
||||
qproc->mdata_size = rmem->size;
|
||||
qproc->mdata_phys = res.start;
|
||||
qproc->mdata_size = resource_size(&res);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -547,54 +547,38 @@ static void qcom_pas_pds_detach(struct qcom_pas *pas, struct device **pds, size_
 
 static int qcom_pas_alloc_memory_region(struct qcom_pas *pas)
 {
-	struct reserved_mem *rmem;
-	struct device_node *node;
+	struct resource res;
+	int ret;
 
-	node = of_parse_phandle(pas->dev->of_node, "memory-region", 0);
-	if (!node) {
-		dev_err(pas->dev, "no memory-region specified\n");
-		return -EINVAL;
-	}
-
-	rmem = of_reserved_mem_lookup(node);
-	of_node_put(node);
-	if (!rmem) {
+	ret = of_reserved_mem_region_to_resource(pas->dev->of_node, 0, &res);
+	if (ret) {
 		dev_err(pas->dev, "unable to resolve memory-region\n");
-		return -EINVAL;
+		return ret;
 	}
 
-	pas->mem_phys = pas->mem_reloc = rmem->base;
-	pas->mem_size = rmem->size;
-	pas->mem_region = devm_ioremap_wc(pas->dev, pas->mem_phys, pas->mem_size);
-	if (!pas->mem_region) {
-		dev_err(pas->dev, "unable to map memory region: %pa+%zx\n",
-			&rmem->base, pas->mem_size);
-		return -EBUSY;
+	pas->mem_phys = pas->mem_reloc = res.start;
+	pas->mem_size = resource_size(&res);
+	pas->mem_region = devm_ioremap_resource_wc(pas->dev, &res);
+	if (IS_ERR(pas->mem_region)) {
+		dev_err(pas->dev, "unable to map memory region: %pR\n", &res);
+		return PTR_ERR(pas->mem_region);
 	}
 
 	if (!pas->dtb_pas_id)
 		return 0;
 
-	node = of_parse_phandle(pas->dev->of_node, "memory-region", 1);
-	if (!node) {
-		dev_err(pas->dev, "no dtb memory-region specified\n");
-		return -EINVAL;
-	}
-
-	rmem = of_reserved_mem_lookup(node);
-	of_node_put(node);
-	if (!rmem) {
+	ret = of_reserved_mem_region_to_resource(pas->dev->of_node, 1, &res);
+	if (ret) {
 		dev_err(pas->dev, "unable to resolve dtb memory-region\n");
-		return -EINVAL;
+		return ret;
 	}
 
-	pas->dtb_mem_phys = pas->dtb_mem_reloc = rmem->base;
-	pas->dtb_mem_size = rmem->size;
-	pas->dtb_mem_region = devm_ioremap_wc(pas->dev, pas->dtb_mem_phys, pas->dtb_mem_size);
-	if (!pas->dtb_mem_region) {
-		dev_err(pas->dev, "unable to map dtb memory region: %pa+%zx\n",
-			&rmem->base, pas->dtb_mem_size);
-		return -EBUSY;
+	pas->dtb_mem_phys = pas->dtb_mem_reloc = res.start;
+	pas->dtb_mem_size = resource_size(&res);
+	pas->dtb_mem_region = devm_ioremap_resource_wc(pas->dev, &res);
+	if (IS_ERR(pas->dtb_mem_region)) {
+		dev_err(pas->dev, "unable to map dtb memory region: %pR\n", &res);
+		return PTR_ERR(pas->dtb_mem_region);
 	}
 
 	return 0;
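One behavioral detail in the hunk above: devm_ioremap_wc() returns NULL on failure, so the old code had to log its own message and pick -EBUSY, while devm_ioremap_resource_wc() claims and maps the resource in one step and signals failure with an ERR_PTR(), which the new code simply propagates. A short sketch of the new idiom, assuming a hypothetical map_carveout() helper:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of_reserved_mem.h>

/* Map reserved-memory entry idx write-combined; unmapping is devres-managed. */
static void __iomem *map_carveout(struct device *dev, unsigned int idx)
{
	struct resource res;
	int ret;

	ret = of_reserved_mem_region_to_resource(dev->of_node, idx, &res);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * Never returns NULL: failures come back as ERR_PTR(), so callers
	 * must test with IS_ERR() rather than a NULL check.
	 */
	return devm_ioremap_resource_wc(dev, &res);
}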
@@ -603,7 +587,6 @@ static int qcom_pas_alloc_memory_region(struct qcom_pas *pas)
 static int qcom_pas_assign_memory_region(struct qcom_pas *pas)
 {
 	struct qcom_scm_vmperm perm[MAX_ASSIGN_COUNT];
-	struct device_node *node;
 	unsigned int perm_size;
 	int offset;
 	int ret;
@@ -612,17 +595,15 @@ static int qcom_pas_assign_memory_region(struct qcom_pas *pas)
 		return 0;
 
 	for (offset = 0; offset < pas->region_assign_count; ++offset) {
-		struct reserved_mem *rmem = NULL;
+		struct resource res;
 
-		node = of_parse_phandle(pas->dev->of_node, "memory-region",
-					pas->region_assign_idx + offset);
-		if (node)
-			rmem = of_reserved_mem_lookup(node);
-		of_node_put(node);
-		if (!rmem) {
+		ret = of_reserved_mem_region_to_resource(pas->dev->of_node,
+							 pas->region_assign_idx + offset,
+							 &res);
+		if (ret) {
 			dev_err(pas->dev, "unable to resolve shareable memory-region index %d\n",
 				offset);
-			return -EINVAL;
+			return ret;
 		}
 
 		if (pas->region_assign_shared) {
@@ -637,8 +618,8 @@ static int qcom_pas_assign_memory_region(struct qcom_pas *pas)
 			perm_size = 1;
 		}
 
-		pas->region_assign_phys[offset] = rmem->base;
-		pas->region_assign_size[offset] = rmem->size;
+		pas->region_assign_phys[offset] = res.start;
+		pas->region_assign_size[offset] = resource_size(&res);
 		pas->region_assign_owners[offset] = BIT(QCOM_SCM_VMID_HLOS);
 
 		ret = qcom_scm_assign_mem(pas->region_assign_phys[offset],
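For context on the loop above: each shareable region is resolved by its "memory-region" index, recorded, and then handed to the remote VM with qcom_scm_assign_mem(), whose srcvm argument is an in/out bitmap of current owners (BIT(QCOM_SCM_VMID_HLOS) while Linux holds it). A simplified single-owner sketch; assign_region() and remote_vmid are illustrative, and the real driver additionally builds a two-entry perm array for the shared case:

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/of_reserved_mem.h>

/* Reassign one indexed carveout from Linux (HLOS) to a remote VM. */
static int assign_region(struct device *dev, unsigned int idx, int remote_vmid)
{
	struct qcom_scm_vmperm perm = {
		.vmid = remote_vmid,
		.perm = QCOM_SCM_PERM_RW,
	};
	u64 owner = BIT(QCOM_SCM_VMID_HLOS);	/* current owner bitmap */
	struct resource res;
	int ret;

	ret = of_reserved_mem_region_to_resource(dev->of_node, idx, &res);
	if (ret)
		return ret;

	/* SCM firmware call; on success "owner" is updated to the new VM. */
	return qcom_scm_assign_mem(res.start, resource_size(&res),
				   &owner, &perm, 1);
}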
@@ -1461,7 +1442,7 @@ static const struct of_device_id qcom_pas_of_match[] = {
 	{ .compatible = "qcom,milos-wpss-pas", .data = &sc7280_wpss_resource},
 	{ .compatible = "qcom,msm8226-adsp-pil", .data = &msm8996_adsp_resource},
 	{ .compatible = "qcom,msm8953-adsp-pil", .data = &msm8996_adsp_resource},
-	{ .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init},
+	{ .compatible = "qcom,msm8974-adsp-pil", .data = &msm8996_adsp_resource},
 	{ .compatible = "qcom,msm8996-adsp-pil", .data = &msm8996_adsp_resource},
 	{ .compatible = "qcom,msm8996-slpi-pil", .data = &msm8996_slpi_resource_init},
 	{ .compatible = "qcom,msm8998-adsp-pas", .data = &msm8996_adsp_resource},
@@ -1488,6 +1469,7 @@ static const struct of_device_id qcom_pas_of_match[] = {
 	{ .compatible = "qcom,sc8280xp-nsp0-pas", .data = &sc8280xp_nsp0_resource},
 	{ .compatible = "qcom,sc8280xp-nsp1-pas", .data = &sc8280xp_nsp1_resource},
 	{ .compatible = "qcom,sdm660-adsp-pas", .data = &adsp_resource_init},
+	{ .compatible = "qcom,sdm660-cdsp-pas", .data = &cdsp_resource_init},
 	{ .compatible = "qcom,sdm845-adsp-pas", .data = &sdm845_adsp_resource_init},
 	{ .compatible = "qcom,sdm845-cdsp-pas", .data = &sdm845_cdsp_resource_init},
 	{ .compatible = "qcom,sdm845-slpi-pas", .data = &sdm845_slpi_resource_init},
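The two match-table hunks above only change which per-SoC resource descriptor a compatible string carries; the .data pointer is what probe() later fetches to configure the remoteproc instance. A generic sketch of that OF match pattern, where the demo_* names and the "vendor,soc-adsp" compatible are placeholders, not the driver's tables:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct demo_resources {
	const char *firmware_name;
	bool has_dtb;
};

static const struct demo_resources demo_adsp = {
	.firmware_name = "adsp.mbn",
};

static const struct of_device_id demo_of_match[] = {
	/* .data carries the per-SoC configuration consumed in probe() */
	{ .compatible = "vendor,soc-adsp", .data = &demo_adsp },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, demo_of_match);

static int demo_probe(struct platform_device *pdev)
{
	const struct demo_resources *cfg;

	cfg = of_device_get_match_data(&pdev->dev);	/* matching entry's .data */
	if (!cfg)
		return -ENODEV;

	dev_info(&pdev->dev, "loading %s\n", cfg->firmware_name);
	return 0;
}

static struct platform_driver demo_driver = {
	.probe = demo_probe,
	.driver = {
		.name = "demo-pas",
		.of_match_table = demo_of_match,
	},
};
module_platform_driver(demo_driver);
MODULE_LICENSE("GPL");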
Some files were not shown because too many files have changed in this diff.