Linux 6.13

-----BEGIN PGP SIGNATURE-----

iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAmeNkBEeHHRvcnZhbGRz
QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiGgN4IAIEa4TrwIPP0cI4h
iLI7rXcQaQplFlEGcmLutzItlf2YnoSoIa7fJoThJwKZIcz5o76sqtbTvQebZGsO
SRmthEixpFdJK//5fQR1OZaSsMAifH2kQrEjvQqF7OxwvVOOAHZ7bUuyOvrTRFE8
Su6kUXjmtN4O2oBEVgPKiStjd2sIqT8+Y67WjGwbDY7cU7m0qN4aBegPg5wrHQWm
edIBWN53dv/5R197qntCxaTGG+OsiFHr6LMfg6tLhq8Pw+hFGAcdgcUZ2YeCbmw8
noN0ukiaOewRgYmZI8oj8x6+zncNR/SWFNgAMxnLvK8o5oHx0R/0CtgNZSi7ocn3
WIm9hzg=
=m02S
-----END PGP SIGNATURE-----

Merge v6.13 into drm-next

A regression was caused by commit e4b5ccd392 ("drm/v3d: Ensure job pointer is
set to NULL after job completion"), but this commit is not yet in next-fixes,
so fast-forward it. Note that this recreates Linus' merge in 96c84703f1
("Merge tag 'drm-next-2025-01-17' of https://gitlab.freedesktop.org/drm/kernel")
because I didn't want to backmerge a random point in the merge window.

Signed-off-by: Simona Vetter <simona.vetter@ffwll.ch>

commit 07c5b27720

472 changed files with 4401 additions and 2641 deletions
.mailmap (3 changes)
@@ -121,6 +121,8 @@ Ben Widawsky <bwidawsk@kernel.org> <benjamin.widawsky@intel.com>
Benjamin Poirier <benjamin.poirier@gmail.com> <bpoirier@suse.de>
Benjamin Tissoires <bentiss@kernel.org> <benjamin.tissoires@gmail.com>
Benjamin Tissoires <bentiss@kernel.org> <benjamin.tissoires@redhat.com>
Bingwu Zhang <xtex@aosc.io> <xtexchooser@duck.com>
Bingwu Zhang <xtex@aosc.io> <xtex@xtexx.eu.org>
Bjorn Andersson <andersson@kernel.org> <bjorn@kryo.se>
Bjorn Andersson <andersson@kernel.org> <bjorn.andersson@linaro.org>
Bjorn Andersson <andersson@kernel.org> <bjorn.andersson@sonymobile.com>

@@ -200,6 +202,7 @@ Elliot Berman <quic_eberman@quicinc.com> <eberman@codeaurora.org>
Enric Balletbo i Serra <eballetbo@kernel.org> <enric.balletbo@collabora.com>
Enric Balletbo i Serra <eballetbo@kernel.org> <eballetbo@iseebcn.com>
Erik Kaneda <erik.kaneda@intel.com> <erik.schmauss@intel.com>
Ethan Carter Edwards <ethan@ethancedwards.com> Ethan Edwards <ethancarteredwards@gmail.com>
Eugen Hristev <eugen.hristev@linaro.org> <eugen.hristev@microchip.com>
Eugen Hristev <eugen.hristev@linaro.org> <eugen.hristev@collabora.com>
Evgeniy Polyakov <johnpol@2ka.mipt.ru>
CREDITS (12 changes)
@@ -20,6 +20,10 @@ N: Thomas Abraham
E: thomas.ab@samsung.com
D: Samsung pin controller driver

N: Jose Abreu
E: jose.abreu@synopsys.com
D: Synopsys DesignWare XPCS MDIO/PCS driver.

N: Dragos Acostachioaie
E: dragos@iname.com
W: http://www.arbornet.org/~dragos

@@ -1428,6 +1432,10 @@ S: 8124 Constitution Apt. 7
S: Sterling Heights, Michigan 48313
S: USA

N: Andy Gospodarek
E: andy@greyhouse.net
D: Maintenance and contributions to the network interface bonding driver.

N: Wolfgang Grandegger
E: wg@grandegger.com
D: Controller Area Network (device drivers)

@@ -1812,6 +1820,10 @@ D: Author/maintainer of most DRM drivers (especially ATI, MGA)
D: Core DRM templates, general DRM and 3D-related hacking
S: No fixed address

N: Woojung Huh
E: woojung.huh@microchip.com
D: Microchip LAN78XX USB Ethernet driver

N: Kenn Humborg
E: kenn@wombat.ie
D: Mods to loop device to support sparse backing files
@@ -269,27 +269,7 @@ Namely, when invoked to select an idle state for a CPU (i.e. an idle state that
the CPU will ask the processor hardware to enter), it attempts to predict the
idle duration and uses the predicted value for idle state selection.

It first obtains the time until the closest timer event with the assumption
that the scheduler tick will be stopped. That time, referred to as the *sleep
length* in what follows, is the upper bound on the time before the next CPU
wakeup. It is used to determine the sleep length range, which in turn is needed
to get the sleep length correction factor.

The ``menu`` governor maintains two arrays of sleep length correction factors.
One of them is used when tasks previously running on the given CPU are waiting
for some I/O operations to complete and the other one is used when that is not
the case. Each array contains several correction factor values that correspond
to different sleep length ranges organized so that each range represented in the
array is approximately 10 times wider than the previous one.

The correction factor for the given sleep length range (determined before
selecting the idle state for the CPU) is updated after the CPU has been woken
up and the closer the sleep length is to the observed idle duration, the closer
to 1 the correction factor becomes (it must fall between 0 and 1 inclusive).
The sleep length is multiplied by the correction factor for the range that it
falls into to obtain the first approximation of the predicted idle duration.

Next, the governor uses a simple pattern recognition algorithm to refine its
It first uses a simple pattern recognition algorithm to obtain a preliminary
idle duration prediction. Namely, it saves the last 8 observed idle duration
values and, when predicting the idle duration next time, it computes the average
and variance of them. If the variance is small (smaller than 400 square

@@ -301,29 +281,39 @@ Again, if the variance of them is small (in the above sense), the average is
taken as the "typical interval" value and so on, until either the "typical
interval" is determined or too many data points are disregarded, in which case
the "typical interval" is assumed to equal "infinity" (the maximum unsigned
integer value). The "typical interval" computed this way is compared with the
sleep length multiplied by the correction factor and the minimum of the two is
taken as the predicted idle duration.
integer value).

Then, the governor computes an extra latency limit to help "interactive"
workloads. It uses the observation that if the exit latency of the selected
idle state is comparable with the predicted idle duration, the total time spent
in that state probably will be very short and the amount of energy to save by
entering it will be relatively small, so likely it is better to avoid the
overhead related to entering that state and exiting it. Thus selecting a
shallower state is likely to be a better option then. The first approximation
of the extra latency limit is the predicted idle duration itself which
additionally is divided by a value depending on the number of tasks that
previously ran on the given CPU and now they are waiting for I/O operations to
complete. The result of that division is compared with the latency limit coming
from the power management quality of service, or `PM QoS <cpu-pm-qos_>`_,
framework and the minimum of the two is taken as the limit for the idle states'
exit latency.
If the "typical interval" computed this way is long enough, the governor obtains
the time until the closest timer event with the assumption that the scheduler
tick will be stopped. That time, referred to as the *sleep length* in what follows,
is the upper bound on the time before the next CPU wakeup. It is used to determine
the sleep length range, which in turn is needed to get the sleep length correction
factor.

The ``menu`` governor maintains an array containing several correction factor
values that correspond to different sleep length ranges organized so that each
range represented in the array is approximately 10 times wider than the previous
one.

The correction factor for the given sleep length range (determined before
selecting the idle state for the CPU) is updated after the CPU has been woken
up and the closer the sleep length is to the observed idle duration, the closer
to 1 the correction factor becomes (it must fall between 0 and 1 inclusive).
The sleep length is multiplied by the correction factor for the range that it
falls into to obtain an approximation of the predicted idle duration that is
compared to the "typical interval" determined previously and the minimum of
the two is taken as the idle duration prediction.

If the "typical interval" value is small, which means that the CPU is likely
to be woken up soon enough, the sleep length computation is skipped as it may
be costly and the idle duration is simply predicted to equal the "typical
interval" value.

Now, the governor is ready to walk the list of idle states and choose one of
them. For this purpose, it compares the target residency of each state with
the predicted idle duration and the exit latency of it with the computed latency
limit. It selects the state with the target residency closest to the predicted
the predicted idle duration and the exit latency of it with the latency
limit coming from the power management quality of service, or `PM QoS <cpu-pm-qos_>`_,
framework. It selects the state with the target residency closest to the predicted
idle duration, but still below it, and exit latency that does not exceed the
limit.
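
The prediction logic described in the excerpt above can be summarised with a
small sketch. This is an editor's illustration only, not the kernel's menu
governor code; the function name, the nanosecond units and the fixed-point
scaling of the correction factor are assumptions made for the example::

    #include <stdint.h>

    /*
     * Illustrative only: combine the corrected sleep length with the
     * "typical interval", as described in the documentation text above.
     */
    static uint64_t menu_predict_ns(uint64_t typical_interval_ns,
                                    uint64_t sleep_length_ns,
                                    uint32_t correction_fp1024)
    {
            /* Sleep length scaled by the learned correction factor
             * (0..1), kept here in fixed point as parts per 1024. */
            uint64_t corrected = (sleep_length_ns * correction_fp1024) >> 10;

            /* The prediction is the smaller of the corrected sleep length
             * and the "typical interval" from the pattern-recognition step. */
            return corrected < typical_interval_ns ? corrected
                                                   : typical_interval_ns;
    }
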
@ -42,6 +42,9 @@ properties:
|
|||
interrupts:
|
||||
maxItems: 1
|
||||
|
||||
'#sound-dai-cells':
|
||||
const: 0
|
||||
|
||||
ports:
|
||||
$ref: /schemas/graph.yaml#/properties/ports
|
||||
properties:
|
||||
|
@ -85,7 +88,21 @@ required:
|
|||
- ports
|
||||
- max-linkrate-mhz
|
||||
|
||||
additionalProperties: false
|
||||
allOf:
|
||||
- $ref: /schemas/sound/dai-common.yaml#
|
||||
- if:
|
||||
not:
|
||||
properties:
|
||||
compatible:
|
||||
contains:
|
||||
enum:
|
||||
- mediatek,mt8188-dp-tx
|
||||
- mediatek,mt8195-dp-tx
|
||||
then:
|
||||
properties:
|
||||
'#sound-dai-cells': false
|
||||
|
||||
unevaluatedProperties: false
|
||||
|
||||
examples:
|
||||
- |
|
||||
|
|
|
@ -65,6 +65,7 @@ properties:
|
|||
- st,lsm9ds0-gyro
|
||||
- description: STMicroelectronics Magnetometers
|
||||
enum:
|
||||
- st,iis2mdc
|
||||
- st,lis2mdl
|
||||
- st,lis3mdl-magn
|
||||
- st,lsm303agr-magn
|
||||
|
|
|
@ -81,7 +81,7 @@ properties:
|
|||
List of phandles, each pointing to the power supply for the
|
||||
corresponding pairset named in 'pairset-names'. This property
|
||||
aligns with IEEE 802.3-2022, Section 33.2.3 and 145.2.4.
|
||||
PSE Pinout Alternatives (as per IEEE 802.3-2022 Table 145–3)
|
||||
PSE Pinout Alternatives (as per IEEE 802.3-2022 Table 145-3)
|
||||
|-----------|---------------|---------------|---------------|---------------|
|
||||
| Conductor | Alternative A | Alternative A | Alternative B | Alternative B |
|
||||
| | (MDI-X) | (MDI) | (X) | (S) |
|
||||
|
|
Documentation/sound/codecs/cs35l56.rst (new file, 292 lines)
@ -0,0 +1,292 @@
|
|||
.. SPDX-License-Identifier: GPL-2.0-only
|
||||
|
||||
=====================================================================
|
||||
Audio drivers for Cirrus Logic CS35L54/56/57 Boosted Smart Amplifiers
|
||||
=====================================================================
|
||||
:Copyright: 2025 Cirrus Logic, Inc. and
|
||||
Cirrus Logic International Semiconductor Ltd.
|
||||
|
||||
Contact: patches@opensource.cirrus.com
|
||||
|
||||
Summary
|
||||
=======
|
||||
|
||||
The high-level summary of this document is:
|
||||
|
||||
**If you have a laptop that uses CS35L54/56/57 amplifiers but audio is not
|
||||
working, DO NOT ATTEMPT TO USE FIRMWARE AND SETTINGS FROM ANOTHER LAPTOP,
|
||||
EVEN IF THAT LAPTOP SEEMS SIMILAR.**
|
||||
|
||||
The CS35L54/56/57 amplifiers must be correctly configured for the power
|
||||
supply voltage, speaker impedance, maximum speaker voltage/current, and
|
||||
other external hardware connections.
|
||||
|
||||
The amplifiers feature advanced boost technology that increases the voltage
|
||||
used to drive the speakers, while proprietary speaker protection algorithms
|
||||
allow these boosted amplifiers to push the limits of the speakers without
|
||||
causing damage. These **must** be configured correctly.
|
||||
|
||||
Supported Cirrus Logic amplifiers
|
||||
---------------------------------
|
||||
|
||||
The cs35l56 drivers support:
|
||||
|
||||
* CS35L54
|
||||
* CS35L56
|
||||
* CS35L57
|
||||
|
||||
There are two drivers in the kernel
|
||||
|
||||
*For systems using SoundWire*: sound/soc/codecs/cs35l56.c and associated files
|
||||
|
||||
*For systems using HDA*: sound/pci/hda/cs35l56_hda.c
|
||||
|
||||
Firmware
|
||||
========
|
||||
|
||||
The amplifier is controlled and managed by firmware running on the internal
|
||||
DSP. Firmware files are essential to enable the full capabilities of the
|
||||
amplifier.
|
||||
|
||||
Firmware is distributed in the linux-firmware repository:
|
||||
https://gitlab.com/kernel-firmware/linux-firmware.git
|
||||
|
||||
On most SoundWire systems the amplifier has a default minimum capability to
|
||||
produce audio. However this will be
|
||||
|
||||
* at low volume, to protect the speakers, since the speaker specifications
|
||||
and power supply voltages are unknown.
|
||||
* a mono mix of left and right channels.
|
||||
|
||||
On some SoundWire systems that have both CS42L43 and CS35L56/57 the CS35L56/57
|
||||
receive their audio from the CS42L43 instead of directly from the host
|
||||
SoundWire interface. These systems can be identified by the CS42L43 showing
|
||||
in dmesg as a SoundWire device, but the CS35L56/57 as SPI. On these systems
|
||||
the firmware is *mandatory* to enable receiving the audio from the CS42L43.
|
||||
|
||||
On HDA systems the firmware is *mandatory* to enable HDA bridge mode. There
|
||||
will not be any audio from the amplifiers without firmware.
|
||||
|
||||
Cirrus Logic firmware files
|
||||
---------------------------
|
||||
|
||||
Each amplifier requires two firmware files. One file has a .wmfw suffix, the
|
||||
other has a .bin suffix.
|
||||
|
||||
The firmware is customized by the OEM to match the hardware of each laptop,
|
||||
and the firmware is specific to that laptop. Because of this, there are many
|
||||
firmware files in linux-firmware for these amplifiers. Firmware files are
|
||||
**not interchangeable between laptops**.
|
||||
|
||||
Cirrus Logic submits files for known laptops to the upstream linux-firmware
|
||||
repository. Providing Cirrus Logic is aware of a particular laptop and has
|
||||
permission from the manufacturer to publish the firmware, it will be pushed
|
||||
to linux-firmware. You may need to upgrade to a newer release of
|
||||
linux-firmware to obtain the firmware for your laptop.
|
||||
|
||||
**Important:** the Makefile for linux-firmware creates symlinks that are listed
|
||||
in the WHENCE file. These symlinks are required for the CS35L56 driver to be
|
||||
able to load the firmware.
|
||||
|
||||
How do I know which firmware file I should have?
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
All firmware file names are qualified with a unique "system ID". On normal
|
||||
x86 PCs with PCI audio this is the Vendor Subsystem ID (SSID) of the host
|
||||
PCI audio interface.
|
||||
|
||||
The SSID can be viewed using the lspci tool::
|
||||
|
||||
lspci -v -nn | grep -A2 -i audio
|
||||
0000:00:1f.3 Audio device [0403]: Intel Corporation Meteor Lake-P HD Audio Controller [8086:7e28]
|
||||
Subsystem: Dell Meteor Lake-P HD Audio Controller [1028:0c63]
|
||||
|
||||
In this example the SSID is 10280c63.
|
||||
|
||||
The format of the firmware file names is:
|
||||
|
||||
cs35lxx-b0-dsp1-misc-SSID[-spkidX]-ampN
|
||||
|
||||
Where:
|
||||
|
||||
* cs35lxx-b0 is the amplifier model and silicon revision. This information
|
||||
is logged by the driver during initialization.
|
||||
* SSID is the 8-digit hexadecimal SSID value.
|
||||
* ampN is the amplifier number (for example amp1). This is the same as
|
||||
the prefix on the ALSA control names except that it is always lower-case
|
||||
in the file name.
|
||||
* spkidX is an optional part, used for laptops that have firmware
|
||||
configurations for different makes and models of internal speakers.
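
For illustration only (the SSID is taken from the lspci example above, and
whether this exact firmware set has been published is an assumption), the pair
of files for amplifier 1 of a CS35L56 rev B0 system with SSID 10280c63 and no
speaker ID would be named along these lines::

    cs35l56-b0-dsp1-misc-10280c63.wmfw
    cs35l56-b0-dsp1-misc-10280c63-amp1.bin
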
|
||||
|
||||
Sound Open Firmware and ALSA topology files
|
||||
-------------------------------------------
|
||||
|
||||
All SoundWire systems will require a Sound Open Firmware (SOF) for the
|
||||
host CPU audio DSP, together with an ALSA topology file (.tplg).
|
||||
|
||||
The SOF firmware will usually be provided by the manufacturer of the host
|
||||
CPU (i.e. Intel or AMD). The .tplg file is normally part of the SOF firmware
|
||||
release.
|
||||
|
||||
SOF binary builds are available from: https://github.com/thesofproject/sof-bin/releases
|
||||
|
||||
The main SOF source is here: https://github.com/thesofproject
|
||||
|
||||
ALSA-ucm configurations
|
||||
-----------------------
|
||||
Typically an appropriate ALSA-ucm configuration file is needed for
|
||||
use-case managers and audio servers such as PipeWire.
|
||||
|
||||
Configuration files are available from the alsa-ucm-conf repository:
|
||||
https://git.alsa-project.org/?p=alsa-ucm-conf.git
|
||||
|
||||
Kernel log messages
|
||||
===================
|
||||
|
||||
SoundWire
|
||||
---------
|
||||
A successful initialization will look like this (this will be repeated for
|
||||
each amplifier)::
|
||||
|
||||
[ 7.568374] cs35l56 sdw:0:0:01fa:3556:01:0: supply VDD_P not found, using dummy regulator
|
||||
[ 7.605208] cs35l56 sdw:0:0:01fa:3556:01:0: supply VDD_IO not found, using dummy regulator
|
||||
[ 7.605313] cs35l56 sdw:0:0:01fa:3556:01:0: supply VDD_A not found, using dummy regulator
|
||||
[ 7.939279] cs35l56 sdw:0:0:01fa:3556:01:0: Cirrus Logic CS35L56 Rev B0 OTP3 fw:3.4.4 (patched=0)
|
||||
[ 7.947844] cs35l56 sdw:0:0:01fa:3556:01:0: Slave 4 state check1: UNATTACHED, status was 1
|
||||
[ 8.740280] cs35l56 sdw:0:0:01fa:3556:01:0: supply VDD_B not found, using dummy regulator
|
||||
[ 8.740552] cs35l56 sdw:0:0:01fa:3556:01:0: supply VDD_AMP not found, using dummy regulator
|
||||
[ 9.242164] cs35l56 sdw:0:0:01fa:3556:01:0: DSP1: cirrus/cs35l56-b0-dsp1-misc-xxxxxxxx.wmfw: format 3 timestamp 0x66b2b872
|
||||
[ 9.242173] cs35l56 sdw:0:0:01fa:3556:01:0: DSP1: cirrus/cs35l56-b0-dsp1-misc-xxxxxxxx.wmfw: Tue 05 Dec 2023 21:37:21 GMT Standard Time
|
||||
[ 9.991709] cs35l56 sdw:0:0:01fa:3556:01:0: DSP1: Firmware: 1a00d6 vendor: 0x2 v3.11.23, 41 algorithms
|
||||
[10.039098] cs35l56 sdw:0:0:01fa:3556:01:0: DSP1: cirrus/cs35l56-b0-dsp1-misc-xxxxxxxx-amp1.bin: v3.11.23
|
||||
[10.879235] cs35l56 sdw:0:0:01fa:3556:01:0: Slave 4 state check1: UNATTACHED, status was 1
|
||||
[11.401536] cs35l56 sdw:0:0:01fa:3556:01:0: Calibration applied
|
||||
|
||||
HDA
|
||||
---
|
||||
A successful initialization will look like this (this will be repeated for
|
||||
each amplifier)::
|
||||
|
||||
[ 6.306475] cs35l56-hda i2c-CSC3556:00-cs35l56-hda.0: Cirrus Logic CS35L56 Rev B0 OTP3 fw:3.4.4 (patched=0)
|
||||
[ 6.613892] cs35l56-hda i2c-CSC3556:00-cs35l56-hda.0: DSP system name: 'xxxxxxxx', amp name: 'AMP1'
|
||||
[ 8.266660] snd_hda_codec_cs8409 ehdaudio0D0: bound i2c-CSC3556:00-cs35l56-hda.0 (ops cs35l56_hda_comp_ops [snd_hda_scodec_cs35l56])
|
||||
[ 8.287525] cs35l56-hda i2c-CSC3556:00-cs35l56-hda.0: DSP1: cirrus/cs35l56-b0-dsp1-misc-xxxxxxxx.wmfw: format 3 timestamp 0x66b2b872
|
||||
[ 8.287528] cs35l56-hda i2c-CSC3556:00-cs35l56-hda.0: DSP1: cirrus/cs35l56-b0-dsp1-misc-xxxxxxxx.wmfw: Tue 05 Dec 2023 21:37:21 GMT Standard Time
|
||||
[ 9.984335] cs35l56-hda i2c-CSC3556:00-cs35l56-hda.0: DSP1: Firmware: 1a00d6 vendor: 0x2 v3.11.23, 41 algorithms
|
||||
[10.085797] cs35l56-hda i2c-CSC3556:00-cs35l56-hda.0: DSP1: cirrus/cs35l56-b0-dsp1-misc-xxxxxxxx-amp1.bin: v3.11.23
|
||||
[10.655237] cs35l56-hda i2c-CSC3556:00-cs35l56-hda.0: Calibration applied
|
||||
|
||||
Important messages
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
Cirrus Logic CS35L56 Rev B0 OTP3 fw:3.4.4 (patched=0)
|
||||
Shows that the driver has been able to read device ID registers from the
|
||||
amplifier.
|
||||
|
||||
* The actual amplifier type and silicon revision (CS35L56 B0 in this
|
||||
example) is shown, as read from the amplifier identification registers.
|
||||
* (patched=0) is normal, and indicates that the amplifier has been hard
|
||||
reset and is running default ROM firmware.
|
||||
* (patched=1) means that something has previously downloaded firmware
|
||||
to the amplifier and the driver does not have control of the RESET
|
||||
signal to be able to replace this preloaded firmware. This is normal
|
||||
for systems where the BIOS downloads firmware to the amplifiers
|
||||
before OS boot.
|
||||
This status can also be seen if the cs35l56 kernel module is unloaded
|
||||
and reloaded on a system where the driver does not have control of
|
||||
RESET. SoundWire systems typically do not give the driver control of
|
||||
RESET and only a BIOS (re)boot can reset the amplifiers.
|
||||
|
||||
DSP1: cirrus/cs35l56-b0-dsp1-misc-xxxxxxxx.wmfw
|
||||
Shows that a .wmfw firmware file was found and downloaded.
|
||||
|
||||
DSP1: cirrus/cs35l56-b0-dsp1-misc-xxxxxxxx-amp1.bin
|
||||
Shows that a .bin firmware file was found and downloaded.
|
||||
|
||||
Calibration applied
|
||||
Factory calibration data in EFI was written to the amplifier.
|
||||
|
||||
Error messages
|
||||
==============
|
||||
This section explains some of the error messages that the driver can log.
|
||||
|
||||
Algorithm coefficient version %d.%d.%d but expected %d.%d.%d
|
||||
The version of the .bin file content does not match the loaded firmware.
|
||||
Caused by mismatched .wmfw and .bin file, or .bin file was found but
|
||||
.wmfw was not.
|
||||
|
||||
No %s for algorithm %x
|
||||
The version of the .bin file content does not match the loaded firmware.
|
||||
Caused by mismatched .wmfw and .bin file, or .bin file was found but
|
||||
.wmfw was not.
|
||||
|
||||
.bin file required but not found
|
||||
HDA driver did not find a .bin file that matches this hardware.
|
||||
|
||||
Calibration disabled due to missing firmware controls
|
||||
Driver was not able to write EFI calibration data to firmware registers.
|
||||
This typically means that either:
|
||||
|
||||
* The driver did not find a suitable wmfw for this hardware, or
|
||||
* The amplifier has already been patched with firmware by something
|
||||
previously, and the driver does not have control of a hard RESET line
|
||||
to be able to reset the amplifier and download the firmware files it
|
||||
found. This situation is indicated by the device identification
|
||||
string in the kernel log shows "(patched=1)"
|
||||
|
||||
Failed to write calibration
|
||||
Same meaning and cause as "Calibration disabled due to missing firmware
|
||||
controls"
|
||||
|
||||
Failed to read calibration data from EFI
|
||||
Factory calibration data in EFI is missing, empty or corrupt.
|
||||
This is most likely to be cause by accidentally deleting the file from
|
||||
the EFI filesystem.
|
||||
|
||||
No calibration for silicon ID
|
||||
The factory calibration data in EFI does not match this hardware.
|
||||
The most likely cause is that an amplifier has been replaced on the
|
||||
motherboard without going through manufacturer calibration process to
|
||||
generate calibration data for the new amplifier.
|
||||
|
||||
Did not find any buses for CSCxxxx
|
||||
Only on HDA systems. The HDA codec driver found an ACPI entry for
|
||||
Cirrus Logic companion amps, but could not enumerate the ACPI entries for
|
||||
the I2C/SPI buses. The most likely cause of this is that:
|
||||
|
||||
* The relevant bus driver (I2C or SPI) is not part of the kernel.
|
||||
* The HDA codec driver was built-in to the kernel but the I2C/SPI
|
||||
bus driver is a module and so the HDA codec driver cannot call the
|
||||
bus driver functions.
|
||||
|
||||
init_completion timed out
|
||||
The SoundWire bus controller (host end) did not enumerate the amplifier.
|
||||
In other words, the ACPI says there is an amplifier but for some reason
|
||||
it was not detected on the bus.
|
||||
|
||||
No AF01 node
|
||||
Indicates an error in ACPI. A SoundWire system should have a Device()
|
||||
node named "AF01" but it was not found.
|
||||
|
||||
Failed to get spk-id-gpios
|
||||
ACPI says that the driver should request a GPIO but the driver was not
|
||||
able to get that GPIO. The most likely cause is that the kernel does not
|
||||
include the correct GPIO or PINCTRL driver for this system.
|
||||
|
||||
Failed to read spk-id
|
||||
ACPI says that the driver should request a GPIO but the driver was not
|
||||
able to read that GPIO.
|
||||
|
||||
Unexpected spk-id element count
|
||||
AF01 contains more speaker ID GPIO entries than the driver supports
|
||||
|
||||
Overtemp error
|
||||
Amplifier overheat protection was triggered and the amplifier shut down
|
||||
to protect itself.
|
||||
|
||||
Amp short error
|
||||
Amplifier detected a short-circuit on the speaker output pins and shut
|
||||
down for protection. This would normally indicate a damaged speaker.
|
||||
|
||||
Hibernate wake failed
|
||||
The driver tried to wake the amplifier from its power-saving state but
|
||||
did not see the expected responses from the amplifier. This can be caused
|
||||
by using firmware that does not match the hardware.
|
Documentation/sound/codecs/index.rst (new file, 9 lines)
@ -0,0 +1,9 @@
|
|||
.. SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
Codec-Specific Information
|
||||
==========================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
cs35l56
|
|
@ -13,6 +13,7 @@ Sound Subsystem Documentation
|
|||
alsa-configuration
|
||||
hd-audio/index
|
||||
cards/index
|
||||
codecs/index
|
||||
utimers
|
||||
|
||||
.. only:: subproject and html
|
||||
|
|
|
@ -810,6 +810,12 @@ Here is the list of current tracers that may be configured.
|
|||
to draw a graph of function calls similar to C code
|
||||
source.
|
||||
|
||||
Note that the function graph calculates the timings of when the
|
||||
function starts and returns internally and for each instance. If
|
||||
there are two instances that run function graph tracer and traces
|
||||
the same functions, the length of the timings may be slightly off as
|
||||
each read the timestamp separately and not at the same time.
|
||||
|
||||
"blk"
|
||||
|
||||
The block tracer. The tracer used by the blktrace user
|
||||
|
|
|
@ -1914,6 +1914,9 @@ No flags are specified so far, the corresponding field must be set to zero.
|
|||
#define KVM_IRQ_ROUTING_HV_SINT 4
|
||||
#define KVM_IRQ_ROUTING_XEN_EVTCHN 5
|
||||
|
||||
On s390, adding a KVM_IRQ_ROUTING_S390_ADAPTER is rejected on ucontrol VMs with
|
||||
error -EINVAL.
|
||||
|
||||
flags:
|
||||
|
||||
- KVM_MSI_VALID_DEVID: used along with KVM_IRQ_ROUTING_MSI routing entry
|
||||
|
|
|
@ -58,11 +58,15 @@ Groups:
|
|||
Enables async page faults for the guest. So in case of a major page fault
|
||||
the host is allowed to handle this async and continues the guest.
|
||||
|
||||
-EINVAL is returned when called on the FLIC of a ucontrol VM.
|
||||
|
||||
KVM_DEV_FLIC_APF_DISABLE_WAIT
|
||||
Disables async page faults for the guest and waits until already pending
|
||||
async page faults are done. This is necessary to trigger a completion interrupt
|
||||
for every init interrupt before migrating the interrupt list.
|
||||
|
||||
-EINVAL is returned when called on the FLIC of a ucontrol VM.
|
||||
|
||||
KVM_DEV_FLIC_ADAPTER_REGISTER
|
||||
Register an I/O adapter interrupt source. Takes a kvm_s390_io_adapter
|
||||
describing the adapter to register::
|
||||
|
|
MAINTAINERS (23 changes)
@ -949,7 +949,6 @@ AMAZON ETHERNET DRIVERS
|
|||
M: Shay Agroskin <shayagr@amazon.com>
|
||||
M: Arthur Kiyanovski <akiyano@amazon.com>
|
||||
R: David Arinzon <darinzon@amazon.com>
|
||||
R: Noam Dagan <ndagan@amazon.com>
|
||||
R: Saeed Bishara <saeedb@amazon.com>
|
||||
L: netdev@vger.kernel.org
|
||||
S: Supported
|
||||
|
@ -2701,7 +2700,6 @@ N: at91
|
|||
N: atmel
|
||||
|
||||
ARM/Microchip Sparx5 SoC support
|
||||
M: Lars Povlsen <lars.povlsen@microchip.com>
|
||||
M: Steen Hegelund <Steen.Hegelund@microchip.com>
|
||||
M: Daniel Machon <daniel.machon@microchip.com>
|
||||
M: UNGLinuxDriver@microchip.com
|
||||
|
@ -4069,7 +4067,6 @@ F: net/bluetooth/
|
|||
|
||||
BONDING DRIVER
|
||||
M: Jay Vosburgh <jv@jvosburgh.net>
|
||||
M: Andy Gospodarek <andy@greyhouse.net>
|
||||
L: netdev@vger.kernel.org
|
||||
S: Maintained
|
||||
F: Documentation/networking/bonding.rst
|
||||
|
@ -4142,7 +4139,6 @@ S: Odd Fixes
|
|||
F: drivers/net/ethernet/netronome/nfp/bpf/
|
||||
|
||||
BPF JIT for POWERPC (32-BIT AND 64-BIT)
|
||||
M: Michael Ellerman <mpe@ellerman.id.au>
|
||||
M: Hari Bathini <hbathini@linux.ibm.com>
|
||||
M: Christophe Leroy <christophe.leroy@csgroup.eu>
|
||||
R: Naveen N Rao <naveen@kernel.org>
|
||||
|
@ -5480,6 +5476,7 @@ L: linux-sound@vger.kernel.org
|
|||
L: patches@opensource.cirrus.com
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/sound/cirrus,cs*
|
||||
F: Documentation/sound/codecs/cs*
|
||||
F: drivers/mfd/cs42l43*
|
||||
F: drivers/pinctrl/cirrus/pinctrl-cs42l43*
|
||||
F: drivers/spi/spi-cs42l43*
|
||||
|
@ -12645,7 +12642,7 @@ F: arch/mips/include/uapi/asm/kvm*
|
|||
F: arch/mips/kvm/
|
||||
|
||||
KERNEL VIRTUAL MACHINE FOR POWERPC (KVM/powerpc)
|
||||
M: Michael Ellerman <mpe@ellerman.id.au>
|
||||
M: Madhavan Srinivasan <maddy@linux.ibm.com>
|
||||
R: Nicholas Piggin <npiggin@gmail.com>
|
||||
L: linuxppc-dev@lists.ozlabs.org
|
||||
L: kvm@vger.kernel.org
|
||||
|
@ -13224,11 +13221,11 @@ X: drivers/macintosh/adb-iop.c
|
|||
X: drivers/macintosh/via-macii.c
|
||||
|
||||
LINUX FOR POWERPC (32-BIT AND 64-BIT)
|
||||
M: Madhavan Srinivasan <maddy@linux.ibm.com>
|
||||
M: Michael Ellerman <mpe@ellerman.id.au>
|
||||
R: Nicholas Piggin <npiggin@gmail.com>
|
||||
R: Christophe Leroy <christophe.leroy@csgroup.eu>
|
||||
R: Naveen N Rao <naveen@kernel.org>
|
||||
M: Madhavan Srinivasan <maddy@linux.ibm.com>
|
||||
L: linuxppc-dev@lists.ozlabs.org
|
||||
S: Supported
|
||||
W: https://github.com/linuxppc/wiki/wiki
|
||||
|
@ -14579,7 +14576,6 @@ F: drivers/dma/mediatek/
|
|||
MEDIATEK ETHERNET DRIVER
|
||||
M: Felix Fietkau <nbd@nbd.name>
|
||||
M: Sean Wang <sean.wang@mediatek.com>
|
||||
M: Mark Lee <Mark-MC.Lee@mediatek.com>
|
||||
M: Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
L: netdev@vger.kernel.org
|
||||
S: Maintained
|
||||
|
@ -22006,6 +22002,7 @@ W: https://github.com/thesofproject/linux/
|
|||
F: sound/soc/sof/
|
||||
|
||||
SOUND - GENERIC SOUND CARD (Simple-Audio-Card, Audio-Graph-Card)
|
||||
M: Mark Brown <broonie@kernel.org>
|
||||
M: Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
|
||||
S: Supported
|
||||
L: linux-sound@vger.kernel.org
|
||||
|
@ -22516,11 +22513,8 @@ F: Documentation/devicetree/bindings/phy/st,stm32mp25-combophy.yaml
|
|||
F: drivers/phy/st/phy-stm32-combophy.c
|
||||
|
||||
STMMAC ETHERNET DRIVER
|
||||
M: Alexandre Torgue <alexandre.torgue@foss.st.com>
|
||||
M: Jose Abreu <joabreu@synopsys.com>
|
||||
L: netdev@vger.kernel.org
|
||||
S: Supported
|
||||
W: http://www.stlinux.com
|
||||
S: Orphan
|
||||
F: Documentation/networking/device_drivers/ethernet/stmicro/
|
||||
F: drivers/net/ethernet/stmicro/stmmac/
|
||||
|
||||
|
@ -22752,9 +22746,8 @@ S: Supported
|
|||
F: drivers/net/ethernet/synopsys/
|
||||
|
||||
SYNOPSYS DESIGNWARE ETHERNET XPCS DRIVER
|
||||
M: Jose Abreu <Jose.Abreu@synopsys.com>
|
||||
L: netdev@vger.kernel.org
|
||||
S: Supported
|
||||
S: Orphan
|
||||
F: drivers/net/pcs/pcs-xpcs.c
|
||||
F: drivers/net/pcs/pcs-xpcs.h
|
||||
F: include/linux/pcs/pcs-xpcs.h
|
||||
|
@ -23662,7 +23655,6 @@ F: tools/testing/selftests/timers/
|
|||
|
||||
TIPC NETWORK LAYER
|
||||
M: Jon Maloy <jmaloy@redhat.com>
|
||||
M: Ying Xue <ying.xue@windriver.com>
|
||||
L: netdev@vger.kernel.org (core kernel code)
|
||||
L: tipc-discussion@lists.sourceforge.net (user apps, general discussion)
|
||||
S: Maintained
|
||||
|
@ -24268,7 +24260,8 @@ F: Documentation/devicetree/bindings/usb/nxp,isp1760.yaml
|
|||
F: drivers/usb/isp1760/*
|
||||
|
||||
USB LAN78XX ETHERNET DRIVER
|
||||
M: Woojung Huh <woojung.huh@microchip.com>
|
||||
M: Thangaraj Samynathan <Thangaraj.S@microchip.com>
|
||||
M: Rengarajan Sundararajan <Rengarajan.S@microchip.com>
|
||||
M: UNGLinuxDriver@microchip.com
|
||||
L: netdev@vger.kernel.org
|
||||
S: Maintained
|
||||
|
|
Makefile (2 changes)
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 13
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION =
NAME = Baby Opossum Posse

# *DOCUMENTATION*

@ -87,7 +87,7 @@
|
|||
reg = <0x402c0000 0x4000>;
|
||||
interrupts = <110>;
|
||||
clocks = <&clks IMXRT1050_CLK_IPG_PDOF>,
|
||||
<&clks IMXRT1050_CLK_OSC>,
|
||||
<&clks IMXRT1050_CLK_AHB_PODF>,
|
||||
<&clks IMXRT1050_CLK_USDHC1>;
|
||||
clock-names = "ipg", "ahb", "per";
|
||||
bus-width = <4>;
|
||||
|
|
|
@ -323,6 +323,7 @@ CONFIG_SND_SOC_IMX_SGTL5000=y
|
|||
CONFIG_SND_SOC_FSL_ASOC_CARD=y
|
||||
CONFIG_SND_SOC_AC97_CODEC=y
|
||||
CONFIG_SND_SOC_CS42XX8_I2C=y
|
||||
CONFIG_SND_SOC_SPDIF=y
|
||||
CONFIG_SND_SOC_TLV320AIC3X_I2C=y
|
||||
CONFIG_SND_SOC_WM8960=y
|
||||
CONFIG_SND_SOC_WM8962=y
|
||||
|
|
|
@ -165,7 +165,7 @@ audio_subsys: bus@59000000 {
|
|||
};
|
||||
|
||||
esai0: esai@59010000 {
|
||||
compatible = "fsl,imx8qm-esai";
|
||||
compatible = "fsl,imx8qm-esai", "fsl,imx6ull-esai";
|
||||
reg = <0x59010000 0x10000>;
|
||||
interrupts = <GIC_SPI 409 IRQ_TYPE_LEVEL_HIGH>;
|
||||
clocks = <&esai0_lpcg IMX_LPCG_CLK_4>,
|
||||
|
|
|
@ -134,7 +134,7 @@
|
|||
};
|
||||
|
||||
esai1: esai@59810000 {
|
||||
compatible = "fsl,imx8qm-esai";
|
||||
compatible = "fsl,imx8qm-esai", "fsl,imx6ull-esai";
|
||||
reg = <0x59810000 0x10000>;
|
||||
interrupts = <GIC_SPI 411 IRQ_TYPE_LEVEL_HIGH>;
|
||||
clocks = <&esai1_lpcg IMX_LPCG_CLK_0>,
|
||||
|
|
|
@ -1673,7 +1673,7 @@
|
|||
|
||||
netcmix_blk_ctrl: syscon@4c810000 {
|
||||
compatible = "nxp,imx95-netcmix-blk-ctrl", "syscon";
|
||||
reg = <0x0 0x4c810000 0x0 0x10000>;
|
||||
reg = <0x0 0x4c810000 0x0 0x8>;
|
||||
#clock-cells = <1>;
|
||||
clocks = <&scmi_clk IMX95_CLK_BUSNETCMIX>;
|
||||
assigned-clocks = <&scmi_clk IMX95_CLK_BUSNETCMIX>;
|
||||
|
|
|
@ -2440,6 +2440,7 @@
|
|||
|
||||
qcom,cmb-element-bits = <32>;
|
||||
qcom,cmb-msrs-num = <32>;
|
||||
status = "disabled";
|
||||
|
||||
out-ports {
|
||||
port {
|
||||
|
@ -6092,7 +6093,7 @@
|
|||
<0x0 0x40000000 0x0 0xf20>,
|
||||
<0x0 0x40000f20 0x0 0xa8>,
|
||||
<0x0 0x40001000 0x0 0x4000>,
|
||||
<0x0 0x40200000 0x0 0x100000>,
|
||||
<0x0 0x40200000 0x0 0x1fe00000>,
|
||||
<0x0 0x01c03000 0x0 0x1000>,
|
||||
<0x0 0x40005000 0x0 0x2000>;
|
||||
reg-names = "parf", "dbi", "elbi", "atu", "addr_space",
|
||||
|
@ -6250,7 +6251,7 @@
|
|||
<0x0 0x60000000 0x0 0xf20>,
|
||||
<0x0 0x60000f20 0x0 0xa8>,
|
||||
<0x0 0x60001000 0x0 0x4000>,
|
||||
<0x0 0x60200000 0x0 0x100000>,
|
||||
<0x0 0x60200000 0x0 0x1fe00000>,
|
||||
<0x0 0x01c13000 0x0 0x1000>,
|
||||
<0x0 0x60005000 0x0 0x2000>;
|
||||
reg-names = "parf", "dbi", "elbi", "atu", "addr_space",
|
||||
|
|
|
@ -773,6 +773,10 @@
|
|||
status = "okay";
|
||||
};
|
||||
|
||||
&usb_1_ss0_dwc3 {
|
||||
dr_mode = "host";
|
||||
};
|
||||
|
||||
&usb_1_ss0_dwc3_hs {
|
||||
remote-endpoint = <&pmic_glink_ss0_hs_in>;
|
||||
};
|
||||
|
@ -801,6 +805,10 @@
|
|||
status = "okay";
|
||||
};
|
||||
|
||||
&usb_1_ss1_dwc3 {
|
||||
dr_mode = "host";
|
||||
};
|
||||
|
||||
&usb_1_ss1_dwc3_hs {
|
||||
remote-endpoint = <&pmic_glink_ss1_hs_in>;
|
||||
};
|
||||
|
|
|
@ -1197,6 +1197,10 @@
|
|||
status = "okay";
|
||||
};
|
||||
|
||||
&usb_1_ss0_dwc3 {
|
||||
dr_mode = "host";
|
||||
};
|
||||
|
||||
&usb_1_ss0_dwc3_hs {
|
||||
remote-endpoint = <&pmic_glink_ss0_hs_in>;
|
||||
};
|
||||
|
@ -1225,6 +1229,10 @@
|
|||
status = "okay";
|
||||
};
|
||||
|
||||
&usb_1_ss1_dwc3 {
|
||||
dr_mode = "host";
|
||||
};
|
||||
|
||||
&usb_1_ss1_dwc3_hs {
|
||||
remote-endpoint = <&pmic_glink_ss1_hs_in>;
|
||||
};
|
||||
|
@ -1253,6 +1261,10 @@
|
|||
status = "okay";
|
||||
};
|
||||
|
||||
&usb_1_ss2_dwc3 {
|
||||
dr_mode = "host";
|
||||
};
|
||||
|
||||
&usb_1_ss2_dwc3_hs {
|
||||
remote-endpoint = <&pmic_glink_ss2_hs_in>;
|
||||
};
|
||||
|
|
|
@ -2924,7 +2924,7 @@
|
|||
#address-cells = <3>;
|
||||
#size-cells = <2>;
|
||||
ranges = <0x01000000 0x0 0x00000000 0x0 0x70200000 0x0 0x100000>,
|
||||
<0x02000000 0x0 0x70300000 0x0 0x70300000 0x0 0x1d00000>;
|
||||
<0x02000000 0x0 0x70300000 0x0 0x70300000 0x0 0x3d00000>;
|
||||
bus-range = <0x00 0xff>;
|
||||
|
||||
dma-coherent;
|
||||
|
@ -4066,8 +4066,6 @@
|
|||
|
||||
dma-coherent;
|
||||
|
||||
usb-role-switch;
|
||||
|
||||
ports {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
@ -4321,8 +4319,6 @@
|
|||
|
||||
dma-coherent;
|
||||
|
||||
usb-role-switch;
|
||||
|
||||
ports {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
@ -4421,8 +4417,6 @@
|
|||
|
||||
dma-coherent;
|
||||
|
||||
usb-role-switch;
|
||||
|
||||
ports {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
|
|
@ -333,6 +333,7 @@
|
|||
|
||||
power-domain@RK3328_PD_HEVC {
|
||||
reg = <RK3328_PD_HEVC>;
|
||||
clocks = <&cru SCLK_VENC_CORE>;
|
||||
#power-domain-cells = <0>;
|
||||
};
|
||||
power-domain@RK3328_PD_VIDEO {
|
||||
|
|
|
@ -350,6 +350,7 @@
|
|||
assigned-clocks = <&pmucru CLK_PCIEPHY0_REF>;
|
||||
assigned-clock-rates = <100000000>;
|
||||
resets = <&cru SRST_PIPEPHY0>;
|
||||
reset-names = "phy";
|
||||
rockchip,pipe-grf = <&pipegrf>;
|
||||
rockchip,pipe-phy-grf = <&pipe_phy_grf0>;
|
||||
#phy-cells = <1>;
|
||||
|
|
|
@ -1681,6 +1681,7 @@
|
|||
assigned-clocks = <&pmucru CLK_PCIEPHY1_REF>;
|
||||
assigned-clock-rates = <100000000>;
|
||||
resets = <&cru SRST_PIPEPHY1>;
|
||||
reset-names = "phy";
|
||||
rockchip,pipe-grf = <&pipegrf>;
|
||||
rockchip,pipe-phy-grf = <&pipe_phy_grf1>;
|
||||
#phy-cells = <1>;
|
||||
|
@ -1697,6 +1698,7 @@
|
|||
assigned-clocks = <&pmucru CLK_PCIEPHY2_REF>;
|
||||
assigned-clock-rates = <100000000>;
|
||||
resets = <&cru SRST_PIPEPHY2>;
|
||||
reset-names = "phy";
|
||||
rockchip,pipe-grf = <&pipegrf>;
|
||||
rockchip,pipe-phy-grf = <&pipe_phy_grf2>;
|
||||
#phy-cells = <1>;
|
||||
|
|
|
@ -72,7 +72,7 @@
|
|||
|
||||
rfkill {
|
||||
compatible = "rfkill-gpio";
|
||||
label = "rfkill-pcie-wlan";
|
||||
label = "rfkill-m2-wlan";
|
||||
radio-type = "wlan";
|
||||
shutdown-gpios = <&gpio4 RK_PA2 GPIO_ACTIVE_HIGH>;
|
||||
};
|
||||
|
|
|
@ -434,6 +434,7 @@
|
|||
&sdmmc {
|
||||
bus-width = <4>;
|
||||
cap-sd-highspeed;
|
||||
cd-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_LOW>;
|
||||
disable-wp;
|
||||
max-frequency = <150000000>;
|
||||
no-mmc;
|
||||
|
|
|
@ -783,9 +783,6 @@ static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx)
|
|||
if (tx->initiator.id == PKVM_ID_HOST && hyp_page_count((void *)addr))
|
||||
return -EBUSY;
|
||||
|
||||
if (__hyp_ack_skip_pgtable_check(tx))
|
||||
return 0;
|
||||
|
||||
return __hyp_check_page_state_range(addr, size,
|
||||
PKVM_PAGE_SHARED_BORROWED);
|
||||
}
|
||||
|
|
|
@ -24,6 +24,7 @@ static DEFINE_MUTEX(arm_pmus_lock);
|
|||
|
||||
static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc);
|
||||
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc);
|
||||
static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc);
|
||||
|
||||
static struct kvm_vcpu *kvm_pmc_to_vcpu(const struct kvm_pmc *pmc)
|
||||
{
|
||||
|
@ -327,48 +328,25 @@ u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
|
|||
return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_pmu_enable_counter_mask - enable selected PMU counters
|
||||
* @vcpu: The vcpu pointer
|
||||
* @val: the value guest writes to PMCNTENSET register
|
||||
*
|
||||
* Call perf_event_enable to start counting the perf event
|
||||
*/
|
||||
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
|
||||
static void kvm_pmc_enable_perf_event(struct kvm_pmc *pmc)
|
||||
{
|
||||
int i;
|
||||
if (!kvm_vcpu_has_pmu(vcpu))
|
||||
if (!pmc->perf_event) {
|
||||
kvm_pmu_create_perf_event(pmc);
|
||||
return;
|
||||
|
||||
if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) || !val)
|
||||
return;
|
||||
|
||||
for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
|
||||
struct kvm_pmc *pmc;
|
||||
|
||||
if (!(val & BIT(i)))
|
||||
continue;
|
||||
|
||||
pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
|
||||
|
||||
if (!pmc->perf_event) {
|
||||
kvm_pmu_create_perf_event(pmc);
|
||||
} else {
|
||||
perf_event_enable(pmc->perf_event);
|
||||
if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
|
||||
kvm_debug("fail to enable perf event\n");
|
||||
}
|
||||
}
|
||||
|
||||
perf_event_enable(pmc->perf_event);
|
||||
if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
|
||||
kvm_debug("fail to enable perf event\n");
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_pmu_disable_counter_mask - disable selected PMU counters
|
||||
* @vcpu: The vcpu pointer
|
||||
* @val: the value guest writes to PMCNTENCLR register
|
||||
*
|
||||
* Call perf_event_disable to stop counting the perf event
|
||||
*/
|
||||
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
|
||||
static void kvm_pmc_disable_perf_event(struct kvm_pmc *pmc)
|
||||
{
|
||||
if (pmc->perf_event)
|
||||
perf_event_disable(pmc->perf_event);
|
||||
}
|
||||
|
||||
void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val)
|
||||
{
|
||||
int i;
|
||||
|
||||
|
@ -376,16 +354,18 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
|
|||
return;
|
||||
|
||||
for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
|
||||
struct kvm_pmc *pmc;
|
||||
struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
|
||||
|
||||
if (!(val & BIT(i)))
|
||||
continue;
|
||||
|
||||
pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
|
||||
|
||||
if (pmc->perf_event)
|
||||
perf_event_disable(pmc->perf_event);
|
||||
if (kvm_pmu_counter_is_enabled(pmc))
|
||||
kvm_pmc_enable_perf_event(pmc);
|
||||
else
|
||||
kvm_pmc_disable_perf_event(pmc);
|
||||
}
|
||||
|
||||
kvm_vcpu_pmu_restore_guest(vcpu);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -626,27 +606,28 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
|
|||
if (!kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5))
|
||||
val &= ~ARMV8_PMU_PMCR_LP;
|
||||
|
||||
/* Request a reload of the PMU to enable/disable affected counters */
|
||||
if ((__vcpu_sys_reg(vcpu, PMCR_EL0) ^ val) & ARMV8_PMU_PMCR_E)
|
||||
kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
|
||||
|
||||
/* The reset bits don't indicate any state, and shouldn't be saved. */
|
||||
__vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);
|
||||
|
||||
if (val & ARMV8_PMU_PMCR_E) {
|
||||
kvm_pmu_enable_counter_mask(vcpu,
|
||||
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
|
||||
} else {
|
||||
kvm_pmu_disable_counter_mask(vcpu,
|
||||
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
|
||||
}
|
||||
|
||||
if (val & ARMV8_PMU_PMCR_C)
|
||||
kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
|
||||
|
||||
if (val & ARMV8_PMU_PMCR_P) {
|
||||
unsigned long mask = kvm_pmu_accessible_counter_mask(vcpu);
|
||||
mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
|
||||
/*
|
||||
* Unlike other PMU sysregs, the controls in PMCR_EL0 always apply
|
||||
* to the 'guest' range of counters and never the 'hyp' range.
|
||||
*/
|
||||
unsigned long mask = kvm_pmu_implemented_counter_mask(vcpu) &
|
||||
~kvm_pmu_hyp_counter_mask(vcpu) &
|
||||
~BIT(ARMV8_PMU_CYCLE_IDX);
|
||||
|
||||
for_each_set_bit(i, &mask, 32)
|
||||
kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
|
||||
}
|
||||
kvm_vcpu_pmu_restore_guest(vcpu);
|
||||
}
|
||||
|
||||
static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
|
||||
|
@ -910,11 +891,11 @@ void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
|
|||
{
|
||||
u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
|
||||
|
||||
kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu));
|
||||
|
||||
__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask;
|
||||
__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask;
|
||||
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask;
|
||||
|
||||
kvm_pmu_reprogram_counter_mask(vcpu, mask);
|
||||
}
|
||||
|
||||
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
|
||||
|
|
|
@ -1208,16 +1208,14 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
|
|||
mask = kvm_pmu_accessible_counter_mask(vcpu);
|
||||
if (p->is_write) {
|
||||
val = p->regval & mask;
|
||||
if (r->Op2 & 0x1) {
|
||||
if (r->Op2 & 0x1)
|
||||
/* accessing PMCNTENSET_EL0 */
|
||||
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
|
||||
kvm_pmu_enable_counter_mask(vcpu, val);
|
||||
kvm_vcpu_pmu_restore_guest(vcpu);
|
||||
} else {
|
||||
else
|
||||
/* accessing PMCNTENCLR_EL0 */
|
||||
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
|
||||
kvm_pmu_disable_counter_mask(vcpu, val);
|
||||
}
|
||||
|
||||
kvm_pmu_reprogram_counter_mask(vcpu, val);
|
||||
} else {
|
||||
p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
|
||||
}
|
||||
|
@ -2450,6 +2448,26 @@ static unsigned int s1pie_el2_visibility(const struct kvm_vcpu *vcpu,
|
|||
return __el2_visibility(vcpu, rd, s1pie_visibility);
|
||||
}
|
||||
|
||||
static bool access_mdcr(struct kvm_vcpu *vcpu,
|
||||
struct sys_reg_params *p,
|
||||
const struct sys_reg_desc *r)
|
||||
{
|
||||
u64 old = __vcpu_sys_reg(vcpu, MDCR_EL2);
|
||||
|
||||
if (!access_rw(vcpu, p, r))
|
||||
return false;
|
||||
|
||||
/*
|
||||
* Request a reload of the PMU to enable/disable the counters affected
|
||||
* by HPME.
|
||||
*/
|
||||
if ((old ^ __vcpu_sys_reg(vcpu, MDCR_EL2)) & MDCR_EL2_HPME)
|
||||
kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Architected system registers.
|
||||
* Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
|
||||
|
@ -2983,7 +3001,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
|||
EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1),
|
||||
EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
|
||||
EL2_REG_VNCR(HCR_EL2, reset_hcr, 0),
|
||||
EL2_REG(MDCR_EL2, access_rw, reset_val, 0),
|
||||
EL2_REG(MDCR_EL2, access_mdcr, reset_val, 0),
|
||||
EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
|
||||
EL2_REG_VNCR(HSTR_EL2, reset_val, 0),
|
||||
EL2_REG_VNCR(HFGRTR_EL2, reset_val, 0),
|
||||
|
|
|
@ -34,6 +34,8 @@ enum vcpu_ftr {
|
|||
#define E500_TLB_BITMAP (1 << 30)
|
||||
/* TLB1 entry is mapped by host TLB0 */
|
||||
#define E500_TLB_TLB0 (1 << 29)
|
||||
/* entry is writable on the host */
|
||||
#define E500_TLB_WRITABLE (1 << 28)
|
||||
/* bits [6-5] MAS2_X1 and MAS2_X0 and [4-0] bits for WIMGE */
|
||||
#define E500_TLB_MAS2_ATTR (0x7f)
|
||||
|
||||
|
|
|
@ -45,11 +45,14 @@ static inline unsigned int tlb1_max_shadow_size(void)
|
|||
return host_tlb_params[1].entries - tlbcam_index - 1;
|
||||
}
|
||||
|
||||
static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
|
||||
static inline u32 e500_shadow_mas3_attrib(u32 mas3, bool writable, int usermode)
|
||||
{
|
||||
/* Mask off reserved bits. */
|
||||
mas3 &= MAS3_ATTRIB_MASK;
|
||||
|
||||
if (!writable)
|
||||
mas3 &= ~(MAS3_UW|MAS3_SW);
|
||||
|
||||
#ifndef CONFIG_KVM_BOOKE_HV
|
||||
if (!usermode) {
|
||||
/* Guest is in supervisor mode,
|
||||
|
@ -242,17 +245,18 @@ static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
|
|||
return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
|
||||
}
|
||||
|
||||
static inline bool kvmppc_e500_ref_setup(struct tlbe_ref *ref,
|
||||
static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
|
||||
struct kvm_book3e_206_tlb_entry *gtlbe,
|
||||
kvm_pfn_t pfn, unsigned int wimg)
|
||||
kvm_pfn_t pfn, unsigned int wimg,
|
||||
bool writable)
|
||||
{
|
||||
ref->pfn = pfn;
|
||||
ref->flags = E500_TLB_VALID;
|
||||
if (writable)
|
||||
ref->flags |= E500_TLB_WRITABLE;
|
||||
|
||||
/* Use guest supplied MAS2_G and MAS2_E */
|
||||
ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg;
|
||||
|
||||
return tlbe_is_writable(gtlbe);
|
||||
}
|
||||
|
||||
static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
|
||||
|
@ -305,6 +309,7 @@ static void kvmppc_e500_setup_stlbe(
|
|||
{
|
||||
kvm_pfn_t pfn = ref->pfn;
|
||||
u32 pr = vcpu->arch.shared->msr & MSR_PR;
|
||||
bool writable = !!(ref->flags & E500_TLB_WRITABLE);
|
||||
|
||||
BUG_ON(!(ref->flags & E500_TLB_VALID));
|
||||
|
||||
|
@ -312,7 +317,7 @@ static void kvmppc_e500_setup_stlbe(
|
|||
stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
|
||||
stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR);
|
||||
stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
|
||||
e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);
|
||||
e500_shadow_mas3_attrib(gtlbe->mas7_3, writable, pr);
|
||||
}
|
||||
|
||||
static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
|
||||
|
@ -321,15 +326,14 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
|
|||
struct tlbe_ref *ref)
|
||||
{
|
||||
struct kvm_memory_slot *slot;
|
||||
unsigned long pfn = 0; /* silence GCC warning */
|
||||
unsigned int psize;
|
||||
unsigned long pfn;
|
||||
struct page *page = NULL;
|
||||
unsigned long hva;
|
||||
int pfnmap = 0;
|
||||
int tsize = BOOK3E_PAGESZ_4K;
|
||||
int ret = 0;
|
||||
unsigned long mmu_seq;
|
||||
struct kvm *kvm = vcpu_e500->vcpu.kvm;
|
||||
unsigned long tsize_pages = 0;
|
||||
pte_t *ptep;
|
||||
unsigned int wimg = 0;
|
||||
pgd_t *pgdir;
|
||||
|
@ -351,110 +355,12 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
|
|||
slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
|
||||
hva = gfn_to_hva_memslot(slot, gfn);
|
||||
|
||||
if (tlbsel == 1) {
|
||||
struct vm_area_struct *vma;
|
||||
mmap_read_lock(kvm->mm);
|
||||
|
||||
vma = find_vma(kvm->mm, hva);
|
||||
if (vma && hva >= vma->vm_start &&
|
||||
(vma->vm_flags & VM_PFNMAP)) {
|
||||
/*
|
||||
* This VMA is a physically contiguous region (e.g.
|
||||
* /dev/mem) that bypasses normal Linux page
|
||||
* management. Find the overlap between the
|
||||
* vma and the memslot.
|
||||
*/
|
||||
|
||||
unsigned long start, end;
|
||||
unsigned long slot_start, slot_end;
|
||||
|
||||
pfnmap = 1;
|
||||
|
||||
start = vma->vm_pgoff;
|
||||
end = start +
|
||||
vma_pages(vma);
|
||||
|
||||
pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);
|
||||
|
||||
slot_start = pfn - (gfn - slot->base_gfn);
|
||||
slot_end = slot_start + slot->npages;
|
||||
|
||||
if (start < slot_start)
|
||||
start = slot_start;
|
||||
if (end > slot_end)
|
||||
end = slot_end;
|
||||
|
||||
tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
|
||||
MAS1_TSIZE_SHIFT;
|
||||
|
||||
/*
|
||||
* e500 doesn't implement the lowest tsize bit,
|
||||
* or 1K pages.
|
||||
*/
|
||||
tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
|
||||
|
||||
/*
|
||||
* Now find the largest tsize (up to what the guest
|
||||
* requested) that will cover gfn, stay within the
|
||||
* range, and for which gfn and pfn are mutually
|
||||
* aligned.
|
||||
*/
|
||||
|
||||
for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
|
||||
unsigned long gfn_start, gfn_end;
|
||||
tsize_pages = 1UL << (tsize - 2);
|
||||
|
||||
gfn_start = gfn & ~(tsize_pages - 1);
|
||||
gfn_end = gfn_start + tsize_pages;
|
||||
|
||||
if (gfn_start + pfn - gfn < start)
|
||||
continue;
|
||||
if (gfn_end + pfn - gfn > end)
|
||||
continue;
|
||||
if ((gfn & (tsize_pages - 1)) !=
|
||||
(pfn & (tsize_pages - 1)))
|
||||
continue;
|
||||
|
||||
gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
|
||||
pfn &= ~(tsize_pages - 1);
|
||||
break;
|
||||
}
|
||||
} else if (vma && hva >= vma->vm_start &&
|
||||
is_vm_hugetlb_page(vma)) {
|
||||
unsigned long psize = vma_kernel_pagesize(vma);
|
||||
|
||||
tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
|
||||
MAS1_TSIZE_SHIFT;
|
||||
|
||||
/*
|
||||
* Take the largest page size that satisfies both host
|
||||
* and guest mapping
|
||||
*/
|
||||
tsize = min(__ilog2(psize) - 10, tsize);
|
||||
|
||||
/*
|
||||
* e500 doesn't implement the lowest tsize bit,
|
||||
* or 1K pages.
|
||||
*/
|
||||
tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
|
||||
}
|
||||
|
||||
mmap_read_unlock(kvm->mm);
|
||||
}
|
||||
|
||||
if (likely(!pfnmap)) {
|
||||
tsize_pages = 1UL << (tsize + 10 - PAGE_SHIFT);
|
||||
pfn = __kvm_faultin_pfn(slot, gfn, FOLL_WRITE, NULL, &page);
|
||||
if (is_error_noslot_pfn(pfn)) {
|
||||
if (printk_ratelimit())
|
||||
pr_err("%s: real page not found for gfn %lx\n",
|
||||
__func__, (long)gfn);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Align guest and physical address to page map boundaries */
|
||||
pfn &= ~(tsize_pages - 1);
|
||||
gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
|
||||
pfn = __kvm_faultin_pfn(slot, gfn, FOLL_WRITE, &writable, &page);
|
||||
if (is_error_noslot_pfn(pfn)) {
|
||||
if (printk_ratelimit())
|
||||
pr_err("%s: real page not found for gfn %lx\n",
|
||||
__func__, (long)gfn);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
spin_lock(&kvm->mmu_lock);
|
||||
|
@ -472,14 +378,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
|
|||
* can't run hence pfn won't change.
|
||||
*/
|
||||
local_irq_save(flags);
|
||||
ptep = find_linux_pte(pgdir, hva, NULL, NULL);
|
||||
ptep = find_linux_pte(pgdir, hva, NULL, &psize);
|
||||
if (ptep) {
|
||||
pte_t pte = READ_ONCE(*ptep);
|
||||
|
||||
if (pte_present(pte)) {
|
||||
wimg = (pte_val(pte) >> PTE_WIMGE_SHIFT) &
|
||||
MAS2_WIMGE_MASK;
|
||||
local_irq_restore(flags);
|
||||
} else {
|
||||
local_irq_restore(flags);
|
||||
pr_err_ratelimited("%s: pte not present: gfn %lx,pfn %lx\n",
|
||||
|
@ -488,10 +393,72 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
|
|||
goto out;
|
||||
}
|
||||
}
|
||||
writable = kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
|
||||
local_irq_restore(flags);
|
||||
|
||||
if (psize && tlbsel == 1) {
|
||||
unsigned long psize_pages, tsize_pages;
|
||||
unsigned long start, end;
|
||||
unsigned long slot_start, slot_end;
|
||||
|
||||
psize_pages = 1UL << (psize - PAGE_SHIFT);
|
||||
start = pfn & ~(psize_pages - 1);
|
||||
end = start + psize_pages;
|
||||
|
||||
slot_start = pfn - (gfn - slot->base_gfn);
|
||||
slot_end = slot_start + slot->npages;
|
||||
|
||||
if (start < slot_start)
|
||||
start = slot_start;
|
||||
if (end > slot_end)
|
||||
end = slot_end;
|
||||
|
||||
tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
|
||||
MAS1_TSIZE_SHIFT;
|
||||
|
||||
/*
|
||||
* Any page size that doesn't satisfy the host mapping
|
||||
* will fail the start and end tests.
|
||||
*/
|
||||
tsize = min(psize - PAGE_SHIFT + BOOK3E_PAGESZ_4K, tsize);
|
||||
|
||||
/*
|
||||
* e500 doesn't implement the lowest tsize bit,
|
||||
* or 1K pages.
|
||||
*/
|
||||
tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
|
||||
|
||||
/*
|
||||
* Now find the largest tsize (up to what the guest
|
||||
* requested) that will cover gfn, stay within the
|
||||
* range, and for which gfn and pfn are mutually
|
||||
* aligned.
|
||||
*/
|
||||
|
||||
for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
|
||||
unsigned long gfn_start, gfn_end;
|
||||
tsize_pages = 1UL << (tsize - 2);
|
||||
|
||||
gfn_start = gfn & ~(tsize_pages - 1);
|
||||
gfn_end = gfn_start + tsize_pages;
|
||||
|
||||
if (gfn_start + pfn - gfn < start)
|
||||
continue;
|
||||
if (gfn_end + pfn - gfn > end)
|
||||
continue;
|
||||
if ((gfn & (tsize_pages - 1)) !=
|
||||
(pfn & (tsize_pages - 1)))
|
||||
continue;
|
||||
|
||||
gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
|
||||
pfn &= ~(tsize_pages - 1);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg, writable);
|
||||
kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
|
||||
ref, gvaddr, stlbe);
|
||||
writable = tlbe_is_writable(stlbe);
|
||||
|
||||
/* Clear i-cache for new pages */
|
||||
kvmppc_mmu_flush_icache(pfn);
|
||||
|
|
|
@@ -122,6 +122,7 @@ struct kernel_mapping {

extern struct kernel_mapping kernel_map;
extern phys_addr_t phys_ram_base;
extern unsigned long vmemmap_start_pfn;

#define is_kernel_mapping(x) \
((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))

@@ -87,7 +87,7 @@
* Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
* is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
*/
#define vmemmap ((struct page *)VMEMMAP_START - (phys_ram_base >> PAGE_SHIFT))
#define vmemmap ((struct page *)VMEMMAP_START - vmemmap_start_pfn)

#define PCI_IO_SIZE SZ_16M
#define PCI_IO_END VMEMMAP_START

@@ -159,6 +159,7 @@ struct riscv_pmu_snapshot_data {
};

#define RISCV_PMU_RAW_EVENT_MASK GENMASK_ULL(47, 0)
#define RISCV_PMU_PLAT_FW_EVENT_MASK GENMASK_ULL(61, 0)
#define RISCV_PMU_RAW_EVENT_IDX 0x20000
#define RISCV_PLAT_FW_EVENT 0xFFFF
|
||||
|
||||
|
|
|
@@ -3,8 +3,11 @@
#ifndef __ASM_RISCV_SPINLOCK_H
#define __ASM_RISCV_SPINLOCK_H

#ifdef CONFIG_RISCV_COMBO_SPINLOCKS
#ifdef CONFIG_QUEUED_SPINLOCKS
#define _Q_PENDING_LOOPS (1 << 9)
#endif

#ifdef CONFIG_RISCV_COMBO_SPINLOCKS

#define __no_arch_spinlock_redefine
#include <asm/ticket_spinlock.h>
|
||||
|
|
|
@ -23,21 +23,21 @@
|
|||
REG_S a0, TASK_TI_A0(tp)
|
||||
csrr a0, CSR_CAUSE
|
||||
/* Exclude IRQs */
|
||||
blt a0, zero, _new_vmalloc_restore_context_a0
|
||||
blt a0, zero, .Lnew_vmalloc_restore_context_a0
|
||||
|
||||
REG_S a1, TASK_TI_A1(tp)
|
||||
/* Only check new_vmalloc if we are in page/protection fault */
|
||||
li a1, EXC_LOAD_PAGE_FAULT
|
||||
beq a0, a1, _new_vmalloc_kernel_address
|
||||
beq a0, a1, .Lnew_vmalloc_kernel_address
|
||||
li a1, EXC_STORE_PAGE_FAULT
|
||||
beq a0, a1, _new_vmalloc_kernel_address
|
||||
beq a0, a1, .Lnew_vmalloc_kernel_address
|
||||
li a1, EXC_INST_PAGE_FAULT
|
||||
bne a0, a1, _new_vmalloc_restore_context_a1
|
||||
bne a0, a1, .Lnew_vmalloc_restore_context_a1
|
||||
|
||||
_new_vmalloc_kernel_address:
|
||||
.Lnew_vmalloc_kernel_address:
|
||||
/* Is it a kernel address? */
|
||||
csrr a0, CSR_TVAL
|
||||
bge a0, zero, _new_vmalloc_restore_context_a1
|
||||
bge a0, zero, .Lnew_vmalloc_restore_context_a1
|
||||
|
||||
/* Check if a new vmalloc mapping appeared that could explain the trap */
|
||||
REG_S a2, TASK_TI_A2(tp)
|
||||
|
@ -69,7 +69,7 @@ _new_vmalloc_kernel_address:
|
|||
/* Check the value of new_vmalloc for this cpu */
|
||||
REG_L a2, 0(a0)
|
||||
and a2, a2, a1
|
||||
beq a2, zero, _new_vmalloc_restore_context
|
||||
beq a2, zero, .Lnew_vmalloc_restore_context
|
||||
|
||||
/* Atomically reset the current cpu bit in new_vmalloc */
|
||||
amoxor.d a0, a1, (a0)
|
||||
|
@ -83,11 +83,11 @@ _new_vmalloc_kernel_address:
|
|||
csrw CSR_SCRATCH, x0
|
||||
sret
|
||||
|
||||
_new_vmalloc_restore_context:
|
||||
.Lnew_vmalloc_restore_context:
|
||||
REG_L a2, TASK_TI_A2(tp)
|
||||
_new_vmalloc_restore_context_a1:
|
||||
.Lnew_vmalloc_restore_context_a1:
|
||||
REG_L a1, TASK_TI_A1(tp)
|
||||
_new_vmalloc_restore_context_a0:
|
||||
.Lnew_vmalloc_restore_context_a0:
|
||||
REG_L a0, TASK_TI_A0(tp)
|
||||
.endm
|
||||
|
||||
|
@ -278,6 +278,7 @@ SYM_CODE_START_NOALIGN(ret_from_exception)
|
|||
#else
|
||||
sret
|
||||
#endif
|
||||
SYM_INNER_LABEL(ret_from_exception_end, SYM_L_GLOBAL)
|
||||
SYM_CODE_END(ret_from_exception)
|
||||
ASM_NOKPROBE(ret_from_exception)
|
||||
|
||||
|
|
|
@ -23,7 +23,7 @@ struct used_bucket {
|
|||
|
||||
struct relocation_head {
|
||||
struct hlist_node node;
|
||||
struct list_head *rel_entry;
|
||||
struct list_head rel_entry;
|
||||
void *location;
|
||||
};
|
||||
|
||||
|
@ -634,7 +634,7 @@ process_accumulated_relocations(struct module *me,
|
|||
location = rel_head_iter->location;
|
||||
list_for_each_entry_safe(rel_entry_iter,
|
||||
rel_entry_iter_tmp,
|
||||
rel_head_iter->rel_entry,
|
||||
&rel_head_iter->rel_entry,
|
||||
head) {
|
||||
curr_type = rel_entry_iter->type;
|
||||
reloc_handlers[curr_type].reloc_handler(
|
||||
|
@ -704,16 +704,7 @@ static int add_relocation_to_accumulate(struct module *me, int type,
|
|||
return -ENOMEM;
|
||||
}
|
||||
|
||||
rel_head->rel_entry =
|
||||
kmalloc(sizeof(struct list_head), GFP_KERNEL);
|
||||
|
||||
if (!rel_head->rel_entry) {
|
||||
kfree(entry);
|
||||
kfree(rel_head);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(rel_head->rel_entry);
|
||||
INIT_LIST_HEAD(&rel_head->rel_entry);
|
||||
rel_head->location = location;
|
||||
INIT_HLIST_NODE(&rel_head->node);
|
||||
if (!current_head->first) {
|
||||
|
@ -722,7 +713,6 @@ static int add_relocation_to_accumulate(struct module *me, int type,
|
|||
|
||||
if (!bucket) {
|
||||
kfree(entry);
|
||||
kfree(rel_head->rel_entry);
|
||||
kfree(rel_head);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
@ -735,7 +725,7 @@ static int add_relocation_to_accumulate(struct module *me, int type,
|
|||
}
|
||||
|
||||
/* Add relocation to head of discovered rel_head */
|
||||
list_add_tail(&entry->head, rel_head->rel_entry);
|
||||
list_add_tail(&entry->head, &rel_head->rel_entry);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@@ -30,7 +30,7 @@ static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
p->ainsn.api.restore = (unsigned long)p->addr + len;

patch_text_nosync(p->ainsn.api.insn, &p->opcode, len);
patch_text_nosync(p->ainsn.api.insn + len, &insn, GET_INSN_LENGTH(insn));
patch_text_nosync((void *)p->ainsn.api.insn + len, &insn, GET_INSN_LENGTH(insn));
}

static void __kprobes arch_prepare_simulate(struct kprobe *p)
|
||||
|
|
|
@@ -17,6 +17,7 @@
#ifdef CONFIG_FRAME_POINTER

extern asmlinkage void handle_exception(void);
extern unsigned long ret_from_exception_end;

static inline int fp_is_valid(unsigned long fp, unsigned long sp)
{

@@ -71,7 +72,8 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
fp = frame->fp;
pc = ftrace_graph_ret_addr(current, &graph_idx, frame->ra,
&frame->ra);
if (pc == (unsigned long)handle_exception) {
if (pc >= (unsigned long)handle_exception &&
    pc < (unsigned long)&ret_from_exception_end) {
if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
break;
|
||||
|
||||
|
|
|
@@ -35,7 +35,7 @@

int show_unhandled_signals = 1;

static DEFINE_SPINLOCK(die_lock);
static DEFINE_RAW_SPINLOCK(die_lock);

static int copy_code(struct pt_regs *regs, u16 *val, const u16 *insns)
{

@@ -81,7 +81,7 @@ void die(struct pt_regs *regs, const char *str)

oops_enter();

spin_lock_irqsave(&die_lock, flags);
raw_spin_lock_irqsave(&die_lock, flags);
console_verbose();
bust_spinlocks(1);

@@ -100,7 +100,7 @@ void die(struct pt_regs *regs, const char *str)

bust_spinlocks(0);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
spin_unlock_irqrestore(&die_lock, flags);
raw_spin_unlock_irqrestore(&die_lock, flags);
oops_exit();

if (in_interrupt())
|
||||
|
|
|
@@ -33,6 +33,7 @@
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/soc.h>
#include <asm/sparsemem.h>
#include <asm/tlbflush.h>

#include "../kernel/head.h"

@@ -62,6 +63,13 @@ EXPORT_SYMBOL(pgtable_l5_enabled);
phys_addr_t phys_ram_base __ro_after_init;
EXPORT_SYMBOL(phys_ram_base);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
#define VMEMMAP_ADDR_ALIGN (1ULL << SECTION_SIZE_BITS)

unsigned long vmemmap_start_pfn __ro_after_init;
EXPORT_SYMBOL(vmemmap_start_pfn);
#endif

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

@@ -240,8 +248,12 @@ static void __init setup_bootmem(void)
* Make sure we align the start of the memory on a PMD boundary so that
* at worst, we map the linear mapping with PMD mappings.
*/
if (!IS_ENABLED(CONFIG_XIP_KERNEL))
if (!IS_ENABLED(CONFIG_XIP_KERNEL)) {
phys_ram_base = memblock_start_of_DRAM() & PMD_MASK;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
vmemmap_start_pfn = round_down(phys_ram_base, VMEMMAP_ADDR_ALIGN) >> PAGE_SHIFT;
#endif
}

/*
* In 64-bit, any use of __va/__pa before this point is wrong as we

@@ -1101,6 +1113,9 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);

phys_ram_base = CONFIG_PHYS_RAM_BASE;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
vmemmap_start_pfn = round_down(phys_ram_base, VMEMMAP_ADDR_ALIGN) >> PAGE_SHIFT;
#endif
kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE;
kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_start);
|
||||
|
||||
|
|
|
@ -2678,9 +2678,13 @@ static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
|
|||
kvm_s390_clear_float_irqs(dev->kvm);
|
||||
break;
|
||||
case KVM_DEV_FLIC_APF_ENABLE:
|
||||
if (kvm_is_ucontrol(dev->kvm))
|
||||
return -EINVAL;
|
||||
dev->kvm->arch.gmap->pfault_enabled = 1;
|
||||
break;
|
||||
case KVM_DEV_FLIC_APF_DISABLE_WAIT:
|
||||
if (kvm_is_ucontrol(dev->kvm))
|
||||
return -EINVAL;
|
||||
dev->kvm->arch.gmap->pfault_enabled = 0;
|
||||
/*
|
||||
* Make sure no async faults are in transition when
|
||||
|
@ -2894,6 +2898,8 @@ int kvm_set_routing_entry(struct kvm *kvm,
|
|||
switch (ue->type) {
|
||||
/* we store the userspace addresses instead of the guest addresses */
|
||||
case KVM_IRQ_ROUTING_S390_ADAPTER:
|
||||
if (kvm_is_ucontrol(kvm))
|
||||
return -EINVAL;
|
||||
e->set = set_adapter_int;
|
||||
uaddr = gmap_translate(kvm->arch.gmap, ue->u.adapter.summary_addr);
|
||||
if (uaddr == -EFAULT)
|
||||
|
|
|
@@ -854,7 +854,7 @@ unpin:
static void unpin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
gpa_t gpa)
{
hpa_t hpa = (hpa_t) vsie_page->scb_o;
hpa_t hpa = virt_to_phys(vsie_page->scb_o);

if (hpa)
unpin_guest_page(vcpu->kvm, gpa, hpa);
|
||||
|
|
|
@@ -83,7 +83,6 @@ config X86
select ARCH_HAS_DMA_OPS if GART_IOMMU || XEN
select ARCH_HAS_EARLY_DEBUG if KGDB
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_EXECMEM_ROX if X86_64
select ARCH_HAS_FAST_MULTIPLIER
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
|
||||
|
|
|
@@ -217,7 +217,7 @@ fail:

#define nop() asm volatile ("nop")

static inline void serialize(void)
static __always_inline void serialize(void)
{
/* Instruction opcode for SERIALIZE; supported in binutils >= 2.35. */
asm volatile(".byte 0xf, 0x1, 0xe8" ::: "memory");
|
||||
|
|
|
@@ -190,7 +190,8 @@ int ssp_get(struct task_struct *target, const struct user_regset *regset,
struct fpu *fpu = &target->thread.fpu;
struct cet_user_state *cetregs;

if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK))
if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK) ||
    !ssp_active(target, regset))
return -ENODEV;

sync_fpstate(fpu);
|
||||
|
|
|
@@ -50,7 +50,13 @@ void cpu_init_fred_exceptions(void)
FRED_CONFIG_ENTRYPOINT(asm_fred_entrypoint_user));

wrmsrl(MSR_IA32_FRED_STKLVLS, 0);
wrmsrl(MSR_IA32_FRED_RSP0, 0);

/*
* After a CPU offline/online cycle, the FRED RSP0 MSR should be
* resynchronized with its per-CPU cache.
*/
wrmsrl(MSR_IA32_FRED_RSP0, __this_cpu_read(fred_rsp0));

wrmsrl(MSR_IA32_FRED_RSP1, 0);
wrmsrl(MSR_IA32_FRED_RSP2, 0);
wrmsrl(MSR_IA32_FRED_RSP3, 0);
|
||||
|
|
|
@@ -175,7 +175,6 @@ EXPORT_SYMBOL_GPL(arch_static_call_transform);
noinstr void __static_call_update_early(void *tramp, void *func)
{
BUG_ON(system_state != SYSTEM_BOOTING);
BUG_ON(!early_boot_irqs_disabled);
BUG_ON(static_call_initialized);
__text_gen_insn(tramp, JMP32_INSN_OPCODE, tramp, func, JMP32_INSN_SIZE);
sync_core();
|
||||
|
|
|
@@ -1080,7 +1080,8 @@ struct execmem_info __init *execmem_arch_setup(void)

start = MODULES_VADDR + offset;

if (IS_ENABLED(CONFIG_ARCH_HAS_EXECMEM_ROX)) {
if (IS_ENABLED(CONFIG_ARCH_HAS_EXECMEM_ROX) &&
    cpu_feature_enabled(X86_FEATURE_PSE)) {
pgprot = PAGE_KERNEL_ROX;
flags = EXECMEM_KASAN_SHADOW | EXECMEM_ROX_CACHE;
} else {
|
||||
|
|
|
@@ -6844,16 +6844,24 @@ static struct bfq_queue *bfq_waker_bfqq(struct bfq_queue *bfqq)
if (new_bfqq == waker_bfqq) {
/*
* If waker_bfqq is in the merge chain, and current
* is the only procress.
* is the only process, waker_bfqq can be freed.
*/
if (bfqq_process_refs(waker_bfqq) == 1)
return NULL;
break;

return waker_bfqq;
}

new_bfqq = new_bfqq->new_bfqq;
}

/*
* If waker_bfqq is not in the merge chain, and its process reference
* is 0, waker_bfqq can be freed.
*/
if (bfqq_process_refs(waker_bfqq) == 0)
return NULL;

return waker_bfqq;
}
|
||||
|
||||
|
|
|
@@ -610,16 +610,28 @@ acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
return 0;
}

/**
* acpi_video_device_EDID() - Get EDID from ACPI _DDC
* @device: video output device (LCD, CRT, ..)
* @edid: address for returned EDID pointer
* @length: _DDC length to request (must be a multiple of 128)
*
* Get EDID from ACPI _DDC. On success, a pointer to the EDID data is written
* to the @edid address, and the length of the EDID is returned. The caller is
* responsible for freeing the edid pointer.
*
* Return the length of EDID (positive value) on success or error (negative
* value).
*/
static int
acpi_video_device_EDID(struct acpi_video_device *device,
union acpi_object **edid, int length)
acpi_video_device_EDID(struct acpi_video_device *device, void **edid, int length)
{
int status;
acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
union acpi_object arg0 = { ACPI_TYPE_INTEGER };
struct acpi_object_list args = { 1, &arg0 };

int ret;

*edid = NULL;

@@ -636,16 +648,17 @@ acpi_video_device_EDID(struct acpi_video_device *device,

obj = buffer.pointer;

if (obj && obj->type == ACPI_TYPE_BUFFER)
*edid = obj;
else {
if (obj && obj->type == ACPI_TYPE_BUFFER) {
*edid = kmemdup(obj->buffer.pointer, obj->buffer.length, GFP_KERNEL);
ret = *edid ? obj->buffer.length : -ENOMEM;
} else {
acpi_handle_debug(device->dev->handle,
"Invalid _DDC data for length %d\n", length);
status = -EFAULT;
kfree(obj);
ret = -EFAULT;
}

return status;
kfree(obj);
return ret;
}

/* bus */

@@ -1435,9 +1448,7 @@ int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
{
struct acpi_video_bus *video;
struct acpi_video_device *video_device;
union acpi_object *buffer = NULL;
acpi_status status;
int i, length;
int i, length, ret;

if (!device || !acpi_driver_data(device))
return -EINVAL;

@@ -1477,16 +1488,10 @@ int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
}

for (length = 512; length > 0; length -= 128) {
status = acpi_video_device_EDID(video_device, &buffer,
length);
if (ACPI_SUCCESS(status))
break;
ret = acpi_video_device_EDID(video_device, edid, length);
if (ret > 0)
return ret;
}
if (!length)
continue;

*edid = buffer->buffer.pointer;
return length;
}

return -ENODEV;
|
||||
|
|
|
@@ -440,6 +440,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"),
},
},
{
/* Asus Vivobook X1504VAP */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BOARD_NAME, "X1504VAP"),
},
},
{
/* Asus Vivobook X1704VAP */
.matches = {

@@ -646,6 +653,17 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = {
DMI_MATCH(DMI_BOARD_NAME, "GMxHGxx"),
},
},
{
/*
* TongFang GM5HG0A in case of the SKIKK Vanaheim relabel the
* board-name is changed, so check OEM strings instead. Note
* OEM string matches are always exact matches.
* https://bugzilla.kernel.org/show_bug.cgi?id=219614
*/
.matches = {
DMI_EXACT_MATCH(DMI_OEM_STRING, "GM5HG0A"),
},
},
{ }
};

@@ -671,11 +689,11 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
for (i = 0; i < ARRAY_SIZE(override_table); i++) {
const struct irq_override_cmp *entry = &override_table[i];

if (dmi_check_system(entry->system) &&
entry->irq == gsi &&
if (entry->irq == gsi &&
entry->triggering == triggering &&
entry->polarity == polarity &&
entry->shareable == shareable)
entry->shareable == shareable &&
dmi_check_system(entry->system))
return entry->override;
}
||||
|
||||
|
|
|
@ -27,9 +27,17 @@ static ssize_t name##_read(struct file *file, struct kobject *kobj, \
|
|||
loff_t off, size_t count) \
|
||||
{ \
|
||||
struct device *dev = kobj_to_dev(kobj); \
|
||||
cpumask_var_t mask; \
|
||||
ssize_t n; \
|
||||
\
|
||||
return cpumap_print_bitmask_to_buf(buf, topology_##mask(dev->id), \
|
||||
off, count); \
|
||||
if (!alloc_cpumask_var(&mask, GFP_KERNEL)) \
|
||||
return -ENOMEM; \
|
||||
\
|
||||
cpumask_copy(mask, topology_##mask(dev->id)); \
|
||||
n = cpumap_print_bitmask_to_buf(buf, mask, off, count); \
|
||||
free_cpumask_var(mask); \
|
||||
\
|
||||
return n; \
|
||||
} \
|
||||
\
|
||||
static ssize_t name##_list_read(struct file *file, struct kobject *kobj, \
|
||||
|
@ -37,9 +45,17 @@ static ssize_t name##_list_read(struct file *file, struct kobject *kobj, \
|
|||
loff_t off, size_t count) \
|
||||
{ \
|
||||
struct device *dev = kobj_to_dev(kobj); \
|
||||
cpumask_var_t mask; \
|
||||
ssize_t n; \
|
||||
\
|
||||
return cpumap_print_list_to_buf(buf, topology_##mask(dev->id), \
|
||||
off, count); \
|
||||
if (!alloc_cpumask_var(&mask, GFP_KERNEL)) \
|
||||
return -ENOMEM; \
|
||||
\
|
||||
cpumask_copy(mask, topology_##mask(dev->id)); \
|
||||
n = cpumap_print_list_to_buf(buf, mask, off, count); \
|
||||
free_cpumask_var(mask); \
|
||||
\
|
||||
return n; \
|
||||
}
|
||||
|
||||
define_id_show_func(physical_package_id, "%d");
|
||||
|
|
|
@@ -1468,6 +1468,7 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
zram->mem_pool = zs_create_pool(zram->disk->disk_name);
if (!zram->mem_pool) {
vfree(zram->table);
zram->table = NULL;
return false;
}
|
||||
|
||||
|
|
|
@ -1472,10 +1472,15 @@ EXPORT_SYMBOL_GPL(btmtk_usb_setup);
|
|||
|
||||
int btmtk_usb_shutdown(struct hci_dev *hdev)
|
||||
{
|
||||
struct btmtk_data *data = hci_get_priv(hdev);
|
||||
struct btmtk_hci_wmt_params wmt_params;
|
||||
u8 param = 0;
|
||||
int err;
|
||||
|
||||
err = usb_autopm_get_interface(data->intf);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
/* Disable the device */
|
||||
wmt_params.op = BTMTK_WMT_FUNC_CTRL;
|
||||
wmt_params.flag = 0;
|
||||
|
@ -1486,9 +1491,11 @@ int btmtk_usb_shutdown(struct hci_dev *hdev)
|
|||
err = btmtk_usb_hci_wmt_sync(hdev, &wmt_params);
|
||||
if (err < 0) {
|
||||
bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
|
||||
usb_autopm_put_interface(data->intf);
|
||||
return err;
|
||||
}
|
||||
|
||||
usb_autopm_put_interface(data->intf);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(btmtk_usb_shutdown);
|
||||
|
|
|
@@ -1381,6 +1381,7 @@ static void btnxpuart_tx_work(struct work_struct *work)

while ((skb = nxp_dequeue(nxpdev))) {
len = serdev_device_write_buf(serdev, skb->data, skb->len);
serdev_device_wait_until_sent(serdev, 0);
hdev->stat.byte_tx += len;

skb_pull(skb, len);
|
||||
|
|
|
@@ -917,7 +917,7 @@ static int mhi_pci_claim(struct mhi_controller *mhi_cntrl,
return err;
}

mhi_cntrl->regs = pcim_iomap_region(pdev, 1 << bar_num, pci_name(pdev));
mhi_cntrl->regs = pcim_iomap_region(pdev, bar_num, pci_name(pdev));
if (IS_ERR(mhi_cntrl->regs)) {
err = PTR_ERR(mhi_cntrl->regs);
dev_err(&pdev->dev, "failed to map pci region: %d\n", err);
|
||||
|
|
|
@@ -325,8 +325,6 @@ config QORIQ_CPUFREQ
This adds the CPUFreq driver support for Freescale QorIQ SoCs
which are capable of changing the CPU's frequency dynamically.

endif

config ACPI_CPPC_CPUFREQ
tristate "CPUFreq driver based on the ACPI CPPC spec"
depends on ACPI_PROCESSOR

@@ -355,4 +353,6 @@ config ACPI_CPPC_CPUFREQ_FIE

If in doubt, say N.

endif

endmenu
|
||||
|
|
|
@@ -504,12 +504,12 @@ static int sbi_cpuidle_probe(struct platform_device *pdev)
int cpu, ret;
struct cpuidle_driver *drv;
struct cpuidle_device *dev;
struct device_node *np, *pds_node;
struct device_node *pds_node;

/* Detect OSI support based on CPU DT nodes */
sbi_cpuidle_use_osi = true;
for_each_possible_cpu(cpu) {
np = of_cpu_device_node_get(cpu);
struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);
if (np &&
of_property_present(np, "power-domains") &&
of_property_present(np, "power-domain-names")) {
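The `__free(device_node)` annotation used in this hunk comes from the kernel's scope-based cleanup helpers (linux/cleanup.h together with the device_node class declared in of.h): the reference is dropped via of_node_put() automatically when the variable goes out of scope. A minimal sketch of the pattern is below; the helper name cpu_has_power_domains() is an assumption invented for the illustration, not a function in this driver.

#include <linux/cleanup.h>
#include <linux/of.h>

/* Sketch (assumed helper): the node returned by of_cpu_device_node_get()
 * is released with of_node_put() automatically when @np goes out of scope,
 * so early returns need no explicit put. */
static bool cpu_has_power_domains(int cpu)
{
	struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);

	return np && of_property_present(np, "power-domains") &&
	       of_property_present(np, "power-domain-names");
}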
|
||||
|
|
|
@@ -10,25 +10,27 @@
* DOC: teo-description
*
* The idea of this governor is based on the observation that on many systems
* timer events are two or more orders of magnitude more frequent than any
* other interrupts, so they are likely to be the most significant cause of CPU
* wakeups from idle states. Moreover, information about what happened in the
* (relatively recent) past can be used to estimate whether or not the deepest
* idle state with target residency within the (known) time till the closest
* timer event, referred to as the sleep length, is likely to be suitable for
* the upcoming CPU idle period and, if not, then which of the shallower idle
* states to choose instead of it.
* timer interrupts are two or more orders of magnitude more frequent than any
* other interrupt types, so they are likely to dominate CPU wakeup patterns.
* Moreover, in principle, the time when the next timer event is going to occur
* can be determined at the idle state selection time, although doing that may
* be costly, so it can be regarded as the most reliable source of information
* for idle state selection.
*
* Of course, non-timer wakeup sources are more important in some use cases
* which can be covered by taking a few most recent idle time intervals of the
* CPU into account. However, even in that context it is not necessary to
* consider idle duration values greater than the sleep length, because the
* closest timer will ultimately wake up the CPU anyway unless it is woken up
* earlier.
* Of course, non-timer wakeup sources are more important in some use cases,
* but even then it is generally unnecessary to consider idle duration values
* greater than the time till the next timer event, referred to as the sleep
* length in what follows, because the closest timer will ultimately wake up the
* CPU anyway unless it is woken up earlier.
*
* Thus this governor estimates whether or not the prospective idle duration of
* a CPU is likely to be significantly shorter than the sleep length and selects
* an idle state for it accordingly.
* However, since obtaining the sleep length may be costly, the governor first
* checks if it can select a shallow idle state using wakeup pattern information
* from recent times, in which case it can do without knowing the sleep length
* at all. For this purpose, it counts CPU wakeup events and looks for an idle
* state whose target residency has not exceeded the idle duration (measured
* after wakeup) in the majority of relevant recent cases. If the target
* residency of that state is small enough, it may be used right away and the
* sleep length need not be determined.
*
* The computations carried out by this governor are based on using bins whose
* boundaries are aligned with the target residency parameter values of the CPU

@@ -39,7 +41,11 @@
* idle state 2, the third bin spans from the target residency of idle state 2
* up to, but not including, the target residency of idle state 3 and so on.
* The last bin spans from the target residency of the deepest idle state
* supplied by the driver to infinity.
* supplied by the driver to the scheduler tick period length or to infinity if
* the tick period length is less than the target residency of that state. In
* the latter case, the governor also counts events with the measured idle
* duration between the tick period length and the target residency of the
* deepest idle state.
*
* Two metrics called "hits" and "intercepts" are associated with each bin.
* They are updated every time before selecting an idle state for the given CPU

@@ -49,47 +55,46 @@
* sleep length and the idle duration measured after CPU wakeup fall into the
* same bin (that is, the CPU appears to wake up "on time" relative to the sleep
* length). In turn, the "intercepts" metric reflects the relative frequency of
* situations in which the measured idle duration is so much shorter than the
* sleep length that the bin it falls into corresponds to an idle state
* shallower than the one whose bin is fallen into by the sleep length (these
* situations are referred to as "intercepts" below).
* non-timer wakeup events for which the measured idle duration falls into a bin
* that corresponds to an idle state shallower than the one whose bin is fallen
* into by the sleep length (these events are also referred to as "intercepts"
* below).
*
* In order to select an idle state for a CPU, the governor takes the following
* steps (modulo the possible latency constraint that must be taken into account
* too):
*
* 1. Find the deepest CPU idle state whose target residency does not exceed
* the current sleep length (the candidate idle state) and compute 2 sums as
* follows:
* 1. Find the deepest enabled CPU idle state (the candidate idle state) and
* compute 2 sums as follows:
*
* - The sum of the "hits" and "intercepts" metrics for the candidate state
* and all of the deeper idle states (it represents the cases in which the
* CPU was idle long enough to avoid being intercepted if the sleep length
* had been equal to the current one).
* - The sum of the "hits" metric for all of the idle states shallower than
* the candidate one (it represents the cases in which the CPU was likely
* woken up by a timer).
*
* - The sum of the "intercepts" metrics for all of the idle states shallower
* than the candidate one (it represents the cases in which the CPU was not
* idle long enough to avoid being intercepted if the sleep length had been
* equal to the current one).
* - The sum of the "intercepts" metric for all of the idle states shallower
* than the candidate one (it represents the cases in which the CPU was
* likely woken up by a non-timer wakeup source).
*
* 2. If the second sum is greater than the first one the CPU is likely to wake
* up early, so look for an alternative idle state to select.
* 2. If the second sum computed in step 1 is greater than a half of the sum of
* both metrics for the candidate state bin and all subsequent bins (if any),
* a shallower idle state is likely to be more suitable, so look for it.
*
* - Traverse the idle states shallower than the candidate one in the
* - Traverse the enabled idle states shallower than the candidate one in the
* descending order.
*
* - For each of them compute the sum of the "intercepts" metrics over all
* of the idle states between it and the candidate one (including the
* former and excluding the latter).
*
* - If each of these sums that needs to be taken into account (because the
* check related to it has indicated that the CPU is likely to wake up
* early) is greater than a half of the corresponding sum computed in step
* 1 (which means that the target residency of the state in question had
* not exceeded the idle duration in over a half of the relevant cases),
* select the given idle state instead of the candidate one.
* - If this sum is greater than a half of the second sum computed in step 1,
* use the given idle state as the new candidate one.
*
* 3. By default, select the candidate state.
* 3. If the current candidate state is state 0 or its target residency is short
* enough, return it and prevent the scheduler tick from being stopped.
*
* 4. Obtain the sleep length value and check if it is below the target
* residency of the current candidate state, in which case a new shallower
* candidate state needs to be found, so look for it.
*/

#include <linux/cpuidle.h>
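To make the hits/intercepts bookkeeping described above easier to follow, here is a small, self-contained C sketch of the bin comparison. The array-based layout and every name in it (teo_sketch_bin, teo_sketch_select) are assumptions made for illustration only; they are not the governor's actual data structures or code, and the sketch ignores the latency constraint, the tick handling and the sleep-length lookup of steps 3 and 4.

#include <stddef.h>

struct teo_sketch_bin {
	unsigned int hits;
	unsigned int intercepts;
};

/*
 * Start from the deepest enabled state (@candidate) and move to a shallower
 * one only if most of the relevant recent wakeups were "intercepts" below it.
 */
static size_t teo_sketch_select(const struct teo_sketch_bin *bins,
				size_t nr_bins, size_t candidate)
{
	unsigned int at_or_above = 0, below = 0;
	size_t i;

	/* Both metrics for the candidate bin and all deeper bins (step 1). */
	for (i = candidate; i < nr_bins; i++)
		at_or_above += bins[i].hits + bins[i].intercepts;

	/* Intercepts for all bins shallower than the candidate (step 1). */
	for (i = 0; i < candidate; i++)
		below += bins[i].intercepts;

	/* Step 2: only look for a shallower state if intercepts dominate. */
	if (2 * below <= at_or_above)
		return candidate;

	/* Walk the shallower states in descending order. */
	for (i = candidate; i-- > 0; ) {
		unsigned int sum = 0;
		size_t j;

		for (j = i; j < candidate; j++)
			sum += bins[j].intercepts;

		/* Deepest shallower state covering most of the intercepts. */
		if (2 * sum > below)
			return i;
	}

	return 0;
}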
|
||||
|
|
|
@@ -237,9 +237,9 @@ static const struct loongson_gpio_chip_data loongson_gpio_ls2k2000_data1 = {
static const struct loongson_gpio_chip_data loongson_gpio_ls2k2000_data2 = {
.label = "ls2k2000_gpio",
.mode = BIT_CTRL_MODE,
.conf_offset = 0x84,
.in_offset = 0x88,
.out_offset = 0x80,
.conf_offset = 0x4,
.in_offset = 0x8,
.out_offset = 0x0,
};

static const struct loongson_gpio_chip_data loongson_gpio_ls3a5000_data = {
|
||||
|
|
|
@ -1027,6 +1027,30 @@ static void gpio_sim_device_deactivate(struct gpio_sim_device *dev)
|
|||
dev->pdev = NULL;
|
||||
}
|
||||
|
||||
static void
|
||||
gpio_sim_device_lockup_configfs(struct gpio_sim_device *dev, bool lock)
|
||||
{
|
||||
struct configfs_subsystem *subsys = dev->group.cg_subsys;
|
||||
struct gpio_sim_bank *bank;
|
||||
struct gpio_sim_line *line;
|
||||
|
||||
/*
|
||||
* The device only needs to depend on leaf line entries. This is
|
||||
* sufficient to lock up all the configfs entries that the
|
||||
* instantiated, alive device depends on.
|
||||
*/
|
||||
list_for_each_entry(bank, &dev->bank_list, siblings) {
|
||||
list_for_each_entry(line, &bank->line_list, siblings) {
|
||||
if (lock)
|
||||
WARN_ON(configfs_depend_item_unlocked(
|
||||
subsys, &line->group.cg_item));
|
||||
else
|
||||
configfs_undepend_item_unlocked(
|
||||
&line->group.cg_item);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
gpio_sim_device_config_live_store(struct config_item *item,
|
||||
const char *page, size_t count)
|
||||
|
@ -1039,14 +1063,24 @@ gpio_sim_device_config_live_store(struct config_item *item,
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
guard(mutex)(&dev->lock);
|
||||
if (live)
|
||||
gpio_sim_device_lockup_configfs(dev, true);
|
||||
|
||||
if (live == gpio_sim_device_is_live(dev))
|
||||
ret = -EPERM;
|
||||
else if (live)
|
||||
ret = gpio_sim_device_activate(dev);
|
||||
else
|
||||
gpio_sim_device_deactivate(dev);
|
||||
scoped_guard(mutex, &dev->lock) {
|
||||
if (live == gpio_sim_device_is_live(dev))
|
||||
ret = -EPERM;
|
||||
else if (live)
|
||||
ret = gpio_sim_device_activate(dev);
|
||||
else
|
||||
gpio_sim_device_deactivate(dev);
|
||||
}
|
||||
|
||||
/*
|
||||
* Undepend is required only if device disablement (live == 0)
|
||||
* succeeds or if device enablement (live == 1) fails.
|
||||
*/
|
||||
if (live == !!ret)
|
||||
gpio_sim_device_lockup_configfs(dev, false);
|
||||
|
||||
return ret ?: count;
|
||||
}
|
||||
|
|
|
@ -1410,7 +1410,7 @@ gpio_virtuser_make_lookup_table(struct gpio_virtuser_device *dev)
|
|||
size_t num_entries = gpio_virtuser_get_lookup_count(dev);
|
||||
struct gpio_virtuser_lookup_entry *entry;
|
||||
struct gpio_virtuser_lookup *lookup;
|
||||
unsigned int i = 0;
|
||||
unsigned int i = 0, idx;
|
||||
|
||||
lockdep_assert_held(&dev->lock);
|
||||
|
||||
|
@ -1424,12 +1424,12 @@ gpio_virtuser_make_lookup_table(struct gpio_virtuser_device *dev)
|
|||
return -ENOMEM;
|
||||
|
||||
list_for_each_entry(lookup, &dev->lookup_list, siblings) {
|
||||
idx = 0;
|
||||
list_for_each_entry(entry, &lookup->entry_list, siblings) {
|
||||
table->table[i] =
|
||||
table->table[i++] =
|
||||
GPIO_LOOKUP_IDX(entry->key,
|
||||
entry->offset < 0 ? U16_MAX : entry->offset,
|
||||
lookup->con_id, i, entry->flags);
|
||||
i++;
|
||||
lookup->con_id, idx++, entry->flags);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1439,6 +1439,15 @@ gpio_virtuser_make_lookup_table(struct gpio_virtuser_device *dev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
gpio_virtuser_remove_lookup_table(struct gpio_virtuser_device *dev)
|
||||
{
|
||||
gpiod_remove_lookup_table(dev->lookup_table);
|
||||
kfree(dev->lookup_table->dev_id);
|
||||
kfree(dev->lookup_table);
|
||||
dev->lookup_table = NULL;
|
||||
}
|
||||
|
||||
static struct fwnode_handle *
|
||||
gpio_virtuser_make_device_swnode(struct gpio_virtuser_device *dev)
|
||||
{
|
||||
|
@ -1487,10 +1496,8 @@ gpio_virtuser_device_activate(struct gpio_virtuser_device *dev)
|
|||
pdevinfo.fwnode = swnode;
|
||||
|
||||
ret = gpio_virtuser_make_lookup_table(dev);
|
||||
if (ret) {
|
||||
fwnode_remove_software_node(swnode);
|
||||
return ret;
|
||||
}
|
||||
if (ret)
|
||||
goto err_remove_swnode;
|
||||
|
||||
reinit_completion(&dev->probe_completion);
|
||||
dev->driver_bound = false;
|
||||
|
@ -1498,23 +1505,31 @@ gpio_virtuser_device_activate(struct gpio_virtuser_device *dev)
|
|||
|
||||
pdev = platform_device_register_full(&pdevinfo);
|
||||
if (IS_ERR(pdev)) {
|
||||
ret = PTR_ERR(pdev);
|
||||
bus_unregister_notifier(&platform_bus_type, &dev->bus_notifier);
|
||||
fwnode_remove_software_node(swnode);
|
||||
return PTR_ERR(pdev);
|
||||
goto err_remove_lookup_table;
|
||||
}
|
||||
|
||||
wait_for_completion(&dev->probe_completion);
|
||||
bus_unregister_notifier(&platform_bus_type, &dev->bus_notifier);
|
||||
|
||||
if (!dev->driver_bound) {
|
||||
platform_device_unregister(pdev);
|
||||
fwnode_remove_software_node(swnode);
|
||||
return -ENXIO;
|
||||
ret = -ENXIO;
|
||||
goto err_unregister_pdev;
|
||||
}
|
||||
|
||||
dev->pdev = pdev;
|
||||
|
||||
return 0;
|
||||
|
||||
err_unregister_pdev:
|
||||
platform_device_unregister(pdev);
|
||||
err_remove_lookup_table:
|
||||
gpio_virtuser_remove_lookup_table(dev);
|
||||
err_remove_swnode:
|
||||
fwnode_remove_software_node(swnode);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -1526,10 +1541,33 @@ gpio_virtuser_device_deactivate(struct gpio_virtuser_device *dev)
|
|||
|
||||
swnode = dev_fwnode(&dev->pdev->dev);
|
||||
platform_device_unregister(dev->pdev);
|
||||
gpio_virtuser_remove_lookup_table(dev);
|
||||
fwnode_remove_software_node(swnode);
|
||||
dev->pdev = NULL;
|
||||
gpiod_remove_lookup_table(dev->lookup_table);
|
||||
kfree(dev->lookup_table);
|
||||
}
|
||||
|
||||
static void
|
||||
gpio_virtuser_device_lockup_configfs(struct gpio_virtuser_device *dev, bool lock)
|
||||
{
|
||||
struct configfs_subsystem *subsys = dev->group.cg_subsys;
|
||||
struct gpio_virtuser_lookup_entry *entry;
|
||||
struct gpio_virtuser_lookup *lookup;
|
||||
|
||||
/*
|
||||
* The device only needs to depend on leaf lookup entries. This is
|
||||
* sufficient to lock up all the configfs entries that the
|
||||
* instantiated, alive device depends on.
|
||||
*/
|
||||
list_for_each_entry(lookup, &dev->lookup_list, siblings) {
|
||||
list_for_each_entry(entry, &lookup->entry_list, siblings) {
|
||||
if (lock)
|
||||
WARN_ON(configfs_depend_item_unlocked(
|
||||
subsys, &entry->group.cg_item));
|
||||
else
|
||||
configfs_undepend_item_unlocked(
|
||||
&entry->group.cg_item);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
|
@ -1544,15 +1582,24 @@ gpio_virtuser_device_config_live_store(struct config_item *item,
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
guard(mutex)(&dev->lock);
|
||||
|
||||
if (live == gpio_virtuser_device_is_live(dev))
|
||||
return -EPERM;
|
||||
|
||||
if (live)
|
||||
ret = gpio_virtuser_device_activate(dev);
|
||||
else
|
||||
gpio_virtuser_device_deactivate(dev);
|
||||
gpio_virtuser_device_lockup_configfs(dev, true);
|
||||
|
||||
scoped_guard(mutex, &dev->lock) {
|
||||
if (live == gpio_virtuser_device_is_live(dev))
|
||||
ret = -EPERM;
|
||||
else if (live)
|
||||
ret = gpio_virtuser_device_activate(dev);
|
||||
else
|
||||
gpio_virtuser_device_deactivate(dev);
|
||||
}
|
||||
|
||||
/*
|
||||
* Undepend is required only if device disablement (live == 0)
|
||||
* succeeds or if device enablement (live == 1) fails.
|
||||
*/
|
||||
if (live == !!ret)
|
||||
gpio_virtuser_device_lockup_configfs(dev, false);
|
||||
|
||||
return ret ?: count;
|
||||
}
|
||||
|
|
|
@ -65,7 +65,7 @@ struct xgpio_instance {
|
|||
DECLARE_BITMAP(state, 64);
|
||||
DECLARE_BITMAP(last_irq_read, 64);
|
||||
DECLARE_BITMAP(dir, 64);
|
||||
spinlock_t gpio_lock; /* For serializing operations */
|
||||
raw_spinlock_t gpio_lock; /* For serializing operations */
|
||||
int irq;
|
||||
DECLARE_BITMAP(enable, 64);
|
||||
DECLARE_BITMAP(rising_edge, 64);
|
||||
|
@ -179,14 +179,14 @@ static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
|
|||
struct xgpio_instance *chip = gpiochip_get_data(gc);
|
||||
int bit = xgpio_to_bit(chip, gpio);
|
||||
|
||||
spin_lock_irqsave(&chip->gpio_lock, flags);
|
||||
raw_spin_lock_irqsave(&chip->gpio_lock, flags);
|
||||
|
||||
/* Write to GPIO signal and set its direction to output */
|
||||
__assign_bit(bit, chip->state, val);
|
||||
|
||||
xgpio_write_ch(chip, XGPIO_DATA_OFFSET, bit, chip->state);
|
||||
|
||||
spin_unlock_irqrestore(&chip->gpio_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -210,7 +210,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
|
|||
bitmap_remap(hw_mask, mask, chip->sw_map, chip->hw_map, 64);
|
||||
bitmap_remap(hw_bits, bits, chip->sw_map, chip->hw_map, 64);
|
||||
|
||||
spin_lock_irqsave(&chip->gpio_lock, flags);
|
||||
raw_spin_lock_irqsave(&chip->gpio_lock, flags);
|
||||
|
||||
bitmap_replace(state, chip->state, hw_bits, hw_mask, 64);
|
||||
|
||||
|
@ -218,7 +218,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
|
|||
|
||||
bitmap_copy(chip->state, state, 64);
|
||||
|
||||
spin_unlock_irqrestore(&chip->gpio_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -236,13 +236,13 @@ static int xgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
|
|||
struct xgpio_instance *chip = gpiochip_get_data(gc);
|
||||
int bit = xgpio_to_bit(chip, gpio);
|
||||
|
||||
spin_lock_irqsave(&chip->gpio_lock, flags);
|
||||
raw_spin_lock_irqsave(&chip->gpio_lock, flags);
|
||||
|
||||
/* Set the GPIO bit in shadow register and set direction as input */
|
||||
__set_bit(bit, chip->dir);
|
||||
xgpio_write_ch(chip, XGPIO_TRI_OFFSET, bit, chip->dir);
|
||||
|
||||
spin_unlock_irqrestore(&chip->gpio_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -265,7 +265,7 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
|
|||
struct xgpio_instance *chip = gpiochip_get_data(gc);
|
||||
int bit = xgpio_to_bit(chip, gpio);
|
||||
|
||||
spin_lock_irqsave(&chip->gpio_lock, flags);
|
||||
raw_spin_lock_irqsave(&chip->gpio_lock, flags);
|
||||
|
||||
/* Write state of GPIO signal */
|
||||
__assign_bit(bit, chip->state, val);
|
||||
|
@ -275,7 +275,7 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
|
|||
__clear_bit(bit, chip->dir);
|
||||
xgpio_write_ch(chip, XGPIO_TRI_OFFSET, bit, chip->dir);
|
||||
|
||||
spin_unlock_irqrestore(&chip->gpio_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -398,7 +398,7 @@ static void xgpio_irq_mask(struct irq_data *irq_data)
|
|||
int bit = xgpio_to_bit(chip, irq_offset);
|
||||
u32 mask = BIT(bit / 32), temp;
|
||||
|
||||
spin_lock_irqsave(&chip->gpio_lock, flags);
|
||||
raw_spin_lock_irqsave(&chip->gpio_lock, flags);
|
||||
|
||||
__clear_bit(bit, chip->enable);
|
||||
|
||||
|
@ -408,7 +408,7 @@ static void xgpio_irq_mask(struct irq_data *irq_data)
|
|||
temp &= ~mask;
|
||||
xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, temp);
|
||||
}
|
||||
spin_unlock_irqrestore(&chip->gpio_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
|
||||
|
||||
gpiochip_disable_irq(&chip->gc, irq_offset);
|
||||
}
|
||||
|
@ -428,7 +428,7 @@ static void xgpio_irq_unmask(struct irq_data *irq_data)
|
|||
|
||||
gpiochip_enable_irq(&chip->gc, irq_offset);
|
||||
|
||||
spin_lock_irqsave(&chip->gpio_lock, flags);
|
||||
raw_spin_lock_irqsave(&chip->gpio_lock, flags);
|
||||
|
||||
__set_bit(bit, chip->enable);
|
||||
|
||||
|
@ -447,7 +447,7 @@ static void xgpio_irq_unmask(struct irq_data *irq_data)
|
|||
xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, val);
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&chip->gpio_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -512,7 +512,7 @@ static void xgpio_irqhandler(struct irq_desc *desc)
|
|||
|
||||
chained_irq_enter(irqchip, desc);
|
||||
|
||||
spin_lock(&chip->gpio_lock);
|
||||
raw_spin_lock(&chip->gpio_lock);
|
||||
|
||||
xgpio_read_ch_all(chip, XGPIO_DATA_OFFSET, all);
|
||||
|
||||
|
@ -529,7 +529,7 @@ static void xgpio_irqhandler(struct irq_desc *desc)
|
|||
bitmap_copy(chip->last_irq_read, all, 64);
|
||||
bitmap_or(all, rising, falling, 64);
|
||||
|
||||
spin_unlock(&chip->gpio_lock);
|
||||
raw_spin_unlock(&chip->gpio_lock);
|
||||
|
||||
dev_dbg(gc->parent, "IRQ rising %*pb falling %*pb\n", 64, rising, 64, falling);
|
||||
|
||||
|
@ -620,7 +620,7 @@ static int xgpio_probe(struct platform_device *pdev)
|
|||
bitmap_set(chip->hw_map, 0, width[0]);
|
||||
bitmap_set(chip->hw_map, 32, width[1]);
|
||||
|
||||
spin_lock_init(&chip->gpio_lock);
|
||||
raw_spin_lock_init(&chip->gpio_lock);
|
||||
|
||||
chip->gc.base = -1;
|
||||
chip->gc.ngpio = bitmap_weight(chip->hw_map, 64);
|
||||
|
|
|
@ -715,8 +715,9 @@ err:
|
|||
void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle)
|
||||
{
|
||||
enum amd_powergating_state state = idle ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE;
|
||||
if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11 &&
|
||||
((adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK) <= 64)) {
|
||||
if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11 &&
|
||||
((adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK) <= 64)) ||
|
||||
(IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 12)) {
|
||||
pr_debug("GFXOFF is %s\n", idle ? "enabled" : "disabled");
|
||||
amdgpu_gfx_off_ctrl(adev, idle);
|
||||
} else if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 9) &&
|
||||
|
|
|
@ -122,6 +122,10 @@ static int amdgpu_is_fw_attestation_supported(struct amdgpu_device *adev)
|
|||
if (adev->flags & AMD_IS_APU)
|
||||
return 0;
|
||||
|
||||
if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(14, 0, 2) ||
|
||||
amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(14, 0, 3))
|
||||
return 0;
|
||||
|
||||
if (adev->asic_type >= CHIP_SIENNA_CICHLID)
|
||||
return 1;
|
||||
|
||||
|
|
|
@ -2054,6 +2054,7 @@ void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
|
|||
{
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
u32 idx;
|
||||
bool sched_work = false;
|
||||
|
||||
if (!adev->gfx.enable_cleaner_shader)
|
||||
return;
|
||||
|
@ -2072,9 +2073,12 @@ void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
|
|||
mutex_lock(&adev->enforce_isolation_mutex);
|
||||
if (adev->enforce_isolation[idx]) {
|
||||
if (adev->kfd.init_complete)
|
||||
amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
|
||||
sched_work = true;
|
||||
}
|
||||
mutex_unlock(&adev->enforce_isolation_mutex);
|
||||
|
||||
if (sched_work)
|
||||
amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -2090,6 +2094,7 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
|
|||
{
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
u32 idx;
|
||||
bool sched_work = false;
|
||||
|
||||
if (!adev->gfx.enable_cleaner_shader)
|
||||
return;
|
||||
|
@ -2105,9 +2110,12 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
|
|||
mutex_lock(&adev->enforce_isolation_mutex);
|
||||
if (adev->enforce_isolation[idx]) {
|
||||
if (adev->kfd.init_complete)
|
||||
amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
|
||||
sched_work = true;
|
||||
}
|
||||
mutex_unlock(&adev->enforce_isolation_mutex);
|
||||
|
||||
if (sched_work)
|
||||
amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -191,8 +191,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
|
|||
need_ctx_switch = ring->current_ctx != fence_ctx;
|
||||
if (ring->funcs->emit_pipeline_sync && job &&
|
||||
((tmp = amdgpu_sync_get_fence(&job->explicit_sync)) ||
|
||||
(amdgpu_sriov_vf(adev) && need_ctx_switch) ||
|
||||
amdgpu_vm_need_pipeline_sync(ring, job))) {
|
||||
need_ctx_switch || amdgpu_vm_need_pipeline_sync(ring, job))) {
|
||||
|
||||
need_pipe_sync = true;
|
||||
|
||||
if (tmp)
|
||||
|
|
|
@@ -854,8 +854,8 @@ static int it6263_probe(struct i2c_client *client)
it->lvds_i2c = devm_i2c_new_dummy_device(dev, client->adapter,
LVDS_INPUT_CTRL_I2C_ADDR);
if (IS_ERR(it->lvds_i2c))
dev_err_probe(it->dev, PTR_ERR(it->lvds_i2c),
"failed to allocate I2C device for LVDS\n");
return dev_err_probe(it->dev, PTR_ERR(it->lvds_i2c),
"failed to allocate I2C device for LVDS\n");

it->lvds_regmap = devm_regmap_init_i2c(it->lvds_i2c,
&it6263_lvds_regmap_config);
|
||||
|
|
|
@@ -596,6 +596,9 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
return ERR_PTR(-EINVAL);

if (bridge_connector->bridge_hdmi) {
if (!connector->ycbcr_420_allowed)
supported_formats &= ~BIT(HDMI_COLORSPACE_YUV420);

bridge = bridge_connector->bridge_hdmi;

ret = drmm_connector_hdmi_init(drm, connector,

@@ -207,6 +207,10 @@ void drm_bridge_add(struct drm_bridge *bridge)
{
mutex_init(&bridge->hpd_mutex);

if (bridge->ops & DRM_BRIDGE_OP_HDMI)
bridge->ycbcr_420_allowed = !!(bridge->supported_formats &
BIT(HDMI_COLORSPACE_YUV420));

mutex_lock(&bridge_lock);
list_add_tail(&bridge->list, &bridge_list);
mutex_unlock(&bridge_lock);

@@ -592,6 +592,9 @@ int drmm_connector_hdmi_init(struct drm_device *dev,
if (!supported_formats || !(supported_formats & BIT(HDMI_COLORSPACE_RGB)))
return -EINVAL;

if (connector->ycbcr_420_allowed != !!(supported_formats & BIT(HDMI_COLORSPACE_YUV420)))
return -EINVAL;

if (!(max_bpc == 8 || max_bpc == 10 || max_bpc == 12))
return -EINVAL;
|
||||
|
|
|
@@ -1694,7 +1694,7 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *
* arithmetic related to alignment and offset calculation.
*/
if (is_gen12_ccs_cc_plane(&fb->base, i)) {
if (IS_ALIGNED(fb->base.offsets[i], PAGE_SIZE))
if (IS_ALIGNED(fb->base.offsets[i], 64))
continue;
else
return -EINVAL;
|
||||
|
|
|
@ -14,9 +14,6 @@ config DRM_MEDIATEK
|
|||
select DRM_BRIDGE_CONNECTOR
|
||||
select DRM_MIPI_DSI
|
||||
select DRM_PANEL
|
||||
select MEMORY
|
||||
select MTK_SMI
|
||||
select PHY_MTK_MIPI_DSI
|
||||
select VIDEOMODE_HELPERS
|
||||
help
|
||||
Choose this option if you have a Mediatek SoCs.
|
||||
|
@ -27,7 +24,6 @@ config DRM_MEDIATEK
|
|||
config DRM_MEDIATEK_DP
|
||||
tristate "DRM DPTX Support for MediaTek SoCs"
|
||||
depends on DRM_MEDIATEK
|
||||
select PHY_MTK_DP
|
||||
select DRM_DISPLAY_HELPER
|
||||
select DRM_DISPLAY_DP_HELPER
|
||||
select DRM_DISPLAY_DP_AUX_BUS
|
||||
|
@ -38,6 +34,5 @@ config DRM_MEDIATEK_HDMI
|
|||
tristate "DRM HDMI Support for Mediatek SoCs"
|
||||
depends on DRM_MEDIATEK
|
||||
select SND_SOC_HDMI_CODEC if SND_SOC
|
||||
select PHY_MTK_HDMI
|
||||
help
|
||||
DRM/KMS HDMI driver for Mediatek SoCs
|
||||
|
|
|
@ -112,6 +112,11 @@ static void mtk_drm_finish_page_flip(struct mtk_crtc *mtk_crtc)
|
|||
|
||||
drm_crtc_handle_vblank(&mtk_crtc->base);
|
||||
|
||||
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
|
||||
if (mtk_crtc->cmdq_client.chan)
|
||||
return;
|
||||
#endif
|
||||
|
||||
spin_lock_irqsave(&mtk_crtc->config_lock, flags);
|
||||
if (!mtk_crtc->config_updating && mtk_crtc->pending_needs_vblank) {
|
||||
mtk_crtc_finish_page_flip(mtk_crtc);
|
||||
|
@ -284,10 +289,8 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
|
|||
state = to_mtk_crtc_state(mtk_crtc->base.state);
|
||||
|
||||
spin_lock_irqsave(&mtk_crtc->config_lock, flags);
|
||||
if (mtk_crtc->config_updating) {
|
||||
spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
|
||||
if (mtk_crtc->config_updating)
|
||||
goto ddp_cmdq_cb_out;
|
||||
}
|
||||
|
||||
state->pending_config = false;
|
||||
|
||||
|
@ -315,10 +318,15 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
|
|||
mtk_crtc->pending_async_planes = false;
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
|
||||
|
||||
ddp_cmdq_cb_out:
|
||||
|
||||
if (mtk_crtc->pending_needs_vblank) {
|
||||
mtk_crtc_finish_page_flip(mtk_crtc);
|
||||
mtk_crtc->pending_needs_vblank = false;
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
|
||||
|
||||
mtk_crtc->cmdq_vblank_cnt = 0;
|
||||
wake_up(&mtk_crtc->cb_blocking_queue);
|
||||
}
|
||||
|
@ -606,13 +614,18 @@ static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank)
|
|||
*/
|
||||
mtk_crtc->cmdq_vblank_cnt = 3;
|
||||
|
||||
spin_lock_irqsave(&mtk_crtc->config_lock, flags);
|
||||
mtk_crtc->config_updating = false;
|
||||
spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
|
||||
|
||||
mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle);
|
||||
mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
|
||||
}
|
||||
#endif
|
||||
#else
|
||||
spin_lock_irqsave(&mtk_crtc->config_lock, flags);
|
||||
mtk_crtc->config_updating = false;
|
||||
spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
|
||||
#endif
|
||||
|
||||
mutex_unlock(&mtk_crtc->hw_lock);
|
||||
}
|
||||
|
|
|
@ -460,6 +460,29 @@ static unsigned int mtk_ovl_fmt_convert(struct mtk_disp_ovl *ovl,
|
|||
}
|
||||
}
|
||||
|
||||
static void mtk_ovl_afbc_layer_config(struct mtk_disp_ovl *ovl,
|
||||
unsigned int idx,
|
||||
struct mtk_plane_pending_state *pending,
|
||||
struct cmdq_pkt *cmdq_pkt)
|
||||
{
|
||||
unsigned int pitch_msb = pending->pitch >> 16;
|
||||
unsigned int hdr_pitch = pending->hdr_pitch;
|
||||
unsigned int hdr_addr = pending->hdr_addr;
|
||||
|
||||
if (pending->modifier != DRM_FORMAT_MOD_LINEAR) {
|
||||
mtk_ddp_write_relaxed(cmdq_pkt, hdr_addr, &ovl->cmdq_reg, ovl->regs,
|
||||
DISP_REG_OVL_HDR_ADDR(ovl, idx));
|
||||
mtk_ddp_write_relaxed(cmdq_pkt,
|
||||
OVL_PITCH_MSB_2ND_SUBBUF | pitch_msb,
|
||||
&ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH_MSB(idx));
|
||||
mtk_ddp_write_relaxed(cmdq_pkt, hdr_pitch, &ovl->cmdq_reg, ovl->regs,
|
||||
DISP_REG_OVL_HDR_PITCH(ovl, idx));
|
||||
} else {
|
||||
mtk_ddp_write_relaxed(cmdq_pkt, pitch_msb,
|
||||
&ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH_MSB(idx));
|
||||
}
|
||||
}
|
||||
|
||||
void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
|
||||
struct mtk_plane_state *state,
|
||||
struct cmdq_pkt *cmdq_pkt)

@@ -467,25 +490,14 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
struct mtk_plane_pending_state *pending = &state->pending;
unsigned int addr = pending->addr;
unsigned int hdr_addr = pending->hdr_addr;
unsigned int pitch = pending->pitch;
unsigned int hdr_pitch = pending->hdr_pitch;
unsigned int pitch_lsb = pending->pitch & GENMASK(15, 0);
unsigned int fmt = pending->format;
unsigned int rotation = pending->rotation;
unsigned int offset = (pending->y << 16) | pending->x;
unsigned int src_size = (pending->height << 16) | pending->width;
unsigned int blend_mode = state->base.pixel_blend_mode;
unsigned int ignore_pixel_alpha = 0;
unsigned int con;
bool is_afbc = pending->modifier != DRM_FORMAT_MOD_LINEAR;
union overlay_pitch {
struct split_pitch {
u16 lsb;
u16 msb;
} split_pitch;
u32 pitch;
} overlay_pitch;

overlay_pitch.pitch = pitch;

if (!pending->enable) {
mtk_ovl_layer_off(dev, idx, cmdq_pkt);

@@ -513,22 +525,30 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
ignore_pixel_alpha = OVL_CONST_BLEND;
}

if (pending->rotation & DRM_MODE_REFLECT_Y) {
/*
* Treat rotate 180 as flip x + flip y, and XOR the original rotation value
* to flip x + flip y to support both in the same time.
*/
if (rotation & DRM_MODE_ROTATE_180)
rotation ^= DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;

if (rotation & DRM_MODE_REFLECT_Y) {
con |= OVL_CON_VIRT_FLIP;
addr += (pending->height - 1) * pending->pitch;
}

if (pending->rotation & DRM_MODE_REFLECT_X) {
if (rotation & DRM_MODE_REFLECT_X) {
con |= OVL_CON_HORZ_FLIP;
addr += pending->pitch - 1;
}

if (ovl->data->supports_afbc)
mtk_ovl_set_afbc(ovl, cmdq_pkt, idx, is_afbc);
mtk_ovl_set_afbc(ovl, cmdq_pkt, idx,
pending->modifier != DRM_FORMAT_MOD_LINEAR);

mtk_ddp_write_relaxed(cmdq_pkt, con, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_CON(idx));
mtk_ddp_write_relaxed(cmdq_pkt, overlay_pitch.split_pitch.lsb | ignore_pixel_alpha,
mtk_ddp_write_relaxed(cmdq_pkt, pitch_lsb | ignore_pixel_alpha,
&ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH(idx));
mtk_ddp_write_relaxed(cmdq_pkt, src_size, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_SRC_SIZE(idx));

@@ -537,19 +557,8 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
mtk_ddp_write_relaxed(cmdq_pkt, addr, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_ADDR(ovl, idx));

if (is_afbc) {
mtk_ddp_write_relaxed(cmdq_pkt, hdr_addr, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_HDR_ADDR(ovl, idx));
mtk_ddp_write_relaxed(cmdq_pkt,
OVL_PITCH_MSB_2ND_SUBBUF | overlay_pitch.split_pitch.msb,
&ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH_MSB(idx));
mtk_ddp_write_relaxed(cmdq_pkt, hdr_pitch, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_HDR_PITCH(ovl, idx));
} else {
mtk_ddp_write_relaxed(cmdq_pkt,
overlay_pitch.split_pitch.msb,
&ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH_MSB(idx));
}
if (ovl->data->supports_afbc)
mtk_ovl_afbc_layer_config(ovl, idx, pending, cmdq_pkt);

mtk_ovl_set_bit_depth(dev, idx, fmt, cmdq_pkt);
mtk_ovl_layer_on(dev, idx, cmdq_pkt);
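A side note on the rotation handling in the mtk_ovl_layer_config() hunk above: rotating by 180 degrees is the same transform as reflecting in both X and Y, which is why the code XORs the two reflect flags into the rotation value before applying the flips. A minimal standalone sketch of that bit manipulation; the flag values below are stand-ins mirroring the DRM_MODE_* definitions, and the demo program is illustrative only:

#include <stdio.h>

/* Stand-in values; the kernel defines these in drm_mode.h. */
#define DRM_MODE_ROTATE_180  (1 << 2)
#define DRM_MODE_REFLECT_X   (1 << 4)
#define DRM_MODE_REFLECT_Y   (1 << 5)

/* Rotating by 180 degrees is equivalent to reflecting in both X and Y,
 * so XOR-ing with both reflect bits merges an explicit 180 rotation
 * with any reflections that were already requested. */
static unsigned int fold_rotate_180(unsigned int rotation)
{
	if (rotation & DRM_MODE_ROTATE_180)
		rotation ^= DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
	return rotation;
}

int main(void)
{
	unsigned int r = DRM_MODE_ROTATE_180 | DRM_MODE_REFLECT_Y;

	r = fold_rotate_180(r);
	/* 180 plus reflect-Y leaves only reflect-X set among the reflect flags. */
	printf("reflect_x=%d reflect_y=%d\n",
	       !!(r & DRM_MODE_REFLECT_X), !!(r & DRM_MODE_REFLECT_Y));
	return 0;
}

Compiled with any C compiler, this prints reflect_x=1 reflect_y=0 for the 180-plus-reflect-Y case, which is the combination the driver comment describes.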
@@ -543,18 +543,16 @@ static int mtk_dp_set_color_format(struct mtk_dp *mtk_dp,
enum dp_pixelformat color_format)
{
u32 val;

/* update MISC0 */
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3034,
color_format << DP_TEST_COLOR_FORMAT_SHIFT,
DP_TEST_COLOR_FORMAT_MASK);
u32 misc0_color;

switch (color_format) {
case DP_PIXELFORMAT_YUV422:
val = PIXEL_ENCODE_FORMAT_DP_ENC0_P0_YCBCR422;
misc0_color = DP_COLOR_FORMAT_YCbCr422;
break;
case DP_PIXELFORMAT_RGB:
val = PIXEL_ENCODE_FORMAT_DP_ENC0_P0_RGB;
misc0_color = DP_COLOR_FORMAT_RGB;
break;
default:
drm_warn(mtk_dp->drm_dev, "Unsupported color format: %d\n",

@@ -562,6 +560,11 @@ static int mtk_dp_set_color_format(struct mtk_dp *mtk_dp,
return -EINVAL;
}

/* update MISC0 */
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3034,
misc0_color,
DP_TEST_COLOR_FORMAT_MASK);

mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_303C,
val, PIXEL_ENCODE_FORMAT_DP_ENC0_P0_MASK);
return 0;

@@ -2120,7 +2123,6 @@ static enum drm_connector_status mtk_dp_bdg_detect(struct drm_bridge *bridge)
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
enum drm_connector_status ret = connector_status_disconnected;
bool enabled = mtk_dp->enabled;
u8 sink_count = 0;

if (!mtk_dp->train_info.cable_plugged_in)
return ret;

@@ -2135,8 +2137,8 @@ static enum drm_connector_status mtk_dp_bdg_detect(struct drm_bridge *bridge)
* function, we just need to check the HPD connection to check
* whether we connect to a sink device.
*/
drm_dp_dpcd_readb(&mtk_dp->aux, DP_SINK_COUNT, &sink_count);
if (DP_GET_SINK_COUNT(sink_count))

if (drm_dp_read_sink_count(&mtk_dp->aux) > 0)
ret = connector_status_connected;

if (!enabled)

@@ -2431,12 +2433,19 @@ mtk_dp_bridge_mode_valid(struct drm_bridge *bridge,
{
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
u32 bpp = info->color_formats & DRM_COLOR_FORMAT_YCBCR422 ? 16 : 24;
u32 rate = min_t(u32, drm_dp_max_link_rate(mtk_dp->rx_cap) *
drm_dp_max_lane_count(mtk_dp->rx_cap),
drm_dp_bw_code_to_link_rate(mtk_dp->max_linkrate) *
mtk_dp->max_lanes);
u32 lane_count_min = mtk_dp->train_info.lane_count;
u32 rate = drm_dp_bw_code_to_link_rate(mtk_dp->train_info.link_rate) *
lane_count_min;

if (rate < mode->clock * bpp / 8)
/*
*FEC overhead is approximately 2.4% from DP 1.4a spec 2.2.1.4.2.
*The down-spread amplitude shall either be disabled (0.0%) or up
*to 0.5% from 1.4a 3.5.2.6. Add up to approximately 3% total overhead.
*
*Because rate is already divided by 10,
*mode->clock does not need to be multiplied by 10
*/
if ((rate * 97 / 100) < (mode->clock * bpp / 8))
return MODE_CLOCK_HIGH;

return MODE_OK;

@@ -2477,10 +2486,9 @@ static u32 *mtk_dp_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
struct drm_display_info *display_info =
&conn_state->connector->display_info;
u32 rate = min_t(u32, drm_dp_max_link_rate(mtk_dp->rx_cap) *
drm_dp_max_lane_count(mtk_dp->rx_cap),
drm_dp_bw_code_to_link_rate(mtk_dp->max_linkrate) *
mtk_dp->max_lanes);
u32 lane_count_min = mtk_dp->train_info.lane_count;
u32 rate = drm_dp_bw_code_to_link_rate(mtk_dp->train_info.link_rate) *
lane_count_min;

*num_input_fmts = 0;

@@ -2489,8 +2497,8 @@ static u32 *mtk_dp_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
* datarate of YUV422 and sink device supports YUV422, we output YUV422
* format. Use this condition, we can support more resolution.
*/
if ((rate < (mode->clock * 24 / 8)) &&
(rate > (mode->clock * 16 / 8)) &&
if (((rate * 97 / 100) < (mode->clock * 24 / 8)) &&
((rate * 97 / 100) > (mode->clock * 16 / 8)) &&
(display_info->color_formats & DRM_COLOR_FORMAT_YCBCR422)) {
input_fmts = kcalloc(1, sizeof(*input_fmts), GFP_KERNEL);
if (!input_fmts)
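A side note on the mode_valid and input-format hunks above: the added 97/100 factor derates the link bandwidth by roughly 3% to cover FEC and down-spread overhead before comparing it with what a mode needs. Below is a standalone sketch of that arithmetic, assuming (as I read the driver) that the per-lane link rate is the payload bandwidth figure returned by drm_dp_bw_code_to_link_rate() and that the pixel clock is in kHz; the function name and sample figures are illustrative only:

#include <stdbool.h>
#include <stdio.h>

/*
 * Sketch of the derated bandwidth check: usable link bandwidth is reduced
 * by ~3% (FEC ~2.4%, down-spread up to 0.5%) before comparing it against
 * the bandwidth the mode requires.
 */
static bool mode_fits(unsigned int link_rate_per_lane, unsigned int lanes,
		      unsigned int pixel_clock_khz, unsigned int bpp)
{
	unsigned int rate = link_rate_per_lane * lanes;        /* payload bandwidth */
	unsigned int needed = pixel_clock_khz * bpp / 8;       /* what the mode needs */

	return (rate * 97 / 100) >= needed;
}

int main(void)
{
	/* Sample: an HBR3-class link (810000 per lane) with 2 lanes,
	 * against a 594000 kHz pixel clock (a 4k60-class mode). */
	printf("24 bpp fits: %d\n", mode_fits(810000, 2, 594000, 24));
	printf("16 bpp fits: %d\n", mode_fits(810000, 2, 594000, 16));
	return 0;
}

With these sample numbers the mode no longer fits at 24 bpp but still fits at 16 bpp, which is exactly the window in which the second hunk falls back to a YUV422 input format.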
@@ -372,11 +372,12 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev)
struct mtk_drm_private *temp_drm_priv;
struct device_node *phandle = dev->parent->of_node;
const struct of_device_id *of_id;
struct device_node *node;
struct device *drm_dev;
unsigned int cnt = 0;
int i, j;

for_each_child_of_node_scoped(phandle->parent, node) {
for_each_child_of_node(phandle->parent, node) {
struct platform_device *pdev;

of_id = of_match_node(mtk_drm_of_ids, node);

@@ -405,8 +406,10 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev)
if (temp_drm_priv->mtk_drm_bound)
cnt++;

if (cnt == MAX_CRTC)
if (cnt == MAX_CRTC) {
of_node_put(node);
break;
}
}

if (drm_priv->data->mmsys_dev_num == cnt) {

@@ -671,6 +674,8 @@ err_deinit:
err_free:
private->drm = NULL;
drm_dev_put(drm);
for (i = 0; i < private->data->mmsys_dev_num; i++)
private->all_drm_private[i]->drm = NULL;
return ret;
}

@@ -898,7 +903,7 @@ static int mtk_drm_of_ddp_path_build_one(struct device *dev, enum mtk_crtc_path
const unsigned int **out_path,
unsigned int *out_path_len)
{
struct device_node *next, *prev, *vdo = dev->parent->of_node;
struct device_node *next = NULL, *prev, *vdo = dev->parent->of_node;
unsigned int temp_path[DDP_COMPONENT_DRM_ID_MAX] = { 0 };
unsigned int *final_ddp_path;
unsigned short int idx = 0;

@@ -1087,7 +1092,7 @@ static int mtk_drm_probe(struct platform_device *pdev)
/* No devicetree graphs support: go with hardcoded paths if present */
dev_dbg(dev, "Using hardcoded paths for MMSYS %u\n", mtk_drm_data->mmsys_id);
private->data = mtk_drm_data;
};
}

private->all_drm_private = devm_kmalloc_array(dev, private->data->mmsys_dev_num,
sizeof(*private->all_drm_private),
@@ -139,11 +139,11 @@
#define CLK_HS_POST GENMASK(15, 8)
#define CLK_HS_EXIT GENMASK(23, 16)

#define DSI_VM_CMD_CON 0x130
/* DSI_VM_CMD_CON */
#define VM_CMD_EN BIT(0)
#define TS_VFP_EN BIT(5)

#define DSI_SHADOW_DEBUG 0x190U
/* DSI_SHADOW_DEBUG */
#define FORCE_COMMIT BIT(0)
#define BYPASS_SHADOW BIT(1)

@@ -187,6 +187,8 @@ struct phy;

struct mtk_dsi_driver_data {
const u32 reg_cmdq_off;
const u32 reg_vm_cmd_off;
const u32 reg_shadow_dbg_off;
bool has_shadow_ctl;
bool has_size_ctl;
bool cmdq_long_packet_ctl;

@@ -246,23 +248,22 @@ static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
u32 data_rate_mhz = DIV_ROUND_UP(dsi->data_rate, HZ_PER_MHZ);
struct mtk_phy_timing *timing = &dsi->phy_timing;

timing->lpx = (80 * data_rate_mhz / (8 * 1000)) + 1;
timing->da_hs_prepare = (59 * data_rate_mhz + 4 * 1000) / 8000 + 1;
timing->da_hs_zero = (163 * data_rate_mhz + 11 * 1000) / 8000 + 1 -
timing->lpx = (60 * data_rate_mhz / (8 * 1000)) + 1;
timing->da_hs_prepare = (80 * data_rate_mhz + 4 * 1000) / 8000;
timing->da_hs_zero = (170 * data_rate_mhz + 10 * 1000) / 8000 + 1 -
timing->da_hs_prepare;
timing->da_hs_trail = (78 * data_rate_mhz + 7 * 1000) / 8000 + 1;
timing->da_hs_trail = timing->da_hs_prepare + 1;

timing->ta_go = 4 * timing->lpx;
timing->ta_sure = 3 * timing->lpx / 2;
timing->ta_get = 5 * timing->lpx;
timing->da_hs_exit = (118 * data_rate_mhz / (8 * 1000)) + 1;
timing->ta_go = 4 * timing->lpx - 2;
timing->ta_sure = timing->lpx + 2;
timing->ta_get = 4 * timing->lpx;
timing->da_hs_exit = 2 * timing->lpx + 1;

timing->clk_hs_prepare = (57 * data_rate_mhz / (8 * 1000)) + 1;
timing->clk_hs_post = (65 * data_rate_mhz + 53 * 1000) / 8000 + 1;
timing->clk_hs_trail = (78 * data_rate_mhz + 7 * 1000) / 8000 + 1;
timing->clk_hs_zero = (330 * data_rate_mhz / (8 * 1000)) + 1 -
timing->clk_hs_prepare;
timing->clk_hs_exit = (118 * data_rate_mhz / (8 * 1000)) + 1;
timing->clk_hs_prepare = 70 * data_rate_mhz / (8 * 1000);
timing->clk_hs_post = timing->clk_hs_prepare + 8;
timing->clk_hs_trail = timing->clk_hs_prepare;
timing->clk_hs_zero = timing->clk_hs_trail * 4;
timing->clk_hs_exit = 2 * timing->clk_hs_trail;

timcon0 = FIELD_PREP(LPX, timing->lpx) |
FIELD_PREP(HS_PREP, timing->da_hs_prepare) |

@@ -367,8 +368,8 @@ static void mtk_dsi_set_mode(struct mtk_dsi *dsi)

static void mtk_dsi_set_vm_cmd(struct mtk_dsi *dsi)
{
mtk_dsi_mask(dsi, DSI_VM_CMD_CON, VM_CMD_EN, VM_CMD_EN);
mtk_dsi_mask(dsi, DSI_VM_CMD_CON, TS_VFP_EN, TS_VFP_EN);
mtk_dsi_mask(dsi, dsi->driver_data->reg_vm_cmd_off, VM_CMD_EN, VM_CMD_EN);
mtk_dsi_mask(dsi, dsi->driver_data->reg_vm_cmd_off, TS_VFP_EN, TS_VFP_EN);
}

static void mtk_dsi_rxtx_control(struct mtk_dsi *dsi)

@@ -714,7 +715,7 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi)

if (dsi->driver_data->has_shadow_ctl)
writel(FORCE_COMMIT | BYPASS_SHADOW,
dsi->regs + DSI_SHADOW_DEBUG);
dsi->regs + dsi->driver_data->reg_shadow_dbg_off);

mtk_dsi_reset_engine(dsi);
mtk_dsi_phy_timconfig(dsi);

@@ -1263,26 +1264,36 @@ static void mtk_dsi_remove(struct platform_device *pdev)

static const struct mtk_dsi_driver_data mt8173_dsi_driver_data = {
.reg_cmdq_off = 0x200,
.reg_vm_cmd_off = 0x130,
.reg_shadow_dbg_off = 0x190
};

static const struct mtk_dsi_driver_data mt2701_dsi_driver_data = {
.reg_cmdq_off = 0x180,
.reg_vm_cmd_off = 0x130,
.reg_shadow_dbg_off = 0x190
};

static const struct mtk_dsi_driver_data mt8183_dsi_driver_data = {
.reg_cmdq_off = 0x200,
.reg_vm_cmd_off = 0x130,
.reg_shadow_dbg_off = 0x190,
.has_shadow_ctl = true,
.has_size_ctl = true,
};

static const struct mtk_dsi_driver_data mt8186_dsi_driver_data = {
.reg_cmdq_off = 0xd00,
.reg_vm_cmd_off = 0x200,
.reg_shadow_dbg_off = 0xc00,
.has_shadow_ctl = true,
.has_size_ctl = true,
};

static const struct mtk_dsi_driver_data mt8188_dsi_driver_data = {
.reg_cmdq_off = 0xd00,
.reg_vm_cmd_off = 0x200,
.reg_shadow_dbg_off = 0xc00,
.has_shadow_ctl = true,
.has_size_ctl = true,
.cmdq_long_packet_ctl = true,
|
|||
if (ret < 0)
|
||||
return NULL;
|
||||
|
||||
return kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
|
||||
return edid;
|
||||
}
|
||||
|
||||
bool nouveau_acpi_video_backlight_use_native(void)
|
||||
|
|
|
@@ -387,11 +387,13 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
if (f) {
struct nouveau_channel *prev;
bool must_wait = true;
bool local;

rcu_read_lock();
prev = rcu_dereference(f->channel);
if (prev && (prev == chan ||
fctx->sync(f, prev, chan) == 0))
local = prev && prev->cli->drm == chan->cli->drm;
if (local && (prev == chan ||
fctx->sync(f, prev, chan) == 0))
must_wait = false;
rcu_read_unlock();
if (!must_wait)
@@ -31,6 +31,7 @@ mcp77_sor = {
.state = g94_sor_state,
.power = nv50_sor_power,
.clock = nv50_sor_clock,
.bl = &nv50_sor_bl,
.hdmi = &g84_sor_hdmi,
.dp = &g94_sor_dp,
};
@@ -1095,6 +1095,64 @@ static void drm_test_connector_hdmi_init_formats_no_rgb(struct kunit *test)
KUNIT_EXPECT_LT(test, ret, 0);
}

struct drm_connector_hdmi_init_formats_yuv420_allowed_test {
unsigned long supported_formats;
bool yuv420_allowed;
int expected_result;
};

#define YUV420_ALLOWED_TEST(_formats, _allowed, _result) \
{ \
.supported_formats = BIT(HDMI_COLORSPACE_RGB) | (_formats), \
.yuv420_allowed = _allowed, \
.expected_result = _result, \
}

static const struct drm_connector_hdmi_init_formats_yuv420_allowed_test
drm_connector_hdmi_init_formats_yuv420_allowed_tests[] = {
YUV420_ALLOWED_TEST(BIT(HDMI_COLORSPACE_YUV420), true, 0),
YUV420_ALLOWED_TEST(BIT(HDMI_COLORSPACE_YUV420), false, -EINVAL),
YUV420_ALLOWED_TEST(BIT(HDMI_COLORSPACE_YUV422), true, -EINVAL),
YUV420_ALLOWED_TEST(BIT(HDMI_COLORSPACE_YUV422), false, 0),
};

static void
drm_connector_hdmi_init_formats_yuv420_allowed_desc(const struct drm_connector_hdmi_init_formats_yuv420_allowed_test *t,
char *desc)
{
sprintf(desc, "supported_formats=0x%lx yuv420_allowed=%d",
t->supported_formats, t->yuv420_allowed);
}

KUNIT_ARRAY_PARAM(drm_connector_hdmi_init_formats_yuv420_allowed,
drm_connector_hdmi_init_formats_yuv420_allowed_tests,
drm_connector_hdmi_init_formats_yuv420_allowed_desc);

/*
* Test that the registration of an HDMI connector succeeds only when
* the presence of YUV420 in the supported formats matches the value
* of the ycbcr_420_allowed flag.
*/
static void drm_test_connector_hdmi_init_formats_yuv420_allowed(struct kunit *test)
{
const struct drm_connector_hdmi_init_formats_yuv420_allowed_test *params;
struct drm_connector_init_priv *priv = test->priv;
int ret;

params = test->param_value;
priv->connector.ycbcr_420_allowed = params->yuv420_allowed;

ret = drmm_connector_hdmi_init(&priv->drm, &priv->connector,
"Vendor", "Product",
&dummy_funcs,
&dummy_hdmi_funcs,
DRM_MODE_CONNECTOR_HDMIA,
&priv->ddc,
params->supported_formats,
8);
KUNIT_EXPECT_EQ(test, ret, params->expected_result);
}

/*
* Test that the registration of an HDMI connector with an HDMI
* connector type succeeds.

@@ -1186,6 +1244,8 @@ static struct kunit_case drmm_connector_hdmi_init_tests[] = {
KUNIT_CASE(drm_test_connector_hdmi_init_bpc_null),
KUNIT_CASE(drm_test_connector_hdmi_init_formats_empty),
KUNIT_CASE(drm_test_connector_hdmi_init_formats_no_rgb),
KUNIT_CASE_PARAM(drm_test_connector_hdmi_init_formats_yuv420_allowed,
drm_connector_hdmi_init_formats_yuv420_allowed_gen_params),
KUNIT_CASE(drm_test_connector_hdmi_init_null_ddc),
KUNIT_CASE(drm_test_connector_hdmi_init_null_product),
KUNIT_CASE(drm_test_connector_hdmi_init_null_vendor),
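A side note on the parameterised KUnit test above: its expectation table encodes a single rule, namely that registration should succeed only when advertising YUV420 in supported_formats agrees with the connector's ycbcr_420_allowed flag. A minimal standalone sketch of that predicate, checked against the four table entries; the enum values below are stand-ins rather than the kernel's definitions:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the hdmi_colorspace bits used by the test table. */
#define HDMI_COLORSPACE_RGB     0
#define HDMI_COLORSPACE_YUV422  1
#define HDMI_COLORSPACE_YUV420  3
#define BIT(n) (1UL << (n))

/* Registration is expected to succeed (return 0) only when the YUV420
 * bit in supported_formats matches the ycbcr_420_allowed flag. */
static int expected_result(unsigned long supported_formats, bool yuv420_allowed)
{
	bool has_yuv420 = supported_formats & BIT(HDMI_COLORSPACE_YUV420);

	return has_yuv420 == yuv420_allowed ? 0 : -22 /* -EINVAL */;
}

int main(void)
{
	unsigned long rgb = BIT(HDMI_COLORSPACE_RGB);

	printf("%d %d %d %d\n",
	       expected_result(rgb | BIT(HDMI_COLORSPACE_YUV420), true),
	       expected_result(rgb | BIT(HDMI_COLORSPACE_YUV420), false),
	       expected_result(rgb | BIT(HDMI_COLORSPACE_YUV422), true),
	       expected_result(rgb | BIT(HDMI_COLORSPACE_YUV422), false));
	return 0;
}

This prints 0 -22 -22 0, matching the expected_result column of the table.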
@@ -320,8 +320,7 @@ static void kunit_action_drm_mode_destroy(void *ptr)
}

/**
* drm_kunit_display_mode_from_cea_vic() - return a mode for CEA VIC
for a KUnit test
* drm_kunit_display_mode_from_cea_vic() - return a mode for CEA VIC for a KUnit test
* @test: The test context object
* @dev: DRM device
* @video_code: CEA VIC of the mode
@@ -108,6 +108,7 @@ v3d_irq(int irq, void *arg)
v3d_job_update_stats(&v3d->bin_job->base, V3D_BIN);
trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
v3d->bin_job = NULL;
status = IRQ_HANDLED;
}

@@ -118,6 +119,7 @@ v3d_irq(int irq, void *arg)
v3d_job_update_stats(&v3d->render_job->base, V3D_RENDER);
trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
v3d->render_job = NULL;
status = IRQ_HANDLED;
}

@@ -128,6 +130,7 @@ v3d_irq(int irq, void *arg)
v3d_job_update_stats(&v3d->csd_job->base, V3D_CSD);
trace_v3d_csd_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
v3d->csd_job = NULL;
status = IRQ_HANDLED;
}

@@ -165,6 +168,7 @@ v3d_hub_irq(int irq, void *arg)
v3d_job_update_stats(&v3d->tfu_job->base, V3D_TFU);
trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
v3d->tfu_job = NULL;
status = IRQ_HANDLED;
}
@@ -228,7 +228,6 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
VMW_BO_DOMAIN_VRAM,
VMW_BO_DOMAIN_VRAM);
buf->places[0].lpfn = PFN_UP(bo->resource->size);
buf->busy_places[0].lpfn = PFN_UP(bo->resource->size);
ret = ttm_bo_validate(bo, &buf->placement, &ctx);

/* For some reason we didn't end up at the start of vram */

@@ -443,7 +442,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,

if (params->pin)
ttm_bo_pin(&vmw_bo->tbo);
ttm_bo_unreserve(&vmw_bo->tbo);
if (!params->keep_resv)
ttm_bo_unreserve(&vmw_bo->tbo);

return 0;
}
@@ -56,8 +56,9 @@ struct vmw_bo_params {
u32 domain;
u32 busy_domain;
enum ttm_bo_type bo_type;
size_t size;
bool pin;
bool keep_resv;
size_t size;
struct dma_resv *resv;
struct sg_table *sg;
};

@@ -83,7 +84,6 @@ struct vmw_bo {

struct ttm_placement placement;
struct ttm_place places[5];
struct ttm_place busy_places[5];

/* Protected by reservation */
struct ttm_bo_kmap_obj map;
@@ -403,7 +403,8 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
.busy_domain = VMW_BO_DOMAIN_SYS,
.bo_type = ttm_bo_type_kernel,
.size = PAGE_SIZE,
.pin = true
.pin = true,
.keep_resv = true,
};

/*

@@ -415,10 +416,6 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)

if (unlikely(ret != 0))
return ret;

ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);
BUG_ON(ret != 0);
vmw_bo_pin_reserved(vbo, true);

ret = ttm_bo_kmap(&vbo->tbo, 0, 1, &map);
if (likely(ret == 0)) {
result = ttm_kmap_obj_virtual(&map, &dummy);
@@ -206,6 +206,7 @@ struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
.bo_type = ttm_bo_type_sg,
.size = attach->dmabuf->size,
.pin = false,
.keep_resv = true,
.resv = attach->dmabuf->resv,
.sg = table,
@@ -750,6 +750,7 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
struct vmw_bo *old_bo = NULL;
struct vmw_bo *new_bo = NULL;
struct ww_acquire_ctx ctx;
s32 hotspot_x, hotspot_y;
int ret;

@@ -769,9 +770,11 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
if (du->cursor_surface)
du->cursor_age = du->cursor_surface->snooper.age;

ww_acquire_init(&ctx, &reservation_ww_class);

if (!vmw_user_object_is_null(&old_vps->uo)) {
old_bo = vmw_user_object_buffer(&old_vps->uo);
ret = ttm_bo_reserve(&old_bo->tbo, false, false, NULL);
ret = ttm_bo_reserve(&old_bo->tbo, false, false, &ctx);
if (ret != 0)
return;
}

@@ -779,9 +782,14 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
if (!vmw_user_object_is_null(&vps->uo)) {
new_bo = vmw_user_object_buffer(&vps->uo);
if (old_bo != new_bo) {
ret = ttm_bo_reserve(&new_bo->tbo, false, false, NULL);
if (ret != 0)
ret = ttm_bo_reserve(&new_bo->tbo, false, false, &ctx);
if (ret != 0) {
if (old_bo) {
ttm_bo_unreserve(&old_bo->tbo);
ww_acquire_fini(&ctx);
}
return;
}
} else {
new_bo = NULL;
}

@@ -803,10 +811,12 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
hotspot_x, hotspot_y);
}

if (old_bo)
ttm_bo_unreserve(&old_bo->tbo);
if (new_bo)
ttm_bo_unreserve(&new_bo->tbo);
if (old_bo)
ttm_bo_unreserve(&old_bo->tbo);

ww_acquire_fini(&ctx);

du->cursor_x = new_state->crtc_x + du->set_gui_x;
du->cursor_y = new_state->crtc_y + du->set_gui_y;
@@ -896,7 +896,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
.busy_domain = VMW_BO_DOMAIN_SYS,
.bo_type = ttm_bo_type_device,
.size = size,
.pin = true
.pin = true,
.keep_resv = true,
};

if (!vmw_shader_id_ok(user_key, shader_type))

@@ -906,10 +907,6 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
goto out;

ret = ttm_bo_reserve(&buf->tbo, false, true, NULL);
if (unlikely(ret != 0))
goto no_reserve;

/* Map and copy shader bytecode. */
ret = ttm_bo_kmap(&buf->tbo, 0, PFN_UP(size), &map);
if (unlikely(ret != 0)) {
@@ -572,15 +572,14 @@ int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
.busy_domain = domain,
.bo_type = ttm_bo_type_kernel,
.size = bo_size,
.pin = true
.pin = true,
.keep_resv = true,
};

ret = vmw_bo_create(dev_priv, &bo_params, &vbo);
if (unlikely(ret != 0))
return ret;

ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);
BUG_ON(ret != 0);
ret = vmw_ttm_populate(vbo->tbo.bdev, vbo->tbo.ttm, &ctx);
if (likely(ret == 0)) {
struct vmw_ttm_tt *vmw_tt =
@@ -264,10 +264,9 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
* however seems quite fragile not to also restart the GT. Try
* to do that here by triggering a GT reset.
*/
for_each_gt(__gt, xe, id) {
xe_gt_reset_async(__gt);
flush_work(&__gt->reset.worker);
}
for_each_gt(__gt, xe, id)
xe_gt_reset(__gt);

if (err) {
KUNIT_FAIL(test, "restore kernel err=%pe\n",
ERR_PTR(err));
@@ -162,8 +162,7 @@ static int mocs_reset_test_run_device(struct xe_device *xe)
if (flags & HAS_LNCF_MOCS)
read_l3cc_table(gt, &mocs.table);

xe_gt_reset_async(gt);
flush_work(&gt->reset.worker);
xe_gt_reset(gt);

kunit_info(test, "mocs_reset_test after reset\n");
if (flags & HAS_GLOBAL_MOCS)
@@ -56,6 +56,31 @@ void xe_gt_sanitize(struct xe_gt *gt);
int xe_gt_sanitize_freq(struct xe_gt *gt);
void xe_gt_remove(struct xe_gt *gt);

/**
* xe_gt_wait_for_reset - wait for gt's async reset to finalize.
* @gt: GT structure
* Return:
* %true if it waited for the work to finish execution,
* %false if there was no scheduled reset or it was done.
*/
static inline bool xe_gt_wait_for_reset(struct xe_gt *gt)
{
return flush_work(&gt->reset.worker);
}

/**
* xe_gt_reset - perform synchronous reset
* @gt: GT structure
* Return:
* %true if it waited for the reset to finish,
* %false if there was no scheduled reset.
*/
static inline bool xe_gt_reset(struct xe_gt *gt)
{
xe_gt_reset_async(gt);
return xe_gt_wait_for_reset(gt);
}

/**
* xe_gt_any_hw_engine_by_reset_domain - scan the list of engines and return the
* first that matches the same reset domain as @class
@@ -150,7 +150,7 @@ ccs_mode_store(struct device *kdev, struct device_attribute *attr,
xe_gt_info(gt, "Setting compute mode to %d\n", num_engines);
gt->ccs_mode = num_engines;
xe_gt_record_user_engines(gt);
xe_gt_reset_async(gt);
xe_gt_reset(gt);
}

mutex_unlock(&xe->drm.filelist_mutex);
@@ -132,11 +132,9 @@ static int force_reset(struct xe_gt *gt, struct drm_printer *p)
static int force_reset_sync(struct xe_gt *gt, struct drm_printer *p)
{
xe_pm_runtime_get(gt_to_xe(gt));
xe_gt_reset_async(gt);
xe_gt_reset(gt);
xe_pm_runtime_put(gt_to_xe(gt));

flush_work(&gt->reset.worker);

return 0;
}
@@ -422,7 +422,7 @@ hw_engine_setup_default_state(struct xe_hw_engine *hwe)
* Bspec: 72161
*/
const u8 mocs_write_idx = gt->mocs.uc_index;
const u8 mocs_read_idx = hwe->class == XE_ENGINE_CLASS_COMPUTE &&
const u8 mocs_read_idx = hwe->class == XE_ENGINE_CLASS_COMPUTE && IS_DGFX(xe) &&
(GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC) ?
gt->mocs.wb_index : gt->mocs.uc_index;
u32 ring_cmd_cctl_val = REG_FIELD_PREP(CMD_CCTL_WRITE_OVERRIDE_MASK, mocs_write_idx) |
Some files were not shown because too many files have changed in this diff.