Linux 4.12-rc5
-----BEGIN PGP SIGNATURE-----
iQEcBAABAgAGBQJZPdbLAAoJEHm+PkMAQRiGx4wH/1nCjfnl6fE8oJ24/1gEAOUh
biFdqJkYZmlLYHVtYfLm4Ueg4adJdg0wx6qM/4RaAzmQVvLfDV34bc1qBf1+P95G
kVF+osWyXrZo5cTwkwapHW/KNu4VJwAx2D1wrlxKDVG5AOrULH1pYOYGOpApEkZU
4N+q5+M0ce0GJpqtUZX+UnI33ygjdDbBxXoFKsr24B7eA0ouGbAJ7dC88WcaETL+
2/7tT01SvDMo0jBSV0WIqlgXwZ5gp3yPGnklC3F4159Yze6VFrzHMKS/UpPF8o8E
W9EbuzwxsKyXUifX2GY348L1f+47glen/1sedbuKnFhP6E9aqUQQJXvEO7ueQl4=
=m2Gx
-----END PGP SIGNATURE-----

BackMerge tag 'v4.12-rc5' into drm-next

Linux 4.12-rc5 for nouveau fixes
commit 925344ccc9
618 changed files with 5571 additions and 3351 deletions
@@ -866,6 +866,15 @@
 	dscc4.setup=	[NET]
 
+	dt_cpu_ftrs=	[PPC]
+			Format: {"off" | "known"}
+			Control how the dt_cpu_ftrs device-tree binding is
+			used for CPU feature discovery and setup (if it
+			exists).
+			off: Do not use it, fall back to legacy cpu table.
+			known: Do not pass through unknown features to guests
+			or userspace, only those that the kernel is aware of.
+
 	dump_apple_properties	[X86]
 			Dump name and content of EFI device properties on
 			x86 Macs. Useful for driver authors to determine
@@ -26,6 +26,10 @@ Optional properties:
 - interrupt-controller : Indicates the switch is itself an interrupt
 			controller. This is used for the PHY interrupts.
 #interrupt-cells = <2> : Controller uses two cells, number and flag
+- eeprom-length	: Set to the length of an EEPROM connected to the
+		  switch. Must be set if the switch can not detect
+		  the presence and/or size of a connected EEPROM,
+		  otherwise optional.
 - mdio		: Container of PHY and devices on the switches MDIO
 		  bus.
 - mdio?		: Container of PHYs and devices on the external MDIO
@@ -247,7 +247,6 @@ bias-bus-hold - latch weakly
 bias-pull-up - pull up the pin
 bias-pull-down - pull down the pin
 bias-pull-pin-default - use pin-default pull state
-bi-directional - pin supports simultaneous input/output operations
 drive-push-pull - drive actively high and low
 drive-open-drain - drive with open drain
 drive-open-source - drive with open source

@@ -260,7 +259,6 @@ input-debounce - debounce mode with debound time X
 power-source - select between different power supplies
 low-power-enable - enable low power mode
 low-power-disable - disable low power mode
-output-enable - enable output on pin regardless of output value
 output-low - set the pin to output mode with low level
 output-high - set the pin to output mode with high level
 slew-rate - set the slew rate
@@ -10,6 +10,7 @@ Required properties:
 - "rockchip,rk3288-usb", "rockchip,rk3066-usb", "snps,dwc2": for rk3288 Soc;
 - "lantiq,arx100-usb": The DWC2 USB controller instance in Lantiq ARX SoCs;
 - "lantiq,xrx200-usb": The DWC2 USB controller instance in Lantiq XRX SoCs;
+- "amlogic,meson8-usb": The DWC2 USB controller instance in Amlogic Meson8 SoCs;
 - "amlogic,meson8b-usb": The DWC2 USB controller instance in Amlogic Meson8b SoCs;
 - "amlogic,meson-gxbb-usb": The DWC2 USB controller instance in Amlogic S905 SoCs;
 - "amcc,dwc-otg": The DWC2 USB controller instance in AMCC Canyonlands 460EX SoCs;
Documentation/networking/dpaa.txt (new file, 194 lines)
@@ -0,0 +1,194 @@
+The QorIQ DPAA Ethernet Driver
+==============================
+
+Authors:
+Madalin Bucur <madalin.bucur@nxp.com>
+Camelia Groza <camelia.groza@nxp.com>
+
+Contents
+========
+
+	- DPAA Ethernet Overview
+	- DPAA Ethernet Supported SoCs
+	- Configuring DPAA Ethernet in your kernel
+	- DPAA Ethernet Frame Processing
+	- DPAA Ethernet Features
+	- Debugging
+
+DPAA Ethernet Overview
+======================
+
+DPAA stands for Data Path Acceleration Architecture and it is a
+set of networking acceleration IPs that are available on several
+generations of SoCs, both on PowerPC and ARM64.
+
+The Freescale DPAA architecture consists of a series of hardware blocks
+that support Ethernet connectivity. The Ethernet driver depends upon the
+following drivers in the Linux kernel:
+
+ - Peripheral Access Memory Unit (PAMU) (* needed only for PPC platforms)
+    drivers/iommu/fsl_*
+ - Frame Manager (FMan)
+    drivers/net/ethernet/freescale/fman
+ - Queue Manager (QMan), Buffer Manager (BMan)
+    drivers/soc/fsl/qbman
+
+A simplified view of the dpaa_eth interfaces mapped to FMan MACs:
+
+  dpaa_eth       /eth0\     ...       /ethN\
+  driver        |      |             |      |
+  -------------   ----   -----------   ----   -------------
+       -Ports  / Tx  Rx \    ...    / Tx  Rx \
+  FMan        |          |         |          |
+       -MACs  |   MAC0   |         |   MACN   |
+             /   dtsec0   \  ...  /   dtsecN   \ (or tgec)
+            /              \     /              \(or memac)
+  ---------  --------------  ---  --------------  ---------
+      FMan, FMan Port, FMan SP, FMan MURAM drivers
+  ---------------------------------------------------------
+      FMan HW blocks: MURAM, MACs, Ports, SP
+  ---------------------------------------------------------
+
+The dpaa_eth relation to the QMan, BMan and FMan:
+              ________________________________
+  dpaa_eth   /            eth0                \
+  driver    /                                  \
+  ---------   -^-   -^-   -^-   ---   ---------
+  QMan driver / \   / \   / \  \   /  | BMan    |
+             |Rx | |Rx | |Tx | |Tx |  | driver  |
+  ---------  |Dfl| |Err| |Cnf| |FQs|  |         |
+  QMan HW    |FQ | |FQ | |FQs| |   |  |         |
+             /   \ /   \ /   \  \ /   |         |
+  ---------   ---   ---   ---   -v-   ---------
+            |        FMan QMI         |         |
+            | FMan HW       FMan BMI  | BMan HW |
+              -----------------------   --------
+
+where the acronyms used above (and in the code) are:
+DPAA = Data Path Acceleration Architecture
+FMan = DPAA Frame Manager
+QMan = DPAA Queue Manager
+BMan = DPAA Buffers Manager
+QMI = QMan interface in FMan
+BMI = BMan interface in FMan
+FMan SP = FMan Storage Profiles
+MURAM = Multi-user RAM in FMan
+FQ = QMan Frame Queue
+Rx Dfl FQ = default reception FQ
+Rx Err FQ = Rx error frames FQ
+Tx Cnf FQ = Tx confirmation FQs
+Tx FQs = transmission frame queues
+dtsec = datapath three speed Ethernet controller (10/100/1000 Mbps)
+tgec = ten gigabit Ethernet controller (10 Gbps)
+memac = multirate Ethernet MAC (10/100/1000/10000)
+
+DPAA Ethernet Supported SoCs
+============================
+
+The DPAA drivers enable the Ethernet controllers present on the following SoCs:
+
+# PPC
+P1023
+P2041
+P3041
+P4080
+P5020
+P5040
+T1023
+T1024
+T1040
+T1042
+T2080
+T4240
+B4860
+
+# ARM
+LS1043A
+LS1046A
+
+Configuring DPAA Ethernet in your kernel
+========================================
+
+To enable the DPAA Ethernet driver, the following Kconfig options are required:
+
+# common for arch/arm64 and arch/powerpc platforms
+CONFIG_FSL_DPAA=y
+CONFIG_FSL_FMAN=y
+CONFIG_FSL_DPAA_ETH=y
+CONFIG_FSL_XGMAC_MDIO=y
+
+# for arch/powerpc only
+CONFIG_FSL_PAMU=y
+
+# common options needed for the PHYs used on the RDBs
+CONFIG_VITESSE_PHY=y
+CONFIG_REALTEK_PHY=y
+CONFIG_AQUANTIA_PHY=y
+
+DPAA Ethernet Frame Processing
+==============================
+
+On Rx, buffers for the incoming frames are retrieved from one of the three
+existing buffer pools. The driver initializes and seeds these, each with
+buffers of different sizes: 1KB, 2KB and 4KB.
+
+On Tx, all transmitted frames are returned to the driver through Tx
+confirmation frame queues. The driver is then responsible for freeing the
+buffers. In order to do this properly, a backpointer is added to the buffer
+before transmission that points to the skb. When the buffer returns to the
+driver on a confirmation FQ, the skb can be correctly consumed.
+
+DPAA Ethernet Features
+======================
+
+Currently the DPAA Ethernet driver enables the basic features required for
+a Linux Ethernet driver. The support for advanced features will be added
+gradually.
+
+The driver has Rx and Tx checksum offloading for UDP and TCP. Currently the Rx
+checksum offload feature is enabled by default and cannot be controlled through
+ethtool.
+
+The driver has support for multiple prioritized Tx traffic classes. Priorities
+range from 0 (lowest) to 3 (highest). These are mapped to HW workqueues with
+strict priority levels. Each traffic class contains NR_CPU TX queues. By
+default, only one traffic class is enabled and the lowest priority Tx queues
+are used. Higher priority traffic classes can be enabled with the mqprio
+qdisc. For example, all four traffic classes are enabled on an interface with
+the following command. Furthermore, skb priority levels are mapped to traffic
+classes as follows:
+
+	* priorities 0 to 3 - traffic class 0 (low priority)
+	* priorities 4 to 7 - traffic class 1 (medium-low priority)
+	* priorities 8 to 11 - traffic class 2 (medium-high priority)
+	* priorities 12 to 15 - traffic class 3 (high priority)
+
+tc qdisc add dev <int> root handle 1: \
+	 mqprio num_tc 4 map 0 0 0 0 1 1 1 1 2 2 2 2 3 3 3 3 hw 1
+
+Debugging
+=========
+
+The following statistics are exported for each interface through ethtool:
+
+	- interrupt count per CPU
+	- Rx packets count per CPU
+	- Tx packets count per CPU
+	- Tx confirmed packets count per CPU
+	- Tx S/G frames count per CPU
+	- Tx error count per CPU
+	- Rx error count per CPU
+	- Rx error count per type
+	- congestion related statistics:
+		- congestion status
+		- time spent in congestion
+		- number of times the device entered congestion
+		- dropped packets count per cause
+
+The driver also exports the following information in sysfs:
+
+	- the FQ IDs for each FQ type
+	/sys/devices/platform/dpaa-ethernet.0/net/<int>/fqids
+
+	- the IDs of the buffer pools in use
+	/sys/devices/platform/dpaa-ethernet.0/net/<int>/bpids
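A minimal sketch of the Tx backpointer scheme that dpaa.txt describes above,
for illustration only: the "sketch_*" names are hypothetical and this is not
the dpaa_eth driver's actual code. The idea is that the skb pointer is
written at the start of the buffer before the frame is handed to hardware,
and read back when the buffer reappears on a Tx confirmation FQ so the skb
can be freed.

	#include <linux/skbuff.h>

	/* Before transmission: stash a backpointer to the skb in the buffer. */
	static void sketch_stash_skb(void *buf_start, struct sk_buff *skb)
	{
		*(struct sk_buff **)buf_start = skb;
	}

	/* On the Tx confirmation path: recover the backpointer, free the skb. */
	static void sketch_tx_confirm(void *buf_start)
	{
		struct sk_buff *skb = *(struct sk_buff **)buf_start;

		dev_kfree_skb(skb);
	}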
@@ -1,7 +1,7 @@
 TCP protocol
 ============
 
-Last updated: 9 February 2008
+Last updated: 3 June 2017
 
 Contents
 ========

@@ -29,18 +29,19 @@ As of 2.6.13, Linux supports pluggable congestion control algorithms.
 A congestion control mechanism can be registered through functions in
 tcp_cong.c. The functions used by the congestion control mechanism are
 registered via passing a tcp_congestion_ops struct to
-tcp_register_congestion_control. As a minimum name, ssthresh,
-cong_avoid must be valid.
+tcp_register_congestion_control. As a minimum, the congestion control
+mechanism must provide a valid name and must implement either ssthresh,
+cong_avoid and undo_cwnd hooks or the "omnipotent" cong_control hook.
 
 Private data for a congestion control mechanism is stored in tp->ca_priv.
 tcp_ca(tp) returns a pointer to this space. This is preallocated space - it
 is important to check the size of your private data will fit this space, or
-alternatively space could be allocated elsewhere and a pointer to it could
+alternatively, space could be allocated elsewhere and a pointer to it could
 be stored here.
 
 There are three kinds of congestion control algorithms currently: The
 simplest ones are derived from TCP reno (highspeed, scalable) and just
-provide an alternative the congestion window calculation. More complex
+provide an alternative congestion window calculation. More complex
 ones like BIC try to look at other events to provide better
 heuristics. There are also round trip time based algorithms like
 Vegas and Westwood+.

@@ -49,21 +50,15 @@ Good TCP congestion control is a complex problem because the algorithm
 needs to maintain fairness and performance. Please review current
 research and RFC's before developing new modules.
 
-The method that is used to determine which congestion control mechanism is
-determined by the setting of the sysctl net.ipv4.tcp_congestion_control.
-The default congestion control will be the last one registered (LIFO);
-so if you built everything as modules, the default will be reno. If you
-build with the defaults from Kconfig, then CUBIC will be builtin (not a
-module) and it will end up the default.
+The default congestion control mechanism is chosen based on the
+DEFAULT_TCP_CONG Kconfig parameter. If you really want a particular default
+value then you can set it using sysctl net.ipv4.tcp_congestion_control. The
+module will be autoloaded if needed and you will get the expected protocol. If
+you ask for an unknown congestion method, then the sysctl attempt will fail.
 
-If you really want a particular default value then you will need
-to set it with the sysctl. If you use a sysctl, the module will be autoloaded
-if needed and you will get the expected protocol. If you ask for an
-unknown congestion method, then the sysctl attempt will fail.
-
-If you remove a tcp congestion control module, then you will get the next
+If you remove a TCP congestion control module, then you will get the next
 available one. Since reno cannot be built as a module, and cannot be
-deleted, it will always be available.
+removed, it will always be available.
 
 How the new TCP output machine [nyi] works.
 ===========================================
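As a concrete illustration of the requirement the hunk above documents (a
valid name plus either the ssthresh, cong_avoid and undo_cwnd hooks or
cong_control), here is a minimal sketch of a congestion control module that
simply delegates to reno's behaviour. It is not part of this commit, and the
"sketch" names are hypothetical.

	#include <linux/module.h>
	#include <net/tcp.h>

	/* Halve the congestion window on loss, with a floor of 2, like reno. */
	static u32 sketch_ssthresh(struct sock *sk)
	{
		return max(tcp_sk(sk)->snd_cwnd >> 1U, 2U);
	}

	/* Reuse reno's slow start / congestion avoidance window growth. */
	static void sketch_cong_avoid(struct sock *sk, u32 ack, u32 acked)
	{
		tcp_reno_cong_avoid(sk, ack, acked);
	}

	/* Restore the window recorded before a loss event judged spurious. */
	static u32 sketch_undo_cwnd(struct sock *sk)
	{
		return max(tcp_sk(sk)->snd_cwnd, tcp_sk(sk)->prior_cwnd);
	}

	static struct tcp_congestion_ops sketch_ops __read_mostly = {
		.name		= "sketch",
		.owner		= THIS_MODULE,
		.ssthresh	= sketch_ssthresh,
		.cong_avoid	= sketch_cong_avoid,
		.undo_cwnd	= sketch_undo_cwnd,
	};

	static int __init sketch_register(void)
	{
		return tcp_register_congestion_control(&sketch_ops);
	}

	static void __exit sketch_unregister(void)
	{
		tcp_unregister_congestion_control(&sketch_ops);
	}

	module_init(sketch_register);
	module_exit(sketch_unregister);
	MODULE_LICENSE("GPL");

Once such a module is loaded it can be selected through the sysctl mentioned
above, by setting net.ipv4.tcp_congestion_control to its name.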
MAINTAINERS
@@ -1172,7 +1172,7 @@ N:	clps711x
 
 ARM/CIRRUS LOGIC EP93XX ARM ARCHITECTURE
 M:	Hartley Sweeten <hsweeten@visionengravers.com>
-M:	Ryan Mallon <rmallon@gmail.com>
+M:	Alexander Sverdlin <alexander.sverdlin@gmail.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-ep93xx/

@@ -1489,13 +1489,15 @@ M:	Gregory Clement <gregory.clement@free-electrons.com>
 M:	Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
-F:	arch/arm/mach-mvebu/
-F:	drivers/rtc/rtc-armada38x.c
 F:	arch/arm/boot/dts/armada*
 F:	arch/arm/boot/dts/kirkwood*
+F:	arch/arm/configs/mvebu_*_defconfig
+F:	arch/arm/mach-mvebu/
 F:	arch/arm64/boot/dts/marvell/armada*
 F:	drivers/cpufreq/mvebu-cpufreq.c
-F:	arch/arm/configs/mvebu_*_defconfig
+F:	drivers/irqchip/irq-armada-370-xp.c
+F:	drivers/irqchip/irq-mvebu-*
+F:	drivers/rtc/rtc-armada38x.c
 
 ARM/Marvell Berlin SoC support
 M:	Jisheng Zhang <jszhang@marvell.com>

@@ -1721,7 +1723,6 @@ N:	rockchip
 ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
 M:	Kukjin Kim <kgene@kernel.org>
 M:	Krzysztof Kozlowski <krzk@kernel.org>
-R:	Javier Martinez Canillas <javier@osg.samsung.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 Q:	https://patchwork.kernel.org/project/linux-samsung-soc/list/

@@ -1829,7 +1830,6 @@ F:	drivers/edac/altera_edac.
 ARM/STI ARCHITECTURE
 M:	Patrice Chotard <patrice.chotard@st.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-L:	kernel@stlinux.com
 W:	http://www.stlinux.com
 S:	Maintained
 F:	arch/arm/mach-sti/

@@ -5641,7 +5641,7 @@ F:	scripts/get_maintainer.pl
 
 GENWQE (IBM Generic Workqueue Card)
 M:	Frank Haverkamp <haver@linux.vnet.ibm.com>
-M:	Gabriel Krisman Bertazi <krisman@linux.vnet.ibm.com>
+M:	Guilherme G. Piccoli <gpiccoli@linux.vnet.ibm.com>
 S:	Supported
 F:	drivers/misc/genwqe/

@@ -5686,7 +5686,6 @@ F:	tools/testing/selftests/gpio/
 
 GPIO SUBSYSTEM
 M:	Linus Walleij <linus.walleij@linaro.org>
-M:	Alexandre Courbot <gnurou@gmail.com>
 L:	linux-gpio@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
 S:	Maintained

@@ -7726,7 +7725,7 @@ F:	drivers/platform/x86/hp_accel.c
 
 LIVE PATCHING
 M:	Josh Poimboeuf <jpoimboe@redhat.com>
-M:	Jessica Yu <jeyu@redhat.com>
+M:	Jessica Yu <jeyu@kernel.org>
 M:	Jiri Kosina <jikos@kernel.org>
 M:	Miroslav Benes <mbenes@suse.cz>
 R:	Petr Mladek <pmladek@suse.com>

@@ -8527,7 +8526,7 @@ S:	Odd Fixes
 F:	drivers/media/radio/radio-miropcm20*
 
 MELLANOX MLX4 core VPI driver
-M:	Yishai Hadas <yishaih@mellanox.com>
+M:	Tariq Toukan <tariqt@mellanox.com>
 L:	netdev@vger.kernel.org
 L:	linux-rdma@vger.kernel.org
 W:	http://www.mellanox.com

@@ -8535,7 +8534,6 @@ Q:	http://patchwork.ozlabs.org/project/netdev/list/
 S:	Supported
 F:	drivers/net/ethernet/mellanox/mlx4/
 F:	include/linux/mlx4/
-F:	include/uapi/rdma/mlx4-abi.h
 
 MELLANOX MLX4 IB driver
 M:	Yishai Hadas <yishaih@mellanox.com>

@@ -8545,6 +8543,7 @@ Q:	http://patchwork.kernel.org/project/linux-rdma/list/
 S:	Supported
 F:	drivers/infiniband/hw/mlx4/
 F:	include/linux/mlx4/
+F:	include/uapi/rdma/mlx4-abi.h
 
 MELLANOX MLX5 core VPI driver
 M:	Saeed Mahameed <saeedm@mellanox.com>

@@ -8557,7 +8556,6 @@ Q:	http://patchwork.ozlabs.org/project/netdev/list/
 S:	Supported
 F:	drivers/net/ethernet/mellanox/mlx5/core/
 F:	include/linux/mlx5/
-F:	include/uapi/rdma/mlx5-abi.h
 
 MELLANOX MLX5 IB driver
 M:	Matan Barak <matanb@mellanox.com>

@@ -8568,6 +8566,7 @@ Q:	http://patchwork.kernel.org/project/linux-rdma/list/
 S:	Supported
 F:	drivers/infiniband/hw/mlx5/
 F:	include/linux/mlx5/
+F:	include/uapi/rdma/mlx5-abi.h
 
 MELEXIS MLX90614 DRIVER
 M:	Crt Mori <cmo@melexis.com>

@@ -8607,7 +8606,7 @@ S:	Maintained
 F:	drivers/media/dvb-frontends/mn88473*
 
 MODULE SUPPORT
-M:	Jessica Yu <jeyu@redhat.com>
+M:	Jessica Yu <jeyu@kernel.org>
 M:	Rusty Russell <rusty@rustcorp.com.au>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next
 S:	Maintained

@@ -10469,7 +10468,7 @@ S:	Orphan
 
 PXA RTC DRIVER
 M:	Robert Jarzmik <robert.jarzmik@free.fr>
-L:	rtc-linux@googlegroups.com
+L:	linux-rtc@vger.kernel.org
 S:	Maintained
 
 QAT DRIVER

@@ -10776,7 +10775,7 @@ X:	kernel/torture.c
 REAL TIME CLOCK (RTC) SUBSYSTEM
 M:	Alessandro Zummo <a.zummo@towertech.it>
 M:	Alexandre Belloni <alexandre.belloni@free-electrons.com>
-L:	rtc-linux@googlegroups.com
+L:	linux-rtc@vger.kernel.org
 Q:	http://patchwork.ozlabs.org/project/rtc-linux/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/abelloni/linux.git
 S:	Maintained

@@ -11287,7 +11286,6 @@ F:	drivers/media/rc/serial_ir.c
 
 STI CEC DRIVER
 M:	Benjamin Gaignard <benjamin.gaignard@linaro.org>
-L:	kernel@stlinux.com
 S:	Maintained
 F:	drivers/staging/media/st-cec/
 F:	Documentation/devicetree/bindings/media/stih-cec.txt

@@ -11797,6 +11795,7 @@ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci.git
 S:	Supported
 F:	arch/arm/mach-davinci/
 F:	drivers/i2c/busses/i2c-davinci.c
+F:	arch/arm/boot/dts/da850*
 
 TI DAVINCI SERIES MEDIA DRIVER
 M:	"Lad, Prabhakar" <prabhakar.csengg@gmail.com>

@@ -13880,7 +13879,7 @@ S:	Odd fixes
 F:	drivers/net/wireless/wl3501*
 
 WOLFSON MICROELECTRONICS DRIVERS
-L:	patches@opensource.wolfsonmicro.com
+L:	patches@opensource.cirrus.com
 T:	git https://github.com/CirrusLogic/linux-drivers.git
 W:	https://github.com/CirrusLogic/linux-drivers/wiki
 S:	Supported
Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc5
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
@@ -17,14 +17,12 @@
 		@ there.
 		.inst	'M' | ('Z' << 8) | (0x1310 << 16)	@ tstne r0, #0x4d000
 #else
-		mov	r0, r0
+		W(mov)	r0, r0
 #endif
 		.endm
 
 		.macro	__EFI_HEADER
 #ifdef CONFIG_EFI_STUB
-		b	__efi_start
-
 		.set	start_offset, __efi_start - start
 		.org	start + 0x3c
 		@
@@ -130,19 +130,22 @@ start:
 		.rept	7
 		__nop
 		.endr
- ARM(		mov	r0, r0		)
- ARM(		b	1f		)
- THUMB(		badr	r12, 1f	)
- THUMB(		bx	r12		)
+#ifndef CONFIG_THUMB2_KERNEL
+		mov	r0, r0
+#else
+ AR_CLASS(	sub	pc, pc, #3	)	@ A/R: switch to Thumb2 mode
+  M_CLASS(	nop.w			)	@ M: already in Thumb2 mode
+		.thumb
+#endif
+		W(b)	1f
 
 		.word	_magic_sig	@ Magic numbers to help the loader
 		.word	_magic_start	@ absolute load/run zImage address
 		.word	_magic_end	@ zImage end address
 		.word	0x04030201	@ endianness flag
 
- THUMB(		.thumb			)
-1:		__EFI_HEADER
+		__EFI_HEADER
+1:
 
 ARM_BE8(	setend	be )			@ go BE8 if compiled for BE8
  AR_CLASS(	mrs	r9, cpsr	)
 #ifdef CONFIG_ARM_VIRT_EXT
@@ -3,6 +3,11 @@
 #include <dt-bindings/clock/bcm2835-aux.h>
 #include <dt-bindings/gpio/gpio.h>
 
+/* firmware-provided startup stubs live here, where the secondary CPUs are
+ * spinning.
+ */
+/memreserve/ 0x00000000 0x00001000;
+
 /* This include file covers the common peripherals and configuration between
  * bcm2835 and bcm2836 implementations, leaving the CPU configuration to
  * bcm2835.dtsi and bcm2836.dtsi.
@@ -120,10 +120,16 @@
 
 			ethphy0: ethernet-phy@2 {
 				reg = <2>;
+				micrel,led-mode = <1>;
+				clocks = <&clks IMX6UL_CLK_ENET_REF>;
+				clock-names = "rmii-ref";
 			};
 
 			ethphy1: ethernet-phy@1 {
 				reg = <1>;
+				micrel,led-mode = <1>;
+				clocks = <&clks IMX6UL_CLK_ENET2_REF>;
+				clock-names = "rmii-ref";
 			};
 		};
 	};
@@ -137,8 +137,8 @@ netcp: netcp@26000000 {
 	/* NetCP address range */
 	ranges = <0 0x26000000 0x1000000>;
 
-	clocks = <&clkpa>, <&clkcpgmac>, <&chipclk12>, <&clkosr>;
-	clock-names = "pa_clk", "ethss_clk", "cpts", "osr_clk";
+	clocks = <&clkpa>, <&clkcpgmac>, <&chipclk12>;
+	clock-names = "pa_clk", "ethss_clk", "cpts";
 	dma-coherent;
 
 	ti,navigator-dmas = <&dma_gbe 0>,
@@ -232,6 +232,14 @@
 		};
 	};
 
+	osr: sram@70000000 {
+		compatible = "mmio-sram";
+		reg = <0x70000000 0x10000>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		clocks = <&clkosr>;
+	};
+
 	dspgpio0: keystone_dsp_gpio@02620240 {
 		compatible = "ti,keystone-dsp-gpio";
 		gpio-controller;
@@ -1,4 +1,4 @@
-#include <versatile-ab.dts>
+#include "versatile-ab.dts"
 
 / {
 	model = "ARM Versatile PB";
@@ -235,7 +235,7 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
 	return ret;
 }
 
-typedef void (*phys_reset_t)(unsigned long);
+typedef typeof(cpu_reset) phys_reset_t;
 
 void mcpm_cpu_power_down(void)
 {

@@ -300,7 +300,7 @@ void mcpm_cpu_power_down(void)
 	 * on the CPU.
 	 */
 	phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
-	phys_reset(__pa_symbol(mcpm_entry_point));
+	phys_reset(__pa_symbol(mcpm_entry_point), false);
 
 	/* should never get here */
 	BUG();

@@ -389,7 +389,7 @@ static int __init nocache_trampoline(unsigned long _arg)
 	__mcpm_cpu_down(cpu, cluster);
 
 	phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
-	phys_reset(__pa_symbol(mcpm_entry_point));
+	phys_reset(__pa_symbol(mcpm_entry_point), false);
 	BUG();
 }
 
@@ -19,7 +19,8 @@ struct dev_archdata {
 #ifdef CONFIG_XEN
 	const struct dma_map_ops *dev_dma_ops;
 #endif
-	bool dma_coherent;
+	unsigned int dma_coherent:1;
+	unsigned int dma_ops_setup:1;
 };
 
 struct omap_device;
@@ -66,6 +66,7 @@ typedef pte_t *pte_addr_t;
 #define pgprot_noncached(prot)	(prot)
 #define pgprot_writecombine(prot) (prot)
 #define pgprot_dmacoherent(prot) (prot)
+#define pgprot_device(prot)	(prot)
 
 
 /*
@@ -104,7 +104,6 @@ __do_hyp_init:
 	@  - Write permission implies XN: disabled
 	@  - Instruction cache: enabled
 	@  - Data/Unified cache: enabled
-	@  - Memory alignment checks: enabled
 	@  - MMU: enabled (this code must be run from an identity mapping)
 	mrc	p15, 4, r0, c1, c0, 0	@ HSCR
 	ldr	r2, =HSCTLR_MASK

@@ -112,8 +111,8 @@ __do_hyp_init:
 	mrc	p15, 0, r1, c1, c0, 0	@ SCTLR
 	ldr	r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
 	and	r1, r1, r2
- ARM(	ldr	r2, =(HSCTLR_M | HSCTLR_A)			)
- THUMB(	ldr	r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE)	)
+ ARM(	ldr	r2, =(HSCTLR_M)					)
+ THUMB(	ldr	r2, =(HSCTLR_M | HSCTLR_TE)			)
 	orr	r1, r1, r2
 	orr	r0, r0, r1
 	mcr	p15, 4, r0, c1, c0, 0	@ HSCR
@@ -1,6 +1,7 @@
 menuconfig ARCH_AT91
 	bool "Atmel SoCs"
 	depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V7
+	select ARM_CPU_SUSPEND if PM
 	select COMMON_CLK_AT91
 	select GPIOLIB
 	select PINCTRL
@@ -153,7 +153,8 @@ int __init davinci_pm_init(void)
 	davinci_sram_suspend = sram_alloc(davinci_cpu_suspend_sz, NULL);
 	if (!davinci_sram_suspend) {
 		pr_err("PM: cannot allocate SRAM memory\n");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto no_sram_mem;
 	}
 
 	davinci_sram_push(davinci_sram_suspend, davinci_cpu_suspend,

@@ -161,6 +162,10 @@ int __init davinci_pm_init(void)
 
 	suspend_set_ops(&davinci_pm_ops);
 
+	return 0;
+
+no_sram_mem:
+	iounmap(pm_config.ddrpsc_reg_base);
 no_ddrpsc_mem:
 	iounmap(pm_config.ddrpll_reg_base);
 no_ddrpll_mem:
@@ -2311,7 +2311,14 @@ int arm_iommu_attach_device(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
 
-static void __arm_iommu_detach_device(struct device *dev)
+/**
+ * arm_iommu_detach_device
+ * @dev: valid struct device pointer
+ *
+ * Detaches the provided device from a previously attached map.
+ * This voids the dma operations (dma_map_ops pointer)
+ */
+void arm_iommu_detach_device(struct device *dev)
 {
 	struct dma_iommu_mapping *mapping;
 

@@ -2324,22 +2331,10 @@ static void __arm_iommu_detach_device(struct device *dev)
 	iommu_detach_device(mapping->domain, dev);
 	kref_put(&mapping->kref, release_iommu_mapping);
 	to_dma_iommu_mapping(dev) = NULL;
+	set_dma_ops(dev, NULL);
 
 	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
 }
 
-/**
- * arm_iommu_detach_device
- * @dev: valid struct device pointer
- *
- * Detaches the provided device from a previously attached map.
- * This voids the dma operations (dma_map_ops pointer)
- */
-void arm_iommu_detach_device(struct device *dev)
-{
-	__arm_iommu_detach_device(dev);
-	set_dma_ops(dev, NULL);
-}
 EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
 
 static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)

@@ -2379,7 +2374,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
 	if (!mapping)
 		return;
 
-	__arm_iommu_detach_device(dev);
+	arm_iommu_detach_device(dev);
 	arm_iommu_release_mapping(mapping);
 }
 

@@ -2430,9 +2425,13 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 		dev->dma_ops = xen_dma_ops;
 	}
 #endif
+	dev->archdata.dma_ops_setup = true;
 }
 
 void arch_teardown_dma_ops(struct device *dev)
 {
+	if (!dev->archdata.dma_ops_setup)
+		return;
+
 	arm_teardown_iommu_dma_ops(dev);
 }
 
@@ -1084,10 +1084,6 @@ config SYSVIPC_COMPAT
 	def_bool y
 	depends on COMPAT && SYSVIPC
 
-config KEYS_COMPAT
-	def_bool y
-	depends on COMPAT && KEYS
-
 endmenu
 
 menu "Power management options"
@@ -231,8 +231,7 @@
 			cpm_crypto: crypto@800000 {
 				compatible = "inside-secure,safexcel-eip197";
 				reg = <0x800000 0x200000>;
-				interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING
-				| IRQ_TYPE_LEVEL_HIGH)>,
+				interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
 					     <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
 					     <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
 					     <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,

@@ -221,8 +221,7 @@
 			cps_crypto: crypto@800000 {
 				compatible = "inside-secure,safexcel-eip197";
 				reg = <0x800000 0x200000>;
-				interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING
-				| IRQ_TYPE_LEVEL_HIGH)>,
+				interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
 					     <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>,
 					     <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
 					     <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,
@@ -68,6 +68,7 @@ CONFIG_PCIE_QCOM=y
 CONFIG_PCIE_ARMADA_8K=y
 CONFIG_PCI_AARDVARK=y
 CONFIG_PCIE_RCAR=y
+CONFIG_PCIE_ROCKCHIP=m
 CONFIG_PCI_HOST_GENERIC=y
 CONFIG_PCI_XGENE=y
 CONFIG_ARM64_VA_BITS_48=y

@@ -208,6 +209,8 @@ CONFIG_BRCMFMAC=m
 CONFIG_WL18XX=m
 CONFIG_WLCORE_SDIO=m
 CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_ADC=m
+CONFIG_KEYBOARD_CROS_EC=y
 CONFIG_KEYBOARD_GPIO=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_PM8941_PWRKEY=y

@@ -263,6 +266,7 @@ CONFIG_SPI_MESON_SPIFC=m
 CONFIG_SPI_ORION=y
 CONFIG_SPI_PL022=y
 CONFIG_SPI_QUP=y
+CONFIG_SPI_ROCKCHIP=y
 CONFIG_SPI_S3C64XX=y
 CONFIG_SPI_SPIDEV=m
 CONFIG_SPMI=y

@@ -292,6 +296,7 @@ CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
 CONFIG_CPU_THERMAL=y
 CONFIG_THERMAL_EMULATION=y
 CONFIG_EXYNOS_THERMAL=y
+CONFIG_ROCKCHIP_THERMAL=m
 CONFIG_WATCHDOG=y
 CONFIG_S3C2410_WATCHDOG=y
 CONFIG_MESON_GXBB_WATCHDOG=m

@@ -300,12 +305,14 @@ CONFIG_RENESAS_WDT=y
 CONFIG_BCM2835_WDT=y
 CONFIG_MFD_CROS_EC=y
 CONFIG_MFD_CROS_EC_I2C=y
+CONFIG_MFD_CROS_EC_SPI=y
 CONFIG_MFD_EXYNOS_LPASS=m
 CONFIG_MFD_HI655X_PMIC=y
 CONFIG_MFD_MAX77620=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_MFD_RK808=y
 CONFIG_MFD_SEC_CORE=y
+CONFIG_REGULATOR_FAN53555=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_GPIO=y
 CONFIG_REGULATOR_HI655X=y

@@ -473,8 +480,10 @@ CONFIG_ARCH_TEGRA_186_SOC=y
 CONFIG_EXTCON_USB_GPIO=y
 CONFIG_IIO=y
 CONFIG_EXYNOS_ADC=y
+CONFIG_ROCKCHIP_SARADC=m
 CONFIG_PWM=y
 CONFIG_PWM_BCM2835=m
+CONFIG_PWM_CROS_EC=m
 CONFIG_PWM_MESON=m
 CONFIG_PWM_ROCKCHIP=y
 CONFIG_PWM_SAMSUNG=y

@@ -484,6 +493,7 @@ CONFIG_PHY_HI6220_USB=y
 CONFIG_PHY_SUN4I_USB=y
 CONFIG_PHY_ROCKCHIP_INNO_USB2=y
 CONFIG_PHY_ROCKCHIP_EMMC=y
+CONFIG_PHY_ROCKCHIP_PCIE=m
 CONFIG_PHY_XGENE=y
 CONFIG_PHY_TEGRA_XUSB=y
 CONFIG_ARM_SCPI_PROTOCOL=y
@@ -24,8 +24,8 @@
 	(acpi_gbl_FADT.header.revision < 6 ? 76 : 80)
 
 #define BAD_MADT_GICC_ENTRY(entry, end)					\
-	(!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) ||	\
-	 (entry)->header.length != ACPI_MADT_GICC_LENGTH)
+	(!(entry) || (entry)->header.length != ACPI_MADT_GICC_LENGTH ||	\
+	 (unsigned long)(entry) + ACPI_MADT_GICC_LENGTH > (end))
 
 /* Basic configuration for ACPI */
 #ifdef	CONFIG_ACPI
@@ -286,6 +286,10 @@
 #define SCTLR_ELx_A	(1 << 1)
 #define SCTLR_ELx_M	1
 
+#define SCTLR_EL2_RES1	((1 << 4)  | (1 << 5)  | (1 << 11) | (1 << 16) |	\
+			 (1 << 16) | (1 << 18) | (1 << 22) | (1 << 23) |	\
+			 (1 << 28) | (1 << 29))
+
 #define SCTLR_ELx_FLAGS	(SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
 			 SCTLR_ELx_SA | SCTLR_ELx_I)
 
@@ -191,8 +191,10 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
 		return NULL;
 
 	root_ops = kzalloc_node(sizeof(*root_ops), GFP_KERNEL, node);
-	if (!root_ops)
+	if (!root_ops) {
+		kfree(ri);
 		return NULL;
+	}
 
 	ri->cfg = pci_acpi_setup_ecam_mapping(root);
 	if (!ri->cfg) {
@@ -106,10 +106,13 @@ __do_hyp_init:
 	tlbi	alle2
 	dsb	sy
 
-	mrs	x4, sctlr_el2
-	and	x4, x4, #SCTLR_ELx_EE	// preserve endianness of EL2
-	ldr	x5, =SCTLR_ELx_FLAGS
-	orr	x4, x4, x5
+	/*
+	 * Preserve all the RES1 bits while setting the default flags,
+	 * as well as the EE bit on BE. Drop the A flag since the compiler
+	 * is allowed to generate unaligned accesses.
+	 */
+	ldr	x4, =(SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
+CPU_BE(	orr	x4, x4, #SCTLR_ELx_EE)
 	msr	sctlr_el2, x4
 	isb
 
@@ -65,8 +65,8 @@ static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		 * Here set VMCR.CTLR in ICC_CTLR_EL1 layout.
 		 * The vgic_set_vmcr() will convert to ICH_VMCR layout.
 		 */
-		vmcr.ctlr = val & ICC_CTLR_EL1_CBPR_MASK;
-		vmcr.ctlr |= val & ICC_CTLR_EL1_EOImode_MASK;
+		vmcr.cbpr = (val & ICC_CTLR_EL1_CBPR_MASK) >> ICC_CTLR_EL1_CBPR_SHIFT;
+		vmcr.eoim = (val & ICC_CTLR_EL1_EOImode_MASK) >> ICC_CTLR_EL1_EOImode_SHIFT;
 		vgic_set_vmcr(vcpu, &vmcr);
 	} else {
 		val = 0;

@@ -83,8 +83,8 @@ static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		 * The VMCR.CTLR value is in ICC_CTLR_EL1 layout.
 		 * Extract it directly using ICC_CTLR_EL1 reg definitions.
 		 */
-		val |= vmcr.ctlr & ICC_CTLR_EL1_CBPR_MASK;
-		val |= vmcr.ctlr & ICC_CTLR_EL1_EOImode_MASK;
+		val |= (vmcr.cbpr << ICC_CTLR_EL1_CBPR_SHIFT) & ICC_CTLR_EL1_CBPR_MASK;
+		val |= (vmcr.eoim << ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK;
 
 		p->regval = val;
 	}

@@ -135,7 +135,7 @@ static bool access_gic_bpr1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		p->regval = 0;
 
 	vgic_get_vmcr(vcpu, &vmcr);
-	if (!((vmcr.ctlr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT)) {
+	if (!vmcr.cbpr) {
 		if (p->is_write) {
 			vmcr.abpr = (p->regval & ICC_BPR1_EL1_MASK) >>
 				    ICC_BPR1_EL1_SHIFT;
@@ -16,5 +16,11 @@ static inline cycles_t get_cycles(void)
 #define vxtime_lock()		do {} while (0)
 #define vxtime_unlock()		do {} while (0)
 
+/* This attribute is used in include/linux/jiffies.h alongside with
+ * __cacheline_aligned_in_smp. It is assumed that __cacheline_aligned_in_smp
+ * for frv does not contain another section specification.
+ */
+#define __jiffy_arch_data	__attribute__((__section__(".data")))
+
 #endif
 
@@ -37,15 +37,14 @@ __kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count)
 	long uncleared;
 
 	while (count > PAGE_SIZE) {
-		uncleared = __copy_to_user_hexagon(dest, &empty_zero_page,
-						PAGE_SIZE);
+		uncleared = raw_copy_to_user(dest, &empty_zero_page, PAGE_SIZE);
 		if (uncleared)
 			return count - (PAGE_SIZE - uncleared);
 		count -= PAGE_SIZE;
 		dest += PAGE_SIZE;
 	}
 	if (count)
-		count = __copy_to_user_hexagon(dest, &empty_zero_page, count);
+		count = raw_copy_to_user(dest, &empty_zero_page, count);
 
 	return count;
 }
@@ -120,7 +120,6 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
 	struct thread_info *ti = task_thread_info(p);
 	struct pt_regs *childregs, *regs = current_pt_regs();
 	unsigned long childksp;
-	p->set_child_tid = p->clear_child_tid = NULL;
 
 	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
 
@@ -167,8 +167,6 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
 
 	top_of_kernel_stack = sp;
 
-	p->set_child_tid = p->clear_child_tid = NULL;
-
 	/* Locate userspace context on stack... */
 	sp -= STACK_FRAME_OVERHEAD;	/* redzone */
 	sp -= sizeof(struct pt_regs);
@ -380,22 +380,6 @@ source "arch/powerpc/platforms/Kconfig"
|
||||||
|
|
||||||
menu "Kernel options"
|
menu "Kernel options"
|
||||||
|
|
||||||
config PPC_DT_CPU_FTRS
|
|
||||||
bool "Device-tree based CPU feature discovery & setup"
|
|
||||||
depends on PPC_BOOK3S_64
|
|
||||||
default n
|
|
||||||
help
|
|
||||||
This enables code to use a new device tree binding for describing CPU
|
|
||||||
compatibility and features. Saying Y here will attempt to use the new
|
|
||||||
binding if the firmware provides it. Currently only the skiboot
|
|
||||||
firmware provides this binding.
|
|
||||||
If you're not sure say Y.
|
|
||||||
|
|
||||||
config PPC_CPUFEATURES_ENABLE_UNKNOWN
|
|
||||||
bool "cpufeatures pass through unknown features to guest/userspace"
|
|
||||||
depends on PPC_DT_CPU_FTRS
|
|
||||||
default y
|
|
||||||
|
|
||||||
config HIGHMEM
|
config HIGHMEM
|
||||||
bool "High memory support"
|
bool "High memory support"
|
||||||
depends on PPC32
|
depends on PPC32
|
||||||
|
@ -1215,11 +1199,6 @@ source "arch/powerpc/Kconfig.debug"
|
||||||
|
|
||||||
source "security/Kconfig"
|
source "security/Kconfig"
|
||||||
|
|
||||||
config KEYS_COMPAT
|
|
||||||
bool
|
|
||||||
depends on COMPAT && KEYS
|
|
||||||
default y
|
|
||||||
|
|
||||||
source "crypto/Kconfig"
|
source "crypto/Kconfig"
|
||||||
|
|
||||||
config PPC_LIB_RHEAP
|
config PPC_LIB_RHEAP
|
||||||
|
|
|
@@ -8,7 +8,7 @@
 #define H_PTE_INDEX_SIZE  9
 #define H_PMD_INDEX_SIZE  7
 #define H_PUD_INDEX_SIZE  9
-#define H_PGD_INDEX_SIZE  12
+#define H_PGD_INDEX_SIZE  9
 
 #ifndef __ASSEMBLY__
 #define H_PTE_TABLE_SIZE	(sizeof(pte_t) << H_PTE_INDEX_SIZE)
@@ -214,7 +214,6 @@ enum {
 #define CPU_FTR_DAWR			LONG_ASM_CONST(0x0400000000000000)
 #define CPU_FTR_DABRX			LONG_ASM_CONST(0x0800000000000000)
 #define CPU_FTR_PMAO_BUG		LONG_ASM_CONST(0x1000000000000000)
-#define CPU_FTR_SUBCORE			LONG_ASM_CONST(0x2000000000000000)
 #define CPU_FTR_POWER9_DD1		LONG_ASM_CONST(0x4000000000000000)
 
 #ifndef __ASSEMBLY__

@@ -463,7 +462,7 @@ enum {
 	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
 	    CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
 	    CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
-	    CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_SUBCORE)
+	    CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
 #define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
 #define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
 #define CPU_FTRS_POWER9 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
@@ -110,13 +110,18 @@ void release_thread(struct task_struct *);
 #define TASK_SIZE_128TB (0x0000800000000000UL)
 #define TASK_SIZE_512TB (0x0002000000000000UL)
 
-#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * For now 512TB is only supported with book3s and 64K linux page size.
+ */
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_64K_PAGES)
 /*
  * Max value currently used:
  */
 #define TASK_SIZE_USER64		TASK_SIZE_512TB
+#define DEFAULT_MAP_WINDOW_USER64	TASK_SIZE_128TB
 #else
 #define TASK_SIZE_USER64		TASK_SIZE_64TB
+#define DEFAULT_MAP_WINDOW_USER64	TASK_SIZE_64TB
 #endif
 
 /*

@@ -132,7 +137,7 @@ void release_thread(struct task_struct *);
  * space during mmap's.
  */
 #define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
-#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_128TB / 4))
+#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(DEFAULT_MAP_WINDOW_USER64 / 4))
 
 #define TASK_UNMAPPED_BASE ((is_32bit_task()) ? \
 		TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )

@@ -144,20 +149,14 @@ void release_thread(struct task_struct *);
  */
 #ifdef CONFIG_PPC_BOOK3S_64
 #define DEFAULT_MAP_WINDOW	((is_32bit_task()) ? \
-				 TASK_SIZE_USER32 : TASK_SIZE_128TB)
+				 TASK_SIZE_USER32 : DEFAULT_MAP_WINDOW_USER64)
 #else
 #define DEFAULT_MAP_WINDOW	TASK_SIZE
 #endif
 
 #ifdef __powerpc64__
 
-#ifdef CONFIG_PPC_BOOK3S_64
-/* Limit stack to 128TB */
-#define STACK_TOP_USER64 TASK_SIZE_128TB
-#else
-#define STACK_TOP_USER64 TASK_SIZE_USER64
-#endif
+#define STACK_TOP_USER64 DEFAULT_MAP_WINDOW_USER64
 
 #define STACK_TOP_USER32 TASK_SIZE_USER32
 
 #define STACK_TOP (is_32bit_task() ? \
@@ -44,8 +44,22 @@ extern void __init dump_numa_cpu_topology(void);
 extern int sysfs_add_device_to_node(struct device *dev, int nid);
 extern void sysfs_remove_device_from_node(struct device *dev, int nid);
 
+static inline int early_cpu_to_node(int cpu)
+{
+	int nid;
+
+	nid = numa_cpu_lookup_table[cpu];
+
+	/*
+	 * Fall back to node 0 if nid is unset (it should be, except bugs).
+	 * This allows callers to safely do NODE_DATA(early_cpu_to_node(cpu)).
+	 */
+	return (nid < 0) ? 0 : nid;
+}
 #else
 
+static inline int early_cpu_to_node(int cpu) { return 0; }
+
 static inline void dump_numa_cpu_topology(void) {}
 
 static inline int sysfs_add_device_to_node(struct device *dev, int nid)
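A note on the hunk above: early_cpu_to_node() exists because percpu setup runs before the full cpu-to-node map is populated, so an unset (negative) lookup entry is clamped to node 0 and NODE_DATA() stays safe to dereference. A minimal standalone C sketch of the same guard, with made-up table contents:

	#include <stdio.h>

	/* Hypothetical lookup table; -1 marks an entry not yet filled in. */
	static int numa_cpu_lookup_table[4] = { 0, 1, -1, 1 };

	static int early_cpu_to_node(int cpu)
	{
		int nid = numa_cpu_lookup_table[cpu];

		return (nid < 0) ? 0 : nid;	/* clamp unset entries to node 0 */
	}

	int main(void)
	{
		for (int cpu = 0; cpu < 4; cpu++)
			printf("cpu %d -> node %d\n", cpu, early_cpu_to_node(cpu));
		return 0;
	}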
@@ -8,6 +8,7 @@
 #include <linux/export.h>
 #include <linux/init.h>
 #include <linux/jump_label.h>
+#include <linux/libfdt.h>
 #include <linux/memblock.h>
 #include <linux/printk.h>
 #include <linux/sched.h>

@@ -642,7 +643,6 @@ static struct dt_cpu_feature_match __initdata
 	{"processor-control-facility", feat_enable_dbell, CPU_FTR_DBELL},
 	{"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
 	{"processor-utilization-of-resources-register", feat_enable_purr, 0},
-	{"subcore", feat_enable, CPU_FTR_SUBCORE},
 	{"no-execute", feat_enable, 0},
 	{"strong-access-ordering", feat_enable, CPU_FTR_SAO},
 	{"cache-inhibited-large-page", feat_enable_large_ci, 0},
@@ -671,12 +671,24 @@ static struct dt_cpu_feature_match __initdata
 	{"wait-v3", feat_enable, 0},
 };
 
-/* XXX: how to configure this? Default + boot time? */
-#ifdef CONFIG_PPC_CPUFEATURES_ENABLE_UNKNOWN
-#define CPU_FEATURE_ENABLE_UNKNOWN 1
-#else
-#define CPU_FEATURE_ENABLE_UNKNOWN 0
-#endif
+static bool __initdata using_dt_cpu_ftrs;
+static bool __initdata enable_unknown = true;
+
+static int __init dt_cpu_ftrs_parse(char *str)
+{
+	if (!str)
+		return 0;
+
+	if (!strcmp(str, "off"))
+		using_dt_cpu_ftrs = false;
+	else if (!strcmp(str, "known"))
+		enable_unknown = false;
+	else
+		return 1;
+
+	return 0;
+}
+early_param("dt_cpu_ftrs", dt_cpu_ftrs_parse);
 
 static void __init cpufeatures_setup_start(u32 isa)
 {
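The hunk above replaces the compile-time CPU_FEATURE_ENABLE_UNKNOWN switch with a boot-time one: early_param() registers a handler that runs while the kernel parses its command line, so dt_cpu_ftrs=off and dt_cpu_ftrs=known flip the two flags before feature setup. A userspace sketch of the same string dispatch, assuming simplified initial values (in the kernel, using_dt_cpu_ftrs starts false and is only set once the binding is actually found):

	#include <stdio.h>
	#include <string.h>
	#include <stdbool.h>

	static bool using_dt_cpu_ftrs = true;	/* simplified for the demo */
	static bool enable_unknown = true;

	static int dt_cpu_ftrs_parse(const char *str)
	{
		if (!str)
			return 0;

		if (!strcmp(str, "off"))
			using_dt_cpu_ftrs = false;
		else if (!strcmp(str, "known"))
			enable_unknown = false;
		else
			return 1;	/* unrecognised value */

		return 0;
	}

	int main(void)
	{
		dt_cpu_ftrs_parse("known");
		printf("using=%d enable_unknown=%d\n",
		       using_dt_cpu_ftrs, enable_unknown);
		return 0;
	}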
@@ -707,7 +719,7 @@ static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
 		}
 	}
 
-	if (!known && CPU_FEATURE_ENABLE_UNKNOWN) {
+	if (!known && enable_unknown) {
 		if (!feat_try_enable_unknown(f)) {
 			pr_info("not enabling: %s (unknown and unsupported by kernel)\n",
 				f->name);
@@ -756,6 +768,26 @@ static void __init cpufeatures_setup_finished(void)
 		cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features);
 }
 
+static int __init disabled_on_cmdline(void)
+{
+	unsigned long root, chosen;
+	const char *p;
+
+	root = of_get_flat_dt_root();
+	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
+	if (chosen == -FDT_ERR_NOTFOUND)
+		return false;
+
+	p = of_get_flat_dt_prop(chosen, "bootargs", NULL);
+	if (!p)
+		return false;
+
+	if (strstr(p, "dt_cpu_ftrs=off"))
+		return true;
+
+	return false;
+}
+
 static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
 					int depth, void *data)
 {
@@ -766,8 +798,6 @@ static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
 	return 0;
 }
 
-static bool __initdata using_dt_cpu_ftrs = false;
-
 bool __init dt_cpu_ftrs_in_use(void)
 {
 	return using_dt_cpu_ftrs;
@@ -775,6 +805,8 @@ bool __init dt_cpu_ftrs_in_use(void)
 
 bool __init dt_cpu_ftrs_init(void *fdt)
 {
+	using_dt_cpu_ftrs = false;
+
 	/* Setup and verify the FDT, if it fails we just bail */
 	if (!early_init_dt_verify(fdt))
 		return false;
@@ -782,6 +814,9 @@ bool __init dt_cpu_ftrs_init(void *fdt)
 	if (!of_scan_flat_dt(fdt_find_cpu_features, NULL))
 		return false;
 
+	if (disabled_on_cmdline())
+		return false;
+
 	cpufeatures_setup_cpu();
 
 	using_dt_cpu_ftrs = true;
@@ -1027,5 +1062,8 @@ static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
 
 void __init dt_cpu_ftrs_scan(void)
 {
+	if (!using_dt_cpu_ftrs)
+		return;
+
 	of_scan_flat_dt(dt_cpu_ftrs_scan_callback, NULL);
 }
@@ -1666,6 +1666,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 #ifdef CONFIG_VSX
 	current->thread.used_vsr = 0;
 #endif
+	current->thread.load_fp = 0;
 	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
 	current->thread.fp_save_area = NULL;
 #ifdef CONFIG_ALTIVEC
@@ -1674,6 +1675,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 	current->thread.vr_save_area = NULL;
 	current->thread.vrsave = 0;
 	current->thread.used_vr = 0;
+	current->thread.load_vec = 0;
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_SPE
 	memset(current->thread.evr, 0, sizeof(current->thread.evr));
@@ -1685,6 +1687,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 	current->thread.tm_tfhar = 0;
 	current->thread.tm_texasr = 0;
 	current->thread.tm_tfiar = 0;
+	current->thread.load_tm = 0;
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 }
 EXPORT_SYMBOL(start_thread);
@@ -928,7 +928,7 @@ void __init setup_arch(char **cmdline_p)
 
 #ifdef CONFIG_PPC_MM_SLICES
 #ifdef CONFIG_PPC64
-	init_mm.context.addr_limit = TASK_SIZE_128TB;
+	init_mm.context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
 #else
 #error	"context.addr_limit not initialized."
 #endif

@@ -661,7 +661,7 @@ void __init emergency_stack_init(void)
 
 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
 {
-	return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
+	return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align,
 				    __pa(MAX_DMA_ADDRESS));
 }
 
@@ -672,7 +672,7 @@ static void __init pcpu_fc_free(void *ptr, size_t size)
 
 static int pcpu_cpu_distance(unsigned int from, unsigned int to)
 {
-	if (cpu_to_node(from) == cpu_to_node(to))
+	if (early_cpu_to_node(from) == early_cpu_to_node(to))
 		return LOCAL_DISTANCE;
 	else
 		return REMOTE_DISTANCE;
@@ -99,7 +99,7 @@ static int hash__init_new_context(struct mm_struct *mm)
 	 * mm->context.addr_limit. Default to max task size so that we copy the
 	 * default values to paca which will help us to handle slb miss early.
 	 */
-	mm->context.addr_limit = TASK_SIZE_128TB;
+	mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
 
 	/*
 	 * The old code would re-promote on fork, we don't do that when using
@@ -402,7 +402,7 @@ static struct power_pmu power9_isa207_pmu = {
 	.name			= "POWER9",
 	.n_counter		= MAX_PMU_COUNTERS,
 	.add_fields		= ISA207_ADD_FIELDS,
-	.test_adder		= ISA207_TEST_ADDER,
+	.test_adder		= P9_DD1_TEST_ADDER,
 	.compute_mmcr		= isa207_compute_mmcr,
 	.config_bhrb		= power9_config_bhrb,
 	.bhrb_filter_map	= power9_bhrb_filter_map,
@@ -421,7 +421,7 @@ static struct power_pmu power9_pmu = {
 	.name			= "POWER9",
 	.n_counter		= MAX_PMU_COUNTERS,
 	.add_fields		= ISA207_ADD_FIELDS,
-	.test_adder		= P9_DD1_TEST_ADDER,
+	.test_adder		= ISA207_TEST_ADDER,
 	.compute_mmcr		= isa207_compute_mmcr,
 	.config_bhrb		= power9_config_bhrb,
 	.bhrb_filter_map	= power9_bhrb_filter_map,
@@ -59,6 +59,17 @@ config PPC_OF_BOOT_TRAMPOLINE
 
 	  In case of doubt, say Y
 
+config PPC_DT_CPU_FTRS
+	bool "Device-tree based CPU feature discovery & setup"
+	depends on PPC_BOOK3S_64
+	default y
+	help
+	  This enables code to use a new device tree binding for describing CPU
+	  compatibility and features. Saying Y here will attempt to use the new
+	  binding if the firmware provides it. Currently only the skiboot
+	  firmware provides this binding.
+	  If you're not sure say Y.
+
 config UDBG_RTAS_CONSOLE
 	bool "RTAS based debug console"
 	depends on PPC_RTAS
@@ -175,6 +175,8 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i,
 	skip = roundup(cprm->pos - total + sz, 4) - cprm->pos;
 	if (!dump_skip(cprm, skip))
 		goto Eio;
+
+	rc = 0;
 out:
 	free_page((unsigned long)buf);
 	return rc;
@@ -407,7 +407,13 @@ static DEVICE_ATTR(subcores_per_core, 0644,
 
 static int subcore_init(void)
 {
-	if (!cpu_has_feature(CPU_FTR_SUBCORE))
+	unsigned pvr_ver;
+
+	pvr_ver = PVR_VER(mfspr(SPRN_PVR));
+
+	if (pvr_ver != PVR_POWER8 &&
+	    pvr_ver != PVR_POWER8E &&
+	    pvr_ver != PVR_POWER8NVL)
 		return 0;
 
 	/*
@@ -124,6 +124,7 @@ static struct property *dlpar_clone_drconf_property(struct device_node *dn)
 	for (i = 0; i < num_lmbs; i++) {
 		lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
 		lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
+		lmbs[i].aa_index = be32_to_cpu(lmbs[i].aa_index);
 		lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
 	}
 
@@ -147,6 +148,7 @@ static void dlpar_update_drconf_property(struct device_node *dn,
 	for (i = 0; i < num_lmbs; i++) {
 		lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
 		lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
+		lmbs[i].aa_index = cpu_to_be32(lmbs[i].aa_index);
 		lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
 	}
 
@@ -75,7 +75,8 @@ static int u8_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
 
 static void u8_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
 {
-	struct u8_gpio_chip *u8_gc = gpiochip_get_data(&mm_gc->gc);
+	struct u8_gpio_chip *u8_gc =
+		container_of(mm_gc, struct u8_gpio_chip, mm_gc);
 
 	u8_gc->data = in_8(mm_gc->regs);
 }
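The revert above matters because this save_regs hook can run before the gpiochip private data pointer has been stored, so gpiochip_get_data() would return garbage; container_of() instead recovers the wrapper structure from the embedded member by pure pointer arithmetic. A standalone sketch of that pattern, with invented demo types and a simplified macro definition:

	#include <stdio.h>
	#include <stddef.h>

	/* Classic container_of: subtract the member's offset from its address. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct mm_gc { int regs; };		/* stand-in for of_mm_gpio_chip */

	struct u8_gc {				/* stand-in for u8_gpio_chip */
		int data;
		struct mm_gc mm_gc;
	};

	int main(void)
	{
		struct u8_gc chip = { .data = 42 };
		struct mm_gc *inner = &chip.mm_gc;
		struct u8_gc *outer = container_of(inner, struct u8_gc, mm_gc);

		printf("data = %d\n", outer->data);	/* prints 42 */
		return 0;
	}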
@@ -363,9 +363,6 @@ config COMPAT
 config SYSVIPC_COMPAT
 	def_bool y if COMPAT && SYSVIPC
 
-config KEYS_COMPAT
-	def_bool y if COMPAT && KEYS
-
 config SMP
 	def_bool y
 	prompt "Symmetric multi-processing support"
@@ -541,7 +541,6 @@ struct kvm_s390_float_interrupt {
 	struct mutex ais_lock;
 	u8 simm;
 	u8 nimm;
-	int ais_enabled;
 };
 
 struct kvm_hw_wp_info_arch {

@@ -2160,7 +2160,7 @@ static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
 	struct kvm_s390_ais_req req;
 	int ret = 0;
 
-	if (!fi->ais_enabled)
+	if (!test_kvm_facility(kvm, 72))
 		return -ENOTSUPP;
 
 	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
@@ -2204,7 +2204,7 @@ static int kvm_s390_inject_airq(struct kvm *kvm,
 	};
 	int ret = 0;
 
-	if (!fi->ais_enabled || !adapter->suppressible)
+	if (!test_kvm_facility(kvm, 72) || !adapter->suppressible)
 		return kvm_s390_inject_vm(kvm, &s390int);
 
 	mutex_lock(&fi->ais_lock);

@@ -558,7 +558,6 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
 	} else {
 		set_kvm_facility(kvm->arch.model.fac_mask, 72);
 		set_kvm_facility(kvm->arch.model.fac_list, 72);
-		kvm->arch.float_int.ais_enabled = 1;
 		r = 0;
 	}
 	mutex_unlock(&kvm->lock);
@@ -1533,7 +1532,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	mutex_init(&kvm->arch.float_int.ais_lock);
 	kvm->arch.float_int.simm = 0;
 	kvm->arch.float_int.nimm = 0;
-	kvm->arch.float_int.ais_enabled = 0;
 	spin_lock_init(&kvm->arch.float_int.lock);
 	for (i = 0; i < FIRQ_LIST_COUNT; i++)
 		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
@@ -192,9 +192,9 @@ config NR_CPUS
 	int "Maximum number of CPUs"
 	depends on SMP
 	range 2 32 if SPARC32
-	range 2 1024 if SPARC64
+	range 2 4096 if SPARC64
 	default 32 if SPARC32
-	default 64 if SPARC64
+	default 4096 if SPARC64
 
 source kernel/Kconfig.hz
 
@@ -295,9 +295,13 @@ config NUMA
 	depends on SPARC64 && SMP
 
 config NODES_SHIFT
-	int
-	default "4"
+	int "Maximum NUMA Nodes (as a power of 2)"
+	range 4 5 if SPARC64
+	default "5"
 	depends on NEED_MULTIPLE_NODES
+	help
+	  Specify the maximum number of NUMA Nodes available on the target
+	  system. Increases memory reserved to accommodate various tables.
 
 # Some NUMA nodes have memory ranges that span
 # other nodes. Even though a pfn is valid and
@@ -573,9 +577,6 @@ config SYSVIPC_COMPAT
 	depends on COMPAT && SYSVIPC
 	default y
 
-config KEYS_COMPAT
-	def_bool y if COMPAT && KEYS
-
 endmenu
 
 source "net/Kconfig"
@@ -52,7 +52,7 @@
 #define CTX_NR_MASK		TAG_CONTEXT_BITS
 #define CTX_HW_MASK		(CTX_NR_MASK | CTX_PGSZ_MASK)
 
-#define CTX_FIRST_VERSION	((_AC(1,UL) << CTX_VERSION_SHIFT) + _AC(1,UL))
+#define CTX_FIRST_VERSION	BIT(CTX_VERSION_SHIFT)
 #define CTX_VALID(__ctx) \
 	 (!(((__ctx.sparc64_ctx_val) ^ tlb_context_cache) & CTX_VERSION_MASK))
 #define CTX_HWBITS(__ctx)	((__ctx.sparc64_ctx_val) & CTX_HW_MASK)
@@ -19,13 +19,8 @@ extern spinlock_t ctx_alloc_lock;
 extern unsigned long tlb_context_cache;
 extern unsigned long mmu_context_bmap[];
 
+DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
 void get_new_mmu_context(struct mm_struct *mm);
-#ifdef CONFIG_SMP
-void smp_new_mmu_context_version(void);
-#else
-#define smp_new_mmu_context_version() do { } while (0)
-#endif
 
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void destroy_context(struct mm_struct *mm);
 
@@ -76,8 +71,9 @@ void __flush_tlb_mm(unsigned long, unsigned long);
 static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
 {
 	unsigned long ctx_valid, flags;
-	int cpu;
+	int cpu = smp_processor_id();
 
+	per_cpu(per_cpu_secondary_mm, cpu) = mm;
 	if (unlikely(mm == &init_mm))
 		return;
 
@@ -123,7 +119,6 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 	 * for the first time, we must flush that context out of the
 	 * local TLB.
 	 */
-	cpu = smp_processor_id();
 	if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
 		cpumask_set_cpu(cpu, mm_cpumask(mm));
 		__flush_tlb_mm(CTX_HWBITS(mm->context),
@@ -133,26 +128,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 }
 
 #define deactivate_mm(tsk,mm)	do { } while (0)
-
-/* Activate a new MM instance for the current task. */
-static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
-{
-	unsigned long flags;
-	int cpu;
-
-	spin_lock_irqsave(&mm->context.lock, flags);
-	if (!CTX_VALID(mm->context))
-		get_new_mmu_context(mm);
-	cpu = smp_processor_id();
-	if (!cpumask_test_cpu(cpu, mm_cpumask(mm)))
-		cpumask_set_cpu(cpu, mm_cpumask(mm));
-
-	load_secondary_context(mm);
-	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
-	tsb_context_switch(mm);
-	spin_unlock_irqrestore(&mm->context.lock, flags);
-}
-
+#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)
 #endif /* !(__ASSEMBLY__) */
 
 #endif /* !(__SPARC64_MMU_CONTEXT_H) */
@@ -20,7 +20,6 @@
 #define PIL_SMP_CALL_FUNC	1
 #define PIL_SMP_RECEIVE_SIGNAL	2
 #define PIL_SMP_CAPTURE		3
-#define PIL_SMP_CTX_NEW_VERSION	4
 #define PIL_DEVICE_IRQ		5
 #define PIL_SMP_CALL_FUNC_SNGL	6
 #define PIL_DEFERRED_PCR_WORK	7

@@ -327,6 +327,7 @@ struct vio_dev {
 	int			compat_len;
 
 	u64			dev_no;
+	u64			id;
 
 	unsigned long		channel_id;
 
@@ -909,7 +909,7 @@ static int register_services(struct ds_info *dp)
 		pbuf.req.handle = cp->handle;
 		pbuf.req.major = 1;
 		pbuf.req.minor = 0;
-		strcpy(pbuf.req.svc_id, cp->service_id);
+		strcpy(pbuf.id_buf, cp->service_id);
 
 		err = __ds_send(lp, &pbuf, msg_len);
 		if (err > 0)
@@ -1034,17 +1034,26 @@ static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
 {
 #ifdef CONFIG_SMP
 	unsigned long page;
+	void *mondo, *p;
 
-	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
+	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE);
+
+	/* Make sure mondo block is 64byte aligned */
+	p = kzalloc(127, GFP_KERNEL);
+	if (!p) {
+		prom_printf("SUN4V: Error, cannot allocate mondo block.\n");
+		prom_halt();
+	}
+	mondo = (void *)(((unsigned long)p + 63) & ~0x3f);
+	tb->cpu_mondo_block_pa = __pa(mondo);
 
 	page = get_zeroed_page(GFP_KERNEL);
 	if (!page) {
-		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
+		prom_printf("SUN4V: Error, cannot allocate cpu list page.\n");
 		prom_halt();
 	}
 
-	tb->cpu_mondo_block_pa = __pa(page);
-	tb->cpu_list_pa = __pa(page + 64);
+	tb->cpu_list_pa = __pa(page);
 #endif
 }
 
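The hunk above uses the standard manual-alignment trick: over-allocate by (alignment - 1) bytes, then round the pointer up, which is why 127 bytes are requested for a 64-byte block. A standalone C sketch of the same arithmetic:

	#include <stdio.h>
	#include <stdlib.h>
	#include <stdint.h>

	int main(void)
	{
		size_t want = 64;			/* block size */
		void *p = calloc(1, want + 63);		/* 127 bytes for a 64-byte block */
		if (!p)
			return 1;

		/* Round up to the next 64-byte boundary, as in the hunk. */
		void *mondo = (void *)(((uintptr_t)p + 63) & ~(uintptr_t)0x3f);

		printf("raw=%p aligned=%p (mod 64 = %lu)\n",
		       p, mondo, (unsigned long)((uintptr_t)mondo % 64));
		free(p);				/* free the original pointer */
		return 0;
	}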
@@ -37,7 +37,6 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
 /* smp_64.c */
 void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs);
 void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs);
-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs);
 void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs);
 void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs);

@@ -964,37 +964,6 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 	preempt_enable();
 }
 
-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
-{
-	struct mm_struct *mm;
-	unsigned long flags;
-
-	clear_softint(1 << irq);
-
-	/* See if we need to allocate a new TLB context because
-	 * the version of the one we are using is now out of date.
-	 */
-	mm = current->active_mm;
-	if (unlikely(!mm || (mm == &init_mm)))
-		return;
-
-	spin_lock_irqsave(&mm->context.lock, flags);
-
-	if (unlikely(!CTX_VALID(mm->context)))
-		get_new_mmu_context(mm);
-
-	spin_unlock_irqrestore(&mm->context.lock, flags);
-
-	load_secondary_context(mm);
-	__flush_tlb_mm(CTX_HWBITS(mm->context),
-		       SECONDARY_CONTEXT);
-}
-
-void smp_new_mmu_context_version(void)
-{
-	smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
-}
-
 #ifdef CONFIG_KGDB
 void kgdb_roundup_cpus(unsigned long flags)
 {
@@ -455,13 +455,16 @@ __tsb_context_switch:
 	.type	copy_tsb,#function
 copy_tsb:		/* %o0=old_tsb_base, %o1=old_tsb_size
 			 * %o2=new_tsb_base, %o3=new_tsb_size
+			 * %o4=page_size_shift
 			 */
 	sethi		%uhi(TSB_PASS_BITS), %g7
 	srlx		%o3, 4, %o3
-	add		%o0, %o1, %g1	/* end of old tsb */
+	add		%o0, %o1, %o1	/* end of old tsb */
 	sllx		%g7, 32, %g7
 	sub		%o3, 1, %o3	/* %o3 == new tsb hash mask */
 
+	mov		%o4, %g1	/* page_size_shift */
+
 661:	prefetcha	[%o0] ASI_N, #one_read
 	.section	.tsb_phys_patch, "ax"
 	.word		661b
@@ -486,9 +489,9 @@ copy_tsb:		/* %o0=old_tsb_base, %o1=old_tsb_size
 	/* This can definitely be computed faster... */
 	srlx		%o0, 4, %o5	/* Build index */
 	and		%o5, 511, %o5	/* Mask index */
-	sllx		%o5, PAGE_SHIFT, %o5 /* Put into vaddr position */
+	sllx		%o5, %g1, %o5	/* Put into vaddr position */
 	or		%o4, %o5, %o4	/* Full VADDR. */
-	srlx		%o4, PAGE_SHIFT, %o4 /* Shift down to create index */
+	srlx		%o4, %g1, %o4	/* Shift down to create index */
 	and		%o4, %o3, %o4	/* Mask with new_tsb_nents-1 */
 	sllx		%o4, 4, %o4	/* Shift back up into tsb ent offset */
 	TSB_STORE(%o2 + %o4, %g2)	/* Store TAG */
@@ -496,7 +499,7 @@ copy_tsb:		/* %o0=old_tsb_base, %o1=old_tsb_size
 	TSB_STORE(%o2 + %o4, %g3)	/* Store TTE */
 
 80:	add		%o0, 16, %o0
-	cmp		%o0, %g1
+	cmp		%o0, %o1
 	bne,pt		%xcc, 90b
 	 nop
 
@@ -50,7 +50,7 @@ tl0_resv03e:	BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40)
tl0_irq1:	TRAP_IRQ(smp_call_function_client, 1)
tl0_irq2:	TRAP_IRQ(smp_receive_signal_client, 2)
tl0_irq3:	TRAP_IRQ(smp_penguin_jailcell, 3)
-tl0_irq4:	TRAP_IRQ(smp_new_mmu_context_version_client, 4)
+tl0_irq4:	BTRAP(0x44)
 #else
tl0_irq1:	BTRAP(0x41)
tl0_irq2:	BTRAP(0x42)
@@ -302,13 +302,16 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
 	if (!id) {
 		dev_set_name(&vdev->dev, "%s", bus_id_name);
 		vdev->dev_no = ~(u64)0;
+		vdev->id = ~(u64)0;
 	} else if (!cfg_handle) {
 		dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id);
 		vdev->dev_no = *id;
+		vdev->id = ~(u64)0;
 	} else {
 		dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name,
 			     *cfg_handle, *id);
 		vdev->dev_no = *cfg_handle;
+		vdev->id = *id;
 	}
 
 	vdev->dev.parent = parent;
@@ -351,27 +354,84 @@ static void vio_add(struct mdesc_handle *hp, u64 node)
 	(void) vio_create_one(hp, node, &root_vdev->dev);
 }
 
+struct vio_md_node_query {
+	const char *type;
+	u64 dev_no;
+	u64 id;
+};
+
 static int vio_md_node_match(struct device *dev, void *arg)
 {
+	struct vio_md_node_query *query = (struct vio_md_node_query *) arg;
 	struct vio_dev *vdev = to_vio_dev(dev);
 
-	if (vdev->mp == (u64) arg)
-		return 1;
+	if (vdev->dev_no != query->dev_no)
+		return 0;
+	if (vdev->id != query->id)
+		return 0;
+	if (strcmp(vdev->type, query->type))
+		return 0;
 
-	return 0;
+	return 1;
 }
 
 static void vio_remove(struct mdesc_handle *hp, u64 node)
 {
+	const char *type;
+	const u64 *id, *cfg_handle;
+	u64 a;
+	struct vio_md_node_query query;
 	struct device *dev;
 
-	dev = device_find_child(&root_vdev->dev, (void *) node,
+	type = mdesc_get_property(hp, node, "device-type", NULL);
+	if (!type) {
+		type = mdesc_get_property(hp, node, "name", NULL);
+		if (!type)
+			type = mdesc_node_name(hp, node);
+	}
+
+	query.type = type;
+
+	id = mdesc_get_property(hp, node, "id", NULL);
+	cfg_handle = NULL;
+	mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
+		u64 target;
+
+		target = mdesc_arc_target(hp, a);
+		cfg_handle = mdesc_get_property(hp, target,
+						"cfg-handle", NULL);
+		if (cfg_handle)
+			break;
+	}
+
+	if (!id) {
+		query.dev_no = ~(u64)0;
+		query.id = ~(u64)0;
+	} else if (!cfg_handle) {
+		query.dev_no = *id;
+		query.id = ~(u64)0;
+	} else {
+		query.dev_no = *cfg_handle;
+		query.id = *id;
+	}
+
+	dev = device_find_child(&root_vdev->dev, &query,
 				vio_md_node_match);
 	if (dev) {
 		printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev));
 
 		device_unregister(dev);
 		put_device(dev);
+	} else {
+		if (!id)
+			printk(KERN_ERR "VIO: Removed unknown %s node.\n",
+			       type);
+		else if (!cfg_handle)
+			printk(KERN_ERR "VIO: Removed unknown %s node %llu.\n",
+			       type, *id);
+		else
			printk(KERN_ERR "VIO: Removed unknown %s node %llu-%llu.\n",
+			       type, *cfg_handle, *id);
 	}
 }
 
@@ -15,6 +15,7 @@ lib-$(CONFIG_SPARC32) += copy_user.o locks.o
 lib-$(CONFIG_SPARC64) += atomic_64.o
 lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o
 lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
+lib-$(CONFIG_SPARC64) += multi3.o
 
 lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o
 lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o

arch/sparc/lib/multi3.S (new file, 35 lines)
@@ -0,0 +1,35 @@
+#include <linux/linkage.h>
+#include <asm/export.h>
+
+	.text
+	.align	4
+ENTRY(__multi3) /* %o0 = u, %o1 = v */
+	mov	%o1, %g1
+	srl	%o3, 0, %g4
+	mulx	%g4, %g1, %o1
+	srlx	%g1, 0x20, %g3
+	mulx	%g3, %g4, %g5
+	sllx	%g5, 0x20, %o5
+	srl	%g1, 0, %g4
+	sub	%o1, %o5, %o5
+	srlx	%o5, 0x20, %o5
+	addcc	%g5, %o5, %g5
+	srlx	%o3, 0x20, %o5
+	mulx	%g4, %o5, %g4
+	mulx	%g3, %o5, %o5
+	sethi	%hi(0x80000000), %g3
+	addcc	%g5, %g4, %g5
+	srlx	%g5, 0x20, %g5
+	add	%g3, %g3, %g3
+	movcc	%xcc, %g0, %g3
+	addcc	%o5, %g5, %o5
+	sllx	%g4, 0x20, %g4
+	add	%o1, %g4, %o1
+	add	%o5, %g3, %g2
+	mulx	%g1, %o2, %g1
+	add	%g1, %g2, %g1
+	mulx	%o0, %o3, %o0
+	retl
+	 add	%g1, %o0, %o0
+ENDPROC(__multi3)
+EXPORT_SYMBOL(__multi3)
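__multi3 is the libgcc-style helper for 128-bit multiplication, which newer compilers emit calls to on sparc64; the assembly builds the 128-bit product from 64-bit mulx results on 32-bit halves. A hedged standalone C sketch of the same schoolbook split, shown one level down (64x64 -> 128 using 32-bit halves); function name and test values are invented:

	#include <stdint.h>
	#include <stdio.h>

	/* Multiply two 64-bit values into a (high, low) 128-bit pair using only
	 * 64-bit arithmetic: four partial products of 32-bit halves plus carry
	 * propagation out of the middle word. */
	static void mul_64x64_128(uint64_t u, uint64_t v, uint64_t *hi, uint64_t *lo)
	{
		uint64_t u_lo = (uint32_t)u, u_hi = u >> 32;
		uint64_t v_lo = (uint32_t)v, v_hi = v >> 32;

		uint64_t p0 = u_lo * v_lo;	/* low  x low  */
		uint64_t p1 = u_lo * v_hi;	/* low  x high */
		uint64_t p2 = u_hi * v_lo;	/* high x low  */
		uint64_t p3 = u_hi * v_hi;	/* high x high */

		/* carry out of the middle 32-bit column */
		uint64_t carry = ((p0 >> 32) + (uint32_t)p1 + (uint32_t)p2) >> 32;

		*lo = p0 + (p1 << 32) + (p2 << 32);
		*hi = p3 + (p1 >> 32) + (p2 >> 32) + carry;
	}

	int main(void)
	{
		uint64_t hi, lo;

		mul_64x64_128(0xdeadbeefcafebabeULL, 0x123456789abcdef0ULL, &hi, &lo);
		printf("%016llx%016llx\n", (unsigned long long)hi,
		       (unsigned long long)lo);
		return 0;
	}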
@@ -358,7 +358,8 @@ static int __init setup_hugepagesz(char *string)
 	}
 
 	if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) {
-		pr_warn("hugepagesz=%llu not supported by MMU.\n",
+		hugetlb_bad_size();
+		pr_err("hugepagesz=%llu not supported by MMU.\n",
 			hugepage_size);
 		goto out;
 	}
|
@ -706,10 +707,58 @@ EXPORT_SYMBOL(__flush_dcache_range);
|
||||||
|
|
||||||
/* get_new_mmu_context() uses "cache + 1". */
|
/* get_new_mmu_context() uses "cache + 1". */
|
||||||
DEFINE_SPINLOCK(ctx_alloc_lock);
|
DEFINE_SPINLOCK(ctx_alloc_lock);
|
||||||
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
|
unsigned long tlb_context_cache = CTX_FIRST_VERSION;
|
||||||
#define MAX_CTX_NR (1UL << CTX_NR_BITS)
|
#define MAX_CTX_NR (1UL << CTX_NR_BITS)
|
||||||
#define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
|
#define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
|
||||||
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
|
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
|
||||||
|
DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
|
||||||
|
|
||||||
|
static void mmu_context_wrap(void)
|
||||||
|
{
|
||||||
|
unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
|
||||||
|
unsigned long new_ver, new_ctx, old_ctx;
|
||||||
|
struct mm_struct *mm;
|
||||||
|
int cpu;
|
||||||
|
|
||||||
|
bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);
|
||||||
|
|
||||||
|
/* Reserve kernel context */
|
||||||
|
set_bit(0, mmu_context_bmap);
|
||||||
|
|
||||||
|
new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
|
||||||
|
if (unlikely(new_ver == 0))
|
||||||
|
new_ver = CTX_FIRST_VERSION;
|
||||||
|
tlb_context_cache = new_ver;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Make sure that any new mm that are added into per_cpu_secondary_mm,
|
||||||
|
* are going to go through get_new_mmu_context() path.
|
||||||
|
*/
|
||||||
|
mb();
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Updated versions to current on those CPUs that had valid secondary
|
||||||
|
* contexts
|
||||||
|
*/
|
||||||
|
for_each_online_cpu(cpu) {
|
||||||
|
/*
|
||||||
|
* If a new mm is stored after we took this mm from the array,
|
||||||
|
* it will go into get_new_mmu_context() path, because we
|
||||||
|
* already bumped the version in tlb_context_cache.
|
||||||
|
*/
|
||||||
|
mm = per_cpu(per_cpu_secondary_mm, cpu);
|
||||||
|
|
||||||
|
if (unlikely(!mm || mm == &init_mm))
|
||||||
|
continue;
|
||||||
|
|
||||||
|
old_ctx = mm->context.sparc64_ctx_val;
|
||||||
|
if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
|
||||||
|
new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
|
||||||
|
set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
|
||||||
|
mm->context.sparc64_ctx_val = new_ctx;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/* Caller does TLB context flushing on local CPU if necessary.
|
/* Caller does TLB context flushing on local CPU if necessary.
|
||||||
* The caller also ensures that CTX_VALID(mm->context) is false.
|
* The caller also ensures that CTX_VALID(mm->context) is false.
|
||||||
|
@@ -725,48 +774,30 @@ void get_new_mmu_context(struct mm_struct *mm)
 {
 	unsigned long ctx, new_ctx;
 	unsigned long orig_pgsz_bits;
-	int new_version;
 
 	spin_lock(&ctx_alloc_lock);
+retry:
+	/* wrap might have happened, test again if our context became valid */
+	if (unlikely(CTX_VALID(mm->context)))
+		goto out;
 	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
 	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
 	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
-	new_version = 0;
 	if (new_ctx >= (1 << CTX_NR_BITS)) {
 		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
 		if (new_ctx >= ctx) {
-			int i;
-			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
-				CTX_FIRST_VERSION;
-			if (new_ctx == 1)
-				new_ctx = CTX_FIRST_VERSION;
-
-			/* Don't call memset, for 16 entries that's just
-			 * plain silly...
-			 */
-			mmu_context_bmap[0] = 3;
-			mmu_context_bmap[1] = 0;
-			mmu_context_bmap[2] = 0;
-			mmu_context_bmap[3] = 0;
-			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
-				mmu_context_bmap[i + 0] = 0;
-				mmu_context_bmap[i + 1] = 0;
-				mmu_context_bmap[i + 2] = 0;
-				mmu_context_bmap[i + 3] = 0;
-			}
-			new_version = 1;
-			goto out;
+			mmu_context_wrap();
+			goto retry;
 		}
 	}
+	if (mm->context.sparc64_ctx_val)
+		cpumask_clear(mm_cpumask(mm));
 	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
 	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
-out:
 	tlb_context_cache = new_ctx;
 	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
+out:
 	spin_unlock(&ctx_alloc_lock);
-
-	if (unlikely(new_version))
-		smp_new_mmu_context_version();
 }
 
 static int numa_enabled = 1;
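The wrap logic above depends on one encoding: a single word carries a generation ("version") in the high bits and a context number in the low bits, so bumping the version invalidates every stale context at once, and CTX_VALID() is just a masked XOR against the global cache. A minimal sketch of that encoding, with invented shift/width values:

	#include <stdio.h>

	#define CTX_NR_BITS	13			/* demo value */
	#define CTX_NR_MASK	((1UL << CTX_NR_BITS) - 1)
	#define CTX_VERSION_MASK (~CTX_NR_MASK)
	#define CTX_FIRST_VERSION (1UL << CTX_NR_BITS)

	static unsigned long tlb_context_cache = CTX_FIRST_VERSION;

	/* valid iff the version bits match the current global version */
	static int ctx_valid(unsigned long ctx)
	{
		return !((ctx ^ tlb_context_cache) & CTX_VERSION_MASK);
	}

	int main(void)
	{
		unsigned long ctx = (tlb_context_cache & CTX_VERSION_MASK) | 5;

		printf("valid before wrap: %d\n", ctx_valid(ctx));	/* 1 */
		tlb_context_cache += CTX_FIRST_VERSION;	/* wrap: bump version */
		printf("valid after wrap:  %d\n", ctx_valid(ctx));	/* 0 */
		return 0;
	}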
@@ -496,7 +496,8 @@ retry_tsb_alloc:
 		extern void copy_tsb(unsigned long old_tsb_base,
 				     unsigned long old_tsb_size,
 				     unsigned long new_tsb_base,
-				     unsigned long new_tsb_size);
+				     unsigned long new_tsb_size,
+				     unsigned long page_size_shift);
 		unsigned long old_tsb_base = (unsigned long) old_tsb;
 		unsigned long new_tsb_base = (unsigned long) new_tsb;
 
@@ -504,7 +505,9 @@ retry_tsb_alloc:
 			old_tsb_base = __pa(old_tsb_base);
 			new_tsb_base = __pa(new_tsb_base);
 		}
-		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
+		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size,
+			 tsb_index == MM_TSB_BASE ?
+			 PAGE_SHIFT : REAL_HPAGE_SHIFT);
 	}
 
 	mm->context.tsb_block[tsb_index].tsb = new_tsb;
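Together with the copy_tsb assembly change earlier, the hunk above threads the page-size shift through as a runtime parameter instead of baking PAGE_SHIFT in, so huge-page TSBs rehash with the huge-page shift. A minimal model of the index computation, with demo shift values (8K base pages and 4MB real hugepages, as on sparc64):

	#include <stdio.h>

	/* nents must be a power of two for the mask to work */
	static unsigned long tsb_index(unsigned long vaddr, unsigned long shift,
				       unsigned long nents)
	{
		return (vaddr >> shift) & (nents - 1);
	}

	int main(void)
	{
		unsigned long va = 0x123456000UL;

		printf("base-page index: %lu\n", tsb_index(va, 13, 512));
		printf("huge-page index: %lu\n", tsb_index(va, 22, 512));
		return 0;
	}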
@@ -971,11 +971,6 @@ xcall_capture:
 	wr		%g0, (1 << PIL_SMP_CAPTURE), %set_softint
 	retry
 
-	.globl		xcall_new_mmu_context_version
-xcall_new_mmu_context_version:
-	wr		%g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
-	retry
-
 #ifdef CONFIG_KGDB
 	.globl		xcall_kgdb_capture
 xcall_kgdb_capture:
@@ -2776,10 +2776,6 @@ config COMPAT_FOR_U64_ALIGNMENT
 config SYSVIPC_COMPAT
 	def_bool y
 	depends on SYSVIPC
-
-config KEYS_COMPAT
-	def_bool y
-	depends on KEYS
 endif
 
 endmenu
@@ -255,6 +255,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
 		break;
 
 	case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */
+	case 11: /* GX1 with inverted Device ID */
 #ifdef CONFIG_PCI
 	{
 		u32 vendor, device;
@@ -320,7 +320,7 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax)
 }
 
 static enum ucode_state
-load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);
+load_microcode_amd(bool save, u8 family, const u8 *data, size_t size);
 
 int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
 {
@@ -338,8 +338,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
 	if (!desc.mc)
 		return -EINVAL;
 
-	ret = load_microcode_amd(smp_processor_id(), x86_family(cpuid_1_eax),
-				 desc.data, desc.size);
+	ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
 	if (ret != UCODE_OK)
 		return -EINVAL;
 
@@ -675,7 +674,7 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
 }
 
 static enum ucode_state
-load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
+load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
 {
 	enum ucode_state ret;
 
@@ -689,8 +688,8 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
 
 #ifdef CONFIG_X86_32
 	/* save BSP's matching patch for early load */
-	if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) {
-		struct ucode_patch *p = find_patch(cpu);
+	if (save) {
+		struct ucode_patch *p = find_patch(0);
 		if (p) {
 			memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
 			memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
@@ -722,11 +721,12 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
 {
 	char fw_name[36] = "amd-ucode/microcode_amd.bin";
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
+	bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
 	enum ucode_state ret = UCODE_NFOUND;
 	const struct firmware *fw;
 
 	/* reload ucode container only on the boot cpu */
-	if (!refresh_fw || c->cpu_index != boot_cpu_data.cpu_index)
+	if (!refresh_fw || !bsp)
 		return UCODE_OK;
 
 	if (c->x86 >= 0x15)
@@ -743,7 +743,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
 		goto fw_release;
 	}
 
-	ret = load_microcode_amd(cpu, c->x86, fw->data, fw->size);
+	ret = load_microcode_amd(bsp, c->x86, fw->data, fw->size);
 
  fw_release:
 	release_firmware(fw);
|
@ -619,6 +619,9 @@ int __init save_microcode_in_initrd_intel(void)
|
||||||
|
|
||||||
show_saved_mc();
|
show_saved_mc();
|
||||||
|
|
||||||
|
/* initrd is going away, clear patch ptr. */
|
||||||
|
intel_ucode_patch = NULL;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@@ -161,8 +161,8 @@ void kvm_async_pf_task_wait(u32 token)
 			 */
 			rcu_irq_exit();
 			native_safe_halt();
-			rcu_irq_enter();
 			local_irq_disable();
+			rcu_irq_enter();
 		}
 	}
 	if (!n.halted)
@@ -78,7 +78,7 @@ void __show_regs(struct pt_regs *regs, int all)
 
 	printk(KERN_DEFAULT "EIP: %pS\n", (void *)regs->ip);
 	printk(KERN_DEFAULT "EFLAGS: %08lx CPU: %d\n", regs->flags,
-		smp_processor_id());
+		raw_smp_processor_id());
 
 	printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
 		regs->ax, regs->bx, regs->cx, regs->dx);
@@ -780,19 +780,21 @@ out:
 static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
 {
 	struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
-	int j, nent = vcpu->arch.cpuid_nent;
+	struct kvm_cpuid_entry2 *ej;
+	int j = i;
+	int nent = vcpu->arch.cpuid_nent;
 
 	e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
 	/* when no next entry is found, the current entry[i] is reselected */
-	for (j = i + 1; ; j = (j + 1) % nent) {
-		struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
-		if (ej->function == e->function) {
-			ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
-			return j;
-		}
-	}
-	return 0; /* silence gcc, even though control never reaches here */
+	do {
+		j = (j + 1) % nent;
+		ej = &vcpu->arch.cpuid_entries[j];
+	} while (ej->function != e->function);
+
+	ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
+
+	return j;
 }
 
 /* find an entry with matching function, matching index (if needed), and that
  * should be read next (if it's stateful) */
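The rewrite above replaces an open-coded loop whose first probe (j = i + 1) could step past nent before the modulo wrapped; the do/while form wraps on every step and must terminate because entry i itself matches its own function. A minimal model of the scan:

	#include <stdio.h>

	/* Return the index of the next entry sharing func[i], scanning
	 * circularly from i+1; returns i itself when no other entry matches. */
	static int next_same(const int *func, int nent, int i)
	{
		int j = i;

		do {
			j = (j + 1) % nent;
		} while (func[j] != func[i]);

		return j;
	}

	int main(void)
	{
		int func[] = { 4, 7, 4, 9 };

		printf("%d\n", next_same(func, 4, 0));	/* 2: next entry with 4 */
		printf("%d\n", next_same(func, 4, 3));	/* 3: wraps back to itself */
		return 0;
	}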
@@ -1495,8 +1495,10 @@ EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
 
 static void cancel_hv_timer(struct kvm_lapic *apic)
 {
+	preempt_disable();
 	kvm_x86_ops->cancel_hv_timer(apic->vcpu);
 	apic->lapic_timer.hv_timer_in_use = false;
+	preempt_enable();
 }
 
 static bool start_hv_timer(struct kvm_lapic *apic)
@@ -1934,7 +1936,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 	for (i = 0; i < KVM_APIC_LVT_NUM; i++)
 		kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
 	apic_update_lvtt(apic);
-	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
+	if (kvm_vcpu_is_reset_bsp(vcpu) &&
+	    kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
 		kvm_lapic_set_reg(apic, APIC_LVT0,
 			     SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
 	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
@@ -3698,12 +3698,15 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
 	return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
 }
 
-static bool can_do_async_pf(struct kvm_vcpu *vcpu)
+bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
 {
 	if (unlikely(!lapic_in_kernel(vcpu) ||
 		     kvm_event_needs_reinjection(vcpu)))
 		return false;
 
+	if (is_guest_mode(vcpu))
+		return false;
+
 	return kvm_x86_ops->interrupt_allowed(vcpu);
 }
 
@@ -3719,7 +3722,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 	if (!async)
 		return false; /* *pfn has correct page already */
 
-	if (!prefault && can_do_async_pf(vcpu)) {
+	if (!prefault && kvm_can_do_async_pf(vcpu)) {
 		trace_kvm_try_async_get_page(gva, gfn);
 		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
 			trace_kvm_async_pf_doublefault(gva, gfn);

@@ -76,6 +76,7 @@ int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct);
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 			     bool accessed_dirty);
+bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
 
 static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
 {
@ -1807,7 +1807,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
|
||||||
* AMD's VMCB does not have an explicit unusable field, so emulate it
|
* AMD's VMCB does not have an explicit unusable field, so emulate it
|
||||||
* for cross vendor migration purposes by "not present"
|
* for cross vendor migration purposes by "not present"
|
||||||
*/
|
*/
|
||||||
var->unusable = !var->present || (var->type == 0);
|
var->unusable = !var->present;
|
||||||
|
|
||||||
switch (seg) {
|
switch (seg) {
|
||||||
case VCPU_SREG_TR:
|
case VCPU_SREG_TR:
|
||||||
|
@ -1840,6 +1840,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
|
||||||
*/
|
*/
|
||||||
if (var->unusable)
|
if (var->unusable)
|
||||||
var->db = 0;
|
var->db = 0;
|
||||||
|
/* This is symmetric with svm_set_segment() */
|
||||||
var->dpl = to_svm(vcpu)->vmcb->save.cpl;
|
var->dpl = to_svm(vcpu)->vmcb->save.cpl;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -1980,18 +1981,14 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
|
||||||
s->base = var->base;
|
s->base = var->base;
|
||||||
s->limit = var->limit;
|
s->limit = var->limit;
|
||||||
s->selector = var->selector;
|
s->selector = var->selector;
|
||||||
if (var->unusable)
|
|
||||||
s->attrib = 0;
|
|
||||||
else {
|
|
||||||
s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
|
s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
|
||||||
s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
|
s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
|
||||||
s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
|
s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
|
||||||
s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
|
s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
|
||||||
s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
|
s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
|
||||||
s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
|
s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
|
||||||
s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
|
s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
|
||||||
s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
|
s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* This is always accurate, except if SYSRET returned to a segment
|
* This is always accurate, except if SYSRET returned to a segment
|
||||||
|
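The change above folds the "unusable" state into the Present bit instead of zeroing the whole attribute word, so the other descriptor fields survive a save/restore round trip for cross-vendor migration. A small self-contained sketch of that encoding; the SEL_* constants here are local stand-ins for the SVM_SELECTOR_* layout, and only the fields needed to show the point are modeled:

#include <stdio.h>

/* Local stand-ins for the VMCB segment-attribute bit positions. */
#define SEL_TYPE_MASK 0x0f
#define SEL_S_SHIFT   4
#define SEL_DPL_SHIFT 5
#define SEL_P_SHIFT   7

struct seg_var { unsigned type, s, dpl, present, unusable; };

/*
 * Encode like the patched svm_set_segment(): keep every descriptor
 * field, and clear only the Present bit for an unusable segment,
 * instead of wiping the whole attribute word.
 */
static unsigned encode_attrib(const struct seg_var *v)
{
	unsigned a = v->type & SEL_TYPE_MASK;

	a |= (v->s & 1) << SEL_S_SHIFT;
	a |= (v->dpl & 3) << SEL_DPL_SHIFT;
	a |= ((v->present & 1) && !v->unusable) << SEL_P_SHIFT;
	return a;
}

int main(void)
{
	struct seg_var usable = { .type = 0xb, .s = 0, .dpl = 0,
				  .present = 1, .unusable = 0 };
	struct seg_var unusable = usable;

	unusable.unusable = 1;
	printf("usable:   %#x\n", encode_attrib(&usable));   /* P bit set */
	printf("unusable: %#x\n", encode_attrib(&unusable)); /* P clear, type kept */
	return 0;
}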
@@ -2000,7 +1997,8 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
 	 * would entail passing the CPL to userspace and back.
 	 */
 	if (seg == VCPU_SREG_SS)
-		svm->vmcb->save.cpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
+		/* This is symmetric with svm_get_segment() */
+		svm->vmcb->save.cpl = (var->dpl & 3);
 
 	mark_dirty(svm->vmcb, VMCB_SEG);
 }

@@ -2425,7 +2425,7 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr)
 	if (!(vmcs12->exception_bitmap & (1u << nr)))
 		return 0;
 
-	nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason,
+	nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
 			  vmcs_read32(VM_EXIT_INTR_INFO),
 			  vmcs_readl(EXIT_QUALIFICATION));
 	return 1;

@@ -6914,97 +6914,21 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-/*
- * This function performs the various checks including
- * - if it's 4KB aligned
- * - No bits beyond the physical address width are set
- * - Returns 0 on success or else 1
- * (Intel SDM Section 30.3)
- */
-static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
-				  gpa_t *vmpointer)
+static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
 {
 	gva_t gva;
-	gpa_t vmptr;
 	struct x86_exception e;
-	struct page *page;
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	int maxphyaddr = cpuid_maxphyaddr(vcpu);
 
 	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
 			vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
 		return 1;
 
-	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
-				sizeof(vmptr), &e)) {
+	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, vmpointer,
+				sizeof(*vmpointer), &e)) {
 		kvm_inject_page_fault(vcpu, &e);
 		return 1;
 	}
 
-	switch (exit_reason) {
-	case EXIT_REASON_VMON:
-		/*
-		 * SDM 3: 24.11.5
-		 * The first 4 bytes of VMXON region contain the supported
-		 * VMCS revision identifier
-		 *
-		 * Note - IA32_VMX_BASIC[48] will never be 1
-		 * for the nested case;
-		 * which replaces physical address width with 32
-		 *
-		 */
-		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
-			nested_vmx_failInvalid(vcpu);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-
-		page = nested_get_page(vcpu, vmptr);
-		if (page == NULL) {
-			nested_vmx_failInvalid(vcpu);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-		if (*(u32 *)kmap(page) != VMCS12_REVISION) {
-			kunmap(page);
-			nested_release_page_clean(page);
-			nested_vmx_failInvalid(vcpu);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-		kunmap(page);
-		nested_release_page_clean(page);
-		vmx->nested.vmxon_ptr = vmptr;
-		break;
-	case EXIT_REASON_VMCLEAR:
-		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
-			nested_vmx_failValid(vcpu,
-					     VMXERR_VMCLEAR_INVALID_ADDRESS);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-
-		if (vmptr == vmx->nested.vmxon_ptr) {
-			nested_vmx_failValid(vcpu,
-					     VMXERR_VMCLEAR_VMXON_POINTER);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-		break;
-	case EXIT_REASON_VMPTRLD:
-		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
-			nested_vmx_failValid(vcpu,
-					     VMXERR_VMPTRLD_INVALID_ADDRESS);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-
-		if (vmptr == vmx->nested.vmxon_ptr) {
-			nested_vmx_failValid(vcpu,
-					     VMXERR_VMPTRLD_VMXON_POINTER);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-		break;
-	default:
-		return 1; /* shouldn't happen */
-	}
-
-	if (vmpointer)
-		*vmpointer = vmptr;
 	return 0;
 }

@@ -7066,6 +6990,8 @@ out_msr_bitmap:
 static int handle_vmon(struct kvm_vcpu *vcpu)
 {
 	int ret;
+	gpa_t vmptr;
+	struct page *page;
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
 		| FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;

@@ -7095,9 +7021,37 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 		return 1;
 	}
 
-	if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL))
+	if (nested_vmx_get_vmptr(vcpu, &vmptr))
 		return 1;
 
+	/*
+	 * SDM 3: 24.11.5
+	 * The first 4 bytes of VMXON region contain the supported
+	 * VMCS revision identifier
+	 *
+	 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case;
+	 * which replaces physical address width with 32
+	 */
+	if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+		nested_vmx_failInvalid(vcpu);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+
+	page = nested_get_page(vcpu, vmptr);
+	if (page == NULL) {
+		nested_vmx_failInvalid(vcpu);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+	if (*(u32 *)kmap(page) != VMCS12_REVISION) {
+		kunmap(page);
+		nested_release_page_clean(page);
+		nested_vmx_failInvalid(vcpu);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+	kunmap(page);
+	nested_release_page_clean(page);
+
+	vmx->nested.vmxon_ptr = vmptr;
 	ret = enter_vmx_operation(vcpu);
 	if (ret)
 		return ret;
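Both validity checks in the new call sites reduce to one expression: the pointer must be 4 KiB aligned and must not set any bit at or above the guest's reported physical-address width. A standalone sketch of the check; the PAGE_SIZE definition and the sample maxphyaddr value are assumptions for the demo, not taken from the kernel:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/*
 * Mirror of the condition in the patched handle_vmon()/handle_vmclear():
 * reject a VM pointer that is not page-aligned or that has bits set at
 * or above the CPU's physical address width.
 */
static bool vmptr_ok(uint64_t vmptr, int maxphyaddr)
{
	if (vmptr & (PAGE_SIZE - 1))	/* !PAGE_ALIGNED(vmptr) */
		return false;
	if (vmptr >> maxphyaddr)	/* bits beyond the physical width */
		return false;
	return true;
}

int main(void)
{
	int maxphyaddr = 36;	/* sample CPUID-reported width */

	printf("%d\n", vmptr_ok(0x12345000, maxphyaddr)); /* 1: valid */
	printf("%d\n", vmptr_ok(0x12345010, maxphyaddr)); /* 0: unaligned */
	printf("%d\n", vmptr_ok(1ull << 40, maxphyaddr)); /* 0: too wide */
	return 0;
}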
@@ -7213,9 +7167,19 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 
-	if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr))
+	if (nested_vmx_get_vmptr(vcpu, &vmptr))
 		return 1;
 
+	if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+		nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+
+	if (vmptr == vmx->nested.vmxon_ptr) {
+		nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+
 	if (vmptr == vmx->nested.current_vmptr)
 		nested_release_vmcs12(vmx);

@@ -7545,9 +7509,19 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 
-	if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMPTRLD, &vmptr))
+	if (nested_vmx_get_vmptr(vcpu, &vmptr))
 		return 1;
 
+	if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+		nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+
+	if (vmptr == vmx->nested.vmxon_ptr) {
+		nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+
 	if (vmx->nested.current_vmptr != vmptr) {
 		struct vmcs12 *new_vmcs12;
 		struct page *page;

@@ -7913,11 +7887,13 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
 {
 	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
 	int cr = exit_qualification & 15;
-	int reg = (exit_qualification >> 8) & 15;
-	unsigned long val = kvm_register_readl(vcpu, reg);
+	int reg;
+	unsigned long val;
 
 	switch ((exit_qualification >> 4) & 3) {
 	case 0: /* mov to cr */
+		reg = (exit_qualification >> 8) & 15;
+		val = kvm_register_readl(vcpu, reg);
 		switch (cr) {
 		case 0:
 			if (vmcs12->cr0_guest_host_mask &

@@ -7972,6 +7948,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
 		 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
 		 * cr0. Other attempted changes are ignored, with no exit.
 		 */
+		val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
 		if (vmcs12->cr0_guest_host_mask & 0xe &
 		    (val ^ vmcs12->cr0_read_shadow))
 			return true;

@@ -8394,10 +8394,13 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.pv.pv_unhalted)
 		return true;
 
-	if (atomic_read(&vcpu->arch.nmi_queued))
+	if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
+	    (vcpu->arch.nmi_pending &&
+	     kvm_x86_ops->nmi_allowed(vcpu)))
 		return true;
 
-	if (kvm_test_request(KVM_REQ_SMI, vcpu))
+	if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
+	    (vcpu->arch.smi_pending && !is_smm(vcpu)))
 		return true;
 
 	if (kvm_arch_interrupt_allowed(vcpu) &&

@@ -8604,8 +8607,7 @@ bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
 	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
 		return true;
 	else
-		return !kvm_event_needs_reinjection(vcpu) &&
-			kvm_x86_ops->interrupt_allowed(vcpu);
+		return kvm_can_do_async_pf(vcpu);
 }
 
 void kvm_arch_start_assignment(struct kvm *kvm)

@@ -65,11 +65,9 @@ static int __init nopat(char *str)
 }
 early_param("nopat", nopat);
 
-static bool __read_mostly __pat_initialized = false;
-
 bool pat_enabled(void)
 {
-	return __pat_initialized;
+	return !!__pat_enabled;
 }
 EXPORT_SYMBOL_GPL(pat_enabled);

@@ -227,14 +225,13 @@ static void pat_bsp_init(u64 pat)
 	}
 
 	wrmsrl(MSR_IA32_CR_PAT, pat);
-	__pat_initialized = true;
 
 	__init_cache_modes(pat);
 }
 
 static void pat_ap_init(u64 pat)
 {
-	if (!this_cpu_has(X86_FEATURE_PAT)) {
+	if (!boot_cpu_has(X86_FEATURE_PAT)) {
 		/*
 		 * If this happens we are on a secondary CPU, but switched to
 		 * PAT on the boot CPU. We have no way to undo PAT.

@@ -309,7 +306,7 @@ void pat_init(void)
 	u64 pat;
 	struct cpuinfo_x86 *c = &boot_cpu_data;
 
-	if (!__pat_enabled) {
+	if (!pat_enabled()) {
 		init_cache_modes();
 		return;
 	}

@@ -828,9 +828,11 @@ static void __init kexec_enter_virtual_mode(void)
 
 	/*
 	 * We don't do virtual mode, since we don't do runtime services, on
-	 * non-native EFI
+	 * non-native EFI. With efi=old_map, we don't do runtime services in
+	 * kexec kernel because in the initial boot something else might
+	 * have been mapped at these virtual addresses.
 	 */
-	if (!efi_is_native()) {
+	if (!efi_is_native() || efi_enabled(EFI_OLD_MEMMAP)) {
 		efi_memmap_unmap();
 		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
 		return;

@@ -71,11 +71,13 @@ static void __init early_code_mapping_set_exec(int executable)
 
 pgd_t * __init efi_call_phys_prolog(void)
 {
-	unsigned long vaddress;
-	pgd_t *save_pgd;
+	unsigned long vaddr, addr_pgd, addr_p4d, addr_pud;
+	pgd_t *save_pgd, *pgd_k, *pgd_efi;
+	p4d_t *p4d, *p4d_k, *p4d_efi;
+	pud_t *pud;
 
 	int pgd;
-	int n_pgds;
+	int n_pgds, i, j;
 
 	if (!efi_enabled(EFI_OLD_MEMMAP)) {
 		save_pgd = (pgd_t *)read_cr3();

@@ -88,10 +90,49 @@ pgd_t * __init efi_call_phys_prolog(void)
 	n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
 	save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL);
 
+	/*
+	 * Build 1:1 identity mapping for efi=old_map usage. Note that
+	 * PAGE_OFFSET is PGDIR_SIZE aligned when KASLR is disabled, while
+	 * it is PUD_SIZE ALIGNED with KASLR enabled. So for a given physical
+	 * address X, the pud_index(X) != pud_index(__va(X)), we can only copy
+	 * PUD entry of __va(X) to fill in pud entry of X to build 1:1 mapping.
+	 * This means here we can only reuse the PMD tables of the direct mapping.
+	 */
 	for (pgd = 0; pgd < n_pgds; pgd++) {
-		save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE);
-		vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
-		set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
+		addr_pgd = (unsigned long)(pgd * PGDIR_SIZE);
+		vaddr = (unsigned long)__va(pgd * PGDIR_SIZE);
+		pgd_efi = pgd_offset_k(addr_pgd);
+		save_pgd[pgd] = *pgd_efi;
+
+		p4d = p4d_alloc(&init_mm, pgd_efi, addr_pgd);
+		if (!p4d) {
+			pr_err("Failed to allocate p4d table!\n");
+			goto out;
+		}
+
+		for (i = 0; i < PTRS_PER_P4D; i++) {
+			addr_p4d = addr_pgd + i * P4D_SIZE;
+			p4d_efi = p4d + p4d_index(addr_p4d);
+
+			pud = pud_alloc(&init_mm, p4d_efi, addr_p4d);
+			if (!pud) {
+				pr_err("Failed to allocate pud table!\n");
+				goto out;
+			}
+
+			for (j = 0; j < PTRS_PER_PUD; j++) {
+				addr_pud = addr_p4d + j * PUD_SIZE;
+
+				if (addr_pud > (max_pfn << PAGE_SHIFT))
+					break;
+
+				vaddr = (unsigned long)__va(addr_pud);
+
+				pgd_k = pgd_offset_k(vaddr);
+				p4d_k = p4d_offset(pgd_k, vaddr);
+				pud[j] = *pud_offset(p4d_k, vaddr);
+			}
+		}
 	}
 out:
 	__flush_tlb_all();
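The comment in the hunk above turns on one arithmetic fact: with a direct-map base that is PUD-aligned but not PGD-aligned (the KASLR case), pud_index(X) and pud_index(__va(X)) differ, so whole PGD or P4D entries cannot simply be copied to build the 1:1 map. A small sketch of the index arithmetic; the KASLR base value here is hypothetical and the geometry is the usual 4-level x86-64 layout:

#include <stdint.h>
#include <stdio.h>

/* 4-level x86-64 paging geometry. */
#define PUD_SHIFT    30		/* each PUD entry maps 1 GiB */
#define PTRS_PER_PUD 512

static unsigned pud_index(uint64_t addr)
{
	return (addr >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

int main(void)
{
	/* Hypothetical KASLR direct-map base: 1 GiB- but not 512 GiB-aligned. */
	uint64_t page_offset = 0xffff888040000000ull;
	uint64_t phys = 0x80000000ull;		/* some physical address X */
	uint64_t virt = page_offset + phys;	/* __va(X) */

	/*
	 * The two indices differ, so PGD/P4D entries cannot be reused
	 * wholesale; only the PUD entries of __va(X) can be copied into
	 * the slots for X, which is what the nested loops above do.
	 */
	printf("pud_index(X)       = %u\n", pud_index(phys));
	printf("pud_index(__va(X)) = %u\n", pud_index(virt));
	return 0;
}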
@@ -104,8 +145,11 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
 	/*
 	 * After the lock is released, the original page table is restored.
 	 */
-	int pgd_idx;
+	int pgd_idx, i;
 	int nr_pgds;
+	pgd_t *pgd;
+	p4d_t *p4d;
+	pud_t *pud;
 
 	if (!efi_enabled(EFI_OLD_MEMMAP)) {
 		write_cr3((unsigned long)save_pgd);

@@ -115,9 +159,28 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
 
 	nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
 
-	for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++)
+	for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) {
+		pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
 		set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);
+
+		if (!(pgd_val(*pgd) & _PAGE_PRESENT))
+			continue;
+
+		for (i = 0; i < PTRS_PER_P4D; i++) {
+			p4d = p4d_offset(pgd,
+					 pgd_idx * PGDIR_SIZE + i * P4D_SIZE);
+
+			if (!(p4d_val(*p4d) & _PAGE_PRESENT))
+				continue;
+
+			pud = (pud_t *)p4d_page_vaddr(*p4d);
+			pud_free(&init_mm, pud);
+		}
+
+		p4d = (p4d_t *)pgd_page_vaddr(*pgd);
+		p4d_free(&init_mm, p4d);
+	}
+
 	kfree(save_pgd);
 
 	__flush_tlb_all();

@@ -360,6 +360,9 @@ void __init efi_free_boot_services(void)
 		free_bootmem_late(start, size);
 	}
 
+	if (!num_entries)
+		return;
+
 	new_size = efi.memmap.desc_size * num_entries;
 	new_phys = efi_memmap_alloc(num_entries);
 	if (!new_phys) {

@@ -52,7 +52,7 @@ BFQG_FLAG_FNS(idling)
 BFQG_FLAG_FNS(empty)
 #undef BFQG_FLAG_FNS
 
-/* This should be called with the queue_lock held. */
+/* This should be called with the scheduler lock held. */
 static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
 {
 	unsigned long long now;

@@ -67,7 +67,7 @@ static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
 	bfqg_stats_clear_waiting(stats);
 }
 
-/* This should be called with the queue_lock held. */
+/* This should be called with the scheduler lock held. */
 static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
 						 struct bfq_group *curr_bfqg)
 {

@@ -81,7 +81,7 @@ static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
 	bfqg_stats_mark_waiting(stats);
 }
 
-/* This should be called with the queue_lock held. */
+/* This should be called with the scheduler lock held. */
 static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
 {
 	unsigned long long now;

@@ -203,12 +203,30 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
 
 static void bfqg_get(struct bfq_group *bfqg)
 {
-	return blkg_get(bfqg_to_blkg(bfqg));
+	bfqg->ref++;
 }
 
 void bfqg_put(struct bfq_group *bfqg)
 {
-	return blkg_put(bfqg_to_blkg(bfqg));
+	bfqg->ref--;
+
+	if (bfqg->ref == 0)
+		kfree(bfqg);
+}
+
+static void bfqg_and_blkg_get(struct bfq_group *bfqg)
+{
+	/* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
+	bfqg_get(bfqg);
+
+	blkg_get(bfqg_to_blkg(bfqg));
+}
+
+void bfqg_and_blkg_put(struct bfq_group *bfqg)
+{
+	bfqg_put(bfqg);
+
+	blkg_put(bfqg_to_blkg(bfqg));
 }
 
 void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
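The split above gives a bfq_group two lifetimes: blkg_get()/blkg_put() pin the blk-cgroup side, while the new private counter keeps the bfq_group itself alive until the last holder drops it, so the group can outlive its blkg. A toy user-space model of that private counter; calloc/free stand in for the kernel allocators, and the initial reference models the one taken in bfq_pd_alloc:

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for struct bfq_group: only the private refcount. */
struct group { int ref; };

static struct group *group_alloc(void)
{
	struct group *g = calloc(1, sizeof(*g));

	g->ref = 1;	/* reference held by the owning blkg (bfq_pd_alloc) */
	return g;
}

static void group_get(struct group *g) { g->ref++; }

static void group_put(struct group *g)
{
	if (--g->ref == 0) {
		printf("freeing group\n");
		free(g);
	}
}

int main(void)
{
	struct group *g = group_alloc();

	group_get(g);	/* a queue pins the group (bfqg_and_blkg_get) */
	group_put(g);	/* blkg_free -> bfq_pd_free: the blkg's ref drops */
	group_put(g);	/* last queue goes away: only now is the group freed */
	return 0;
}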
@@ -312,7 +330,11 @@ void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
 	if (bfqq) {
 		bfqq->ioprio = bfqq->new_ioprio;
 		bfqq->ioprio_class = bfqq->new_ioprio_class;
-		bfqg_get(bfqg);
+		/*
+		 * Make sure that bfqg and its associated blkg do not
+		 * disappear before entity.
+		 */
+		bfqg_and_blkg_get(bfqg);
 	}
 	entity->parent = bfqg->my_entity; /* NULL for root group */
 	entity->sched_data = &bfqg->sched_data;

@@ -399,6 +421,8 @@ struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
 		return NULL;
 	}
 
+	/* see comments in bfq_bic_update_cgroup for why refcounting */
+	bfqg_get(bfqg);
 	return &bfqg->pd;
 }

@@ -426,7 +450,7 @@ void bfq_pd_free(struct blkg_policy_data *pd)
 	struct bfq_group *bfqg = pd_to_bfqg(pd);
 
 	bfqg_stats_exit(&bfqg->stats);
-	return kfree(bfqg);
+	bfqg_put(bfqg);
 }
 
 void bfq_pd_reset_stats(struct blkg_policy_data *pd)

@@ -496,9 +520,10 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
  * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
  * it on the new one. Avoid putting the entity on the old group idle tree.
  *
- * Must be called under the queue lock; the cgroup owning @bfqg must
- * not disappear (by now this just means that we are called under
- * rcu_read_lock()).
+ * Must be called under the scheduler lock, to make sure that the blkg
+ * owning @bfqg does not disappear (see comments in
+ * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
+ * objects).
  */
 void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 		   struct bfq_group *bfqg)

@@ -519,16 +544,12 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 		bfq_deactivate_bfqq(bfqd, bfqq, false, false);
 	else if (entity->on_st)
 		bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
-	bfqg_put(bfqq_group(bfqq));
+	bfqg_and_blkg_put(bfqq_group(bfqq));
 
-	/*
-	 * Here we use a reference to bfqg. We don't need a refcounter
-	 * as the cgroup reference will not be dropped, so that its
-	 * destroy() callback will not be invoked.
-	 */
 	entity->parent = bfqg->my_entity;
 	entity->sched_data = &bfqg->sched_data;
-	bfqg_get(bfqg);
+	/* pin down bfqg and its associated blkg */
+	bfqg_and_blkg_get(bfqg);
 
 	if (bfq_bfqq_busy(bfqq)) {
 		bfq_pos_tree_add_move(bfqd, bfqq);

@@ -545,8 +566,9 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  * @bic: the bic to move.
  * @blkcg: the blk-cgroup to move to.
  *
- * Move bic to blkcg, assuming that bfqd->queue is locked; the caller
- * has to make sure that the reference to cgroup is valid across the call.
+ * Move bic to blkcg, assuming that bfqd->lock is held; which makes
+ * sure that the reference to cgroup is valid across the call (see
+ * comments in bfq_bic_update_cgroup on this issue)
  *
  * NOTE: an alternative approach might have been to store the current
  * cgroup in bfqq and getting a reference to it, reducing the lookup

@@ -604,6 +626,57 @@ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
 		goto out;
 
 	bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
+	/*
+	 * Update blkg_path for bfq_log_* functions. We cache this
+	 * path, and update it here, for the following
+	 * reasons. Operations on blkg objects in blk-cgroup are
+	 * protected with the request_queue lock, and not with the
+	 * lock that protects the instances of this scheduler
+	 * (bfqd->lock). This exposes BFQ to the following sort of
+	 * race.
+	 *
+	 * The blkg_lookup performed in bfq_get_queue, protected
+	 * through rcu, may happen to return the address of a copy of
+	 * the original blkg. If this is the case, then the
+	 * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
+	 * the blkg, is useless: it does not prevent blk-cgroup code
+	 * from destroying both the original blkg and all objects
+	 * directly or indirectly referred by the copy of the
+	 * blkg.
+	 *
+	 * On the bright side, destroy operations on a blkg invoke, as
+	 * a first step, hooks of the scheduler associated with the
+	 * blkg. And these hooks are executed with bfqd->lock held for
+	 * BFQ. As a consequence, for any blkg associated with the
+	 * request queue this instance of the scheduler is attached
+	 * to, we are guaranteed that such a blkg is not destroyed, and
+	 * that all the pointers it contains are consistent, while we
+	 * are holding bfqd->lock. A blkg_lookup performed with
+	 * bfqd->lock held then returns a fully consistent blkg, which
+	 * remains consistent until this lock is held.
+	 *
+	 * Thanks to the last fact, and to the fact that: (1) bfqg has
+	 * been obtained through a blkg_lookup in the above
+	 * assignment, and (2) bfqd->lock is being held, here we can
+	 * safely use the policy data for the involved blkg (i.e., the
+	 * field bfqg->pd) to get to the blkg associated with bfqg,
+	 * and then we can safely use any field of blkg. After we
+	 * release bfqd->lock, even just getting blkg through this
+	 * bfqg may cause dangling references to be traversed, as
+	 * bfqg->pd may not exist any more.
+	 *
+	 * In view of the above facts, here we cache, in the bfqg, any
+	 * blkg data we may need for this bic, and for its associated
+	 * bfq_queue. As of now, we need to cache only the path of the
+	 * blkg, which is used in the bfq_log_* functions.
+	 *
+	 * Finally, note that bfqg itself needs to be protected from
+	 * destruction on the blkg_free of the original blkg (which
+	 * invokes bfq_pd_free). We use an additional private
+	 * refcounter for bfqg, to let it disappear only after no
+	 * bfq_queue refers to it any longer.
+	 */
+	blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
 	bic->blkcg_serial_nr = serial_nr;
 out:
 	rcu_read_unlock();
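The pattern the long comment above describes is snapshot-under-lock: copy the one field you will need later (the blkg path) while the lock guarantees the source object is consistent, and afterwards only ever touch the private copy. A minimal sketch of that idea, with a pthread mutex standing in for bfqd->lock; the types and path string are illustrative:

#include <pthread.h>
#include <stdio.h>

/* Toy blkg/bfqg pair: the path lives in the blkg, which may go away. */
struct blkg { char path[128]; };
struct bfqg {
	struct blkg *blkg;	/* may dangle once the lock is dropped */
	char blkg_path[128];	/* private cached copy, always safe */
};

static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Under the scheduler lock the blkg is guaranteed consistent, so this
 * is the one safe point at which to snapshot the path into the bfqg.
 */
static void update_cgroup(struct bfqg *g)
{
	pthread_mutex_lock(&sched_lock);
	snprintf(g->blkg_path, sizeof(g->blkg_path), "%s", g->blkg->path);
	pthread_mutex_unlock(&sched_lock);
}

int main(void)
{
	struct blkg b = { "/sys/fs/cgroup/blkio/test" };
	struct bfqg g = { .blkg = &b };

	update_cgroup(&g);
	g.blkg = NULL;	/* original blkg destroyed: cached copy still usable */
	printf("%s\n", g.blkg_path);
	return 0;
}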
@@ -640,8 +713,6 @@ static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
  * @bfqd: the device data structure with the root group.
  * @bfqg: the group to move from.
  * @st: the service tree with the entities.
- *
- * Needs queue_lock to be taken and reference to be valid over the call.
  */
 static void bfq_reparent_active_entities(struct bfq_data *bfqd,
 					 struct bfq_group *bfqg,

@@ -692,8 +763,7 @@ void bfq_pd_offline(struct blkg_policy_data *pd)
 		/*
 		 * The idle tree may still contain bfq_queues belonging
 		 * to exited task because they never migrated to a different
-		 * cgroup from the one being destroyed now. No one else
-		 * can access them so it's safe to act without any lock.
+		 * cgroup from the one being destroyed now.
 		 */
 		bfq_flush_idle_tree(st);

@@ -3665,7 +3665,7 @@ void bfq_put_queue(struct bfq_queue *bfqq)
 
 	kmem_cache_free(bfq_pool, bfqq);
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
-	bfqg_put(bfqg);
+	bfqg_and_blkg_put(bfqg);
 #endif
 }

@@ -759,6 +759,12 @@ struct bfq_group {
 	/* must be the first member */
 	struct blkg_policy_data pd;
 
+	/* cached path for this blkg (see comments in bfq_bic_update_cgroup) */
+	char blkg_path[128];
+
+	/* reference counter (see comments in bfq_bic_update_cgroup) */
+	int ref;
+
 	struct bfq_entity entity;
 	struct bfq_sched_data sched_data;

@@ -838,7 +844,7 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
 struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
 struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
 struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
-void bfqg_put(struct bfq_group *bfqg);
+void bfqg_and_blkg_put(struct bfq_group *bfqg);
 
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
 extern struct cftype bfq_blkcg_legacy_files[];

@@ -910,20 +916,13 @@ void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq);
 struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
 
 #define bfq_log_bfqq(bfqd, bfqq, fmt, args...)	do {			\
-	char __pbuf[128];						\
-									\
-	blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \
 	blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, (bfqq)->pid,\
 			bfq_bfqq_sync((bfqq)) ? 'S' : 'A',		\
-			__pbuf, ##args);				\
+			bfqq_group(bfqq)->blkg_path, ##args);		\
 } while (0)
 
-#define bfq_log_bfqg(bfqd, bfqg, fmt, args...)	do {			\
-	char __pbuf[128];						\
-									\
-	blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf));		\
-	blk_add_trace_msg((bfqd)->queue, "%s " fmt, __pbuf, ##args);	\
-} while (0)
+#define bfq_log_bfqg(bfqd, bfqg, fmt, args...)	\
+	blk_add_trace_msg((bfqd)->queue, "%s " fmt, (bfqg)->blkg_path, ##args)
 
 #else /* CONFIG_BFQ_GROUP_IOSCHED */

@@ -175,6 +175,9 @@ bool bio_integrity_enabled(struct bio *bio)
 	if (bio_op(bio) != REQ_OP_READ && bio_op(bio) != REQ_OP_WRITE)
 		return false;
 
+	if (!bio_sectors(bio))
+		return false;
+
 	/* Already protected? */
 	if (bio_integrity(bio))
 		return false;

@@ -74,7 +74,7 @@ static void blkg_free(struct blkcg_gq *blkg)
 		blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
 
 	if (blkg->blkcg != &blkcg_root)
-		blk_exit_rl(&blkg->rl);
+		blk_exit_rl(blkg->q, &blkg->rl);
 
 	blkg_rwstat_exit(&blkg->stat_ios);
 	blkg_rwstat_exit(&blkg->stat_bytes);

@@ -648,13 +648,19 @@ int blk_init_rl(struct request_list *rl, struct request_queue *q,
 	if (!rl->rq_pool)
 		return -ENOMEM;
 
+	if (rl != &q->root_rl)
+		WARN_ON_ONCE(!blk_get_queue(q));
+
 	return 0;
 }
 
-void blk_exit_rl(struct request_list *rl)
+void blk_exit_rl(struct request_queue *q, struct request_list *rl)
 {
-	if (rl->rq_pool)
+	if (rl->rq_pool) {
 		mempool_destroy(rl->rq_pool);
+		if (rl != &q->root_rl)
+			blk_put_queue(q);
+	}
 }
 
 struct request_queue *blk_alloc_queue(gfp_t gfp_mask)

@@ -1461,22 +1461,28 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
 	return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
 }
 
-static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
-				      bool may_sleep)
+static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+					struct request *rq,
+					blk_qc_t *cookie, bool may_sleep)
 {
 	struct request_queue *q = rq->q;
 	struct blk_mq_queue_data bd = {
 		.rq = rq,
 		.last = true,
 	};
-	struct blk_mq_hw_ctx *hctx;
 	blk_qc_t new_cookie;
 	int ret;
+	bool run_queue = true;
 
+	if (blk_mq_hctx_stopped(hctx)) {
+		run_queue = false;
+		goto insert;
+	}
+
 	if (q->elevator)
 		goto insert;
 
-	if (!blk_mq_get_driver_tag(rq, &hctx, false))
+	if (!blk_mq_get_driver_tag(rq, NULL, false))
 		goto insert;
 
 	new_cookie = request_to_qc_t(hctx, rq);

@@ -1500,7 +1506,7 @@ static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
 
 	__blk_mq_requeue_request(rq);
 insert:
-	blk_mq_sched_insert_request(rq, false, true, false, may_sleep);
+	blk_mq_sched_insert_request(rq, false, run_queue, false, may_sleep);
 }
 
 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,

@@ -1508,7 +1514,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 {
 	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
 		rcu_read_lock();
-		__blk_mq_try_issue_directly(rq, cookie, false);
+		__blk_mq_try_issue_directly(hctx, rq, cookie, false);
 		rcu_read_unlock();
 	} else {
 		unsigned int srcu_idx;

@@ -1516,7 +1522,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 		might_sleep();
 
 		srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
-		__blk_mq_try_issue_directly(rq, cookie, true);
+		__blk_mq_try_issue_directly(hctx, rq, cookie, true);
 		srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
 	}
 }

@@ -1619,9 +1625,12 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 		blk_mq_put_ctx(data.ctx);
 
-		if (same_queue_rq)
+		if (same_queue_rq) {
+			data.hctx = blk_mq_map_queue(q,
+					same_queue_rq->mq_ctx->cpu);
 			blk_mq_try_issue_directly(data.hctx, same_queue_rq,
 					&cookie);
+		}
 	} else if (q->nr_hw_queues > 1 && is_sync) {
 		blk_mq_put_ctx(data.ctx);
 		blk_mq_bio_to_request(rq, bio);

@@ -2641,7 +2650,8 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 	return ret;
 }
 
-void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
+static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+					 int nr_hw_queues)
 {
 	struct request_queue *q;

@@ -2665,6 +2675,13 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
 	list_for_each_entry(q, &set->tag_list, tag_set_list)
 		blk_mq_unfreeze_queue(q);
 }
+
+void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
+{
+	mutex_lock(&set->tag_list_lock);
+	__blk_mq_update_nr_hw_queues(set, nr_hw_queues);
+	mutex_unlock(&set->tag_list_lock);
+}
 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
 
 /* Enable polling stats and return whether they were already enabled. */
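The last refactor above is the usual locked-wrapper split: the public entry point takes tag_list_lock itself, while the double-underscore helper assumes the lock is already held and can be reused by internal callers. A compact sketch of the pattern, with pthreads as a stand-in for the kernel mutex and a toy tag set:

#include <pthread.h>
#include <stdio.h>

/* Toy tag set: just the lock and the value the helper updates. */
struct tag_set {
	pthread_mutex_t lock;
	int nr_hw_queues;
};

/* Internal helper: the caller must already hold set->lock. */
static void __update_nr_hw_queues(struct tag_set *set, int nr)
{
	set->nr_hw_queues = nr;
}

/* Public entry point takes the lock itself, mirroring the patched API. */
void update_nr_hw_queues(struct tag_set *set, int nr)
{
	pthread_mutex_lock(&set->lock);
	__update_nr_hw_queues(set, nr);
	pthread_mutex_unlock(&set->lock);
}

int main(void)
{
	struct tag_set set = { PTHREAD_MUTEX_INITIALIZER, 1 };

	update_nr_hw_queues(&set, 4);
	printf("%d\n", set.nr_hw_queues);	/* 4 */
	return 0;
}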
@@ -809,7 +809,7 @@ static void blk_release_queue(struct kobject *kobj)
 
 	blk_free_queue_stats(q->stats);
 
-	blk_exit_rl(&q->root_rl);
+	blk_exit_rl(q, &q->root_rl);
 
 	if (q->queue_tags)
 		__blk_queue_free_tags(q);

@@ -27,6 +27,13 @@ static int throtl_quantum = 32;
 #define MIN_THROTL_IOPS (10)
 #define DFL_LATENCY_TARGET (-1L)
 #define DFL_IDLE_THRESHOLD (0)
+#define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
+#define LATENCY_FILTERED_SSD (0)
+/*
+ * For HD, very small latency comes from sequential IO. Such IO is helpless to
+ * help determine if its IO is impacted by others, hence we ignore the IO
+ */
+#define LATENCY_FILTERED_HD (1000L) /* 1ms */
 
 #define SKIP_LATENCY (((u64)1) << BLK_STAT_RES_SHIFT)

@@ -212,6 +219,7 @@ struct throtl_data
 	struct avg_latency_bucket avg_buckets[LATENCY_BUCKET_SIZE];
 	struct latency_bucket __percpu *latency_buckets;
 	unsigned long last_calculate_time;
+	unsigned long filtered_latency;
 
 	bool track_bio_latency;
 };

@@ -698,7 +706,7 @@ static void throtl_dequeue_tg(struct throtl_grp *tg)
 static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
 					  unsigned long expires)
 {
-	unsigned long max_expire = jiffies + 8 * sq_to_tg(sq)->td->throtl_slice;
+	unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;
 
 	/*
 	 * Since we are adjusting the throttle limit dynamically, the sleep

@@ -2281,7 +2289,7 @@ void blk_throtl_bio_endio(struct bio *bio)
 	throtl_track_latency(tg->td, blk_stat_size(&bio->bi_issue_stat),
 		bio_op(bio), lat);
 
-	if (tg->latency_target) {
+	if (tg->latency_target && lat >= tg->td->filtered_latency) {
 		int bucket;
 		unsigned int threshold;

@@ -2417,14 +2425,20 @@ void blk_throtl_exit(struct request_queue *q)
 void blk_throtl_register_queue(struct request_queue *q)
 {
 	struct throtl_data *td;
+	int i;
 
 	td = q->td;
 	BUG_ON(!td);
 
-	if (blk_queue_nonrot(q))
+	if (blk_queue_nonrot(q)) {
 		td->throtl_slice = DFL_THROTL_SLICE_SSD;
-	else
+		td->filtered_latency = LATENCY_FILTERED_SSD;
+	} else {
 		td->throtl_slice = DFL_THROTL_SLICE_HD;
+		td->filtered_latency = LATENCY_FILTERED_HD;
+		for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
+			td->avg_buckets[i].latency = DFL_HD_BASELINE_LATENCY;
+	}
 #ifndef CONFIG_BLK_DEV_THROTTLING_LOW
 	/* if no low limit, use previous default */
 	td->throtl_slice = DFL_THROTL_SLICE_HD;
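The filtering logic added above can be read in isolation: a completion sample feeds the latency-target accounting only when it is at or above the per-device threshold, so tiny sequential-IO latencies on rotational disks no longer skew the buckets. A sketch of the condition; values are in microseconds, following the comments on the defines:

#include <stdio.h>

#define LATENCY_FILTERED_SSD 0
#define LATENCY_FILTERED_HD  1000L	/* 1ms */

/*
 * Mirror of the patched blk_throtl_bio_endio() condition: the sample
 * is counted only when a latency target is set and the measured
 * latency is at or above the per-device filter threshold.
 */
static int sample_counts(long lat, long filtered_latency, long latency_target)
{
	return latency_target && lat >= filtered_latency;
}

int main(void)
{
	/* On rotational storage, a 200us (likely sequential) IO is ignored... */
	printf("%d\n", sample_counts(200, LATENCY_FILTERED_HD, 4000));  /* 0 */
	/* ...while a 2ms IO is counted against the target. */
	printf("%d\n", sample_counts(2000, LATENCY_FILTERED_HD, 4000)); /* 1 */
	return 0;
}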
@@ -59,7 +59,7 @@ void blk_free_flush_queue(struct blk_flush_queue *q);
 
 int blk_init_rl(struct request_list *rl, struct request_queue *q,
 		gfp_t gfp_mask);
-void blk_exit_rl(struct request_list *rl);
+void blk_exit_rl(struct request_queue *q, struct request_list *rl);
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		struct bio *bio);
 void blk_queue_bypass_start(struct request_queue *q);

@@ -38,9 +38,13 @@ static const u64 cfq_target_latency = (u64)NSEC_PER_SEC * 3/10; /* 300 ms */
 static const int cfq_hist_divisor = 4;
 
 /*
- * offset from end of service tree
+ * offset from end of queue service tree for idle class
  */
 #define CFQ_IDLE_DELAY		(NSEC_PER_SEC / 5)
+/* offset from end of group service tree under time slice mode */
+#define CFQ_SLICE_MODE_GROUP_DELAY	(NSEC_PER_SEC / 5)
+/* offset from end of group service under IOPS mode */
+#define CFQ_IOPS_MODE_GROUP_DELAY	(HZ / 5)
 
 /*
  * below this threshold, we consider thinktime immediate

@@ -1362,6 +1366,14 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
 	cfqg->vfraction = max_t(unsigned, vfr, 1);
 }
 
+static inline u64 cfq_get_cfqg_vdisktime_delay(struct cfq_data *cfqd)
+{
+	if (!iops_mode(cfqd))
+		return CFQ_SLICE_MODE_GROUP_DELAY;
+	else
+		return CFQ_IOPS_MODE_GROUP_DELAY;
+}
+
 static void
 cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {

@@ -1381,7 +1393,8 @@ cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	n = rb_last(&st->rb);
 	if (n) {
 		__cfqg = rb_entry_cfqg(n);
-		cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
+		cfqg->vdisktime = __cfqg->vdisktime +
+			cfq_get_cfqg_vdisktime_delay(cfqd);
 	} else
 		cfqg->vdisktime = st->min_vdisktime;
 	cfq_group_service_tree_add(st, cfqg);