
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

drivers/net/ethernet/mellanox/mlx5/core/main.c
  b33886971d ("net/mlx5: Initialize flow steering during driver probe")
  40379a0084 ("net/mlx5_fpga: Drop INNOVA TLS support")
  f2b41b32cd ("net/mlx5: Remove ipsec_ops function table")
https://lore.kernel.org/all/20220519040345.6yrjromcdistu7vh@sx1/
  16d42d3133 ("net/mlx5: Drain fw_reset when removing device")
  8324a02c34 ("net/mlx5: Add exit route when waiting for FW")
https://lore.kernel.org/all/20220519114119.060ce014@canb.auug.org.au/

tools/testing/selftests/net/mptcp/mptcp_join.sh
  e274f71540 ("selftests: mptcp: add subflow limits test-cases")
  b6e074e171 ("selftests: mptcp: add infinite map testcase")
  5ac1d2d634 ("selftests: mptcp: Add tests for userspace PM type")
https://lore.kernel.org/all/20220516111918.366d747f@canb.auug.org.au/

net/mptcp/options.c
  ba2c89e0ea ("mptcp: fix checksum byte order")
  1e39e5a32a ("mptcp: infinite mapping sending")
  ea66758c17 ("tcp: allow MPTCP to update the announced window")
https://lore.kernel.org/all/20220519115146.751c3a37@canb.auug.org.au/

net/mptcp/pm.c
  95d6865178 ("mptcp: fix subflow accounting on close")
  4d25247d3a ("mptcp: bypass in-kernel PM restrictions for non-kernel PMs")
https://lore.kernel.org/all/20220516111435.72f35dca@canb.auug.org.au/

net/mptcp/subflow.c
  ae66fb2ba6 ("mptcp: Do TCP fallback on early DSS checksum failure")
  0348c690ed ("mptcp: add the fallback check")
  f8d4bcacff ("mptcp: infinite mapping receiving")
https://lore.kernel.org/all/20220519115837.380bb8d4@canb.auug.org.au/

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit d7e6f58360 by Jakub Kicinski <kuba@kernel.org>
Date: 2022-05-19 11:23:59 -07:00
175 changed files with 1539 additions and 821 deletions

@@ -251,6 +251,7 @@ Mark Yao <markyao0591@gmail.com> <mark.yao@rock-chips.com>
 Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
 Martin Kepplinger <martink@posteo.de> <martin.kepplinger@puri.sm>
 Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
+Martyna Szapar-Mudlaw <martyna.szapar-mudlaw@linux.intel.com> <martyna.szapar-mudlaw@intel.com>
 Mathieu Othacehe <m.othacehe@gmail.com>
 Matthew Wilcox <willy@infradead.org> <matthew.r.wilcox@intel.com>
 Matthew Wilcox <willy@infradead.org> <matthew@wil.cx>

@@ -189,6 +189,9 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | Qualcomm Tech. | Kryo4xx Silver  | N/A             | ARM64_ERRATUM_1024718       |
 +----------------+-----------------+-----------------+-----------------------------+
+| Qualcomm Tech. | Kryo4xx Gold    | N/A             | ARM64_ERRATUM_1286807       |
++----------------+-----------------+-----------------+-----------------------------+
 
 +----------------+-----------------+-----------------+-----------------------------+
 | Fujitsu        | A64FX           | E#010001        | FUJITSU_ERRATUM_010001      |
 +----------------+-----------------+-----------------+-----------------------------+

@@ -33,7 +33,7 @@ patternProperties:
 $ref: "/schemas/types.yaml#/definitions/string"
 enum: [ ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14, ADC15, ADC2,
 ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, EMMC, ESPI, ESPIALT,
-FSI1, FSI2, FWSPIABR, FWSPID, FWSPIWP, GPIT0, GPIT1, GPIT2, GPIT3,
+FSI1, FSI2, FWQSPI, FWSPIABR, FWSPID, FWSPIWP, GPIT0, GPIT1, GPIT2, GPIT3,
 GPIT4, GPIT5, GPIT6, GPIT7, GPIU0, GPIU1, GPIU2, GPIU3, GPIU4, GPIU5,
 GPIU6, GPIU7, I2C1, I2C10, I2C11, I2C12, I2C13, I2C14, I2C15, I2C16,
 I2C2, I2C3, I2C4, I2C5, I2C6, I2C7, I2C8, I2C9, I3C3, I3C4, I3C5,
@@ -58,7 +58,7 @@ patternProperties:
 $ref: "/schemas/types.yaml#/definitions/string"
 enum: [ ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14, ADC15, ADC2,
 ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, EMMCG1, EMMCG4,
-EMMCG8, ESPI, ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID, FWQSPID, FWSPIWP,
+EMMCG8, ESPI, ESPIALT, FSI1, FSI2, FWQSPI, FWSPIABR, FWSPID, FWSPIWP,
 GPIT0, GPIT1, GPIT2, GPIT3, GPIT4, GPIT5, GPIT6, GPIT7, GPIU0, GPIU1,
 GPIU2, GPIU3, GPIU4, GPIU5, GPIU6, GPIU7, HVI3C3, HVI3C4, I2C1, I2C10,
 I2C11, I2C12, I2C13, I2C14, I2C15, I2C16, I2C2, I2C3, I2C4, I2C5,

@@ -244,10 +244,11 @@ disclosure of a particular issue, unless requested by a response team or by
 an involved disclosed party. The current ambassadors list:
 ============= ========================================================
-ARM Grant Likely <grant.likely@arm.com>
 AMD Tom Lendacky <tom.lendacky@amd.com>
-IBM Z Christian Borntraeger <borntraeger@de.ibm.com>
-IBM Power Anton Blanchard <anton@linux.ibm.com>
+Ampere Darren Hart <darren@os.amperecomputing.com>
+ARM Catalin Marinas <catalin.marinas@arm.com>
+IBM Power Anton Blanchard <anton@linux.ibm.com>
+IBM Z Christian Borntraeger <borntraeger@de.ibm.com>
 Intel Tony Luck <tony.luck@intel.com>
 Qualcomm Trilok Soni <tsoni@codeaurora.org>

@@ -5442,6 +5442,7 @@ F: net/ax25/sysctl_net_ax25.c
 DATA ACCESS MONITOR
 M: SeongJae Park <sj@kernel.org>
+L: damon@lists.linux.dev
 L: linux-mm@kvack.org
 S: Maintained
 F: Documentation/ABI/testing/sysfs-kernel-mm-damon
@@ -14399,7 +14400,6 @@ F: arch/arm/*omap*/*pm*
 F: drivers/cpufreq/omap-cpufreq.c
 OMAP POWERDOMAIN SOC ADAPTATION LAYER SUPPORT
-M: Rajendra Nayak <rnayak@codeaurora.org>
 M: Paul Walmsley <paul@pwsan.com>
 L: linux-omap@vger.kernel.org
 S: Maintained
@@ -15502,7 +15502,8 @@ F: tools/perf/
 PERFORMANCE EVENTS TOOLING ARM64
 R: John Garry <john.garry@huawei.com>
 R: Will Deacon <will@kernel.org>
-R: Mathieu Poirier <mathieu.poirier@linaro.org>
+R: James Clark <james.clark@arm.com>
+R: Mike Leach <mike.leach@linaro.org>
 R: Leo Yan <leo.yan@linaro.org>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Supported

@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 18
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Superb Owl
 # *DOCUMENTATION*

@@ -100,12 +100,14 @@
 lm25066@40 {
 compatible = "lm25066";
 reg = <0x40>;
+shunt-resistor-micro-ohms = <1000>;
 };
 /* 12VSB PMIC */
 lm25066@41 {
 compatible = "lm25066";
 reg = <0x41>;
+shunt-resistor-micro-ohms = <10000>;
 };
 };
@@ -196,7 +198,7 @@
 gpio-line-names =
 /* A */ "LOCATORLED_STATUS_N", "BMC_MAC2_INTB", "NMI_BTN_N", "BMC_NMI",
 "", "", "", "",
-/* B */ "DDR_MEM_TEMP", "", "", "", "", "", "", "",
+/* B */ "POST_COMPLETE_N", "", "", "", "", "", "", "",
 /* C */ "", "", "", "", "PCIE_HP_SEL_N", "PCIE_SATA_SEL_N", "LOCATORBTN", "",
 /* D */ "BMC_PSIN", "BMC_PSOUT", "BMC_RESETCON", "RESETCON",
 "", "", "", "PSU_FAN_FAIL_N",

@@ -117,9 +117,9 @@
 groups = "FWSPID";
 };
-pinctrl_fwqspid_default: fwqspid_default {
-function = "FWSPID";
-groups = "FWQSPID";
+pinctrl_fwqspi_default: fwqspi_default {
+function = "FWQSPI";
+groups = "FWQSPI";
 };
 pinctrl_fwspiwp_default: fwspiwp_default {
@@ -653,12 +653,12 @@
 };
 pinctrl_qspi1_default: qspi1_default {
-function = "QSPI1";
+function = "SPI1";
 groups = "QSPI1";
 };
 pinctrl_qspi2_default: qspi2_default {
-function = "QSPI2";
+function = "SPI2";
 groups = "QSPI2";
 };

@@ -393,6 +393,16 @@
 reg = <0x1e6f2000 0x1000>;
 };
+video: video@1e700000 {
+compatible = "aspeed,ast2600-video-engine";
+reg = <0x1e700000 0x1000>;
+clocks = <&syscon ASPEED_CLK_GATE_VCLK>,
+<&syscon ASPEED_CLK_GATE_ECLK>;
+clock-names = "vclk", "eclk";
+interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+status = "disabled";
+};
+
 gpio0: gpio@1e780000 {
 #gpio-cells = <2>;
 gpio-controller;

@@ -440,6 +440,9 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
 #define ARCH_HAS_VALID_PHYS_ADDR_RANGE
 extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
 extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
+extern bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+unsigned long flags);
+#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap
 #endif
 /*

@@ -493,3 +493,11 @@ void __init early_ioremap_init(void)
 {
 early_ioremap_setup();
 }
+
+bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+unsigned long flags)
+{
+unsigned long pfn = PHYS_PFN(offset);
+
+return memblock_is_map_memory(pfn);
+}

@@ -622,6 +622,10 @@
 status = "okay";
 };
+&rxmacro {
+status = "okay";
+};
+
 &slpi {
 status = "okay";
 firmware-name = "qcom/sm8250/slpi.mbn";
@@ -773,6 +777,8 @@
 };
 &swr1 {
+status = "okay";
+
 wcd_rx: wcd9380-rx@0,4 {
 compatible = "sdw20217010d00";
 reg = <0 4>;
@@ -781,6 +787,8 @@
 };
 &swr2 {
+status = "okay";
+
 wcd_tx: wcd9380-tx@0,3 {
 compatible = "sdw20217010d00";
 reg = <0 3>;
@@ -819,6 +827,10 @@
 };
 };
+&txmacro {
+status = "okay";
+};
+
 &uart12 {
 status = "okay";
 };

@@ -2255,6 +2255,7 @@
 pinctrl-0 = <&rx_swr_active>;
 compatible = "qcom,sm8250-lpass-rx-macro";
 reg = <0 0x3200000 0 0x1000>;
+status = "disabled";
 clocks = <&q6afecc LPASS_CLK_ID_TX_CORE_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
 <&q6afecc LPASS_CLK_ID_TX_CORE_NPL_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
@@ -2273,6 +2274,7 @@
 swr1: soundwire-controller@3210000 {
 reg = <0 0x3210000 0 0x2000>;
 compatible = "qcom,soundwire-v1.5.1";
+status = "disabled";
 interrupts = <GIC_SPI 298 IRQ_TYPE_LEVEL_HIGH>;
 clocks = <&rxmacro>;
 clock-names = "iface";
@@ -2300,6 +2302,7 @@
 pinctrl-0 = <&tx_swr_active>;
 compatible = "qcom,sm8250-lpass-tx-macro";
 reg = <0 0x3220000 0 0x1000>;
+status = "disabled";
 clocks = <&q6afecc LPASS_CLK_ID_TX_CORE_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
 <&q6afecc LPASS_CLK_ID_TX_CORE_NPL_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
@@ -2323,6 +2326,7 @@
 compatible = "qcom,soundwire-v1.5.1";
 interrupts-extended = <&intc GIC_SPI 297 IRQ_TYPE_LEVEL_HIGH>;
 interrupt-names = "core";
+status = "disabled";
 clocks = <&txmacro>;
 clock-names = "iface";

@@ -16,6 +16,7 @@
 aliases {
 ethernet0 = &gmac0;
+ethernet1 = &gmac1;
 mmc0 = &sdmmc0;
 mmc1 = &sdhci;
 };
@@ -78,7 +79,6 @@
 assigned-clocks = <&cru SCLK_GMAC0_RX_TX>, <&cru SCLK_GMAC0>;
 assigned-clock-parents = <&cru SCLK_GMAC0_RGMII_SPEED>, <&cru CLK_MAC0_2TOP>;
 clock_in_out = "input";
-phy-handle = <&rgmii_phy0>;
 phy-mode = "rgmii";
 pinctrl-names = "default";
 pinctrl-0 = <&gmac0_miim
@@ -90,8 +90,38 @@
 snps,reset-active-low;
 /* Reset time is 20ms, 100ms for rtl8211f */
 snps,reset-delays-us = <0 20000 100000>;
+tx_delay = <0x4f>;
+rx_delay = <0x0f>;
+status = "okay";
+
+fixed-link {
+speed = <1000>;
+full-duplex;
+pause;
+};
+};
+
+&gmac1 {
+assigned-clocks = <&cru SCLK_GMAC1_RX_TX>, <&cru SCLK_GMAC1>;
+assigned-clock-parents = <&cru SCLK_GMAC1_RGMII_SPEED>, <&cru CLK_MAC1_2TOP>;
+clock_in_out = "output";
+phy-handle = <&rgmii_phy1>;
+phy-mode = "rgmii";
+pinctrl-names = "default";
+pinctrl-0 = <&gmac1m1_miim
+&gmac1m1_tx_bus2
+&gmac1m1_rx_bus2
+&gmac1m1_rgmii_clk
+&gmac1m1_rgmii_bus>;
+snps,reset-gpio = <&gpio3 RK_PB0 GPIO_ACTIVE_LOW>;
+snps,reset-active-low;
+/* Reset time is 20ms, 100ms for rtl8211f */
+snps,reset-delays-us = <0 20000 100000>;
 tx_delay = <0x3c>;
 rx_delay = <0x2f>;
 status = "okay";
 };
@@ -315,8 +345,8 @@
 status = "disabled";
 };
-&mdio0 {
-rgmii_phy0: ethernet-phy@0 {
+&mdio1 {
+rgmii_phy1: ethernet-phy@0 {
 compatible = "ethernet-phy-ieee802.3-c22";
 reg = <0x0>;
 };
@@ -345,9 +375,9 @@
 pmuio2-supply = <&vcc3v3_pmu>;
 vccio1-supply = <&vccio_acodec>;
 vccio3-supply = <&vccio_sd>;
-vccio4-supply = <&vcc_1v8>;
+vccio4-supply = <&vcc_3v3>;
 vccio5-supply = <&vcc_3v3>;
-vccio6-supply = <&vcc_3v3>;
+vccio6-supply = <&vcc_1v8>;
 vccio7-supply = <&vcc_3v3>;
 status = "okay";
 };

@@ -192,4 +192,8 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
 extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
 extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
+
+extern bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+unsigned long flags);
+#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap
 #endif /* __ASM_IO_H */

@@ -75,6 +75,10 @@ obj-$(CONFIG_ARM64_MTE) += mte.o
 obj-y += vdso-wrap.o
 obj-$(CONFIG_COMPAT_VDSO) += vdso32-wrap.o
+
+# Force dependency (vdso*-wrap.S includes vdso.so through incbin)
+$(obj)/vdso-wrap.o: $(obj)/vdso/vdso.so
+$(obj)/vdso32-wrap.o: $(obj)/vdso32/vdso.so
 obj-y += probes/
 head-y := head.o
 extra-y += $(head-y) vmlinux.lds

@@ -208,6 +208,8 @@ static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
 #ifdef CONFIG_ARM64_ERRATUM_1286807
 {
 ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
+/* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
+ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
 },
 #endif
 {},

@@ -654,7 +654,6 @@ static const struct __ftr_reg_entry {
 ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
 ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1,
 &id_aa64isar1_override),
-ARM64_FTR_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2),
 ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2,
 &id_aa64isar2_override),
@@ -810,7 +809,7 @@ static void __init sort_ftr_regs(void)
 * to sys_id for subsequent binary search in get_arm64_ftr_reg()
 * to work correctly.
 */
-BUG_ON(arm64_ftr_regs[i].sys_id < arm64_ftr_regs[i - 1].sys_id);
+BUG_ON(arm64_ftr_regs[i].sys_id <= arm64_ftr_regs[i - 1].sys_id);
 }
 }

@@ -52,9 +52,6 @@ GCOV_PROFILE := n
 targets += vdso.lds
 CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
-# Force dependency (incbin is bad)
-$(obj)/vdso.o : $(obj)/vdso.so
 # Link rule for the .so file, .lds has to be first
 $(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
 $(call if_changed,vdsold_and_vdso_check)

@@ -131,9 +131,6 @@ obj-vdso := $(c-obj-vdso) $(c-obj-vdso-gettimeofday) $(asm-obj-vdso)
 targets += vdso.lds
 CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
-# Force dependency (vdso.s includes vdso.so through incbin)
-$(obj)/vdso.o: $(obj)/vdso.so
 include/generated/vdso32-offsets.h: $(obj)/vdso.so.dbg FORCE
 $(call if_changed,vdsosym)

@@ -99,3 +99,11 @@ void __init early_ioremap_init(void)
 {
 early_ioremap_setup();
 }
+
+bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+unsigned long flags)
+{
+unsigned long pfn = PHYS_PFN(offset);
+
+return pfn_is_map_memory(pfn);
+}

@@ -122,11 +122,27 @@
 /* 0x0 - 0xb */
-/* 'current->mm' needs to be in r4 */
-tophys(r4, r2)
-lwz r4, MM(r4)
-tophys(r4, r4)
-/* This only clobbers r0, r3, r4 and r5 */
+/* switch_mmu_context() needs paging, let's enable it */
+mfmsr r9
+ori r11, r9, MSR_DR
+mtmsr r11
+sync
+
+/* switch_mmu_context() clobbers r12, rescue it */
+SAVE_GPR(12, r1)
+
+/* Calling switch_mmu_context(<inv>, current->mm, <inv>); */
+lwz r4, MM(r2)
 bl switch_mmu_context
+
+/* restore r12 */
+REST_GPR(12, r1)
+
+/* Disable paging again */
+mfmsr r9
+li r6, MSR_DR
+andc r9, r9, r6
+mtmsr r9
+sync
 .endm

@@ -902,6 +902,8 @@ static void __meminit vmemmap_use_sub_pmd(unsigned long start, unsigned long end
 static void __meminit vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
 {
+const unsigned long page = ALIGN_DOWN(start, PMD_SIZE);
+
 vmemmap_flush_unused_pmd();
 /*
@@ -914,8 +916,7 @@ static void __meminit vmemmap_use_new_sub_pmd(unsigned long start, unsigned long
 * Mark with PAGE_UNUSED the unused parts of the new memmap range
 */
 if (!IS_ALIGNED(start, PMD_SIZE))
-memset((void *)start, PAGE_UNUSED,
-start - ALIGN_DOWN(start, PMD_SIZE));
+memset((void *)page, PAGE_UNUSED, start - page);
 /*
 * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of

@@ -742,6 +742,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 if (at_head) {
 list_add(&rq->queuelist, &per_prio->dispatch);
+rq->fifo_time = jiffies;
 } else {
 deadline_add_rq_rb(per_prio, rq);

@@ -735,6 +735,8 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
 size_t offset, u32 opt_flags)
 {
 struct firmware *fw = NULL;
+struct cred *kern_cred = NULL;
+const struct cred *old_cred;
 bool nondirect = false;
 int ret;
@@ -751,6 +753,18 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
 if (ret <= 0) /* error or already assigned */
 goto out;
+/*
+ * We are about to try to access the firmware file. Because we may have been
+ * called by a driver when serving an unrelated request from userland, we use
+ * the kernel credentials to read the file.
+ */
+kern_cred = prepare_kernel_cred(NULL);
+if (!kern_cred) {
+ret = -ENOMEM;
+goto out;
+}
+old_cred = override_creds(kern_cred);
+
 ret = fw_get_filesystem_firmware(device, fw->priv, "", NULL);
 /* Only full reads can support decompression, platform, and sysfs. */
@@ -776,6 +790,9 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
 } else
 ret = assign_fw(fw, device);
+revert_creds(old_cred);
+put_cred(kern_cred);
+
 out:
 if (ret < 0) {
 fw_abort_batch_reqs(fw);
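
As an aside, the override_creds() sequence this hunk introduces follows a standard kernel pattern; a minimal sketch of the shape (not the loader's exact code):

	/* Temporarily act with kernel credentials for a file access that
	 * must not depend on the calling process's permissions. */
	struct cred *kern_cred;
	const struct cred *old_cred;

	kern_cred = prepare_kernel_cred(NULL);	/* fresh kernel cred set */
	if (!kern_cred)
		return -ENOMEM;
	old_cred = override_creds(kern_cred);	/* returns creds to restore */

	/* ... perform the privileged filesystem access here ... */

	revert_creds(old_cred);			/* restore caller's creds */
	put_cred(kern_cred);			/* drop our reference */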

@@ -543,10 +543,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 file->f_mode |= FMODE_LSEEK;
 dmabuf->file = file;
-ret = dma_buf_stats_setup(dmabuf);
-if (ret)
-goto err_sysfs;
-
 mutex_init(&dmabuf->lock);
 INIT_LIST_HEAD(&dmabuf->attachments);
@@ -554,6 +550,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 list_add(&dmabuf->list_node, &db_list.head);
 mutex_unlock(&db_list.lock);
+ret = dma_buf_stats_setup(dmabuf);
+if (ret)
+goto err_sysfs;
+
 return dmabuf;
 err_sysfs:

@@ -296,6 +296,7 @@ static int amdgpu_ctx_set_stable_pstate(struct amdgpu_ctx *ctx,
 {
 struct amdgpu_device *adev = ctx->adev;
 enum amd_dpm_forced_level level;
+u32 current_stable_pstate;
 int r;
 mutex_lock(&adev->pm.stable_pstate_ctx_lock);
@@ -304,6 +305,10 @@
 goto done;
 }
+r = amdgpu_ctx_get_stable_pstate(ctx, &current_stable_pstate);
+if (r || (stable_pstate == current_stable_pstate))
+goto done;
+
 switch (stable_pstate) {
 case AMDGPU_CTX_STABLE_PSTATE_NONE:
 level = AMD_DPM_FORCED_LEVEL_AUTO;

@@ -81,6 +81,10 @@
 #include "mxgpu_vi.h"
 #include "amdgpu_dm.h"
+#if IS_ENABLED(CONFIG_X86)
+#include <asm/intel-family.h>
+#endif
+
 #define ixPCIE_LC_L1_PM_SUBSTATE 0x100100C6
 #define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK 0x00000001L
 #define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK 0x00000002L
@@ -1134,13 +1138,24 @@ static void vi_enable_aspm(struct amdgpu_device *adev)
 WREG32_PCIE(ixPCIE_LC_CNTL, data);
 }
+static bool aspm_support_quirk_check(void)
+{
+#if IS_ENABLED(CONFIG_X86)
+struct cpuinfo_x86 *c = &cpu_data(0);
+
+return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
+#else
+return true;
+#endif
+}
+
 static void vi_program_aspm(struct amdgpu_device *adev)
 {
 u32 data, data1, orig;
 bool bL1SS = false;
 bool bClkReqSupport = true;
-if (!amdgpu_device_should_use_aspm(adev))
+if (!amdgpu_device_should_use_aspm(adev) || !aspm_support_quirk_check())
 return;
 if (adev->flags & AMD_IS_APU ||

@@ -153,9 +153,4 @@ void dcn31_hw_sequencer_construct(struct dc *dc)
 dc->hwss.init_hw = dcn20_fpga_init_hw;
 dc->hwseq->funcs.init_pipes = NULL;
 }
-if (dc->debug.disable_z10) {
-/*hw not support z10 or sw disable it*/
-dc->hwss.z10_restore = NULL;
-dc->hwss.z10_save_init = NULL;
-}
 }

@@ -1351,14 +1351,8 @@ static int smu_disable_dpms(struct smu_context *smu)
 {
 struct amdgpu_device *adev = smu->adev;
 int ret = 0;
-/*
- * TODO: (adev->in_suspend && !adev->in_s0ix) is added to pair
- * the workaround which always reset the asic in suspend.
- * It's likely that workaround will be dropped in the future.
- * Then the change here should be dropped together.
- */
 bool use_baco = !smu->is_apu &&
-(((amdgpu_in_reset(adev) || (adev->in_suspend && !adev->in_s0ix)) &&
+((amdgpu_in_reset(adev) &&
 (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));

@@ -1605,17 +1605,17 @@ void i915_vma_close(struct i915_vma *vma)
 static void __i915_vma_remove_closed(struct i915_vma *vma)
 {
-struct intel_gt *gt = vma->vm->gt;
-
-spin_lock_irq(&gt->closed_lock);
 list_del_init(&vma->closed_link);
-spin_unlock_irq(&gt->closed_lock);
 }
 void i915_vma_reopen(struct i915_vma *vma)
 {
+struct intel_gt *gt = vma->vm->gt;
+
+spin_lock_irq(&gt->closed_lock);
 if (i915_vma_is_closed(vma))
 __i915_vma_remove_closed(vma);
+spin_unlock_irq(&gt->closed_lock);
 }
 void i915_vma_release(struct kref *ref)
@@ -1641,6 +1641,7 @@ static void force_unbind(struct i915_vma *vma)
 static void release_references(struct i915_vma *vma)
 {
 struct drm_i915_gem_object *obj = vma->obj;
+struct intel_gt *gt = vma->vm->gt;
 GEM_BUG_ON(i915_vma_is_active(vma));
@@ -1650,7 +1651,9 @@ static void release_references(struct i915_vma *vma)
 rb_erase(&vma->obj_node, &obj->vma.tree);
 spin_unlock(&obj->vma.lock);
+spin_lock_irq(&gt->closed_lock);
 __i915_vma_remove_closed(vma);
+spin_unlock_irq(&gt->closed_lock);
 __i915_vma_put(vma);
 }

@@ -46,8 +46,9 @@ static bool
 nouveau_get_backlight_name(char backlight_name[BL_NAME_SIZE],
 struct nouveau_backlight *bl)
 {
-const int nb = ida_simple_get(&bl_ida, 0, 0, GFP_KERNEL);
-if (nb < 0 || nb >= 100)
+const int nb = ida_alloc_max(&bl_ida, 99, GFP_KERNEL);
+
+if (nb < 0)
 return false;
 if (nb > 0)
 snprintf(backlight_name, BL_NAME_SIZE, "nv_backlight%d", nb);
@@ -414,7 +415,7 @@
 nv_encoder, ops, &props);
 if (IS_ERR(bl->dev)) {
 if (bl->id >= 0)
-ida_simple_remove(&bl_ida, bl->id);
+ida_free(&bl_ida, bl->id);
 ret = PTR_ERR(bl->dev);
 goto fail_alloc;
 }
@@ -442,7 +443,7 @@
 return;
 if (bl->id >= 0)
-ida_simple_remove(&bl_ida, bl->id);
+ida_free(&bl_ida, bl->id);
 backlight_device_unregister(bl->dev);
 nv_conn->backlight = NULL;
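
The nouveau change above is part of the migration from the deprecated ida_simple_*() helpers to ida_alloc_*()/ida_free(). The correspondence, sketched with a hypothetical my_ida (ida_simple_get() takes an exclusive end, ida_alloc_max() an inclusive maximum, hence 100 becoming 99):

	#include <linux/idr.h>

	static DEFINE_IDA(my_ida);

	/* old API: allocate an id in [0, 100) */
	int id = ida_simple_get(&my_ida, 0, 100, GFP_KERNEL);
	/* new API: same range expressed as an inclusive maximum */
	int id2 = ida_alloc_max(&my_ida, 99, GFP_KERNEL);

	ida_simple_remove(&my_ida, id);	/* old */
	ida_free(&my_ida, id2);		/* new */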

@@ -123,7 +123,7 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
 mutex_init(&tdev->iommu.mutex);
-if (iommu_present(&platform_bus_type)) {
+if (device_iommu_mapped(dev)) {
 tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
 if (!tdev->iommu.domain)
 goto error;

@@ -38,6 +38,7 @@
 #include <drm/drm_scdc_helper.h>
 #include <linux/clk.h>
 #include <linux/component.h>
+#include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
 #include <linux/of_address.h>
 #include <linux/of_gpio.h>

@@ -528,7 +528,7 @@ int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
 *seqno = atomic_add_return(1, &dev_priv->marker_seq);
 } while (*seqno == 0);
-if (!(vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_FENCE)) {
+if (!vmw_has_fences(dev_priv)) {
 /*
 * Don't request hardware to send a fence. The
@@ -675,11 +675,14 @@ int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
 */
 bool vmw_cmd_supported(struct vmw_private *vmw)
 {
-if ((vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
-SVGA_CAP_CMD_BUFFERS_2)) != 0)
-return true;
+bool has_cmdbufs =
+(vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
+SVGA_CAP_CMD_BUFFERS_2)) != 0;
+if (vmw_is_svga_v3(vmw))
+return (has_cmdbufs &&
+(vmw->capabilities & SVGA_CAP_GBOBJECTS) != 0);
 /*
 * We have FIFO cmd's
 */
-return vmw->fifo_mem != NULL;
+return has_cmdbufs || vmw->fifo_mem != NULL;
 }

@@ -1679,4 +1679,12 @@ static inline void vmw_irq_status_write(struct vmw_private *vmw,
 outl(status, vmw->io_start + SVGA_IRQSTATUS_PORT);
 }
+
+static inline bool vmw_has_fences(struct vmw_private *vmw)
+{
+if ((vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
+SVGA_CAP_CMD_BUFFERS_2)) != 0)
+return true;
+return (vmw_fifo_caps(vmw) & SVGA_FIFO_CAP_FENCE) != 0;
+}
 #endif

@@ -483,7 +483,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
 static int vmw_fb_kms_framebuffer(struct fb_info *info)
 {
-struct drm_mode_fb_cmd2 mode_cmd;
+struct drm_mode_fb_cmd2 mode_cmd = {0};
 struct vmw_fb_par *par = info->par;
 struct fb_var_screeninfo *var = &info->var;
 struct drm_framebuffer *cur_fb;

@@ -82,6 +82,22 @@ fman_from_fence(struct vmw_fence_obj *fence)
 return container_of(fence->base.lock, struct vmw_fence_manager, lock);
 }
+static u32 vmw_fence_goal_read(struct vmw_private *vmw)
+{
+if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
+return vmw_read(vmw, SVGA_REG_FENCE_GOAL);
+else
+return vmw_fifo_mem_read(vmw, SVGA_FIFO_FENCE_GOAL);
+}
+
+static void vmw_fence_goal_write(struct vmw_private *vmw, u32 value)
+{
+if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
+vmw_write(vmw, SVGA_REG_FENCE_GOAL, value);
+else
+vmw_fifo_mem_write(vmw, SVGA_FIFO_FENCE_GOAL, value);
+}
+
 /*
 * Note on fencing subsystem usage of irqs:
 * Typically the vmw_fences_update function is called
@@ -392,7 +408,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
 if (likely(!fman->seqno_valid))
 return false;
-goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
+goal_seqno = vmw_fence_goal_read(fman->dev_priv);
 if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
 return false;
@@ -400,9 +416,8 @@
 list_for_each_entry(fence, &fman->fence_list, head) {
 if (!list_empty(&fence->seq_passed_actions)) {
 fman->seqno_valid = true;
-vmw_fifo_mem_write(fman->dev_priv,
-SVGA_FIFO_FENCE_GOAL,
-fence->base.seqno);
+vmw_fence_goal_write(fman->dev_priv,
+fence->base.seqno);
 break;
 }
 }
@@ -434,13 +449,12 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
 if (dma_fence_is_signaled_locked(&fence->base))
 return false;
-goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
+goal_seqno = vmw_fence_goal_read(fman->dev_priv);
 if (likely(fman->seqno_valid &&
 goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
 return false;
-vmw_fifo_mem_write(fman->dev_priv, SVGA_FIFO_FENCE_GOAL,
-fence->base.seqno);
+vmw_fence_goal_write(fman->dev_priv, fence->base.seqno);
 fman->seqno_valid = true;
 return true;

@@ -32,6 +32,14 @@
 #define VMW_FENCE_WRAP (1 << 24)
+static u32 vmw_irqflag_fence_goal(struct vmw_private *vmw)
+{
+if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
+return SVGA_IRQFLAG_REG_FENCE_GOAL;
+else
+return SVGA_IRQFLAG_FENCE_GOAL;
+}
+
 /**
 * vmw_thread_fn - Deferred (process context) irq handler
 *
@@ -96,7 +104,7 @@ static irqreturn_t vmw_irq_handler(int irq, void *arg)
 wake_up_all(&dev_priv->fifo_queue);
 if ((masked_status & (SVGA_IRQFLAG_ANY_FENCE |
-SVGA_IRQFLAG_FENCE_GOAL)) &&
+vmw_irqflag_fence_goal(dev_priv))) &&
 !test_and_set_bit(VMW_IRQTHREAD_FENCE, dev_priv->irqthread_pending))
 ret = IRQ_WAKE_THREAD;
@@ -137,8 +145,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
 if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
 return true;
-if (!(vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_FENCE) &&
-vmw_fifo_idle(dev_priv, seqno))
+if (!vmw_has_fences(dev_priv) && vmw_fifo_idle(dev_priv, seqno))
 return true;
 /**
@@ -160,6 +167,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 unsigned long timeout)
 {
 struct vmw_fifo_state *fifo_state = dev_priv->fifo;
+bool fifo_down = false;
 uint32_t count = 0;
 uint32_t signal_seq;
@@ -176,12 +184,14 @@
 */
 if (fifo_idle) {
-down_read(&fifo_state->rwsem);
 if (dev_priv->cman) {
 ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
 10*HZ);
 if (ret)
 goto out_err;
+} else if (fifo_state) {
+down_read(&fifo_state->rwsem);
+fifo_down = true;
 }
 }
@@ -218,12 +228,12 @@
 }
 }
 finish_wait(&dev_priv->fence_queue, &__wait);
-if (ret == 0 && fifo_idle)
+if (ret == 0 && fifo_idle && fifo_state)
 vmw_fence_write(dev_priv, signal_seq);
 wake_up_all(&dev_priv->fence_queue);
 out_err:
-if (fifo_idle)
+if (fifo_down)
 up_read(&fifo_state->rwsem);
 return ret;
@@ -266,13 +276,13 @@ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
 void vmw_goal_waiter_add(struct vmw_private *dev_priv)
 {
-vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
+vmw_generic_waiter_add(dev_priv, vmw_irqflag_fence_goal(dev_priv),
 &dev_priv->goal_queue_waiters);
 }
 void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
 {
-vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
+vmw_generic_waiter_remove(dev_priv, vmw_irqflag_fence_goal(dev_priv),
 &dev_priv->goal_queue_waiters);
 }

@@ -1344,7 +1344,6 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
 ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
 mode_cmd,
 is_bo_proxy);
-
 /*
 * vmw_create_bo_proxy() adds a reference that is no longer
 * needed
@@ -1385,13 +1384,16 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
 ret = vmw_user_lookup_handle(dev_priv, file_priv,
 mode_cmd->handles[0],
 &surface, &bo);
-if (ret)
+if (ret) {
+DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
+mode_cmd->handles[0], mode_cmd->handles[0]);
 goto err_out;
+}
 if (!bo &&
 !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
-DRM_ERROR("Surface size cannot exceed %dx%d",
+DRM_ERROR("Surface size cannot exceed %dx%d\n",
 dev_priv->texture_max_width,
 dev_priv->texture_max_height);
 goto err_out;

@@ -960,7 +960,7 @@ config SENSORS_LTC4261
 config SENSORS_LTQ_CPUTEMP
 bool "Lantiq cpu temperature sensor driver"
-depends on LANTIQ
+depends on SOC_XWAY
 help
 If you say yes here you get support for the temperature
 sensor inside your CPU.

@@ -708,10 +708,21 @@ static int tmp401_probe(struct i2c_client *client)
 return 0;
 }
+static const struct of_device_id __maybe_unused tmp4xx_of_match[] = {
+{ .compatible = "ti,tmp401", },
+{ .compatible = "ti,tmp411", },
+{ .compatible = "ti,tmp431", },
+{ .compatible = "ti,tmp432", },
+{ .compatible = "ti,tmp435", },
+{ },
+};
+MODULE_DEVICE_TABLE(of, tmp4xx_of_match);
+
 static struct i2c_driver tmp401_driver = {
 .class = I2C_CLASS_HWMON,
 .driver = {
 .name = "tmp401",
+.of_match_table = of_match_ptr(tmp4xx_of_match),
 },
 .probe_new = tmp401_probe,
 .id_table = tmp401_id,

@@ -1087,9 +1087,15 @@ static int of_count_icc_providers(struct device_node *np)
 {
 struct device_node *child;
 int count = 0;
+const struct of_device_id __maybe_unused ignore_list[] = {
+{ .compatible = "qcom,sc7180-ipa-virt" },
+{ .compatible = "qcom,sdx55-ipa-virt" },
+{}
+};
 for_each_available_child_of_node(np, child) {
-if (of_property_read_bool(child, "#interconnect-cells"))
+if (of_property_read_bool(child, "#interconnect-cells") &&
+likely(!of_match_node(ignore_list, child)))
 count++;
 count += of_count_icc_providers(child);
 }

@@ -1492,34 +1492,22 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
 err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
 if (err)
 return err;
-cdev->can.bittiming_const = cdev->bit_timing ?
-cdev->bit_timing : &m_can_bittiming_const_30X;
-cdev->can.data_bittiming_const = cdev->data_timing ?
-cdev->data_timing :
-&m_can_data_bittiming_const_30X;
+cdev->can.bittiming_const = &m_can_bittiming_const_30X;
+cdev->can.data_bittiming_const = &m_can_data_bittiming_const_30X;
 break;
 case 31:
 /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
 err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
 if (err)
 return err;
-cdev->can.bittiming_const = cdev->bit_timing ?
-cdev->bit_timing : &m_can_bittiming_const_31X;
-cdev->can.data_bittiming_const = cdev->data_timing ?
-cdev->data_timing :
-&m_can_data_bittiming_const_31X;
+cdev->can.bittiming_const = &m_can_bittiming_const_31X;
+cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
 break;
 case 32:
 case 33:
 /* Support both MCAN version v3.2.x and v3.3.0 */
-cdev->can.bittiming_const = cdev->bit_timing ?
-cdev->bit_timing : &m_can_bittiming_const_31X;
-cdev->can.data_bittiming_const = cdev->data_timing ?
-cdev->data_timing :
-&m_can_data_bittiming_const_31X;
+cdev->can.bittiming_const = &m_can_bittiming_const_31X;
+cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
 cdev->can.ctrlmode_supported |=
 (m_can_niso_supported(cdev) ?

@@ -85,9 +85,6 @@ struct m_can_classdev {
 struct sk_buff *tx_skb;
 struct phy *transceiver;
-const struct can_bittiming_const *bit_timing;
-const struct can_bittiming_const *data_timing;
 struct m_can_ops *ops;
 int version;

@@ -18,14 +18,9 @@
 #define M_CAN_PCI_MMIO_BAR 0
+#define M_CAN_CLOCK_FREQ_EHL 200000000
 #define CTL_CSR_INT_CTL_OFFSET 0x508
-struct m_can_pci_config {
-const struct can_bittiming_const *bit_timing;
-const struct can_bittiming_const *data_timing;
-unsigned int clock_freq;
-};
-
 struct m_can_pci_priv {
 struct m_can_classdev cdev;
@@ -89,40 +84,9 @@ static struct m_can_ops m_can_pci_ops = {
 .read_fifo = iomap_read_fifo,
 };
-static const struct can_bittiming_const m_can_bittiming_const_ehl = {
-.name = KBUILD_MODNAME,
-.tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
-.tseg1_max = 64,
-.tseg2_min = 1, /* Time segment 2 = phase_seg2 */
-.tseg2_max = 128,
-.sjw_max = 128,
-.brp_min = 1,
-.brp_max = 512,
-.brp_inc = 1,
-};
-
-static const struct can_bittiming_const m_can_data_bittiming_const_ehl = {
-.name = KBUILD_MODNAME,
-.tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
-.tseg1_max = 16,
-.tseg2_min = 1, /* Time segment 2 = phase_seg2 */
-.tseg2_max = 8,
-.sjw_max = 4,
-.brp_min = 1,
-.brp_max = 32,
-.brp_inc = 1,
-};
-
-static const struct m_can_pci_config m_can_pci_ehl = {
-.bit_timing = &m_can_bittiming_const_ehl,
-.data_timing = &m_can_data_bittiming_const_ehl,
-.clock_freq = 200000000,
-};
-
 static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
 {
 struct device *dev = &pci->dev;
-const struct m_can_pci_config *cfg;
 struct m_can_classdev *mcan_class;
 struct m_can_pci_priv *priv;
 void __iomem *base;
@@ -150,8 +114,6 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
 if (!mcan_class)
 return -ENOMEM;
-cfg = (const struct m_can_pci_config *)id->driver_data;
-
 priv = cdev_to_priv(mcan_class);
 priv->base = base;
@@ -163,9 +125,7 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
 mcan_class->dev = &pci->dev;
 mcan_class->net->irq = pci_irq_vector(pci, 0);
 mcan_class->pm_clock_support = 1;
-mcan_class->bit_timing = cfg->bit_timing;
-mcan_class->data_timing = cfg->data_timing;
-mcan_class->can.clock.freq = cfg->clock_freq;
+mcan_class->can.clock.freq = id->driver_data;
 mcan_class->ops = &m_can_pci_ops;
 pci_set_drvdata(pci, mcan_class);
@@ -218,8 +178,8 @@ static SIMPLE_DEV_PM_OPS(m_can_pci_pm_ops,
 m_can_pci_suspend, m_can_pci_resume);
 static const struct pci_device_id m_can_pci_id_table[] = {
-{ PCI_VDEVICE(INTEL, 0x4bc1), (kernel_ulong_t)&m_can_pci_ehl, },
-{ PCI_VDEVICE(INTEL, 0x4bc2), (kernel_ulong_t)&m_can_pci_ehl, },
+{ PCI_VDEVICE(INTEL, 0x4bc1), M_CAN_CLOCK_FREQ_EHL, },
+{ PCI_VDEVICE(INTEL, 0x4bc2), M_CAN_CLOCK_FREQ_EHL, },
 { } /* Terminating Entry */
 };
 MODULE_DEVICE_TABLE(pci, m_can_pci_id_table);

@@ -2585,8 +2585,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
 device_set_wakeup_capable(&pdev->dev, 1);
 priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol");
-if (IS_ERR(priv->wol_clk))
-return PTR_ERR(priv->wol_clk);
+if (IS_ERR(priv->wol_clk)) {
+ret = PTR_ERR(priv->wol_clk);
+goto err_deregister_fixed_link;
+}
 /* Set the needed headroom once and for all */
 BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);

@@ -1213,7 +1213,6 @@ static void gem_rx_refill(struct macb_queue *queue)
 /* Make hw descriptor updates visible to CPU */
 rmb();
-queue->rx_prepared_head++;
 desc = macb_rx_desc(queue, entry);
 if (!queue->rx_skbuff[entry]) {
@@ -1252,6 +1251,7 @@
 dma_wmb();
 desc->addr &= ~MACB_BIT(RX_USED);
 }
+queue->rx_prepared_head++;
 }
 /* Make descriptor updates visible to hardware */
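
The macb fix above delays the producer-index increment until the descriptor slot is actually usable, so a failed allocation can be retried on the next refill pass. A self-contained sketch of that refill idiom, with hypothetical names rather than the driver's structures:

	#include <stdlib.h>

	#define RING_SIZE 16

	struct ring {
		void *buf[RING_SIZE];
		unsigned int head;	/* producer index, monotonically increasing */
		unsigned int tail;	/* consumer index */
	};

	/* Advance head only after the slot is fully initialized, so a failed
	 * allocation leaves head pointing at the slot to retry later. */
	static void ring_refill(struct ring *r)
	{
		while (r->head - r->tail < RING_SIZE) {
			unsigned int entry = r->head % RING_SIZE;

			if (!r->buf[entry]) {
				void *b = malloc(128);	/* stand-in for a DMA buffer */

				if (!b)
					break;		/* head untouched: retried later */
				r->buf[entry] = b;
			}
			r->head++;			/* slot is ready: publish it */
		}
	}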

@@ -1928,6 +1928,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
 /* AST2400 doesn't have working HW checksum generation */
 if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac")))
 netdev->hw_features &= ~NETIF_F_HW_CSUM;
+
+/* AST2600 tx checksum with NCSI is broken */
+if (priv->use_ncsi && of_device_is_compatible(np, "aspeed,ast2600-mac"))
+netdev->hw_features &= ~NETIF_F_HW_CSUM;
+
 if (np && of_get_property(np, "no-hw-checksum", NULL))
 netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM);
 netdev->features |= netdev->hw_features;

@@ -3043,8 +3043,8 @@ ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
 ice_for_each_q_vector(vsi, i) {
 struct ice_q_vector *q_vector = vsi->q_vectors[i];
-coalesce[i].itr_tx = q_vector->tx.itr_setting;
-coalesce[i].itr_rx = q_vector->rx.itr_setting;
+coalesce[i].itr_tx = q_vector->tx.itr_settings;
+coalesce[i].itr_rx = q_vector->rx.itr_settings;
 coalesce[i].intrl = q_vector->intrl;
 if (i < vsi->num_txq)
@@ -3100,21 +3100,21 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
 */
 if (i < vsi->alloc_rxq && coalesce[i].rx_valid) {
 rc = &vsi->q_vectors[i]->rx;
-rc->itr_setting = coalesce[i].itr_rx;
+rc->itr_settings = coalesce[i].itr_rx;
 ice_write_itr(rc, rc->itr_setting);
 } else if (i < vsi->alloc_rxq) {
 rc = &vsi->q_vectors[i]->rx;
-rc->itr_setting = coalesce[0].itr_rx;
+rc->itr_settings = coalesce[0].itr_rx;
 ice_write_itr(rc, rc->itr_setting);
 }
 if (i < vsi->alloc_txq && coalesce[i].tx_valid) {
 rc = &vsi->q_vectors[i]->tx;
-rc->itr_setting = coalesce[i].itr_tx;
+rc->itr_settings = coalesce[i].itr_tx;
 ice_write_itr(rc, rc->itr_setting);
 } else if (i < vsi->alloc_txq) {
 rc = &vsi->q_vectors[i]->tx;
-rc->itr_setting = coalesce[0].itr_tx;
+rc->itr_settings = coalesce[0].itr_tx;
 ice_write_itr(rc, rc->itr_setting);
 }
@@ -3128,12 +3128,12 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
 for (; i < vsi->num_q_vectors; i++) {
 /* transmit */
 rc = &vsi->q_vectors[i]->tx;
-rc->itr_setting = coalesce[0].itr_tx;
+rc->itr_settings = coalesce[0].itr_tx;
 ice_write_itr(rc, rc->itr_setting);
 /* receive */
 rc = &vsi->q_vectors[i]->rx;
-rc->itr_setting = coalesce[0].itr_rx;
+rc->itr_settings = coalesce[0].itr_rx;
 ice_write_itr(rc, rc->itr_setting);
 vsi->q_vectors[i]->intrl = coalesce[0].intrl;

@@ -6189,9 +6189,10 @@ static int ice_up_complete(struct ice_vsi *vsi)
 ice_ptp_link_change(pf, pf->hw.pf_id, true);
 }
-/* clear this now, and the first stats read will be used as baseline */
-vsi->stat_offsets_loaded = false;
+/* Perform an initial read of the statistics registers now to
+ * set the baseline so counters are ready when interface is up
+ */
+ice_update_eth_stats(vsi);
 ice_service_task_schedule(pf);
 return 0;

@@ -500,12 +500,19 @@ ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
 * This function must be called periodically to ensure that the cached value
 * is never more than 2 seconds old. It must also be called whenever the PHC
 * time has been changed.
+ *
+ * Return:
+ * * 0 - OK, successfully updated
+ * * -EAGAIN - PF was busy, need to reschedule the update
 */
-static void ice_ptp_update_cached_phctime(struct ice_pf *pf)
+static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
 {
 u64 systime;
 int i;
+if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
+return -EAGAIN;
+
 /* Read the current PHC time */
 systime = ice_ptp_read_src_clk_reg(pf, NULL);
@@ -528,6 +535,9 @@ static void ice_ptp_update_cached_phctime(struct ice_pf *pf)
 WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
 }
 }
+clear_bit(ICE_CFG_BUSY, pf->state);
+
+return 0;
 }
 /**
@@ -2330,17 +2340,18 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
 {
 struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
 struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
+int err;
 if (!test_bit(ICE_FLAG_PTP, pf->flags))
 return;
-ice_ptp_update_cached_phctime(pf);
+err = ice_ptp_update_cached_phctime(pf);
 ice_ptp_tx_tstamp_cleanup(&pf->hw, &pf->ptp.port.tx);
-/* Run twice a second */
+/* Run twice a second or reschedule if phc update failed */
 kthread_queue_delayed_work(ptp->kworker, &ptp->work,
-msecs_to_jiffies(500));
+msecs_to_jiffies(err ? 10 : 500));
 }
 /**

@@ -385,9 +385,14 @@ struct ice_ring_container {
 /* this matches the maximum number of ITR bits, but in usec
 * values, so it is shifted left one bit (bit zero is ignored)
 */
-u16 itr_setting:13;
-u16 itr_reserved:2;
-u16 itr_mode:1;
+union {
+struct {
+u16 itr_setting:13;
+u16 itr_reserved:2;
+u16 itr_mode:1;
+};
+u16 itr_settings;
+};
 enum ice_container_type type;
 };
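
The union introduced here lets the driver snapshot all three ITR bitfields through one u16 (itr_settings) instead of copying itr_setting alone, which silently dropped the mode bit. A standalone sketch of the idiom, with hypothetical names:

	#include <stdint.h>
	#include <stdio.h>

	struct itr {
		union {
			struct {
				uint16_t setting : 13;
				uint16_t reserved : 2;
				uint16_t mode : 1;	/* dynamic vs. static ITR */
			};
			uint16_t all;	/* every field in one load/store */
		};
	};

	int main(void)
	{
		struct itr a = { .all = 0 };

		a.setting = 42;
		a.mode = 1;

		/* Copying .all preserves setting, reserved and mode together,
		 * which copying .setting alone would not. */
		struct itr b = { .all = a.all };

		printf("%u %u\n", b.setting, b.mode);	/* prints: 42 1 */
		return 0;
	}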

@@ -5505,7 +5505,8 @@ static void igb_watchdog_task(struct work_struct *work)
 break;
 }
-if (adapter->link_speed != SPEED_1000)
+if (adapter->link_speed != SPEED_1000 ||
+!hw->phy.ops.read_reg)
 goto no_wait;
 /* wait for Remote receiver status OK */

@ -23,7 +23,7 @@ struct mlx5_ct_fs_smfs_matcher {
}; };
struct mlx5_ct_fs_smfs_matchers { struct mlx5_ct_fs_smfs_matchers {
struct mlx5_ct_fs_smfs_matcher smfs_matchers[4]; struct mlx5_ct_fs_smfs_matcher smfs_matchers[6];
struct list_head used; struct list_head used;
}; };
@ -44,7 +44,8 @@ struct mlx5_ct_fs_smfs_rule {
}; };
static inline void static inline void
mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bool ipv4, bool tcp) mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bool ipv4, bool tcp,
bool gre)
{ {
void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers); void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
@ -77,7 +78,7 @@ mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bo
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, tcp_dport); MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, tcp_dport);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags, MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
ntohs(MLX5_CT_TCP_FLAGS_MASK)); ntohs(MLX5_CT_TCP_FLAGS_MASK));
} else { } else if (!gre) {
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, udp_sport); MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, udp_sport);
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, udp_dport); MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, udp_dport);
} }
@ -87,7 +88,7 @@ mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bo
static struct mlx5dr_matcher * static struct mlx5dr_matcher *
mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl, bool ipv4, mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl, bool ipv4,
bool tcp, u32 priority) bool tcp, bool gre, u32 priority)
{ {
struct mlx5dr_matcher *dr_matcher; struct mlx5dr_matcher *dr_matcher;
struct mlx5_flow_spec *spec; struct mlx5_flow_spec *spec;
@ -96,7 +97,7 @@ mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl,
if (!spec) if (!spec)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
mlx5_ct_fs_smfs_fill_mask(fs, spec, ipv4, tcp); mlx5_ct_fs_smfs_fill_mask(fs, spec, ipv4, tcp, gre);
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2 | MLX5_MATCH_OUTER_HEADERS; spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2 | MLX5_MATCH_OUTER_HEADERS;
dr_matcher = mlx5_smfs_matcher_create(tbl, priority, spec); dr_matcher = mlx5_smfs_matcher_create(tbl, priority, spec);
@ -108,7 +109,7 @@ mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl,
} }
static struct mlx5_ct_fs_smfs_matcher * static struct mlx5_ct_fs_smfs_matcher *
mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp) mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp, bool gre)
{ {
struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs); struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
struct mlx5_ct_fs_smfs_matcher *m, *smfs_matcher; struct mlx5_ct_fs_smfs_matcher *m, *smfs_matcher;
@ -119,7 +120,7 @@ mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp
int prio; int prio;
matchers = nat ? &fs_smfs->matchers_nat : &fs_smfs->matchers; matchers = nat ? &fs_smfs->matchers_nat : &fs_smfs->matchers;
smfs_matcher = &matchers->smfs_matchers[ipv4 * 2 + tcp]; smfs_matcher = &matchers->smfs_matchers[ipv4 * 3 + tcp * 2 + gre];
if (refcount_inc_not_zero(&smfs_matcher->ref)) if (refcount_inc_not_zero(&smfs_matcher->ref))
return smfs_matcher; return smfs_matcher;
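[Editor's note] With GRE support the matcher array grows from four to six slots, and the lookup index becomes ipv4 * 3 + tcp * 2 + gre. Because tcp and gre are mutually exclusive for a given rule, the valid combinations map onto slots 0..5 without collisions, as this sketch enumerates:

    #include <stdio.h>

    int main(void)
    {
        /* tcp and gre cannot both be set for one connection-tracking rule */
        for (int ipv4 = 0; ipv4 <= 1; ipv4++)
            for (int tcp = 0; tcp <= 1; tcp++)
                for (int gre = 0; gre <= 1 - tcp; gre++)
                    printf("ipv4=%d tcp=%d gre=%d -> slot %d\n",
                           ipv4, tcp, gre, ipv4 * 3 + tcp * 2 + gre);
        return 0;
    }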
@ -145,11 +146,11 @@ mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp
} }
tbl = nat ? fs_smfs->ct_nat_tbl : fs_smfs->ct_tbl; tbl = nat ? fs_smfs->ct_nat_tbl : fs_smfs->ct_tbl;
dr_matcher = mlx5_ct_fs_smfs_matcher_create(fs, tbl, ipv4, tcp, prio); dr_matcher = mlx5_ct_fs_smfs_matcher_create(fs, tbl, ipv4, tcp, gre, prio);
if (IS_ERR(dr_matcher)) { if (IS_ERR(dr_matcher)) {
netdev_warn(fs->netdev, netdev_warn(fs->netdev,
"ct_fs_smfs: failed to create matcher (nat %d, ipv4 %d, tcp %d), err: %ld\n", "ct_fs_smfs: failed to create matcher (nat %d, ipv4 %d, tcp %d, gre %d), err: %ld\n",
nat, ipv4, tcp, PTR_ERR(dr_matcher)); nat, ipv4, tcp, gre, PTR_ERR(dr_matcher));
smfs_matcher = ERR_CAST(dr_matcher); smfs_matcher = ERR_CAST(dr_matcher);
goto out_unlock; goto out_unlock;
@ -222,16 +223,17 @@ mlx5_ct_fs_smfs_destroy(struct mlx5_ct_fs *fs)
static inline bool static inline bool
mlx5_tc_ct_valid_used_dissector_keys(const u32 used_keys) mlx5_tc_ct_valid_used_dissector_keys(const u32 used_keys)
{ {
#define DISSECTOR_BIT(name) BIT(FLOW_DISSECTOR_KEY_ ## name) #define DISS_BIT(name) BIT(FLOW_DISSECTOR_KEY_ ## name)
const u32 basic_keys = DISSECTOR_BIT(BASIC) | DISSECTOR_BIT(CONTROL) | const u32 basic_keys = DISS_BIT(BASIC) | DISS_BIT(CONTROL) | DISS_BIT(META);
DISSECTOR_BIT(PORTS) | DISSECTOR_BIT(META); const u32 ipv4_tcp = basic_keys | DISS_BIT(IPV4_ADDRS) | DISS_BIT(PORTS) | DISS_BIT(TCP);
const u32 ipv4_tcp = basic_keys | DISSECTOR_BIT(IPV4_ADDRS) | DISSECTOR_BIT(TCP); const u32 ipv6_tcp = basic_keys | DISS_BIT(IPV6_ADDRS) | DISS_BIT(PORTS) | DISS_BIT(TCP);
const u32 ipv4_udp = basic_keys | DISSECTOR_BIT(IPV4_ADDRS); const u32 ipv4_udp = basic_keys | DISS_BIT(IPV4_ADDRS) | DISS_BIT(PORTS);
const u32 ipv6_tcp = basic_keys | DISSECTOR_BIT(IPV6_ADDRS) | DISSECTOR_BIT(TCP); const u32 ipv6_udp = basic_keys | DISS_BIT(IPV6_ADDRS) | DISS_BIT(PORTS);
const u32 ipv6_udp = basic_keys | DISSECTOR_BIT(IPV6_ADDRS); const u32 ipv4_gre = basic_keys | DISS_BIT(IPV4_ADDRS);
const u32 ipv6_gre = basic_keys | DISS_BIT(IPV6_ADDRS);
return (used_keys == ipv4_tcp || used_keys == ipv4_udp || used_keys == ipv6_tcp || return (used_keys == ipv4_tcp || used_keys == ipv4_udp || used_keys == ipv6_tcp ||
used_keys == ipv6_udp); used_keys == ipv6_udp || used_keys == ipv4_gre || used_keys == ipv6_gre);
} }
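[Editor's note] The validator compares used_keys against an exact allow-list rather than testing bits individually, and the new GRE entries are simply the UDP sets without PORTS. A hedged sketch of the same exact-match validation style, with invented key bits:

    #include <stdint.h>
    #include <stdio.h>

    #define DISS_BIT(n) (1u << (n))

    enum { KEY_BASIC, KEY_CONTROL, KEY_META, KEY_IPV4, KEY_PORTS, KEY_TCP };

    static int keys_supported(uint32_t used)
    {
        const uint32_t basic  = DISS_BIT(KEY_BASIC) | DISS_BIT(KEY_CONTROL) |
                                DISS_BIT(KEY_META);
        const uint32_t v4_tcp = basic | DISS_BIT(KEY_IPV4) | DISS_BIT(KEY_PORTS) |
                                DISS_BIT(KEY_TCP);
        const uint32_t v4_udp = basic | DISS_BIT(KEY_IPV4) | DISS_BIT(KEY_PORTS);
        const uint32_t v4_gre = basic | DISS_BIT(KEY_IPV4); /* no ports on GRE */

        /* Exact equality: any extra dissector key rejects the rule. */
        return used == v4_tcp || used == v4_udp || used == v4_gre;
    }

    int main(void)
    {
        uint32_t gre_rule = DISS_BIT(KEY_BASIC) | DISS_BIT(KEY_CONTROL) |
                            DISS_BIT(KEY_META) | DISS_BIT(KEY_IPV4);
        printf("gre rule supported: %d\n", keys_supported(gre_rule));
        printf("tcp but no ports:   %d\n",
               keys_supported(gre_rule | DISS_BIT(KEY_TCP)));
        return 0;
    }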
static bool static bool
@ -254,20 +256,24 @@ mlx5_ct_fs_smfs_ct_validate_flow_rule(struct mlx5_ct_fs *fs, struct flow_rule *f
flow_rule_match_control(flow_rule, &control); flow_rule_match_control(flow_rule, &control);
flow_rule_match_ipv4_addrs(flow_rule, &ipv4_addrs); flow_rule_match_ipv4_addrs(flow_rule, &ipv4_addrs);
flow_rule_match_ipv6_addrs(flow_rule, &ipv6_addrs); flow_rule_match_ipv6_addrs(flow_rule, &ipv6_addrs);
flow_rule_match_ports(flow_rule, &ports); if (basic.key->ip_proto != IPPROTO_GRE)
flow_rule_match_tcp(flow_rule, &tcp); flow_rule_match_ports(flow_rule, &ports);
if (basic.key->ip_proto == IPPROTO_TCP)
flow_rule_match_tcp(flow_rule, &tcp);
if (basic.mask->n_proto != htons(0xFFFF) || if (basic.mask->n_proto != htons(0xFFFF) ||
(basic.key->n_proto != htons(ETH_P_IP) && basic.key->n_proto != htons(ETH_P_IPV6)) || (basic.key->n_proto != htons(ETH_P_IP) && basic.key->n_proto != htons(ETH_P_IPV6)) ||
basic.mask->ip_proto != 0xFF || basic.mask->ip_proto != 0xFF ||
(basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP)) { (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP &&
basic.key->ip_proto != IPPROTO_GRE)) {
ct_dbg("rule uses unexpected basic match (n_proto 0x%04x/0x%04x, ip_proto 0x%02x/0x%02x)", ct_dbg("rule uses unexpected basic match (n_proto 0x%04x/0x%04x, ip_proto 0x%02x/0x%02x)",
ntohs(basic.key->n_proto), ntohs(basic.mask->n_proto), ntohs(basic.key->n_proto), ntohs(basic.mask->n_proto),
basic.key->ip_proto, basic.mask->ip_proto); basic.key->ip_proto, basic.mask->ip_proto);
return false; return false;
} }
if (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF)) { if (basic.key->ip_proto != IPPROTO_GRE &&
(ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF))) {
ct_dbg("rule uses ports match (src 0x%04x, dst 0x%04x)", ct_dbg("rule uses ports match (src 0x%04x, dst 0x%04x)",
ports.mask->src, ports.mask->dst); ports.mask->src, ports.mask->dst);
return false; return false;
@ -291,7 +297,7 @@ mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
struct mlx5dr_action *actions[5]; struct mlx5dr_action *actions[5];
struct mlx5dr_rule *rule; struct mlx5dr_rule *rule;
int num_actions = 0, err; int num_actions = 0, err;
bool nat, tcp, ipv4; bool nat, tcp, ipv4, gre;
if (!mlx5_ct_fs_smfs_ct_validate_flow_rule(fs, flow_rule)) if (!mlx5_ct_fs_smfs_ct_validate_flow_rule(fs, flow_rule))
return ERR_PTR(-EOPNOTSUPP); return ERR_PTR(-EOPNOTSUPP);
@ -314,15 +320,17 @@ mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
ipv4 = mlx5e_tc_get_ip_version(spec, true) == 4; ipv4 = mlx5e_tc_get_ip_version(spec, true) == 4;
tcp = MLX5_GET(fte_match_param, spec->match_value, tcp = MLX5_GET(fte_match_param, spec->match_value,
outer_headers.ip_protocol) == IPPROTO_TCP; outer_headers.ip_protocol) == IPPROTO_TCP;
gre = MLX5_GET(fte_match_param, spec->match_value,
outer_headers.ip_protocol) == IPPROTO_GRE;
smfs_matcher = mlx5_ct_fs_smfs_matcher_get(fs, nat, ipv4, tcp); smfs_matcher = mlx5_ct_fs_smfs_matcher_get(fs, nat, ipv4, tcp, gre);
if (IS_ERR(smfs_matcher)) { if (IS_ERR(smfs_matcher)) {
err = PTR_ERR(smfs_matcher); err = PTR_ERR(smfs_matcher);
goto err_matcher; goto err_matcher;
} }
rule = mlx5_smfs_rule_create(smfs_matcher->dr_matcher, spec, num_actions, actions, rule = mlx5_smfs_rule_create(smfs_matcher->dr_matcher, spec, num_actions, actions,
MLX5_FLOW_CONTEXT_FLOW_SOURCE_ANY_VPORT); spec->flow_context.flow_source);
if (!rule) { if (!rule) {
err = -EINVAL; err = -EINVAL;
goto err_create; goto err_create;

View file

@ -14,19 +14,26 @@ static int mlx5e_trap_napi_poll(struct napi_struct *napi, int budget)
bool busy = false; bool busy = false;
int work_done = 0; int work_done = 0;
rcu_read_lock();
ch_stats->poll++; ch_stats->poll++;
work_done = mlx5e_poll_rx_cq(&rq->cq, budget); work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
busy |= work_done == budget; busy |= work_done == budget;
busy |= rq->post_wqes(rq); busy |= rq->post_wqes(rq);
if (busy) if (busy) {
return budget; work_done = budget;
goto out;
}
if (unlikely(!napi_complete_done(napi, work_done))) if (unlikely(!napi_complete_done(napi, work_done)))
return work_done; goto out;
mlx5e_cq_arm(&rq->cq); mlx5e_cq_arm(&rq->cq);
out:
rcu_read_unlock();
return work_done; return work_done;
} }
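[Editor's note] The fix wraps the trap poll routine in rcu_read_lock()/rcu_read_unlock() and routes every return through a single out: label so the unlock can never be skipped. The same single-exit shape in portable C, with a pthread mutex standing in for the RCU read-side lock and invented names throughout:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static int poll_once(int budget, int pending)
    {
        int work_done;

        pthread_mutex_lock(&lock);

        work_done = pending < budget ? pending : budget;
        if (work_done == budget) {     /* still busy: report the full budget */
            work_done = budget;
            goto out;
        }
        /* ... re-arm the completion queue here ... */
    out:
        pthread_mutex_unlock(&lock);   /* runs on every path */
        return work_done;
    }

    int main(void)
    {
        printf("busy poll:  %d\n", poll_once(64, 100));
        printf("quiet poll: %d\n", poll_once(64, 3));
        return 0;
    }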

View file

@ -3858,6 +3858,10 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev
if (netdev->features & NETIF_F_NTUPLE) if (netdev->features & NETIF_F_NTUPLE)
netdev_warn(netdev, "Disabling ntuple, not supported in switchdev mode\n"); netdev_warn(netdev, "Disabling ntuple, not supported in switchdev mode\n");
features &= ~NETIF_F_GRO_HW;
if (netdev->features & NETIF_F_GRO_HW)
netdev_warn(netdev, "Disabling HW_GRO, not supported in switchdev mode\n");
return features; return features;
} }
@ -3890,6 +3894,25 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
} }
} }
if (params->xdp_prog) {
if (features & NETIF_F_LRO) {
netdev_warn(netdev, "LRO is incompatible with XDP\n");
features &= ~NETIF_F_LRO;
}
if (features & NETIF_F_GRO_HW) {
netdev_warn(netdev, "HW GRO is incompatible with XDP\n");
features &= ~NETIF_F_GRO_HW;
}
}
if (priv->xsk.refcnt) {
if (features & NETIF_F_GRO_HW) {
netdev_warn(netdev, "HW GRO is incompatible with AF_XDP (%u XSKs are active)\n",
priv->xsk.refcnt);
features &= ~NETIF_F_GRO_HW;
}
}
if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) { if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
features &= ~NETIF_F_RXHASH; features &= ~NETIF_F_RXHASH;
if (netdev->features & NETIF_F_RXHASH) if (netdev->features & NETIF_F_RXHASH)
@ -4839,10 +4862,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
if (!!MLX5_CAP_GEN(mdev, shampo) &&
mlx5e_check_fragmented_striding_rq_cap(mdev))
netdev->hw_features |= NETIF_F_GRO_HW;
if (mlx5e_tunnel_any_tx_proto_supported(mdev)) { if (mlx5e_tunnel_any_tx_proto_supported(mdev)) {
netdev->hw_enc_features |= NETIF_F_HW_CSUM; netdev->hw_enc_features |= NETIF_F_HW_CSUM;
netdev->hw_enc_features |= NETIF_F_TSO; netdev->hw_enc_features |= NETIF_F_TSO;
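[Editor's note] The en_main.c changes above move HW GRO vetting into the fix_features path: such callbacks clear offending bits from the requested mask and warn once per conflict, and here both XDP and active AF_XDP sockets veto HW GRO (XDP additionally vetoes LRO). A small sketch of that masking discipline with invented flag names:

    #include <stdint.h>
    #include <stdio.h>

    #define F_LRO    (1u << 0)
    #define F_GRO_HW (1u << 1)

    static uint32_t fix_features(uint32_t features, int xdp_active, int xsk_refcnt)
    {
        if (xdp_active) {
            if (features & F_LRO) {
                fprintf(stderr, "LRO is incompatible with XDP\n");
                features &= ~F_LRO;
            }
            if (features & F_GRO_HW) {
                fprintf(stderr, "HW GRO is incompatible with XDP\n");
                features &= ~F_GRO_HW;
            }
        }
        if (xsk_refcnt && (features & F_GRO_HW)) {
            fprintf(stderr, "HW GRO is incompatible with AF_XDP (%d XSKs)\n",
                    xsk_refcnt);
            features &= ~F_GRO_HW;
        }
        return features;
    }

    int main(void)
    {
        printf("result: %#x\n", (unsigned)fix_features(F_LRO | F_GRO_HW, 1, 2));
        return 0;
    }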

View file

@ -2673,28 +2673,6 @@ static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
clean_tree(&root_ns->ns.node); clean_tree(&root_ns->ns.node);
} }
void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
cleanup_root_ns(steering->root_ns);
cleanup_root_ns(steering->fdb_root_ns);
steering->fdb_root_ns = NULL;
kfree(steering->fdb_sub_ns);
steering->fdb_sub_ns = NULL;
cleanup_root_ns(steering->port_sel_root_ns);
cleanup_root_ns(steering->sniffer_rx_root_ns);
cleanup_root_ns(steering->sniffer_tx_root_ns);
cleanup_root_ns(steering->rdma_rx_root_ns);
cleanup_root_ns(steering->rdma_tx_root_ns);
cleanup_root_ns(steering->egress_root_ns);
mlx5_cleanup_fc_stats(dev);
kmem_cache_destroy(steering->ftes_cache);
kmem_cache_destroy(steering->fgs_cache);
mlx5_ft_pool_destroy(dev);
kfree(steering);
}
static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering) static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
{ {
struct fs_prio *prio; struct fs_prio *prio;
@ -3096,43 +3074,28 @@ cleanup:
return err; return err;
} }
int mlx5_init_fs(struct mlx5_core_dev *dev) void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
{ {
struct mlx5_flow_steering *steering; struct mlx5_flow_steering *steering = dev->priv.steering;
cleanup_root_ns(steering->root_ns);
cleanup_root_ns(steering->fdb_root_ns);
steering->fdb_root_ns = NULL;
kfree(steering->fdb_sub_ns);
steering->fdb_sub_ns = NULL;
cleanup_root_ns(steering->port_sel_root_ns);
cleanup_root_ns(steering->sniffer_rx_root_ns);
cleanup_root_ns(steering->sniffer_tx_root_ns);
cleanup_root_ns(steering->rdma_rx_root_ns);
cleanup_root_ns(steering->rdma_tx_root_ns);
cleanup_root_ns(steering->egress_root_ns);
}
int mlx5_fs_core_init(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
int err = 0; int err = 0;
err = mlx5_init_fc_stats(dev);
if (err)
return err;
err = mlx5_ft_pool_init(dev);
if (err)
return err;
steering = kzalloc(sizeof(*steering), GFP_KERNEL);
if (!steering) {
err = -ENOMEM;
goto err;
}
steering->dev = dev;
dev->priv.steering = steering;
if (mlx5_fs_dr_is_supported(dev))
steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
else
steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
sizeof(struct mlx5_flow_group), 0,
0, NULL);
steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
0, NULL);
if (!steering->ftes_cache || !steering->fgs_cache) {
err = -ENOMEM;
goto err;
}
if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
(MLX5_CAP_GEN(dev, nic_flow_table))) || (MLX5_CAP_GEN(dev, nic_flow_table))) ||
((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) && ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
@ -3189,8 +3152,64 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
} }
return 0; return 0;
err: err:
mlx5_cleanup_fs(dev); mlx5_fs_core_cleanup(dev);
return err;
}
void mlx5_fs_core_free(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
kmem_cache_destroy(steering->ftes_cache);
kmem_cache_destroy(steering->fgs_cache);
kfree(steering);
mlx5_ft_pool_destroy(dev);
mlx5_cleanup_fc_stats(dev);
}
int mlx5_fs_core_alloc(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering;
int err = 0;
err = mlx5_init_fc_stats(dev);
if (err)
return err;
err = mlx5_ft_pool_init(dev);
if (err)
goto err;
steering = kzalloc(sizeof(*steering), GFP_KERNEL);
if (!steering) {
err = -ENOMEM;
goto err;
}
steering->dev = dev;
dev->priv.steering = steering;
if (mlx5_fs_dr_is_supported(dev))
steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
else
steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
sizeof(struct mlx5_flow_group), 0,
0, NULL);
steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
0, NULL);
if (!steering->ftes_cache || !steering->fgs_cache) {
err = -ENOMEM;
goto err;
}
return 0;
err:
mlx5_fs_core_free(dev);
return err; return err;
} }
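[Editor's note] The refactor splits flow steering into a software-only alloc/free pair (callable from init_once) and a device-facing init/cleanup pair (called at load/unload), so teardown can run in strict reverse order of setup. A compact sketch of the two-phase pattern with goto unwinding, all names invented:

    #include <stdio.h>
    #include <stdlib.h>

    struct dev { void *steering; int hw_ready; };

    static int core_alloc(struct dev *d)          /* software state only */
    {
        d->steering = malloc(64);
        return d->steering ? 0 : -1;
    }
    static void core_free(struct dev *d) { free(d->steering); d->steering = NULL; }

    static int core_init(struct dev *d) { d->hw_ready = 1; return 0; }  /* touches HW */
    static void core_cleanup(struct dev *d) { d->hw_ready = 0; }

    int main(void)
    {
        struct dev d = {0};

        if (core_alloc(&d))
            goto err_alloc;
        if (core_init(&d))
            goto err_init;

        printf("probed\n");
        core_cleanup(&d);   /* unload: reverse of init */
        core_free(&d);      /* remove: reverse of alloc */
        return 0;

    err_init:
        core_free(&d);
    err_alloc:
        return 1;
    }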

View file

@ -299,8 +299,10 @@ int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns, int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
enum mlx5_flow_steering_mode mode); enum mlx5_flow_steering_mode mode);
int mlx5_init_fs(struct mlx5_core_dev *dev); int mlx5_fs_core_alloc(struct mlx5_core_dev *dev);
void mlx5_cleanup_fs(struct mlx5_core_dev *dev); void mlx5_fs_core_free(struct mlx5_core_dev *dev);
int mlx5_fs_core_init(struct mlx5_core_dev *dev);
void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev);
int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports); int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports);
void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev); void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev);

View file

@ -8,7 +8,8 @@
enum { enum {
MLX5_FW_RESET_FLAGS_RESET_REQUESTED, MLX5_FW_RESET_FLAGS_RESET_REQUESTED,
MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST,
MLX5_FW_RESET_FLAGS_PENDING_COMP MLX5_FW_RESET_FLAGS_PENDING_COMP,
MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS
}; };
struct mlx5_fw_reset { struct mlx5_fw_reset {
@ -208,7 +209,10 @@ static void poll_sync_reset(struct timer_list *t)
if (fatal_error) { if (fatal_error) {
mlx5_core_warn(dev, "Got Device Reset\n"); mlx5_core_warn(dev, "Got Device Reset\n");
queue_work(fw_reset->wq, &fw_reset->reset_reload_work); if (!test_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags))
queue_work(fw_reset->wq, &fw_reset->reset_reload_work);
else
mlx5_core_err(dev, "Device is being removed, Drop new reset work\n");
return; return;
} }
@ -433,9 +437,12 @@ static int fw_reset_event_notifier(struct notifier_block *nb, unsigned long acti
struct mlx5_fw_reset *fw_reset = mlx5_nb_cof(nb, struct mlx5_fw_reset, nb); struct mlx5_fw_reset *fw_reset = mlx5_nb_cof(nb, struct mlx5_fw_reset, nb);
struct mlx5_eqe *eqe = data; struct mlx5_eqe *eqe = data;
if (test_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags))
return NOTIFY_DONE;
switch (eqe->sub_type) { switch (eqe->sub_type) {
case MLX5_GENERAL_SUBTYPE_FW_LIVE_PATCH_EVENT: case MLX5_GENERAL_SUBTYPE_FW_LIVE_PATCH_EVENT:
queue_work(fw_reset->wq, &fw_reset->fw_live_patch_work); queue_work(fw_reset->wq, &fw_reset->fw_live_patch_work);
break; break;
case MLX5_GENERAL_SUBTYPE_PCI_SYNC_FOR_FW_UPDATE_EVENT: case MLX5_GENERAL_SUBTYPE_PCI_SYNC_FOR_FW_UPDATE_EVENT:
mlx5_sync_reset_events_handle(fw_reset, eqe); mlx5_sync_reset_events_handle(fw_reset, eqe);
@ -479,6 +486,18 @@ void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev)
mlx5_eq_notifier_unregister(dev, &dev->priv.fw_reset->nb); mlx5_eq_notifier_unregister(dev, &dev->priv.fw_reset->nb);
} }
void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
set_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags);
cancel_work_sync(&fw_reset->fw_live_patch_work);
cancel_work_sync(&fw_reset->reset_request_work);
cancel_work_sync(&fw_reset->reset_reload_work);
cancel_work_sync(&fw_reset->reset_now_work);
cancel_work_sync(&fw_reset->reset_abort_work);
}
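[Editor's note] Draining is two steps: first publish a "drop new requests" flag, then synchronously cancel everything already queued; because the flag is checked before queueing, nothing can be requeued after the cancels complete. A user-space approximation with a C11 atomic standing in for the reset_flags bit (the cancel step is stubbed):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool drop_new_requests;

    static bool queue_reset_work(const char *what)
    {
        if (atomic_load(&drop_new_requests)) {
            fprintf(stderr, "device is being removed, dropping %s\n", what);
            return false;
        }
        printf("queued %s\n", what);
        return true;
    }

    static void drain_fw_reset(void)
    {
        atomic_store(&drop_new_requests, true);
        /* ... cancel_work_sync() on each pending work item here ... */
    }

    int main(void)
    {
        queue_reset_work("reset_request_work");   /* accepted */
        drain_fw_reset();
        queue_reset_work("reset_reload_work");    /* refused */
        return 0;
    }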
int mlx5_fw_reset_init(struct mlx5_core_dev *dev) int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
{ {
struct mlx5_fw_reset *fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL); struct mlx5_fw_reset *fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL);

View file

@ -16,6 +16,7 @@ int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev);
int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev); int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev);
void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev); void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev);
void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev); void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev);
void mlx5_drain_fw_reset(struct mlx5_core_dev *dev);
int mlx5_fw_reset_init(struct mlx5_core_dev *dev); int mlx5_fw_reset_init(struct mlx5_core_dev *dev);
void mlx5_fw_reset_cleanup(struct mlx5_core_dev *dev); void mlx5_fw_reset_cleanup(struct mlx5_core_dev *dev);

View file

@ -936,6 +936,12 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
goto err_sf_table_cleanup; goto err_sf_table_cleanup;
} }
err = mlx5_fs_core_alloc(dev);
if (err) {
mlx5_core_err(dev, "Failed to alloc flow steering\n");
goto err_fs;
}
dev->dm = mlx5_dm_create(dev); dev->dm = mlx5_dm_create(dev);
if (IS_ERR(dev->dm)) if (IS_ERR(dev->dm))
mlx5_core_warn(dev, "Failed to init device memory%d\n", err); mlx5_core_warn(dev, "Failed to init device memory%d\n", err);
@ -946,6 +952,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
return 0; return 0;
err_fs:
mlx5_sf_table_cleanup(dev);
err_sf_table_cleanup: err_sf_table_cleanup:
mlx5_sf_hw_table_cleanup(dev); mlx5_sf_hw_table_cleanup(dev);
err_sf_hw_table_cleanup: err_sf_hw_table_cleanup:
@ -983,6 +991,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_hv_vhca_destroy(dev->hv_vhca); mlx5_hv_vhca_destroy(dev->hv_vhca);
mlx5_fw_tracer_destroy(dev->tracer); mlx5_fw_tracer_destroy(dev->tracer);
mlx5_dm_cleanup(dev); mlx5_dm_cleanup(dev);
mlx5_fs_core_free(dev);
mlx5_sf_table_cleanup(dev); mlx5_sf_table_cleanup(dev);
mlx5_sf_hw_table_cleanup(dev); mlx5_sf_hw_table_cleanup(dev);
mlx5_vhca_event_cleanup(dev); mlx5_vhca_event_cleanup(dev);
@ -1181,7 +1190,7 @@ static int mlx5_load(struct mlx5_core_dev *dev)
goto err_fpga_start; goto err_fpga_start;
} }
err = mlx5_init_fs(dev); err = mlx5_fs_core_init(dev);
if (err) { if (err) {
mlx5_core_err(dev, "Failed to init flow steering\n"); mlx5_core_err(dev, "Failed to init flow steering\n");
goto err_fs; goto err_fs;
@ -1226,7 +1235,7 @@ err_ec:
err_vhca: err_vhca:
mlx5_vhca_event_stop(dev); mlx5_vhca_event_stop(dev);
err_set_hca: err_set_hca:
mlx5_cleanup_fs(dev); mlx5_fs_core_cleanup(dev);
err_fs: err_fs:
mlx5_fpga_device_stop(dev); mlx5_fpga_device_stop(dev);
err_fpga_start: err_fpga_start:
@ -1252,7 +1261,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
mlx5_ec_cleanup(dev); mlx5_ec_cleanup(dev);
mlx5_sf_hw_table_destroy(dev); mlx5_sf_hw_table_destroy(dev);
mlx5_vhca_event_stop(dev); mlx5_vhca_event_stop(dev);
mlx5_cleanup_fs(dev); mlx5_fs_core_cleanup(dev);
mlx5_fpga_device_stop(dev); mlx5_fpga_device_stop(dev);
mlx5_rsc_dump_cleanup(dev); mlx5_rsc_dump_cleanup(dev);
mlx5_hv_vhca_cleanup(dev->hv_vhca); mlx5_hv_vhca_cleanup(dev->hv_vhca);
@ -1608,6 +1617,10 @@ static void remove_one(struct pci_dev *pdev)
struct mlx5_core_dev *dev = pci_get_drvdata(pdev); struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct devlink *devlink = priv_to_devlink(dev); struct devlink *devlink = priv_to_devlink(dev);
/* mlx5_drain_fw_reset() is using devlink APIs. Hence, we must drain
* fw_reset before unregistering the devlink.
*/
mlx5_drain_fw_reset(dev);
set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state); set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
devlink_unregister(devlink); devlink_unregister(devlink);
mlx5_sriov_disable(pdev); mlx5_sriov_disable(pdev);

View file

@ -530,6 +530,37 @@ static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn,
return 0; return 0;
} }
static void dr_action_modify_ttl_adjust(struct mlx5dr_domain *dmn,
struct mlx5dr_ste_actions_attr *attr,
bool rx_rule,
bool *recalc_cs_required)
{
*recalc_cs_required = false;
/* if device supports csum recalculation - no adjustment needed */
if (mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps))
return;
/* no adjustment needed on TX rules */
if (!rx_rule)
return;
if (!MLX5_CAP_ESW_FLOWTABLE(dmn->mdev, fdb_ipv4_ttl_modify)) {
/* Ignore the modify TTL action.
* It is always kept as the last HW action.
*/
attr->modify_actions--;
return;
}
if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB)
/* Due to a HW bug on some devices, modifying TTL on RX flows
* will cause an incorrect checksum calculation. In such cases
* we will use a FW table to recalculate the checksum.
*/
*recalc_cs_required = true;
}
static void dr_action_print_sequence(struct mlx5dr_domain *dmn, static void dr_action_print_sequence(struct mlx5dr_domain *dmn,
struct mlx5dr_action *actions[], struct mlx5dr_action *actions[],
int last_idx) int last_idx)
@ -650,8 +681,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
case DR_ACTION_TYP_MODIFY_HDR: case DR_ACTION_TYP_MODIFY_HDR:
attr.modify_index = action->rewrite->index; attr.modify_index = action->rewrite->index;
attr.modify_actions = action->rewrite->num_of_actions; attr.modify_actions = action->rewrite->num_of_actions;
recalc_cs_required = action->rewrite->modify_ttl && if (action->rewrite->modify_ttl)
!mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps); dr_action_modify_ttl_adjust(dmn, &attr, rx_rule,
&recalc_cs_required);
break; break;
case DR_ACTION_TYP_L2_TO_TNL_L2: case DR_ACTION_TYP_L2_TO_TNL_L2:
case DR_ACTION_TYP_L2_TO_TNL_L3: case DR_ACTION_TYP_L2_TO_TNL_L3:
@ -732,12 +764,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
*new_hw_ste_arr_sz = nic_matcher->num_of_builders; *new_hw_ste_arr_sz = nic_matcher->num_of_builders;
last_ste = ste_arr + DR_STE_SIZE * (nic_matcher->num_of_builders - 1); last_ste = ste_arr + DR_STE_SIZE * (nic_matcher->num_of_builders - 1);
/* Due to a HW bug in some devices, modifying TTL on RX flows will if (recalc_cs_required && dest_action) {
* cause an incorrect checksum calculation. In this case we will
* use a FW table to recalculate.
*/
if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB &&
rx_rule && recalc_cs_required && dest_action) {
ret = dr_action_handle_cs_recalc(dmn, dest_action, &attr.final_icm_addr); ret = dr_action_handle_cs_recalc(dmn, dest_action, &attr.final_icm_addr);
if (ret) { if (ret) {
mlx5dr_err(dmn, mlx5dr_err(dmn,
@ -842,7 +869,8 @@ struct mlx5dr_action *
mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn, mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
struct mlx5dr_action_dest *dests, struct mlx5dr_action_dest *dests,
u32 num_of_dests, u32 num_of_dests,
bool ignore_flow_level) bool ignore_flow_level,
u32 flow_source)
{ {
struct mlx5dr_cmd_flow_destination_hw_info *hw_dests; struct mlx5dr_cmd_flow_destination_hw_info *hw_dests;
struct mlx5dr_action **ref_actions; struct mlx5dr_action **ref_actions;
@ -914,7 +942,8 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
reformat_req, reformat_req,
&action->dest_tbl->fw_tbl.id, &action->dest_tbl->fw_tbl.id,
&action->dest_tbl->fw_tbl.group_id, &action->dest_tbl->fw_tbl.group_id,
ignore_flow_level); ignore_flow_level,
flow_source);
if (ret) if (ret)
goto free_action; goto free_action;
@ -1556,12 +1585,6 @@ dr_action_modify_check_is_ttl_modify(const void *sw_action)
return sw_field == MLX5_ACTION_IN_FIELD_OUT_IP_TTL; return sw_field == MLX5_ACTION_IN_FIELD_OUT_IP_TTL;
} }
static bool dr_action_modify_ttl_ignore(struct mlx5dr_domain *dmn)
{
return !mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps) &&
!MLX5_CAP_ESW_FLOWTABLE(dmn->mdev, fdb_ipv4_ttl_modify);
}
static int dr_actions_convert_modify_header(struct mlx5dr_action *action, static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
u32 max_hw_actions, u32 max_hw_actions,
u32 num_sw_actions, u32 num_sw_actions,
@ -1573,6 +1596,7 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
const struct mlx5dr_ste_action_modify_field *hw_dst_action_info; const struct mlx5dr_ste_action_modify_field *hw_dst_action_info;
const struct mlx5dr_ste_action_modify_field *hw_src_action_info; const struct mlx5dr_ste_action_modify_field *hw_src_action_info;
struct mlx5dr_domain *dmn = action->rewrite->dmn; struct mlx5dr_domain *dmn = action->rewrite->dmn;
__be64 *modify_ttl_sw_action = NULL;
int ret, i, hw_idx = 0; int ret, i, hw_idx = 0;
__be64 *sw_action; __be64 *sw_action;
__be64 hw_action; __be64 hw_action;
@ -1585,8 +1609,14 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
action->rewrite->allow_rx = 1; action->rewrite->allow_rx = 1;
action->rewrite->allow_tx = 1; action->rewrite->allow_tx = 1;
for (i = 0; i < num_sw_actions; i++) { for (i = 0; i < num_sw_actions || modify_ttl_sw_action; i++) {
sw_action = &sw_actions[i]; /* modify TTL is handled separately, as a last action */
if (i == num_sw_actions) {
sw_action = modify_ttl_sw_action;
modify_ttl_sw_action = NULL;
} else {
sw_action = &sw_actions[i];
}
ret = dr_action_modify_check_field_limitation(action, ret = dr_action_modify_check_field_limitation(action,
sw_action); sw_action);
@ -1595,10 +1625,9 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
if (!(*modify_ttl) && if (!(*modify_ttl) &&
dr_action_modify_check_is_ttl_modify(sw_action)) { dr_action_modify_check_is_ttl_modify(sw_action)) {
if (dr_action_modify_ttl_ignore(dmn)) modify_ttl_sw_action = sw_action;
continue;
*modify_ttl = true; *modify_ttl = true;
continue;
} }
/* Convert SW action to HW action */ /* Convert SW action to HW action */

View file

@ -104,7 +104,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
bool reformat_req, bool reformat_req,
u32 *tbl_id, u32 *tbl_id,
u32 *group_id, u32 *group_id,
bool ignore_flow_level) bool ignore_flow_level,
u32 flow_source)
{ {
struct mlx5dr_cmd_create_flow_table_attr ft_attr = {}; struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
struct mlx5dr_cmd_fte_info fte_info = {}; struct mlx5dr_cmd_fte_info fte_info = {};
@ -139,6 +140,7 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
fte_info.val = val; fte_info.val = val;
fte_info.dest_arr = dest; fte_info.dest_arr = dest;
fte_info.ignore_flow_level = ignore_flow_level; fte_info.ignore_flow_level = ignore_flow_level;
fte_info.flow_context.flow_source = flow_source;
ret = mlx5dr_cmd_set_fte(dmn->mdev, 0, 0, &ft_info, *group_id, &fte_info); ret = mlx5dr_cmd_set_fte(dmn->mdev, 0, 0, &ft_info, *group_id, &fte_info);
if (ret) { if (ret) {

View file

@ -420,7 +420,7 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
* encapsulation. The reason for that is that we support * encapsulation. The reason for that is that we support
* modify headers for outer headers only * modify headers for outer headers only
*/ */
if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) { if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] && attr->modify_actions) {
dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT); dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT);
dr_ste_v0_set_rewrite_actions(last_ste, dr_ste_v0_set_rewrite_actions(last_ste,
attr->modify_actions, attr->modify_actions,
@ -513,7 +513,7 @@ dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
} }
} }
if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) { if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] && attr->modify_actions) {
if (dr_ste_v0_get_entry_type(last_ste) == DR_STE_TYPE_MODIFY_PKT) if (dr_ste_v0_get_entry_type(last_ste) == DR_STE_TYPE_MODIFY_PKT)
dr_ste_v0_arr_init_next(&last_ste, dr_ste_v0_arr_init_next(&last_ste,
added_stes, added_stes,

View file

@ -1461,7 +1461,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
bool reformat_req, bool reformat_req,
u32 *tbl_id, u32 *tbl_id,
u32 *group_id, u32 *group_id,
bool ignore_flow_level); bool ignore_flow_level,
u32 flow_source);
void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, u32 tbl_id, void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, u32 tbl_id,
u32 group_id); u32 group_id);
#endif /* _DR_TYPES_H_ */ #endif /* _DR_TYPES_H_ */

View file

@ -520,6 +520,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
} else if (num_term_actions > 1) { } else if (num_term_actions > 1) {
bool ignore_flow_level = bool ignore_flow_level =
!!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL); !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
u32 flow_source = fte->flow_context.flow_source;
if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX || if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
@ -529,7 +530,8 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
tmp_action = mlx5dr_action_create_mult_dest_tbl(domain, tmp_action = mlx5dr_action_create_mult_dest_tbl(domain,
term_actions, term_actions,
num_term_actions, num_term_actions,
ignore_flow_level); ignore_flow_level,
flow_source);
if (!tmp_action) { if (!tmp_action) {
err = -EOPNOTSUPP; err = -EOPNOTSUPP;
goto free_actions; goto free_actions;

View file

@ -99,7 +99,8 @@ struct mlx5dr_action *
mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn, mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
struct mlx5dr_action_dest *dests, struct mlx5dr_action_dest *dests,
u32 num_of_dests, u32 num_of_dests,
bool ignore_flow_level); bool ignore_flow_level,
u32 flow_source);
struct mlx5dr_action *mlx5dr_action_create_drop(void); struct mlx5dr_action *mlx5dr_action_create_drop(void);

View file

@ -101,6 +101,24 @@ static int lan966x_create_targets(struct platform_device *pdev,
return 0; return 0;
} }
static bool lan966x_port_unique_address(struct net_device *dev)
{
struct lan966x_port *port = netdev_priv(dev);
struct lan966x *lan966x = port->lan966x;
int p;
for (p = 0; p < lan966x->num_phys_ports; ++p) {
port = lan966x->ports[p];
if (!port || port->dev == dev)
continue;
if (ether_addr_equal(dev->dev_addr, port->dev->dev_addr))
return false;
}
return true;
}
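[Editor's note] Before forgetting the old MAC, the driver now scans the sibling ports: if another port still uses the same address, the shared MAC-table entry must be kept. A self-contained sketch of the uniqueness scan; the fixed-size port array is a stand-in for the driver's structures:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define NUM_PORTS 4

    struct port { unsigned char mac[6]; };

    static bool addr_unique(struct port *ports, int self)
    {
        for (int p = 0; p < NUM_PORTS; p++) {
            if (p == self)
                continue;
            if (memcmp(ports[p].mac, ports[self].mac, 6) == 0)
                return false;   /* shared: keep the MAC-table entry */
        }
        return true;
    }

    int main(void)
    {
        struct port ports[NUM_PORTS] = {
            { {0, 1, 2, 3, 4, 5} }, { {0, 1, 2, 3, 4, 5} },
            { {6, 7, 8, 9, 10, 11} }, { {1, 1, 1, 1, 1, 1} },
        };
        printf("port0 unique: %d\n", addr_unique(ports, 0));  /* 0: port1 shares it */
        printf("port2 unique: %d\n", addr_unique(ports, 2));  /* 1 */
        return 0;
    }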
static int lan966x_port_set_mac_address(struct net_device *dev, void *p) static int lan966x_port_set_mac_address(struct net_device *dev, void *p)
{ {
struct lan966x_port *port = netdev_priv(dev); struct lan966x_port *port = netdev_priv(dev);
@ -108,16 +126,26 @@ static int lan966x_port_set_mac_address(struct net_device *dev, void *p)
const struct sockaddr *addr = p; const struct sockaddr *addr = p;
int ret; int ret;
if (ether_addr_equal(addr->sa_data, dev->dev_addr))
return 0;
/* Learn the new net device MAC address in the mac table. */ /* Learn the new net device MAC address in the mac table. */
ret = lan966x_mac_cpu_learn(lan966x, addr->sa_data, HOST_PVID); ret = lan966x_mac_cpu_learn(lan966x, addr->sa_data, HOST_PVID);
if (ret) if (ret)
return ret; return ret;
/* If there is another port with the same address as the dev, then don't
* delete it from the MAC table
*/
if (!lan966x_port_unique_address(dev))
goto out;
/* Then forget the previous one. */ /* Then forget the previous one. */
ret = lan966x_mac_cpu_forget(lan966x, dev->dev_addr, HOST_PVID); ret = lan966x_mac_cpu_forget(lan966x, dev->dev_addr, HOST_PVID);
if (ret) if (ret)
return ret; return ret;
out:
eth_hw_addr_set(dev, addr->sa_data); eth_hw_addr_set(dev, addr->sa_data);
return ret; return ret;
} }

View file

@ -3614,7 +3614,8 @@ static void ql_reset_work(struct work_struct *work)
qdev->mem_map_registers; qdev->mem_map_registers;
unsigned long hw_flags; unsigned long hw_flags;
if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) { if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
test_bit(QL_RESET_START, &qdev->flags)) {
clear_bit(QL_LINK_MASTER, &qdev->flags); clear_bit(QL_LINK_MASTER, &qdev->flags);
/* /*

View file

@ -1367,9 +1367,10 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
struct gsi_event *event_done; struct gsi_event *event_done;
struct gsi_event *event; struct gsi_event *event;
struct gsi_trans *trans; struct gsi_trans *trans;
u32 trans_count = 0;
u32 byte_count = 0; u32 byte_count = 0;
u32 old_index;
u32 event_avail; u32 event_avail;
u32 old_index;
trans_info = &channel->trans_info; trans_info = &channel->trans_info;
@ -1390,6 +1391,7 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
do { do {
trans->len = __le16_to_cpu(event->len); trans->len = __le16_to_cpu(event->len);
byte_count += trans->len; byte_count += trans->len;
trans_count++;
/* Move on to the next event and transaction */ /* Move on to the next event and transaction */
if (--event_avail) if (--event_avail)
@ -1401,7 +1403,7 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
/* We record RX bytes when they are received */ /* We record RX bytes when they are received */
channel->byte_count += byte_count; channel->byte_count += byte_count;
channel->trans_count++; channel->trans_count += trans_count;
} }
/* Initialize a ring, including allocating DMA memory for its entries */ /* Initialize a ring, including allocating DMA memory for its entries */

View file

@ -1153,13 +1153,12 @@ static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
return; return;
skb = __dev_alloc_skb(len, GFP_ATOMIC); skb = __dev_alloc_skb(len, GFP_ATOMIC);
if (!skb) if (skb) {
return; /* Copy the data into the socket buffer and receive it */
skb_put(skb, len);
/* Copy the data into the socket buffer and receive it */ memcpy(skb->data, data, len);
skb_put(skb, len); skb->truesize += extra;
memcpy(skb->data, data, len); }
skb->truesize += extra;
ipa_modem_skb_rx(endpoint->netdev, skb); ipa_modem_skb_rx(endpoint->netdev, skb);
} }

View file

@ -125,7 +125,7 @@ static void ipa_qmi_indication(struct ipa_qmi *ipa_qmi)
*/ */
static void ipa_qmi_ready(struct ipa_qmi *ipa_qmi) static void ipa_qmi_ready(struct ipa_qmi *ipa_qmi)
{ {
struct ipa *ipa = container_of(ipa_qmi, struct ipa, qmi); struct ipa *ipa;
int ret; int ret;
/* We aren't ready until the modem and microcontroller are */ /* We aren't ready until the modem and microcontroller are */

View file

@ -988,6 +988,7 @@ static int pppoe_fill_forward_path(struct net_device_path_ctx *ctx,
path->encap.proto = htons(ETH_P_PPP_SES); path->encap.proto = htons(ETH_P_PPP_SES);
path->encap.id = be16_to_cpu(po->num); path->encap.id = be16_to_cpu(po->num);
memcpy(path->encap.h_dest, po->pppoe_pa.remote, ETH_ALEN); memcpy(path->encap.h_dest, po->pppoe_pa.remote, ETH_ALEN);
memcpy(ctx->daddr, po->pppoe_pa.remote, ETH_ALEN);
path->dev = ctx->dev; path->dev = ctx->dev;
ctx->dev = dev; ctx->dev = dev;

View file

@ -589,6 +589,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
if (dma_mapping_error(&adapter->pdev->dev, if (dma_mapping_error(&adapter->pdev->dev,
rbi->dma_addr)) { rbi->dma_addr)) {
dev_kfree_skb_any(rbi->skb); dev_kfree_skb_any(rbi->skb);
rbi->skb = NULL;
rq->stats.rx_buf_alloc_failure++; rq->stats.rx_buf_alloc_failure++;
break; break;
} }
@ -613,6 +614,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
if (dma_mapping_error(&adapter->pdev->dev, if (dma_mapping_error(&adapter->pdev->dev,
rbi->dma_addr)) { rbi->dma_addr)) {
put_page(rbi->page); put_page(rbi->page);
rbi->page = NULL;
rq->stats.rx_buf_alloc_failure++; rq->stats.rx_buf_alloc_failure++;
break; break;
} }
@ -1666,6 +1668,10 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
u32 i, ring_idx; u32 i, ring_idx;
struct Vmxnet3_RxDesc *rxd; struct Vmxnet3_RxDesc *rxd;
/* ring has already been cleaned up */
if (!rq->rx_ring[0].base)
return;
for (ring_idx = 0; ring_idx < 2; ring_idx++) { for (ring_idx = 0; ring_idx < 2; ring_idx++) {
for (i = 0; i < rq->rx_ring[ring_idx].size; i++) { for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
#ifdef __BIG_ENDIAN_BITFIELD #ifdef __BIG_ENDIAN_BITFIELD

View file

@ -2787,13 +2787,14 @@ void pn53x_common_clean(struct pn533 *priv)
{ {
struct pn533_cmd *cmd, *n; struct pn533_cmd *cmd, *n;
/* delete the timer before cleaning up the worker */
del_timer_sync(&priv->listen_timer);
flush_delayed_work(&priv->poll_work); flush_delayed_work(&priv->poll_work);
destroy_workqueue(priv->wq); destroy_workqueue(priv->wq);
skb_queue_purge(&priv->resp_q); skb_queue_purge(&priv->resp_q);
del_timer(&priv->listen_timer);
list_for_each_entry_safe(cmd, n, &priv->cmd_queue, queue) { list_for_each_entry_safe(cmd, n, &priv->cmd_queue, queue) {
list_del(&cmd->queue); list_del(&cmd->queue);
kfree(cmd); kfree(cmd);

View file

@ -1550,6 +1550,11 @@ static const struct qcom_pcie_cfg sc7280_cfg = {
.pipe_clk_need_muxing = true, .pipe_clk_need_muxing = true,
}; };
static const struct qcom_pcie_cfg sc8180x_cfg = {
.ops = &ops_1_9_0,
.has_tbu_clk = true,
};
static const struct dw_pcie_ops dw_pcie_ops = { static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = qcom_pcie_link_up, .link_up = qcom_pcie_link_up,
.start_link = qcom_pcie_start_link, .start_link = qcom_pcie_start_link,
@ -1656,7 +1661,7 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-qcs404", .data = &ipq4019_cfg }, { .compatible = "qcom,pcie-qcs404", .data = &ipq4019_cfg },
{ .compatible = "qcom,pcie-sdm845", .data = &sdm845_cfg }, { .compatible = "qcom,pcie-sdm845", .data = &sdm845_cfg },
{ .compatible = "qcom,pcie-sm8250", .data = &sm8250_cfg }, { .compatible = "qcom,pcie-sm8250", .data = &sm8250_cfg },
{ .compatible = "qcom,pcie-sc8180x", .data = &sm8250_cfg }, { .compatible = "qcom,pcie-sc8180x", .data = &sc8180x_cfg },
{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &sm8450_pcie0_cfg }, { .compatible = "qcom,pcie-sm8450-pcie0", .data = &sm8450_pcie0_cfg },
{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &sm8450_pcie1_cfg }, { .compatible = "qcom,pcie-sm8450-pcie1", .data = &sm8450_pcie1_cfg },
{ .compatible = "qcom,pcie-sc7280", .data = &sc7280_cfg }, { .compatible = "qcom,pcie-sc7280", .data = &sc7280_cfg },

View file

@ -272,7 +272,6 @@ struct advk_pcie {
u32 actions; u32 actions;
} wins[OB_WIN_COUNT]; } wins[OB_WIN_COUNT];
u8 wins_count; u8 wins_count;
int irq;
struct irq_domain *rp_irq_domain; struct irq_domain *rp_irq_domain;
struct irq_domain *irq_domain; struct irq_domain *irq_domain;
struct irq_chip irq_chip; struct irq_chip irq_chip;
@ -1570,26 +1569,21 @@ static void advk_pcie_handle_int(struct advk_pcie *pcie)
} }
} }
static void advk_pcie_irq_handler(struct irq_desc *desc) static irqreturn_t advk_pcie_irq_handler(int irq, void *arg)
{ {
struct advk_pcie *pcie = irq_desc_get_handler_data(desc); struct advk_pcie *pcie = arg;
struct irq_chip *chip = irq_desc_get_chip(desc); u32 status;
u32 val, mask, status;
chained_irq_enter(chip, desc); status = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
if (!(status & PCIE_IRQ_CORE_INT))
return IRQ_NONE;
val = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG); advk_pcie_handle_int(pcie);
mask = advk_readl(pcie, HOST_CTRL_INT_MASK_REG);
status = val & ((~mask) & PCIE_IRQ_ALL_MASK);
if (status & PCIE_IRQ_CORE_INT) { /* Clear interrupt */
advk_pcie_handle_int(pcie); advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);
/* Clear interrupt */ return IRQ_HANDLED;
advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);
}
chained_irq_exit(chip, desc);
} }
static int advk_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) static int advk_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
@ -1669,7 +1663,7 @@ static int advk_pcie_probe(struct platform_device *pdev)
struct advk_pcie *pcie; struct advk_pcie *pcie;
struct pci_host_bridge *bridge; struct pci_host_bridge *bridge;
struct resource_entry *entry; struct resource_entry *entry;
int ret; int ret, irq;
bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie)); bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
if (!bridge) if (!bridge)
@ -1755,9 +1749,17 @@ static int advk_pcie_probe(struct platform_device *pdev)
if (IS_ERR(pcie->base)) if (IS_ERR(pcie->base))
return PTR_ERR(pcie->base); return PTR_ERR(pcie->base);
pcie->irq = platform_get_irq(pdev, 0); irq = platform_get_irq(pdev, 0);
if (pcie->irq < 0) if (irq < 0)
return pcie->irq; return irq;
ret = devm_request_irq(dev, irq, advk_pcie_irq_handler,
IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
pcie);
if (ret) {
dev_err(dev, "Failed to register interrupt\n");
return ret;
}
pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node, pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node,
"reset-gpios", 0, "reset-gpios", 0,
@ -1814,15 +1816,12 @@ static int advk_pcie_probe(struct platform_device *pdev)
return ret; return ret;
} }
irq_set_chained_handler_and_data(pcie->irq, advk_pcie_irq_handler, pcie);
bridge->sysdata = pcie; bridge->sysdata = pcie;
bridge->ops = &advk_pcie_ops; bridge->ops = &advk_pcie_ops;
bridge->map_irq = advk_pcie_map_irq; bridge->map_irq = advk_pcie_map_irq;
ret = pci_host_probe(bridge); ret = pci_host_probe(bridge);
if (ret < 0) { if (ret < 0) {
irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
advk_pcie_remove_rp_irq_domain(pcie); advk_pcie_remove_rp_irq_domain(pcie);
advk_pcie_remove_msi_irq_domain(pcie); advk_pcie_remove_msi_irq_domain(pcie);
advk_pcie_remove_irq_domain(pcie); advk_pcie_remove_irq_domain(pcie);
@ -1871,9 +1870,6 @@ static int advk_pcie_remove(struct platform_device *pdev)
advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG); advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG); advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);
/* Remove IRQ handler */
irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
/* Remove IRQ domains */ /* Remove IRQ domains */
advk_pcie_remove_rp_irq_domain(pcie); advk_pcie_remove_rp_irq_domain(pcie);
advk_pcie_remove_msi_irq_domain(pcie); advk_pcie_remove_msi_irq_domain(pcie);

View file

@ -2920,6 +2920,16 @@ static const struct dmi_system_id bridge_d3_blacklist[] = {
DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."), DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"), DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
}, },
/*
* Downstream device is not accessible after putting a root port
* into D3cold and back into D0 on Elo i2.
*/
.ident = "Elo i2",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Elo Touch Solutions"),
DMI_MATCH(DMI_PRODUCT_NAME, "Elo i2"),
DMI_MATCH(DMI_PRODUCT_VERSION, "RevB"),
},
}, },
#endif #endif
{ } { }

View file

@ -1236,18 +1236,17 @@ FUNC_GROUP_DECL(SALT8, AA12);
FUNC_GROUP_DECL(WDTRST4, AA12); FUNC_GROUP_DECL(WDTRST4, AA12);
#define AE12 196 #define AE12 196
SIG_EXPR_LIST_DECL_SEMG(AE12, FWSPIDQ2, FWQSPID, FWSPID, SIG_EXPR_LIST_DECL_SESG(AE12, FWSPIQ2, FWQSPI, SIG_DESC_SET(SCU438, 4));
SIG_DESC_SET(SCU438, 4));
SIG_EXPR_LIST_DECL_SESG(AE12, GPIOY4, GPIOY4); SIG_EXPR_LIST_DECL_SESG(AE12, GPIOY4, GPIOY4);
PIN_DECL_(AE12, SIG_EXPR_LIST_PTR(AE12, FWSPIDQ2), PIN_DECL_(AE12, SIG_EXPR_LIST_PTR(AE12, FWSPIQ2),
SIG_EXPR_LIST_PTR(AE12, GPIOY4)); SIG_EXPR_LIST_PTR(AE12, GPIOY4));
#define AF12 197 #define AF12 197
SIG_EXPR_LIST_DECL_SEMG(AF12, FWSPIDQ3, FWQSPID, FWSPID, SIG_EXPR_LIST_DECL_SESG(AF12, FWSPIQ3, FWQSPI, SIG_DESC_SET(SCU438, 5));
SIG_DESC_SET(SCU438, 5));
SIG_EXPR_LIST_DECL_SESG(AF12, GPIOY5, GPIOY5); SIG_EXPR_LIST_DECL_SESG(AF12, GPIOY5, GPIOY5);
PIN_DECL_(AF12, SIG_EXPR_LIST_PTR(AF12, FWSPIDQ3), PIN_DECL_(AF12, SIG_EXPR_LIST_PTR(AF12, FWSPIQ3),
SIG_EXPR_LIST_PTR(AF12, GPIOY5)); SIG_EXPR_LIST_PTR(AF12, GPIOY5));
FUNC_GROUP_DECL(FWQSPI, AE12, AF12);
#define AC12 198 #define AC12 198
SSSF_PIN_DECL(AC12, GPIOY6, FWSPIABR, SIG_DESC_SET(SCU438, 6)); SSSF_PIN_DECL(AC12, GPIOY6, FWSPIABR, SIG_DESC_SET(SCU438, 6));
@ -1520,9 +1519,8 @@ SIG_EXPR_LIST_DECL_SEMG(Y4, EMMCDAT7, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 3));
PIN_DECL_3(Y4, GPIO18E3, FWSPIDMISO, VBMISO, EMMCDAT7); PIN_DECL_3(Y4, GPIO18E3, FWSPIDMISO, VBMISO, EMMCDAT7);
GROUP_DECL(FWSPID, Y1, Y2, Y3, Y4); GROUP_DECL(FWSPID, Y1, Y2, Y3, Y4);
GROUP_DECL(FWQSPID, Y1, Y2, Y3, Y4, AE12, AF12);
GROUP_DECL(EMMCG8, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5, Y1, Y2, Y3, Y4); GROUP_DECL(EMMCG8, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5, Y1, Y2, Y3, Y4);
FUNC_DECL_2(FWSPID, FWSPID, FWQSPID); FUNC_DECL_1(FWSPID, FWSPID);
FUNC_GROUP_DECL(VB, Y1, Y2, Y3, Y4); FUNC_GROUP_DECL(VB, Y1, Y2, Y3, Y4);
FUNC_DECL_3(EMMC, EMMCG1, EMMCG4, EMMCG8); FUNC_DECL_3(EMMC, EMMCG1, EMMCG4, EMMCG8);
/* /*
@ -1918,7 +1916,7 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = {
ASPEED_PINCTRL_GROUP(FSI2), ASPEED_PINCTRL_GROUP(FSI2),
ASPEED_PINCTRL_GROUP(FWSPIABR), ASPEED_PINCTRL_GROUP(FWSPIABR),
ASPEED_PINCTRL_GROUP(FWSPID), ASPEED_PINCTRL_GROUP(FWSPID),
ASPEED_PINCTRL_GROUP(FWQSPID), ASPEED_PINCTRL_GROUP(FWQSPI),
ASPEED_PINCTRL_GROUP(FWSPIWP), ASPEED_PINCTRL_GROUP(FWSPIWP),
ASPEED_PINCTRL_GROUP(GPIT0), ASPEED_PINCTRL_GROUP(GPIT0),
ASPEED_PINCTRL_GROUP(GPIT1), ASPEED_PINCTRL_GROUP(GPIT1),
@ -2160,6 +2158,7 @@ static const struct aspeed_pin_function aspeed_g6_functions[] = {
ASPEED_PINCTRL_FUNC(FSI2), ASPEED_PINCTRL_FUNC(FSI2),
ASPEED_PINCTRL_FUNC(FWSPIABR), ASPEED_PINCTRL_FUNC(FWSPIABR),
ASPEED_PINCTRL_FUNC(FWSPID), ASPEED_PINCTRL_FUNC(FWSPID),
ASPEED_PINCTRL_FUNC(FWQSPI),
ASPEED_PINCTRL_FUNC(FWSPIWP), ASPEED_PINCTRL_FUNC(FWSPIWP),
ASPEED_PINCTRL_FUNC(GPIT0), ASPEED_PINCTRL_FUNC(GPIT0),
ASPEED_PINCTRL_FUNC(GPIT1), ASPEED_PINCTRL_FUNC(GPIT1),

View file

@ -300,7 +300,7 @@ struct ptp_ocp {
struct platform_device *spi_flash; struct platform_device *spi_flash;
struct clk_hw *i2c_clk; struct clk_hw *i2c_clk;
struct timer_list watchdog; struct timer_list watchdog;
const struct ocp_attr_group *attr_tbl; const struct attribute_group **attr_group;
const struct ptp_ocp_eeprom_map *eeprom_map; const struct ptp_ocp_eeprom_map *eeprom_map;
struct dentry *debug_root; struct dentry *debug_root;
time64_t gnss_lost; time64_t gnss_lost;
@ -841,7 +841,7 @@ __ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u32 adj_val)
} }
static void static void
ptp_ocp_adjtime_coarse(struct ptp_ocp *bp, u64 delta_ns) ptp_ocp_adjtime_coarse(struct ptp_ocp *bp, s64 delta_ns)
{ {
struct timespec64 ts; struct timespec64 ts;
unsigned long flags; unsigned long flags;
@ -850,7 +850,8 @@ ptp_ocp_adjtime_coarse(struct ptp_ocp *bp, u64 delta_ns)
spin_lock_irqsave(&bp->lock, flags); spin_lock_irqsave(&bp->lock, flags);
err = __ptp_ocp_gettime_locked(bp, &ts, NULL); err = __ptp_ocp_gettime_locked(bp, &ts, NULL);
if (likely(!err)) { if (likely(!err)) {
timespec64_add_ns(&ts, delta_ns); set_normalized_timespec64(&ts, ts.tv_sec,
ts.tv_nsec + delta_ns);
__ptp_ocp_settime_locked(bp, &ts); __ptp_ocp_settime_locked(bp, &ts);
} }
spin_unlock_irqrestore(&bp->lock, flags); spin_unlock_irqrestore(&bp->lock, flags);
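[Editor's note] timespec64_add_ns() takes an unsigned delta, so a negative coarse adjustment would wrap; set_normalized_timespec64() instead accepts a signed tv_nsec and folds it into tv_sec. A portable sketch of the normalization being relied on here:

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000LL

    struct ts64 { int64_t tv_sec; int64_t tv_nsec; };

    /* Fold tv_nsec back into [0, NSEC_PER_SEC), adjusting tv_sec as needed. */
    static void set_normalized(struct ts64 *ts, int64_t sec, int64_t nsec)
    {
        while (nsec >= NSEC_PER_SEC) { nsec -= NSEC_PER_SEC; sec++; }
        while (nsec < 0)             { nsec += NSEC_PER_SEC; sec--; }
        ts->tv_sec = sec;
        ts->tv_nsec = nsec;
    }

    int main(void)
    {
        struct ts64 ts = { 100, 1000 };
        int64_t delta_ns = -5000;                 /* negative PHC step */
        set_normalized(&ts, ts.tv_sec, ts.tv_nsec + delta_ns);
        printf("%lld.%09lld\n", (long long)ts.tv_sec, (long long)ts.tv_nsec);
        return 0;
    }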
@ -1835,6 +1836,42 @@ ptp_ocp_signal_init(struct ptp_ocp *bp)
bp->signal_out[i]->mem); bp->signal_out[i]->mem);
} }
static void
ptp_ocp_attr_group_del(struct ptp_ocp *bp)
{
sysfs_remove_groups(&bp->dev.kobj, bp->attr_group);
kfree(bp->attr_group);
}
static int
ptp_ocp_attr_group_add(struct ptp_ocp *bp,
const struct ocp_attr_group *attr_tbl)
{
int count, i;
int err;
count = 0;
for (i = 0; attr_tbl[i].cap; i++)
if (attr_tbl[i].cap & bp->fw_cap)
count++;
bp->attr_group = kcalloc(count + 1, sizeof(struct attribute_group *),
GFP_KERNEL);
if (!bp->attr_group)
return -ENOMEM;
count = 0;
for (i = 0; attr_tbl[i].cap; i++)
if (attr_tbl[i].cap & bp->fw_cap)
bp->attr_group[count++] = attr_tbl[i].group;
err = sysfs_create_groups(&bp->dev.kobj, bp->attr_group);
if (err)
bp->attr_group[0] = NULL;
return err;
}
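[Editor's note] sysfs_create_groups() and sysfs_remove_groups() walk a NULL-terminated array, which is why the table is allocated with count + 1 entries and why attr_group[0] is nulled on failure so a later removal sees an empty list. A sketch of building such a capability-filtered, NULL-terminated array (entry names invented):

    #include <stdio.h>
    #include <stdlib.h>

    struct entry { unsigned cap; const char *name; };

    int main(void)
    {
        const struct entry tbl[] = {
            { 0x1, "basic" }, { 0x2, "signal" }, { 0x4, "freq" }, { 0, NULL },
        };
        unsigned fw_cap = 0x1 | 0x4;
        int count = 0, i;

        for (i = 0; tbl[i].cap; i++)
            if (tbl[i].cap & fw_cap)
                count++;

        /* count + 1: the trailing NULL terminates iteration for the consumer */
        const char **groups = calloc(count + 1, sizeof(*groups));
        if (!groups)
            return 1;

        count = 0;
        for (i = 0; tbl[i].cap; i++)
            if (tbl[i].cap & fw_cap)
                groups[count++] = tbl[i].name;

        for (i = 0; groups[i]; i++)   /* stops at the NULL sentinel */
            printf("registering group %s\n", groups[i]);
        free(groups);
        return 0;
    }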
static void static void
ptp_ocp_sma_init(struct ptp_ocp *bp) ptp_ocp_sma_init(struct ptp_ocp *bp)
{ {
@ -1904,7 +1941,6 @@ ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
bp->flash_start = 1024 * 4096; bp->flash_start = 1024 * 4096;
bp->eeprom_map = fb_eeprom_map; bp->eeprom_map = fb_eeprom_map;
bp->fw_version = ioread32(&bp->image->version); bp->fw_version = ioread32(&bp->image->version);
bp->attr_tbl = fb_timecard_groups;
bp->fw_cap = OCP_CAP_BASIC; bp->fw_cap = OCP_CAP_BASIC;
ver = bp->fw_version & 0xffff; ver = bp->fw_version & 0xffff;
@ -1918,6 +1954,10 @@ ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
ptp_ocp_sma_init(bp); ptp_ocp_sma_init(bp);
ptp_ocp_signal_init(bp); ptp_ocp_signal_init(bp);
err = ptp_ocp_attr_group_add(bp, fb_timecard_groups);
if (err)
return err;
err = ptp_ocp_fb_set_pins(bp); err = ptp_ocp_fb_set_pins(bp);
if (err) if (err)
return err; return err;
@ -3388,7 +3428,6 @@ ptp_ocp_complete(struct ptp_ocp *bp)
{ {
struct pps_device *pps; struct pps_device *pps;
char buf[32]; char buf[32];
int i, err;
if (bp->gnss_port != -1) { if (bp->gnss_port != -1) {
sprintf(buf, "ttyS%d", bp->gnss_port); sprintf(buf, "ttyS%d", bp->gnss_port);
@ -3413,14 +3452,6 @@ ptp_ocp_complete(struct ptp_ocp *bp)
if (pps) if (pps)
ptp_ocp_symlink(bp, pps->dev, "pps"); ptp_ocp_symlink(bp, pps->dev, "pps");
for (i = 0; bp->attr_tbl[i].cap; i++) {
if (!(bp->attr_tbl[i].cap & bp->fw_cap))
continue;
err = sysfs_create_group(&bp->dev.kobj, bp->attr_tbl[i].group);
if (err)
return err;
}
ptp_ocp_debugfs_add_device(bp); ptp_ocp_debugfs_add_device(bp);
return 0; return 0;
@ -3492,15 +3523,11 @@ static void
ptp_ocp_detach_sysfs(struct ptp_ocp *bp) ptp_ocp_detach_sysfs(struct ptp_ocp *bp)
{ {
struct device *dev = &bp->dev; struct device *dev = &bp->dev;
int i;
sysfs_remove_link(&dev->kobj, "ttyGNSS"); sysfs_remove_link(&dev->kobj, "ttyGNSS");
sysfs_remove_link(&dev->kobj, "ttyMAC"); sysfs_remove_link(&dev->kobj, "ttyMAC");
sysfs_remove_link(&dev->kobj, "ptp"); sysfs_remove_link(&dev->kobj, "ptp");
sysfs_remove_link(&dev->kobj, "pps"); sysfs_remove_link(&dev->kobj, "pps");
if (bp->attr_tbl)
for (i = 0; bp->attr_tbl[i].cap; i++)
sysfs_remove_group(&dev->kobj, bp->attr_tbl[i].group);
} }
static void static void
@ -3510,6 +3537,7 @@ ptp_ocp_detach(struct ptp_ocp *bp)
ptp_ocp_debugfs_remove_device(bp); ptp_ocp_debugfs_remove_device(bp);
ptp_ocp_detach_sysfs(bp); ptp_ocp_detach_sysfs(bp);
ptp_ocp_attr_group_del(bp);
if (timer_pending(&bp->watchdog)) if (timer_pending(&bp->watchdog))
del_timer_sync(&bp->watchdog); del_timer_sync(&bp->watchdog);
if (bp->ts0) if (bp->ts0)

View file

@ -1172,9 +1172,8 @@ static blk_status_t alua_prep_fn(struct scsi_device *sdev, struct request *req)
case SCSI_ACCESS_STATE_OPTIMAL: case SCSI_ACCESS_STATE_OPTIMAL:
case SCSI_ACCESS_STATE_ACTIVE: case SCSI_ACCESS_STATE_ACTIVE:
case SCSI_ACCESS_STATE_LBA: case SCSI_ACCESS_STATE_LBA:
return BLK_STS_OK;
case SCSI_ACCESS_STATE_TRANSITIONING: case SCSI_ACCESS_STATE_TRANSITIONING:
return BLK_STS_AGAIN; return BLK_STS_OK;
default: default:
req->rq_flags |= RQF_QUIET; req->rq_flags |= RQF_QUIET;
return BLK_STS_IOERR; return BLK_STS_IOERR;

View file

@ -1330,7 +1330,7 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
LPFC_SLI_INTF_IF_TYPE_0) { LPFC_SLI_INTF_IF_TYPE_0) {
/* FLOGI needs to be 3 for WQE FCFI */ /* FLOGI needs to be 3 for WQE FCFI */
ct = ((SLI4_CT_FCFI >> 1) & 1) | (SLI4_CT_FCFI & 1); ct = SLI4_CT_FCFI;
bf_set(wqe_ct, &wqe->els_req.wqe_com, ct); bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
/* Set the fcfi to the fcfi we registered with */ /* Set the fcfi to the fcfi we registered with */

View file

@ -10720,10 +10720,10 @@ __lpfc_sli_prep_gen_req_s4(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
/* Words 0 - 2 */ /* Words 0 - 2 */
bde = (struct ulp_bde64_le *)&cmdwqe->generic.bde; bde = (struct ulp_bde64_le *)&cmdwqe->generic.bde;
bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys)); bde->addr_low = bpl->addr_low;
bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys)); bde->addr_high = bpl->addr_high;
bde->type_size = cpu_to_le32(xmit_len); bde->type_size = cpu_to_le32(xmit_len);
bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BLP_64); bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
/* Word 3 */ /* Word 3 */
cmdwqe->gen_req.request_payload_len = xmit_len; cmdwqe->gen_req.request_payload_len = xmit_len;

View file

@ -3826,6 +3826,9 @@ int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
spin_lock_irqsave(&cmd->cmd_lock, flags); spin_lock_irqsave(&cmd->cmd_lock, flags);
if (cmd->aborted) { if (cmd->aborted) {
if (cmd->sg_mapped)
qlt_unmap_sg(vha, cmd);
spin_unlock_irqrestore(&cmd->cmd_lock, flags); spin_unlock_irqrestore(&cmd->cmd_lock, flags);
/* /*
* It's normal to see 2 calls in this path: * It's normal to see 2 calls in this path:

View file

@ -510,9 +510,9 @@ static int qcom_slim_probe(struct platform_device *pdev)
} }
ctrl->irq = platform_get_irq(pdev, 0); ctrl->irq = platform_get_irq(pdev, 0);
if (!ctrl->irq) { if (ctrl->irq < 0) {
dev_err(&pdev->dev, "no slimbus IRQ\n"); dev_err(&pdev->dev, "no slimbus IRQ\n");
return -ENODEV; return ctrl->irq;
} }
sctrl = &ctrl->ctrl; sctrl = &ctrl->ctrl;

View file

@ -194,12 +194,31 @@ static int int3400_thermal_run_osc(acpi_handle handle, char *uuid_str, int *enab
return result; return result;
} }
static int set_os_uuid_mask(struct int3400_thermal_priv *priv, u32 mask)
{
int cap = 0;
/*
* Capability bits:
* Bit 0: set to 1 to indicate DPTF is active
* Bit 1: set to 1 if active cooling is supported by the user space daemon
* Bit 2: set to 1 if passive cooling is supported by the user space daemon
* Bit 3: set to 1 if the critical trip is handled by the user space daemon
*/
if (mask)
cap = (priv->os_uuid_mask << 1) | 0x01;
return int3400_thermal_run_osc(priv->adev->handle,
"b23ba85d-c8b7-3542-88de-8de2ffcfd698",
&cap);
}
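[Editor's note] The _OSC capability word packs "DPTF active" into bit 0 and shifts the user-space UUID mask into the bits above it; an all-zero mask therefore clears every capability in one call, which is what the new disable path uses. The encoding in isolation (simplified to take the mask directly):

    #include <stdio.h>

    /* cap = 0 disables DPTF; otherwise bit 0 = active, bits 1..3 = uuid mask */
    static int encode_cap(unsigned uuid_mask)
    {
        return uuid_mask ? (int)((uuid_mask << 1) | 0x01) : 0;
    }

    int main(void)
    {
        printf("mask 0x0 -> cap %#x\n", encode_cap(0x0));  /* 0: DPTF off */
        printf("mask 0x3 -> cap %#x\n", encode_cap(0x3));  /* 0x7 */
        printf("mask 0x7 -> cap %#x\n", encode_cap(0x7));  /* 0xf */
        return 0;
    }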
static ssize_t current_uuid_store(struct device *dev, static ssize_t current_uuid_store(struct device *dev,
struct device_attribute *attr, struct device_attribute *attr,
const char *buf, size_t count) const char *buf, size_t count)
{ {
struct int3400_thermal_priv *priv = dev_get_drvdata(dev); struct int3400_thermal_priv *priv = dev_get_drvdata(dev);
int i; int ret, i;
for (i = 0; i < INT3400_THERMAL_MAXIMUM_UUID; ++i) { for (i = 0; i < INT3400_THERMAL_MAXIMUM_UUID; ++i) {
if (!strncmp(buf, int3400_thermal_uuids[i], if (!strncmp(buf, int3400_thermal_uuids[i],
@ -231,19 +250,7 @@ static ssize_t current_uuid_store(struct device *dev,
} }
if (priv->os_uuid_mask) { if (priv->os_uuid_mask) {
int cap, ret; ret = set_os_uuid_mask(priv, priv->os_uuid_mask);
/*
* Capability bits:
* Bit 0: set to 1 to indicate DPTF is active
* Bi1 1: set to 1 to active cooling is supported by user space daemon
* Bit 2: set to 1 to passive cooling is supported by user space daemon
* Bit 3: set to 1 to critical trip is handled by user space daemon
*/
cap = ((priv->os_uuid_mask << 1) | 0x01);
ret = int3400_thermal_run_osc(priv->adev->handle,
"b23ba85d-c8b7-3542-88de-8de2ffcfd698",
&cap);
if (ret) if (ret)
return ret; return ret;
} }
@ -469,17 +476,26 @@ static int int3400_thermal_change_mode(struct thermal_zone_device *thermal,
if (mode != thermal->mode) { if (mode != thermal->mode) {
int enabled; int enabled;
enabled = mode == THERMAL_DEVICE_ENABLED;
if (priv->os_uuid_mask) {
if (!enabled) {
priv->os_uuid_mask = 0;
result = set_os_uuid_mask(priv, priv->os_uuid_mask);
}
goto eval_odvp;
}
if (priv->current_uuid_index < 0 || if (priv->current_uuid_index < 0 ||
priv->current_uuid_index >= INT3400_THERMAL_MAXIMUM_UUID) priv->current_uuid_index >= INT3400_THERMAL_MAXIMUM_UUID)
return -EINVAL; return -EINVAL;
enabled = (mode == THERMAL_DEVICE_ENABLED);
result = int3400_thermal_run_osc(priv->adev->handle, result = int3400_thermal_run_osc(priv->adev->handle,
int3400_thermal_uuids[priv->current_uuid_index], int3400_thermal_uuids[priv->current_uuid_index],
&enabled); &enabled);
} }
eval_odvp:
evaluate_odvp(priv); evaluate_odvp(priv);
return result; return result;

View file

@ -137,6 +137,7 @@ struct gsm_dlci {
int retries; int retries;
/* Uplink tty if active */ /* Uplink tty if active */
struct tty_port port; /* The tty bound to this DLCI if there is one */ struct tty_port port; /* The tty bound to this DLCI if there is one */
#define TX_SIZE 4096 /* Must be power of 2. */
struct kfifo fifo; /* Queue fifo for the DLCI */ struct kfifo fifo; /* Queue fifo for the DLCI */
int adaption; /* Adaption layer in use */ int adaption; /* Adaption layer in use */
int prev_adaption; int prev_adaption;
@ -1658,6 +1659,7 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen)
if (len == 0) if (len == 0)
return; return;
} }
len--;
slen++; slen++;
tty = tty_port_tty_get(port); tty = tty_port_tty_get(port);
if (tty) { if (tty) {
@ -1730,7 +1732,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
return NULL; return NULL;
spin_lock_init(&dlci->lock); spin_lock_init(&dlci->lock);
mutex_init(&dlci->mutex); mutex_init(&dlci->mutex);
if (kfifo_alloc(&dlci->fifo, 4096, GFP_KERNEL) < 0) { if (kfifo_alloc(&dlci->fifo, TX_SIZE, GFP_KERNEL) < 0) {
kfree(dlci); kfree(dlci);
return NULL; return NULL;
} }
@ -2351,6 +2353,7 @@ static void gsm_copy_config_values(struct gsm_mux *gsm,
static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c) static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c)
{ {
int ret = 0;
int need_close = 0; int need_close = 0;
int need_restart = 0; int need_restart = 0;
@ -2418,10 +2421,13 @@ static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c)
* FIXME: We need to separate activation/deactivation from adding * FIXME: We need to separate activation/deactivation from adding
* and removing from the mux array * and removing from the mux array
*/ */
if (need_restart) if (gsm->dead) {
gsm_activate_mux(gsm); ret = gsm_activate_mux(gsm);
if (gsm->initiator && need_close) if (ret)
gsm_dlci_begin_open(gsm->dlci[0]); return ret;
if (gsm->initiator)
gsm_dlci_begin_open(gsm->dlci[0]);
}
return 0; return 0;
} }
@ -2971,8 +2977,6 @@ static struct tty_ldisc_ops tty_ldisc_packet = {
* Virtual tty side * Virtual tty side
*/ */
#define TX_SIZE 512
/** /**
* gsm_modem_upd_via_data - send modem bits via convergence layer * gsm_modem_upd_via_data - send modem bits via convergence layer
* @dlci: channel * @dlci: channel
@ -3212,7 +3216,7 @@ static unsigned int gsmtty_write_room(struct tty_struct *tty)
struct gsm_dlci *dlci = tty->driver_data; struct gsm_dlci *dlci = tty->driver_data;
if (dlci->state == DLCI_CLOSED) if (dlci->state == DLCI_CLOSED)
return 0; return 0;
return TX_SIZE - kfifo_len(&dlci->fifo); return kfifo_avail(&dlci->fifo);
} }
static unsigned int gsmtty_chars_in_buffer(struct tty_struct *tty) static unsigned int gsmtty_chars_in_buffer(struct tty_struct *tty)

View file

@ -37,6 +37,7 @@
#define MTK_UART_IER_RTSI 0x40 /* Enable RTS Modem status interrupt */ #define MTK_UART_IER_RTSI 0x40 /* Enable RTS Modem status interrupt */
#define MTK_UART_IER_CTSI 0x80 /* Enable CTS Modem status interrupt */ #define MTK_UART_IER_CTSI 0x80 /* Enable CTS Modem status interrupt */
#define MTK_UART_EFR 38 /* I/O: Extended Features Register */
#define MTK_UART_EFR_EN 0x10 /* Enable enhancement feature */ #define MTK_UART_EFR_EN 0x10 /* Enable enhancement feature */
#define MTK_UART_EFR_RTS 0x40 /* Enable hardware rx flow control */ #define MTK_UART_EFR_RTS 0x40 /* Enable hardware rx flow control */
#define MTK_UART_EFR_CTS 0x80 /* Enable hardware tx flow control */ #define MTK_UART_EFR_CTS 0x80 /* Enable hardware tx flow control */
@ -53,6 +54,12 @@
#define MTK_UART_TX_TRIGGER 1 #define MTK_UART_TX_TRIGGER 1
#define MTK_UART_RX_TRIGGER MTK_UART_RX_SIZE #define MTK_UART_RX_TRIGGER MTK_UART_RX_SIZE
#define MTK_UART_FEATURE_SEL 39 /* Feature Selection register */
#define MTK_UART_FEAT_NEWRMAP BIT(0) /* Use new register map */
#define MTK_UART_XON1 40 /* I/O: Xon character 1 */
#define MTK_UART_XOFF1 42 /* I/O: Xoff character 1 */
#ifdef CONFIG_SERIAL_8250_DMA #ifdef CONFIG_SERIAL_8250_DMA
enum dma_rx_status { enum dma_rx_status {
DMA_RX_START = 0, DMA_RX_START = 0,
@ -169,7 +176,7 @@ static void mtk8250_dma_enable(struct uart_8250_port *up)
MTK_UART_DMA_EN_RX | MTK_UART_DMA_EN_TX); MTK_UART_DMA_EN_RX | MTK_UART_DMA_EN_TX);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_EFR, UART_EFR_ECB); serial_out(up, MTK_UART_EFR, UART_EFR_ECB);
serial_out(up, UART_LCR, lcr); serial_out(up, UART_LCR, lcr);
if (dmaengine_slave_config(dma->rxchan, &dma->rxconf) != 0) if (dmaengine_slave_config(dma->rxchan, &dma->rxconf) != 0)
@ -232,7 +239,7 @@ static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode)
int lcr = serial_in(up, UART_LCR); int lcr = serial_in(up, UART_LCR);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_EFR, UART_EFR_ECB); serial_out(up, MTK_UART_EFR, UART_EFR_ECB);
serial_out(up, UART_LCR, lcr); serial_out(up, UART_LCR, lcr);
lcr = serial_in(up, UART_LCR); lcr = serial_in(up, UART_LCR);
@ -241,7 +248,7 @@ static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode)
serial_out(up, MTK_UART_ESCAPE_DAT, MTK_UART_ESCAPE_CHAR); serial_out(up, MTK_UART_ESCAPE_DAT, MTK_UART_ESCAPE_CHAR);
serial_out(up, MTK_UART_ESCAPE_EN, 0x00); serial_out(up, MTK_UART_ESCAPE_EN, 0x00);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_EFR, serial_in(up, UART_EFR) & serial_out(up, MTK_UART_EFR, serial_in(up, MTK_UART_EFR) &
(~(MTK_UART_EFR_HW_FC | MTK_UART_EFR_SW_FC_MASK))); (~(MTK_UART_EFR_HW_FC | MTK_UART_EFR_SW_FC_MASK)));
serial_out(up, UART_LCR, lcr); serial_out(up, UART_LCR, lcr);
mtk8250_disable_intrs(up, MTK_UART_IER_XOFFI | mtk8250_disable_intrs(up, MTK_UART_IER_XOFFI |
@ -255,8 +262,8 @@ static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode)
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
/*enable hw flow control*/ /*enable hw flow control*/
serial_out(up, UART_EFR, MTK_UART_EFR_HW_FC | serial_out(up, MTK_UART_EFR, MTK_UART_EFR_HW_FC |
(serial_in(up, UART_EFR) & (serial_in(up, MTK_UART_EFR) &
(~(MTK_UART_EFR_HW_FC | MTK_UART_EFR_SW_FC_MASK)))); (~(MTK_UART_EFR_HW_FC | MTK_UART_EFR_SW_FC_MASK))));
serial_out(up, UART_LCR, lcr); serial_out(up, UART_LCR, lcr);
@ -270,12 +277,12 @@ static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode)
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
/*enable sw flow control */ /*enable sw flow control */
serial_out(up, UART_EFR, MTK_UART_EFR_XON1_XOFF1 | serial_out(up, MTK_UART_EFR, MTK_UART_EFR_XON1_XOFF1 |
(serial_in(up, UART_EFR) & (serial_in(up, MTK_UART_EFR) &
(~(MTK_UART_EFR_HW_FC | MTK_UART_EFR_SW_FC_MASK)))); (~(MTK_UART_EFR_HW_FC | MTK_UART_EFR_SW_FC_MASK))));
serial_out(up, UART_XON1, START_CHAR(port->state->port.tty)); serial_out(up, MTK_UART_XON1, START_CHAR(port->state->port.tty));
serial_out(up, UART_XOFF1, STOP_CHAR(port->state->port.tty)); serial_out(up, MTK_UART_XOFF1, STOP_CHAR(port->state->port.tty));
serial_out(up, UART_LCR, lcr); serial_out(up, UART_LCR, lcr);
mtk8250_disable_intrs(up, MTK_UART_IER_CTSI|MTK_UART_IER_RTSI); mtk8250_disable_intrs(up, MTK_UART_IER_CTSI|MTK_UART_IER_RTSI);
mtk8250_enable_intrs(up, MTK_UART_IER_XOFFI); mtk8250_enable_intrs(up, MTK_UART_IER_XOFFI);
@ -568,6 +575,10 @@ static int mtk8250_probe(struct platform_device *pdev)
uart.dma = data->dma; uart.dma = data->dma;
#endif #endif
/* Set AP UART new register map */
writel(MTK_UART_FEAT_NEWRMAP, uart.port.membase +
(MTK_UART_FEATURE_SEL << uart.port.regshift));
/* Disable Rate Fix function */ /* Disable Rate Fix function */
writel(0x0, uart.port.membase + writel(0x0, uart.port.membase +
(MTK_UART_RATE_FIX << uart.port.regshift)); (MTK_UART_RATE_FIX << uart.port.regshift));

View file

@ -471,11 +471,10 @@ static int digicolor_uart_probe(struct platform_device *pdev)
if (IS_ERR(uart_clk)) if (IS_ERR(uart_clk))
return PTR_ERR(uart_clk); return PTR_ERR(uart_clk);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0); dp->port.membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
dp->port.mapbase = res->start;
dp->port.membase = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(dp->port.membase)) if (IS_ERR(dp->port.membase))
return PTR_ERR(dp->port.membase); return PTR_ERR(dp->port.membase);
dp->port.mapbase = res->start;
irq = platform_get_irq(pdev, 0); irq = platform_get_irq(pdev, 0);
if (irq < 0) if (irq < 0)

View file

@ -2664,6 +2664,7 @@ static int lpuart_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node; struct device_node *np = pdev->dev.of_node;
struct lpuart_port *sport; struct lpuart_port *sport;
struct resource *res; struct resource *res;
irq_handler_t handler;
int ret; int ret;
sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL); sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
@ -2741,17 +2742,11 @@ static int lpuart_probe(struct platform_device *pdev)
if (lpuart_is_32(sport)) { if (lpuart_is_32(sport)) {
lpuart_reg.cons = LPUART32_CONSOLE; lpuart_reg.cons = LPUART32_CONSOLE;
ret = devm_request_irq(&pdev->dev, sport->port.irq, lpuart32_int, 0, handler = lpuart32_int;
DRIVER_NAME, sport);
} else { } else {
lpuart_reg.cons = LPUART_CONSOLE; lpuart_reg.cons = LPUART_CONSOLE;
ret = devm_request_irq(&pdev->dev, sport->port.irq, lpuart_int, 0, handler = lpuart_int;
DRIVER_NAME, sport);
} }
if (ret)
goto failed_irq_request;
ret = uart_add_one_port(&lpuart_reg, &sport->port); ret = uart_add_one_port(&lpuart_reg, &sport->port);
if (ret) if (ret)
goto failed_attach_port; goto failed_attach_port;
@ -2773,13 +2768,18 @@ static int lpuart_probe(struct platform_device *pdev)
sport->port.rs485_config(&sport->port, &sport->port.rs485); sport->port.rs485_config(&sport->port, &sport->port.rs485);
ret = devm_request_irq(&pdev->dev, sport->port.irq, handler, 0,
DRIVER_NAME, sport);
if (ret)
goto failed_irq_request;
return 0; return 0;
failed_irq_request:
failed_get_rs485: failed_get_rs485:
failed_reset: failed_reset:
uart_remove_one_port(&lpuart_reg, &sport->port); uart_remove_one_port(&lpuart_reg, &sport->port);
failed_attach_port: failed_attach_port:
failed_irq_request:
lpuart_disable_clks(sport); lpuart_disable_clks(sport);
failed_clock_enable: failed_clock_enable:
failed_out_of_range: failed_out_of_range:

View file

@ -774,6 +774,7 @@ static int wdm_release(struct inode *inode, struct file *file)
poison_urbs(desc); poison_urbs(desc);
spin_lock_irq(&desc->iuspin); spin_lock_irq(&desc->iuspin);
desc->resp_count = 0; desc->resp_count = 0;
clear_bit(WDM_RESPONDING, &desc->flags);
spin_unlock_irq(&desc->iuspin); spin_unlock_irq(&desc->iuspin);
desc->manage_power(desc->intf, 0); desc->manage_power(desc->intf, 0);
unpoison_urbs(desc); unpoison_urbs(desc);

View file

@ -890,13 +890,37 @@ static void uvc_function_unbind(struct usb_configuration *c,
{ {
struct usb_composite_dev *cdev = c->cdev; struct usb_composite_dev *cdev = c->cdev;
struct uvc_device *uvc = to_uvc(f); struct uvc_device *uvc = to_uvc(f);
long wait_ret = 1;
uvcg_info(f, "%s()\n", __func__); uvcg_info(f, "%s()\n", __func__);
/* If we know we're connected via v4l2, then there should be a cleanup
* of the device from userspace either via UVC_EVENT_DISCONNECT or
* though the video device removal uevent. Allow some time for the
* application to close out before things get deleted.
*/
if (uvc->func_connected) {
uvcg_dbg(f, "waiting for clean disconnect\n");
wait_ret = wait_event_interruptible_timeout(uvc->func_connected_queue,
uvc->func_connected == false, msecs_to_jiffies(500));
uvcg_dbg(f, "done waiting with ret: %ld\n", wait_ret);
}
device_remove_file(&uvc->vdev.dev, &dev_attr_function_name); device_remove_file(&uvc->vdev.dev, &dev_attr_function_name);
video_unregister_device(&uvc->vdev); video_unregister_device(&uvc->vdev);
v4l2_device_unregister(&uvc->v4l2_dev); v4l2_device_unregister(&uvc->v4l2_dev);
if (uvc->func_connected) {
/* Wait for the release to occur to ensure there are no longer any
* pending operations that may cause panics when resources are cleaned
* up.
*/
uvcg_warn(f, "%s no clean disconnect, wait for release\n", __func__);
wait_ret = wait_event_interruptible_timeout(uvc->func_connected_queue,
uvc->func_connected == false, msecs_to_jiffies(1000));
uvcg_dbg(f, "done waiting for release with ret: %ld\n", wait_ret);
}
usb_ep_free_request(cdev->gadget->ep0, uvc->control_req); usb_ep_free_request(cdev->gadget->ep0, uvc->control_req);
kfree(uvc->control_buf); kfree(uvc->control_buf);
@ -915,6 +939,7 @@ static struct usb_function *uvc_alloc(struct usb_function_instance *fi)
mutex_init(&uvc->video.mutex); mutex_init(&uvc->video.mutex);
uvc->state = UVC_STATE_DISCONNECTED; uvc->state = UVC_STATE_DISCONNECTED;
init_waitqueue_head(&uvc->func_connected_queue);
opts = fi_to_f_uvc_opts(fi); opts = fi_to_f_uvc_opts(fi);
mutex_lock(&opts->lock); mutex_lock(&opts->lock);

View file

@ -14,6 +14,7 @@
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/usb/composite.h> #include <linux/usb/composite.h>
#include <linux/videodev2.h> #include <linux/videodev2.h>
#include <linux/wait.h>
#include <media/v4l2-device.h> #include <media/v4l2-device.h>
#include <media/v4l2-dev.h> #include <media/v4l2-dev.h>
@ -129,6 +130,7 @@ struct uvc_device {
struct usb_function func; struct usb_function func;
struct uvc_video video; struct uvc_video video;
bool func_connected; bool func_connected;
wait_queue_head_t func_connected_queue;
/* Descriptors */ /* Descriptors */
struct { struct {

View file

@ -253,10 +253,11 @@ uvc_v4l2_subscribe_event(struct v4l2_fh *fh,
static void uvc_v4l2_disable(struct uvc_device *uvc) static void uvc_v4l2_disable(struct uvc_device *uvc)
{ {
uvc->func_connected = false;
uvc_function_disconnect(uvc); uvc_function_disconnect(uvc);
uvcg_video_enable(&uvc->video, 0); uvcg_video_enable(&uvc->video, 0);
uvcg_free_buffers(&uvc->video.queue); uvcg_free_buffers(&uvc->video.queue);
uvc->func_connected = false;
wake_up_interruptible(&uvc->func_connected_queue);
} }
static int static int

View file

@ -145,6 +145,7 @@ enum dev_state {
STATE_DEV_INVALID = 0, STATE_DEV_INVALID = 0,
STATE_DEV_OPENED, STATE_DEV_OPENED,
STATE_DEV_INITIALIZED, STATE_DEV_INITIALIZED,
STATE_DEV_REGISTERING,
STATE_DEV_RUNNING, STATE_DEV_RUNNING,
STATE_DEV_CLOSED, STATE_DEV_CLOSED,
STATE_DEV_FAILED STATE_DEV_FAILED
@ -508,6 +509,7 @@ static int raw_ioctl_run(struct raw_dev *dev, unsigned long value)
ret = -EINVAL; ret = -EINVAL;
goto out_unlock; goto out_unlock;
} }
dev->state = STATE_DEV_REGISTERING;
spin_unlock_irqrestore(&dev->lock, flags); spin_unlock_irqrestore(&dev->lock, flags);
ret = usb_gadget_probe_driver(&dev->driver); ret = usb_gadget_probe_driver(&dev->driver);

View file

@ -19,11 +19,6 @@
#define HS_BW_BOUNDARY 6144 #define HS_BW_BOUNDARY 6144
/* usb2 spec section11.18.1: at most 188 FS bytes per microframe */ /* usb2 spec section11.18.1: at most 188 FS bytes per microframe */
#define FS_PAYLOAD_MAX 188 #define FS_PAYLOAD_MAX 188
/*
* max number of microframes for split transfer,
* for fs isoc in : 1 ss + 1 idle + 7 cs
*/
#define TT_MICROFRAMES_MAX 9
#define DBG_BUF_EN 64 #define DBG_BUF_EN 64
@ -242,28 +237,17 @@ static void drop_tt(struct usb_device *udev)
static struct mu3h_sch_ep_info * static struct mu3h_sch_ep_info *
create_sch_ep(struct xhci_hcd_mtk *mtk, struct usb_device *udev, create_sch_ep(struct xhci_hcd_mtk *mtk, struct usb_device *udev,
struct usb_host_endpoint *ep, struct xhci_ep_ctx *ep_ctx) struct usb_host_endpoint *ep)
{ {
struct mu3h_sch_ep_info *sch_ep; struct mu3h_sch_ep_info *sch_ep;
struct mu3h_sch_bw_info *bw_info; struct mu3h_sch_bw_info *bw_info;
struct mu3h_sch_tt *tt = NULL; struct mu3h_sch_tt *tt = NULL;
u32 len_bw_budget_table;
bw_info = get_bw_info(mtk, udev, ep); bw_info = get_bw_info(mtk, udev, ep);
if (!bw_info) if (!bw_info)
return ERR_PTR(-ENODEV); return ERR_PTR(-ENODEV);
if (is_fs_or_ls(udev->speed)) sch_ep = kzalloc(sizeof(*sch_ep), GFP_KERNEL);
len_bw_budget_table = TT_MICROFRAMES_MAX;
else if ((udev->speed >= USB_SPEED_SUPER)
&& usb_endpoint_xfer_isoc(&ep->desc))
len_bw_budget_table = get_esit(ep_ctx);
else
len_bw_budget_table = 1;
sch_ep = kzalloc(struct_size(sch_ep, bw_budget_table,
len_bw_budget_table),
GFP_KERNEL);
if (!sch_ep) if (!sch_ep)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
@ -295,8 +279,6 @@ static void setup_sch_info(struct xhci_ep_ctx *ep_ctx,
u32 mult; u32 mult;
u32 esit_pkts; u32 esit_pkts;
u32 max_esit_payload; u32 max_esit_payload;
u32 *bwb_table = sch_ep->bw_budget_table;
int i;
ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2)); ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
maxpkt = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2)); maxpkt = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
@ -332,7 +314,6 @@ static void setup_sch_info(struct xhci_ep_ctx *ep_ctx,
*/ */
sch_ep->pkts = max_burst + 1; sch_ep->pkts = max_burst + 1;
sch_ep->bw_cost_per_microframe = maxpkt * sch_ep->pkts; sch_ep->bw_cost_per_microframe = maxpkt * sch_ep->pkts;
bwb_table[0] = sch_ep->bw_cost_per_microframe;
} else if (sch_ep->speed >= USB_SPEED_SUPER) { } else if (sch_ep->speed >= USB_SPEED_SUPER) {
/* usb3_r1 spec section4.4.7 & 4.4.8 */ /* usb3_r1 spec section4.4.7 & 4.4.8 */
sch_ep->cs_count = 0; sch_ep->cs_count = 0;
@ -349,7 +330,6 @@ static void setup_sch_info(struct xhci_ep_ctx *ep_ctx,
if (ep_type == INT_IN_EP || ep_type == INT_OUT_EP) { if (ep_type == INT_IN_EP || ep_type == INT_OUT_EP) {
sch_ep->pkts = esit_pkts; sch_ep->pkts = esit_pkts;
sch_ep->num_budget_microframes = 1; sch_ep->num_budget_microframes = 1;
bwb_table[0] = maxpkt * sch_ep->pkts;
} }
if (ep_type == ISOC_IN_EP || ep_type == ISOC_OUT_EP) { if (ep_type == ISOC_IN_EP || ep_type == ISOC_OUT_EP) {
@ -366,15 +346,8 @@ static void setup_sch_info(struct xhci_ep_ctx *ep_ctx,
DIV_ROUND_UP(esit_pkts, sch_ep->pkts); DIV_ROUND_UP(esit_pkts, sch_ep->pkts);
sch_ep->repeat = !!(sch_ep->num_budget_microframes > 1); sch_ep->repeat = !!(sch_ep->num_budget_microframes > 1);
sch_ep->bw_cost_per_microframe = maxpkt * sch_ep->pkts;
for (i = 0; i < sch_ep->num_budget_microframes - 1; i++)
bwb_table[i] = sch_ep->bw_cost_per_microframe;
/* last one <= bw_cost_per_microframe */
bwb_table[i] = maxpkt * esit_pkts
- i * sch_ep->bw_cost_per_microframe;
} }
sch_ep->bw_cost_per_microframe = maxpkt * sch_ep->pkts;
} else if (is_fs_or_ls(sch_ep->speed)) { } else if (is_fs_or_ls(sch_ep->speed)) {
sch_ep->pkts = 1; /* at most one packet for each microframe */ sch_ep->pkts = 1; /* at most one packet for each microframe */
@ -384,28 +357,7 @@ static void setup_sch_info(struct xhci_ep_ctx *ep_ctx,
*/ */
sch_ep->cs_count = DIV_ROUND_UP(maxpkt, FS_PAYLOAD_MAX); sch_ep->cs_count = DIV_ROUND_UP(maxpkt, FS_PAYLOAD_MAX);
sch_ep->num_budget_microframes = sch_ep->cs_count; sch_ep->num_budget_microframes = sch_ep->cs_count;
sch_ep->bw_cost_per_microframe = sch_ep->bw_cost_per_microframe = min_t(u32, maxpkt, FS_PAYLOAD_MAX);
(maxpkt < FS_PAYLOAD_MAX) ? maxpkt : FS_PAYLOAD_MAX;
/* init budget table */
if (ep_type == ISOC_OUT_EP) {
for (i = 0; i < sch_ep->num_budget_microframes; i++)
bwb_table[i] = sch_ep->bw_cost_per_microframe;
} else if (ep_type == INT_OUT_EP) {
/* only first one consumes bandwidth, others as zero */
bwb_table[0] = sch_ep->bw_cost_per_microframe;
} else { /* INT_IN_EP or ISOC_IN_EP */
bwb_table[0] = 0; /* start split */
bwb_table[1] = 0; /* idle */
/*
* due to cs_count will be updated according to cs
* position, assign all remainder budget array
* elements as @bw_cost_per_microframe, but only first
* @num_budget_microframes elements will be used later
*/
for (i = 2; i < TT_MICROFRAMES_MAX; i++)
bwb_table[i] = sch_ep->bw_cost_per_microframe;
}
} }
} }
@ -422,7 +374,7 @@ static u32 get_max_bw(struct mu3h_sch_bw_info *sch_bw,
for (j = 0; j < sch_ep->num_budget_microframes; j++) { for (j = 0; j < sch_ep->num_budget_microframes; j++) {
k = XHCI_MTK_BW_INDEX(base + j); k = XHCI_MTK_BW_INDEX(base + j);
bw = sch_bw->bus_bw[k] + sch_ep->bw_budget_table[j]; bw = sch_bw->bus_bw[k] + sch_ep->bw_cost_per_microframe;
if (bw > max_bw) if (bw > max_bw)
max_bw = bw; max_bw = bw;
} }
@ -433,18 +385,16 @@ static u32 get_max_bw(struct mu3h_sch_bw_info *sch_bw,
static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw, static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw,
struct mu3h_sch_ep_info *sch_ep, bool used) struct mu3h_sch_ep_info *sch_ep, bool used)
{ {
int bw_updated;
u32 base; u32 base;
int i, j, k; int i, j;
bw_updated = sch_ep->bw_cost_per_microframe * (used ? 1 : -1);
for (i = 0; i < sch_ep->num_esit; i++) { for (i = 0; i < sch_ep->num_esit; i++) {
base = sch_ep->offset + i * sch_ep->esit; base = sch_ep->offset + i * sch_ep->esit;
for (j = 0; j < sch_ep->num_budget_microframes; j++) { for (j = 0; j < sch_ep->num_budget_microframes; j++)
k = XHCI_MTK_BW_INDEX(base + j); sch_bw->bus_bw[XHCI_MTK_BW_INDEX(base + j)] += bw_updated;
if (used)
sch_bw->bus_bw[k] += sch_ep->bw_budget_table[j];
else
sch_bw->bus_bw[k] -= sch_ep->bw_budget_table[j];
}
} }
} }
@ -464,7 +414,7 @@ static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset)
*/ */
for (j = 0; j < sch_ep->num_budget_microframes; j++) { for (j = 0; j < sch_ep->num_budget_microframes; j++) {
k = XHCI_MTK_BW_INDEX(base + j); k = XHCI_MTK_BW_INDEX(base + j);
tmp = tt->fs_bus_bw[k] + sch_ep->bw_budget_table[j]; tmp = tt->fs_bus_bw[k] + sch_ep->bw_cost_per_microframe;
if (tmp > FS_PAYLOAD_MAX) if (tmp > FS_PAYLOAD_MAX)
return -ESCH_BW_OVERFLOW; return -ESCH_BW_OVERFLOW;
} }
@ -538,19 +488,17 @@ static int check_sch_tt(struct mu3h_sch_ep_info *sch_ep, u32 offset)
static void update_sch_tt(struct mu3h_sch_ep_info *sch_ep, bool used) static void update_sch_tt(struct mu3h_sch_ep_info *sch_ep, bool used)
{ {
struct mu3h_sch_tt *tt = sch_ep->sch_tt; struct mu3h_sch_tt *tt = sch_ep->sch_tt;
int bw_updated;
u32 base; u32 base;
int i, j, k; int i, j;
bw_updated = sch_ep->bw_cost_per_microframe * (used ? 1 : -1);
for (i = 0; i < sch_ep->num_esit; i++) { for (i = 0; i < sch_ep->num_esit; i++) {
base = sch_ep->offset + i * sch_ep->esit; base = sch_ep->offset + i * sch_ep->esit;
for (j = 0; j < sch_ep->num_budget_microframes; j++) { for (j = 0; j < sch_ep->num_budget_microframes; j++)
k = XHCI_MTK_BW_INDEX(base + j); tt->fs_bus_bw[XHCI_MTK_BW_INDEX(base + j)] += bw_updated;
if (used)
tt->fs_bus_bw[k] += sch_ep->bw_budget_table[j];
else
tt->fs_bus_bw[k] -= sch_ep->bw_budget_table[j];
}
} }
if (used) if (used)
@ -710,7 +658,7 @@ static int add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
xhci_dbg(xhci, "%s %s\n", __func__, decode_ep(ep, udev->speed)); xhci_dbg(xhci, "%s %s\n", __func__, decode_ep(ep, udev->speed));
sch_ep = create_sch_ep(mtk, udev, ep, ep_ctx); sch_ep = create_sch_ep(mtk, udev, ep);
if (IS_ERR_OR_NULL(sch_ep)) if (IS_ERR_OR_NULL(sch_ep))
return -ENOMEM; return -ENOMEM;

View file

@ -83,7 +83,6 @@ struct mu3h_sch_bw_info {
* times; 1: distribute the (bMaxBurst+1)*(Mult+1) packets * times; 1: distribute the (bMaxBurst+1)*(Mult+1) packets
* according to @pkts and @repeat. normal mode is used by * according to @pkts and @repeat. normal mode is used by
* default * default
* @bw_budget_table: table to record bandwidth budget per microframe
*/ */
struct mu3h_sch_ep_info { struct mu3h_sch_ep_info {
u32 esit; u32 esit;
@ -109,7 +108,6 @@ struct mu3h_sch_ep_info {
u32 pkts; u32 pkts;
u32 cs_count; u32 cs_count;
u32 burst_mode; u32 burst_mode;
u32 bw_budget_table[];
}; };
#define MU3C_U3_PORT_MAX 4 #define MU3C_U3_PORT_MAX 4

View file

@ -2123,10 +2123,14 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(3) }, .driver_info = RSVD(3) },
{ USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */ { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */
.driver_info = RSVD(4) | RSVD(5) | RSVD(6) }, .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
{ USB_DEVICE(0x1782, 0x4d10) }, /* Fibocom L610 (AT mode) */
{ USB_DEVICE_INTERFACE_CLASS(0x1782, 0x4d11, 0xff) }, /* Fibocom L610 (ECM/RNDIS mode) */
{ USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */ { USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
.driver_info = RSVD(4) | RSVD(5) }, .driver_info = RSVD(4) | RSVD(5) },
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
.driver_info = RSVD(6) }, .driver_info = RSVD(6) },
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0106, 0xff) }, /* Fibocom MA510 (ECM mode w/ diag intf.) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x010a, 0xff) }, /* Fibocom MA510 (ECM mode) */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */ { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */ { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */

View file

@ -106,6 +106,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LCM960_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LCM960_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LM920_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LM920_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LM930_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LM940_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LM940_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_TD620_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_TD620_PRODUCT_ID) },
{ USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) }, { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },

Some files were not shown because too many files have changed in this diff Show more