drm fixes for v6.14-rc4

Merge tag 'drm-fixes-2025-02-22' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
 "Weekly drm fixes pull request, lots of small things all over, msm has
  a bunch of things but all very small, xe, i915, a fix for the cgroup
  dmem controller.

  core:
   - remove MAINTAINERS entry

  cgroup/dmem:
   - use correct function for pool descendants

  panel:
   - fix signal polarity issue on jd9365da-h3

  nouveau:
   - folio handling fix
   - config fix

  amdxdna:
   - fix missing header

  xe:
   - Fix error handling in xe_irq_install
   - Fix devcoredump format

  i915:
   - Use spin_lock_irqsave() in interruptible context on guc submission
   - Fixes on DDI and TRANS programming
   - Make sure all planes in use by the joiner have their crtc included
   - Fix 128b/132b modeset issues

  msm:
   - More catalog fixes:
      - to skip watchdog programming through the top block if it's not
        present
      - fix the setting of WB mask to ensure the WB input control is
        programmed correctly through ping-pong
      - drop lm_pair for sm6150 as that chipset does not have any
        3dmerge block
      - Fix the mode validation logic for DP/eDP to account for widebus
        (2ppc) so that high-pixel-clock modes are allowed
      - Fix to disable dither during encoder disable; otherwise this was
        causing kms_writeback failures due to resource sharing between
        the WB and DSI paths, since DSI uses dither but WB does not
      - Fixes for virtual planes, namely to drop extraneous return and
        fix uninitialized variables
      - Fix to avoid spill-over of DSC encoder block bits when
        programming the bits-per-component
      - Fixes in the DSI PHY to protect against concurrent access of
        PHY_CMN_CLK_CFG regs between clock and display drivers
   - Core/GPU:
      - Fix non-blocking fence wait incorrectly rounding up to 1 jiffy
        timeout
      - Only print GMU fw version once, instead of each time the GPU
        resumes"

* tag 'drm-fixes-2025-02-22' of https://gitlab.freedesktop.org/drm/kernel: (28 commits)
  drm/i915/dp: Fix disabling the transcoder function in 128b/132b mode
  drm/i915/dp: Fix error handling during 128b/132b link training
  accel/amdxdna: Add missing include linux/slab.h
  MAINTAINERS: Remove myself
  drm/nouveau/pmu: Fix gp10b firmware guard
  cgroup/dmem: Don't open-code css_for_each_descendant_pre
  drm/xe/guc: Fix size_t print format
  drm/xe: Make GUC binaries dump consistent with other binaries in devcoredump
  drm/i915: Make sure all planes in use by the joiner have their crtc included
  drm/i915/ddi: Fix HDMI port width programming in DDI_BUF_CTL
  drm/i915/dsi: Use TRANS_DDI_FUNC_CTL's own port width macro
  drm/xe: Fix error handling in xe_irq_install()
  drm/i915/gt: Use spin_lock_irqsave() in interruptible context
  drm/msm/dsi/phy: Do not overwite PHY_CMN_CLK_CFG1 when choosing bitclk source
  drm/msm/dsi/phy: Protect PHY_CMN_CLK_CFG1 against clock driver
  drm/msm/dsi/phy: Protect PHY_CMN_CLK_CFG0 updated from driver side
  drm/msm/dpu: Drop extraneous return in dpu_crtc_reassign_planes()
  drm/msm/dpu: Don't leak bits_per_component into random DSC_ENC fields
  drm/msm/dpu: Disable dither in phys encoder cleanup
  drm/msm/dpu: Fix uninitialized variable
  ...
Linus Torvalds 2025-02-21 13:10:22 -08:00
commit 3ef7acec97
30 changed files with 146 additions and 124 deletions

View file

@ -7425,7 +7425,6 @@ F: Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml
F: drivers/gpu/drm/panel/panel-novatek-nt36672a.c
DRM DRIVER FOR NVIDIA GEFORCE/QUADRO GPUS
M: Karol Herbst <kherbst@redhat.com>
M: Lyude Paul <lyude@redhat.com>
M: Danilo Krummrich <dakr@kernel.org>
L: dri-devel@lists.freedesktop.org
@ -24069,7 +24068,6 @@ F: tools/testing/selftests/ftrace/
TRACING MMIO ACCESSES (MMIOTRACE)
M: Steven Rostedt <rostedt@goodmis.org>
M: Masami Hiramatsu <mhiramat@kernel.org>
R: Karol Herbst <karolherbst@gmail.com>
R: Pekka Paalanen <ppaalanen@gmail.com>
L: linux-kernel@vger.kernel.org
L: nouveau@lists.freedesktop.org

View file

@ -8,6 +8,7 @@
#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/xarray.h>
#define CREATE_TRACE_POINTS

View file

@ -809,8 +809,8 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
/* select data lane width */
tmp = intel_de_read(display,
TRANS_DDI_FUNC_CTL(display, dsi_trans));
tmp &= ~DDI_PORT_WIDTH_MASK;
tmp |= DDI_PORT_WIDTH(intel_dsi->lane_count);
tmp &= ~TRANS_DDI_PORT_WIDTH_MASK;
tmp |= TRANS_DDI_PORT_WIDTH(intel_dsi->lane_count);
/* select input pipe */
tmp &= ~TRANS_DDI_EDP_INPUT_MASK;

View file

@ -658,7 +658,6 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
u32 ctl;
if (DISPLAY_VER(dev_priv) >= 11)
@ -678,8 +677,7 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK);
if (DISPLAY_VER(dev_priv) >= 12) {
if (!intel_dp_mst_is_master_trans(crtc_state) ||
(!is_mst && intel_dp_is_uhbr(crtc_state))) {
if (!intel_dp_mst_is_master_trans(crtc_state)) {
ctl &= ~(TGL_TRANS_DDI_PORT_MASK |
TRANS_DDI_MODE_SELECT_MASK);
}
@ -3134,7 +3132,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
intel_dp_set_power(intel_dp, DP_SET_POWER_D3);
if (DISPLAY_VER(dev_priv) >= 12) {
if (is_mst) {
if (is_mst || intel_dp_is_uhbr(old_crtc_state)) {
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
intel_de_rmw(dev_priv,
@ -3487,7 +3485,7 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(dev_priv, port),
XELPDP_PORT_WIDTH_MASK | XELPDP_PORT_REVERSAL, port_buf);
buf_ctl |= DDI_PORT_WIDTH(lane_count);
buf_ctl |= DDI_PORT_WIDTH(crtc_state->lane_count);
if (DISPLAY_VER(dev_priv) >= 20)
buf_ctl |= XE2LPD_DDI_BUF_D2D_LINK_ENABLE;

View file

@ -6628,12 +6628,30 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
static int intel_joiner_add_affected_crtcs(struct intel_atomic_state *state)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_plane_state *plane_state;
struct intel_crtc_state *crtc_state;
struct intel_plane *plane;
struct intel_crtc *crtc;
u8 affected_pipes = 0;
u8 modeset_pipes = 0;
int i;
/*
* Any plane which is in use by the joiner needs its crtc.
* Pull those in first as this will not have happened yet
* if the plane remains disabled according to uapi.
*/
for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
crtc = to_intel_crtc(plane_state->hw.crtc);
if (!crtc)
continue;
crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
}
/* Now pull in all joined crtcs */
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
affected_pipes |= crtc_state->joiner_pipes;
if (intel_crtc_needs_modeset(crtc_state))

View file

@ -1563,7 +1563,7 @@ intel_dp_128b132b_link_train(struct intel_dp *intel_dp,
if (wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) {
lt_err(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clear\n");
return false;
goto out;
}
if (intel_dp_128b132b_lane_eq(intel_dp, crtc_state) &&
@ -1575,6 +1575,19 @@ intel_dp_128b132b_link_train(struct intel_dp *intel_dp,
passed ? "passed" : "failed",
crtc_state->port_clock, crtc_state->lane_count);
out:
/*
* Ensure that the training pattern does get set to TPS2 even in case
* of a failure, as is the case at the end of a passing link training
* and what is expected by the transcoder. Leaving TPS1 set (and
* disabling the link train mode in DP_TP_CTL later from TPS1 directly)
* would result in a stuck transcoder HW state and flip-done timeouts
* later in the modeset sequence.
*/
if (!passed)
intel_dp_program_link_training_pattern(intel_dp, crtc_state,
DP_PHY_DPRX, DP_TRAINING_PATTERN_2);
return passed;
}

View file

@ -3449,10 +3449,10 @@ static inline int guc_lrc_desc_unpin(struct intel_context *ce)
*/
ret = deregister_context(ce, ce->guc_id.id);
if (ret) {
spin_lock(&ce->guc_state.lock);
spin_lock_irqsave(&ce->guc_state.lock, flags);
set_context_registered(ce);
clr_context_destroyed(ce);
spin_unlock(&ce->guc_state.lock);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
/*
* As gt-pm is awake at function entry, intel_wakeref_put_async merely decrements
* the wakeref immediately but per function spec usage call this after unlock.
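For context on the spin_lock_irqsave() change above, a minimal, hypothetical sketch (invented names, not i915 code) of the underlying rule: a lock that is also taken from hard-IRQ context must be acquired with the IRQ-saving variant on any path that can run with interrupts enabled, otherwise the IRQ handler can interrupt the lock holder on the same CPU and deadlock.

#include <linux/interrupt.h>
#include <linux/spinlock.h>

/* hypothetical state shared between process and IRQ context */
struct demo_state {
	spinlock_t lock;
	unsigned int registered;
};

/* hard-IRQ context: interrupts are already off, a plain lock is fine */
static irqreturn_t demo_irq_handler(int irq, void *data)
{
	struct demo_state *st = data;

	spin_lock(&st->lock);
	st->registered = 0;
	spin_unlock(&st->lock);

	return IRQ_HANDLED;
}

/* process context: must save/disable local interrupts around the lock,
 * or demo_irq_handler() could fire here while the lock is held */
static void demo_mark_registered(struct demo_state *st)
{
	unsigned long flags;

	spin_lock_irqsave(&st->lock, flags);
	st->registered = 1;
	spin_unlock_irqrestore(&st->lock, flags);
}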

View file

@ -3633,7 +3633,7 @@ enum skl_power_gate {
#define DDI_BUF_IS_IDLE (1 << 7)
#define DDI_BUF_CTL_TC_PHY_OWNERSHIP REG_BIT(6)
#define DDI_A_4_LANES (1 << 4)
#define DDI_PORT_WIDTH(width) (((width) - 1) << 1)
#define DDI_PORT_WIDTH(width) (((width) == 3 ? 4 : ((width) - 1)) << 1)
#define DDI_PORT_WIDTH_MASK (7 << 1)
#define DDI_PORT_WIDTH_SHIFT 1
#define DDI_INIT_DISPLAY_DETECTED (1 << 0)
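To make the encoding change above concrete, the standalone sketch below simply re-evaluates the updated macro for each lane count (derived purely from the definition above, no hardware claims): the 3-lane case now programs field value 4 where the old (width - 1) arithmetic produced 2.

#include <stdio.h>

/* same arithmetic as the updated DDI_PORT_WIDTH() definition above */
#define DDI_PORT_WIDTH(width) (((width) == 3 ? 4 : ((width) - 1)) << 1)

int main(void)
{
	for (int lanes = 1; lanes <= 4; lanes++)
		printf("x%d lanes -> port width field %d\n",
		       lanes, DDI_PORT_WIDTH(lanes) >> 1);
	/* prints 0, 1, 4, 3 - only the x3 case differs from (width - 1) */
	return 0;
}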

View file

@ -813,10 +813,10 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
}
ver = gmu_read(gmu, REG_A6XX_GMU_CORE_FW_VERSION);
DRM_INFO("Loaded GMU firmware v%u.%u.%u\n",
FIELD_GET(A6XX_GMU_CORE_FW_VERSION_MAJOR__MASK, ver),
FIELD_GET(A6XX_GMU_CORE_FW_VERSION_MINOR__MASK, ver),
FIELD_GET(A6XX_GMU_CORE_FW_VERSION_STEP__MASK, ver));
DRM_INFO_ONCE("Loaded GMU firmware v%u.%u.%u\n",
FIELD_GET(A6XX_GMU_CORE_FW_VERSION_MAJOR__MASK, ver),
FIELD_GET(A6XX_GMU_CORE_FW_VERSION_MINOR__MASK, ver),
FIELD_GET(A6XX_GMU_CORE_FW_VERSION_STEP__MASK, ver));
return 0;
}

View file

@ -297,7 +297,7 @@ static const struct dpu_wb_cfg sm8150_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SDM845_MASK,
.features = WB_SM8250_MASK,
.format_list = wb2_formats_rgb,
.num_formats = ARRAY_SIZE(wb2_formats_rgb),
.clk_ctrl = DPU_CLK_CTRL_WB2,

View file

@ -304,7 +304,7 @@ static const struct dpu_wb_cfg sc8180x_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SDM845_MASK,
.features = WB_SM8250_MASK,
.format_list = wb2_formats_rgb,
.num_formats = ARRAY_SIZE(wb2_formats_rgb),
.clk_ctrl = DPU_CLK_CTRL_WB2,

View file

@ -116,14 +116,12 @@ static const struct dpu_lm_cfg sm6150_lm[] = {
.sblk = &sdm845_lm_sblk,
.pingpong = PINGPONG_0,
.dspp = DSPP_0,
.lm_pair = LM_1,
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
.features = MIXER_QCM2290_MASK,
.sblk = &sdm845_lm_sblk,
.pingpong = PINGPONG_1,
.lm_pair = LM_0,
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,

View file

@ -144,7 +144,7 @@ static const struct dpu_wb_cfg sm6125_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SDM845_MASK,
.features = WB_SM8250_MASK,
.format_list = wb2_formats_rgb,
.num_formats = ARRAY_SIZE(wb2_formats_rgb),
.clk_ctrl = DPU_CLK_CTRL_WB2,

View file

@ -1228,8 +1228,6 @@ static int dpu_crtc_reassign_planes(struct drm_crtc *crtc, struct drm_crtc_state
done:
kfree(states);
return ret;
return 0;
}
static int dpu_crtc_atomic_check(struct drm_crtc *crtc,

View file

@ -2281,6 +2281,9 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
}
}
if (phys_enc->hw_pp && phys_enc->hw_pp->ops.setup_dither)
phys_enc->hw_pp->ops.setup_dither(phys_enc->hw_pp, NULL);
/* reset the merge 3D HW block */
if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) {
phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,

View file

@ -52,6 +52,7 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
u32 slice_last_group_size;
u32 det_thresh_flatness;
bool is_cmd_mode = !(mode & DSC_MODE_VIDEO);
bool input_10_bits = dsc->bits_per_component == 10;
DPU_REG_WRITE(c, DSC_COMMON_MODE, mode);
@ -68,7 +69,7 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
data |= (dsc->line_buf_depth << 3);
data |= (dsc->simple_422 << 2);
data |= (dsc->convert_rgb << 1);
data |= dsc->bits_per_component;
data |= input_10_bits;
DPU_REG_WRITE(c, DSC_ENC, data);

View file

@ -272,7 +272,7 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
if (cap & BIT(DPU_MDP_VSYNC_SEL))
ops->setup_vsync_source = dpu_hw_setup_vsync_sel;
else
else if (!(cap & BIT(DPU_MDP_PERIPH_0_REMOVED)))
ops->setup_vsync_source = dpu_hw_setup_wd_timer;
ops->get_safe_status = dpu_hw_get_safe_status;

View file

@ -1164,7 +1164,6 @@ int dpu_assign_plane_resources(struct dpu_global_state *global_state,
unsigned int num_planes)
{
unsigned int i;
int ret;
for (i = 0; i < num_planes; i++) {
struct drm_plane_state *plane_state = states[i];
@ -1173,13 +1172,13 @@ int dpu_assign_plane_resources(struct dpu_global_state *global_state,
!plane_state->visible)
continue;
ret = dpu_plane_virtual_assign_resources(crtc, global_state,
int ret = dpu_plane_virtual_assign_resources(crtc, global_state,
state, plane_state);
if (ret)
break;
return ret;
}
return ret;
return 0;
}
static void dpu_plane_flush_csc(struct dpu_plane *pdpu, struct dpu_sw_pipe *pipe)

View file

@ -930,16 +930,17 @@ enum drm_mode_status msm_dp_bridge_mode_valid(struct drm_bridge *bridge,
return -EINVAL;
}
if (mode->clock > DP_MAX_PIXEL_CLK_KHZ)
return MODE_CLOCK_HIGH;
msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display);
link_info = &msm_dp_display->panel->link_info;
if (drm_mode_is_420_only(&dp->connector->display_info, mode) &&
msm_dp_display->panel->vsc_sdp_supported)
if ((drm_mode_is_420_only(&dp->connector->display_info, mode) &&
msm_dp_display->panel->vsc_sdp_supported) ||
msm_dp_wide_bus_available(dp))
mode_pclk_khz /= 2;
if (mode_pclk_khz > DP_MAX_PIXEL_CLK_KHZ)
return MODE_CLOCK_HIGH;
mode_bpp = dp->connector->display_info.bpc * num_components;
if (!mode_bpp)
mode_bpp = default_bpp;
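The widebus handling above halves the pixel rate that is compared against the controller's pixel-clock limit, which is what lets higher-clock modes pass validation. A standalone sketch of that arithmetic; the limit value and mode clock below are assumptions for illustration, not taken from the driver:

#include <stdbool.h>
#include <stdio.h>

/* assumed limit, for illustration only */
#define DP_MAX_PIXEL_CLK_KHZ 675000

static bool mode_clock_ok(int mode_pclk_khz, bool two_pixels_per_clock)
{
	if (two_pixels_per_clock)
		mode_pclk_khz /= 2;	/* widebus: 2 pixels per clock cycle */

	return mode_pclk_khz <= DP_MAX_PIXEL_CLK_KHZ;
}

int main(void)
{
	/* a pixel clock above the assumed limit only fits with widebus */
	printf("widebus off: %d\n", mode_clock_ok(1188000, false));
	printf("widebus on:  %d\n", mode_clock_ok(1188000, true));
	return 0;
}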

View file

@ -257,7 +257,10 @@ static enum drm_mode_status msm_edp_bridge_mode_valid(struct drm_bridge *bridge,
return -EINVAL;
}
if (mode->clock > DP_MAX_PIXEL_CLK_KHZ)
if (msm_dp_wide_bus_available(dp))
mode_pclk_khz /= 2;
if (mode_pclk_khz > DP_MAX_PIXEL_CLK_KHZ)
return MODE_CLOCK_HIGH;
/*

View file

@ -83,6 +83,9 @@ struct dsi_pll_7nm {
/* protects REG_DSI_7nm_PHY_CMN_CLK_CFG0 register */
spinlock_t postdiv_lock;
/* protects REG_DSI_7nm_PHY_CMN_CLK_CFG1 register */
spinlock_t pclk_mux_lock;
struct pll_7nm_cached_state cached_state;
struct dsi_pll_7nm *slave;
@ -372,22 +375,41 @@ static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll)
ndelay(250);
}
static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll)
static void dsi_pll_cmn_clk_cfg0_write(struct dsi_pll_7nm *pll, u32 val)
{
unsigned long flags;
spin_lock_irqsave(&pll->postdiv_lock, flags);
writel(val, pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
spin_unlock_irqrestore(&pll->postdiv_lock, flags);
}
static void dsi_pll_cmn_clk_cfg1_update(struct dsi_pll_7nm *pll, u32 mask,
u32 val)
{
unsigned long flags;
u32 data;
spin_lock_irqsave(&pll->pclk_mux_lock, flags);
data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
writel(data & ~BIT(5), pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
data &= ~mask;
data |= val & mask;
writel(data, pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
spin_unlock_irqrestore(&pll->pclk_mux_lock, flags);
}
static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll)
{
dsi_pll_cmn_clk_cfg1_update(pll, DSI_7nm_PHY_CMN_CLK_CFG1_CLK_EN, 0);
}
static void dsi_pll_enable_global_clk(struct dsi_pll_7nm *pll)
{
u32 data;
u32 cfg_1 = DSI_7nm_PHY_CMN_CLK_CFG1_CLK_EN | DSI_7nm_PHY_CMN_CLK_CFG1_CLK_EN_SEL;
writel(0x04, pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_3);
data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
writel(data | BIT(5) | BIT(4), pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
dsi_pll_cmn_clk_cfg1_update(pll, cfg_1, cfg_1);
}
static void dsi_pll_phy_dig_reset(struct dsi_pll_7nm *pll)
@ -565,7 +587,6 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
{
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw);
struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;
void __iomem *phy_base = pll_7nm->phy->base;
u32 val;
int ret;
@ -574,13 +595,10 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
val |= cached->pll_out_div;
writel(val, pll_7nm->phy->pll_base + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
writel(cached->bit_clk_div | (cached->pix_clk_div << 4),
phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
val = readl(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
val &= ~0x3;
val |= cached->pll_mux;
writel(val, phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
dsi_pll_cmn_clk_cfg0_write(pll_7nm,
DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0(cached->bit_clk_div) |
DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4(cached->pix_clk_div));
dsi_pll_cmn_clk_cfg1_update(pll_7nm, 0x3, cached->pll_mux);
ret = dsi_pll_7nm_vco_set_rate(phy->vco_hw,
pll_7nm->vco_current_rate,
@ -599,7 +617,6 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy)
{
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw);
void __iomem *base = phy->base;
u32 data = 0x0; /* internal PLL */
DBG("DSI PLL%d", pll_7nm->phy->id);
@ -618,7 +635,8 @@ static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy)
}
/* set PLL src */
writel(data << 2, base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
dsi_pll_cmn_clk_cfg1_update(pll_7nm, DSI_7nm_PHY_CMN_CLK_CFG1_BITCLK_SEL__MASK,
DSI_7nm_PHY_CMN_CLK_CFG1_BITCLK_SEL(data));
return 0;
}
@ -733,7 +751,7 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide
pll_by_2_bit,
}), 2, 0, pll_7nm->phy->base +
REG_DSI_7nm_PHY_CMN_CLK_CFG1,
0, 1, 0, NULL);
0, 1, 0, &pll_7nm->pclk_mux_lock);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail;
@ -778,6 +796,7 @@ static int dsi_pll_7nm_init(struct msm_dsi_phy *phy)
pll_7nm_list[phy->id] = pll_7nm;
spin_lock_init(&pll_7nm->postdiv_lock);
spin_lock_init(&pll_7nm->pclk_mux_lock);
pll_7nm->phy = phy;

View file

@ -537,15 +537,12 @@ static inline int align_pitch(int width, int bpp)
static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
{
ktime_t now = ktime_get();
s64 remaining_jiffies;
if (ktime_compare(*timeout, now) < 0) {
remaining_jiffies = 0;
} else {
ktime_t rem = ktime_sub(*timeout, now);
remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ);
}
if (ktime_compare(*timeout, now) <= 0)
return 0;
ktime_t rem = ktime_sub(*timeout, now);
s64 remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ);
return clamp(remaining_jiffies, 1LL, (s64)INT_MAX);
}
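The rewrite above is what fixes the non-blocking fence wait mentioned in the changelog: previously an already-expired timeout still produced a remaining time of 0 jiffies, which the final clamp() then bumped up to a minimum of 1 jiffy. A standalone before/after sketch of the conversion (plain C; HZ and the helper names are assumptions for illustration):

#include <stdio.h>

#define HZ 250
#define NSEC_PER_SEC 1000000000LL

static long long clamp_ll(long long v, long long lo, long long hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

/* old behaviour: an expired timeout falls through to the lower clamp */
static long long old_to_jiffies(long long timeout_ns, long long now_ns)
{
	long long rem = timeout_ns < now_ns ? 0 : timeout_ns - now_ns;

	return clamp_ll(rem / (NSEC_PER_SEC / HZ), 1, 0x7fffffff);
}

/* new behaviour: an expired (non-blocking) timeout returns 0 at once */
static long long new_to_jiffies(long long timeout_ns, long long now_ns)
{
	if (timeout_ns <= now_ns)
		return 0;

	return clamp_ll((timeout_ns - now_ns) / (NSEC_PER_SEC / HZ),
			1, 0x7fffffff);
}

int main(void)
{
	/* timeout equal to "now": a non-blocking wait */
	printf("old: %lld jiffy, new: %lld jiffies\n",
	       old_to_jiffies(1000, 1000), new_to_jiffies(1000, 1000));
	return 0;
}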

View file

@ -9,8 +9,15 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<reg32 offset="0x00004" name="REVISION_ID1"/>
<reg32 offset="0x00008" name="REVISION_ID2"/>
<reg32 offset="0x0000c" name="REVISION_ID3"/>
<reg32 offset="0x00010" name="CLK_CFG0"/>
<reg32 offset="0x00014" name="CLK_CFG1"/>
<reg32 offset="0x00010" name="CLK_CFG0">
<bitfield name="DIV_CTRL_3_0" low="0" high="3" type="uint"/>
<bitfield name="DIV_CTRL_7_4" low="4" high="7" type="uint"/>
</reg32>
<reg32 offset="0x00014" name="CLK_CFG1">
<bitfield name="CLK_EN" pos="5" type="boolean"/>
<bitfield name="CLK_EN_SEL" pos="4" type="boolean"/>
<bitfield name="BITCLK_SEL" low="2" high="3" type="uint"/>
</reg32>
<reg32 offset="0x00018" name="GLBL_CTRL"/>
<reg32 offset="0x0001c" name="RBUF_CTRL"/>
<reg32 offset="0x00020" name="VREG_CTRL_0"/>

View file

@ -590,6 +590,7 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
unsigned long timeout =
jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
struct mm_struct *mm = svmm->notifier.mm;
struct folio *folio;
struct page *page;
unsigned long start = args->p.addr;
unsigned long notifier_seq;
@ -616,12 +617,16 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
ret = -EINVAL;
goto out;
}
folio = page_folio(page);
mutex_lock(&svmm->mutex);
if (!mmu_interval_read_retry(&notifier->notifier,
notifier_seq))
break;
mutex_unlock(&svmm->mutex);
folio_unlock(folio);
folio_put(folio);
}
/* Map the page on the GPU. */
@ -637,8 +642,8 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
mutex_unlock(&svmm->mutex);
unlock_page(page);
put_page(page);
folio_unlock(folio);
folio_put(folio);
out:
mmu_interval_notifier_remove(&notifier->notifier);

View file

@ -75,7 +75,7 @@ gp10b_pmu_acr = {
.bootstrap_multiple_falcons = gp10b_pmu_acr_bootstrap_multiple_falcons,
};
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin");
MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin");
MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin");

View file

@ -109,13 +109,13 @@ static int jadard_prepare(struct drm_panel *panel)
if (jadard->desc->lp11_to_reset_delay_ms)
msleep(jadard->desc->lp11_to_reset_delay_ms);
gpiod_set_value(jadard->reset, 1);
gpiod_set_value(jadard->reset, 0);
msleep(5);
gpiod_set_value(jadard->reset, 0);
gpiod_set_value(jadard->reset, 1);
msleep(10);
gpiod_set_value(jadard->reset, 1);
gpiod_set_value(jadard->reset, 0);
msleep(130);
ret = jadard->desc->init(jadard);
@ -1130,7 +1130,7 @@ static int jadard_dsi_probe(struct mipi_dsi_device *dsi)
dsi->format = desc->format;
dsi->lanes = desc->lanes;
jadard->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
jadard->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(jadard->reset)) {
DRM_DEV_ERROR(&dsi->dev, "failed to get our reset GPIO\n");
return PTR_ERR(jadard->reset);

View file

@ -1723,9 +1723,11 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
drm_printf(p, "\tg2h outstanding: %d\n",
snapshot->g2h_outstanding);
if (snapshot->ctb)
xe_print_blob_ascii85(p, "CTB data", '\n',
if (snapshot->ctb) {
drm_printf(p, "[CTB].length: 0x%zx\n", snapshot->ctb_size);
xe_print_blob_ascii85(p, "[CTB].data", '\n',
snapshot->ctb, 0, snapshot->ctb_size);
}
} else {
drm_puts(p, "CT disabled\n");
}

View file

@ -208,10 +208,11 @@ void xe_guc_log_snapshot_print(struct xe_guc_log_snapshot *snapshot, struct drm_
drm_printf(p, "GuC timestamp: 0x%08llX [%llu]\n", snapshot->stamp, snapshot->stamp);
drm_printf(p, "Log level: %u\n", snapshot->level);
drm_printf(p, "[LOG].length: 0x%zx\n", snapshot->size);
remain = snapshot->size;
for (i = 0; i < snapshot->num_chunks; i++) {
size_t size = min(GUC_LOG_CHUNK_SIZE, remain);
const char *prefix = i ? NULL : "Log data";
const char *prefix = i ? NULL : "[LOG].data";
char suffix = i == snapshot->num_chunks - 1 ? '\n' : 0;
xe_print_blob_ascii85(p, prefix, suffix, snapshot->copy[i], 0, size);

View file

@ -757,19 +757,7 @@ int xe_irq_install(struct xe_device *xe)
xe_irq_postinstall(xe);
err = devm_add_action_or_reset(xe->drm.dev, irq_uninstall, xe);
if (err)
goto free_irq_handler;
return 0;
free_irq_handler:
if (xe_device_has_msix(xe))
xe_irq_msix_free(xe);
else
xe_irq_msi_free(xe);
return err;
return devm_add_action_or_reset(xe->drm.dev, irq_uninstall, xe);
}
static void xe_irq_msi_synchronize_irq(struct xe_device *xe)
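The simplification above leans on the documented behaviour of devm_add_action_or_reset(): if registering the action fails, the helper calls the action itself before returning the error, so the manual free_irq_handler unwind path became redundant. A minimal, hypothetical usage sketch (invented names, not the xe code):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct demo_ctx {
	void *buf;
};

/* release action: also invoked by devm_add_action_or_reset() itself
 * when it fails to register the action */
static void demo_release(void *data)
{
	struct demo_ctx *ctx = data;

	kfree(ctx->buf);
}

static int demo_init(struct device *dev, struct demo_ctx *ctx)
{
	ctx->buf = kzalloc(64, GFP_KERNEL);
	if (!ctx->buf)
		return -ENOMEM;

	/* no goto-based unwind needed: on failure demo_release() has
	 * already run and the error code is returned */
	return devm_add_action_or_reset(dev, demo_release, ctx);
}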

View file

@ -220,60 +220,32 @@ dmem_cgroup_calculate_protection(struct dmem_cgroup_pool_state *limit_pool,
struct dmem_cgroup_pool_state *test_pool)
{
struct page_counter *climit;
struct cgroup_subsys_state *css, *next_css;
struct cgroup_subsys_state *css;
struct dmemcg_state *dmemcg_iter;
struct dmem_cgroup_pool_state *pool, *parent_pool;
bool found_descendant;
struct dmem_cgroup_pool_state *pool, *found_pool;
climit = &limit_pool->cnt;
rcu_read_lock();
parent_pool = pool = limit_pool;
css = &limit_pool->cs->css;
/*
* This logic is roughly equivalent to css_foreach_descendant_pre,
* except we also track the parent pool to find out which pool we need
* to calculate protection values for.
*
* We can stop the traversal once we find test_pool among the
* descendants since we don't really care about any others.
*/
while (pool != test_pool) {
next_css = css_next_child(NULL, css);
if (next_css) {
parent_pool = pool;
} else {
while (css != &limit_pool->cs->css) {
next_css = css_next_child(css, css->parent);
if (next_css)
break;
css = css->parent;
parent_pool = pool_parent(parent_pool);
}
/*
* We can only hit this when test_pool is not a
* descendant of limit_pool.
*/
if (WARN_ON_ONCE(css == &limit_pool->cs->css))
break;
}
css = next_css;
found_descendant = false;
css_for_each_descendant_pre(css, &limit_pool->cs->css) {
dmemcg_iter = container_of(css, struct dmemcg_state, css);
found_pool = NULL;
list_for_each_entry_rcu(pool, &dmemcg_iter->pools, css_node) {
if (pool_parent(pool) == parent_pool) {
found_descendant = true;
if (pool->region == limit_pool->region) {
found_pool = pool;
break;
}
}
if (!found_descendant)
if (!found_pool)
continue;
page_counter_calculate_protection(
climit, &pool->cnt, true);
climit, &found_pool->cnt, true);
if (found_pool == test_pool)
break;
}
rcu_read_unlock();
}