In order to make the dbuf state computation less fragile, let's make it stand on its own feet by not requiring someone to peek into a crystal ball ahead of time to figure out which pipes need to be added to the state under which potential future conditions. Instead we compute each piece of the state as we go along, and if any fallout occurs that affects more than the current set of pipes we add the affected pipes to the state naturally.

That requires that we track a few extra things in the global dbuf state: the dbuf slices for each pipe, and the weight each pipe has when distributing the same set of slice(s) between multiple pipes. Easy enough.

We do need to follow a somewhat careful sequence of computations though, as there are several steps involved in cooking up the dbuf state. Though we could avoid some of that by computing more things on demand instead of relying on an earlier step of the algorithm to have filled them out. I think the end result is still reasonable, as the entire sequence is pretty much consolidated into a single function instead of being spread around all over.

The rough sequence is this:
1. calculate active_pipes
2. calculate dbuf slices for every pipe
3. calculate total enabled slices
4. calculate new dbuf weights for any crtc in the state
5. calculate new ddb entry for every pipe based on the sets of slices
   and weights, and add any affected crtc to the state
6. calculate new plane ddb entries for all crtcs in the state, and add
   any affected plane to the state so that we'll perform the requisite
   hw reprogramming

And as a nice bonus we get to throw dev_priv->wm.distrust_bios_wm out the window.

v2: Keep crtc_state->wm.skl.ddb

Reviewed-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210122205633.18492-8-ville.syrjala@linux.intel.com
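To make the sequence above concrete, here is a rough sketch of how such a consolidated dbuf state computation might be structured, assuming the struct intel_dbuf_state layout declared in the header below. The sketch_*() helpers are hypothetical placeholders for the individual per-step calculations; this is a sketch under those assumptions, not the actual i915 implementation.

/*
 * Rough sketch only: the sketch_*() helpers are hypothetical
 * placeholders for the per-step calculations, not real i915 functions.
 */
static int sketch_compute_dbuf_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_dbuf_state *new_dbuf_state;
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	enum pipe pipe;
	int ret, i;

	new_dbuf_state = intel_atomic_get_dbuf_state(state);
	if (IS_ERR(new_dbuf_state))
		return PTR_ERR(new_dbuf_state);

	/* 1. which pipes will be active in the new state */
	new_dbuf_state->active_pipes = sketch_calc_active_pipes(state);

	/* 2. which dbuf slice(s) each pipe should be using */
	for_each_pipe(i915, pipe)
		new_dbuf_state->slices[pipe] =
			sketch_dbuf_slices_for_pipe(new_dbuf_state, pipe);

	/* 3. the union of the per-pipe slices = the slices to power up */
	new_dbuf_state->enabled_slices = 0;
	for_each_pipe(i915, pipe)
		new_dbuf_state->enabled_slices |= new_dbuf_state->slices[pipe];

	/* 4. new relative ddb weight for every crtc already in the state */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
		new_dbuf_state->weight[crtc->pipe] = sketch_dbuf_weight(crtc_state);

	/*
	 * 5. per-pipe ddb extents from the slices and weights; any pipe
	 * whose extent changes is added to the state here, which is what
	 * removes the need for the old distrust_bios_wm guesswork.
	 */
	ret = sketch_compute_pipe_ddb(state, new_dbuf_state);
	if (ret)
		return ret;

	/* 6. per-plane ddb entries within each pipe's new extent */
	return sketch_compute_plane_ddb(state, new_dbuf_state);
}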
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_PM_H__
#define __INTEL_PM_H__

#include <linux/types.h>

#include "display/intel_bw.h"
#include "display/intel_display.h"
#include "display/intel_global_state.h"

#include "i915_drv.h"
#include "i915_reg.h"

struct drm_device;
struct drm_i915_private;
struct i915_request;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_plane;
struct skl_ddb_entry;
struct skl_pipe_wm;
struct skl_wm_level;

void intel_init_clock_gating(struct drm_i915_private *dev_priv);
void intel_suspend_hw(struct drm_i915_private *dev_priv);
int ilk_wm_max_level(const struct drm_i915_private *dev_priv);
void intel_update_watermarks(struct intel_crtc *crtc);
void intel_init_pm(struct drm_i915_private *dev_priv);
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
void intel_pm_setup(struct drm_i915_private *dev_priv);
void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
void skl_wm_get_hw_state(struct drm_i915_private *dev_priv);
u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv);
void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
			       struct skl_ddb_entry *ddb_y,
			       struct skl_ddb_entry *ddb_uv);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv);
u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
			    const struct skl_ddb_entry *entry);
void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
			      struct skl_pipe_wm *out);
void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
bool intel_can_enable_sagv(struct drm_i915_private *dev_priv,
			   const struct intel_bw_state *bw_state);
void intel_sagv_pre_plane_update(struct intel_atomic_state *state);
void intel_sagv_post_plane_update(struct intel_atomic_state *state);
bool skl_wm_level_equals(const struct skl_wm_level *l1,
			 const struct skl_wm_level *l2);
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
				 const struct skl_ddb_entry *entries,
				 int num_entries, int ignore_idx);
void skl_write_plane_wm(struct intel_plane *plane,
			const struct intel_crtc_state *crtc_state);
void skl_write_cursor_wm(struct intel_plane *plane,
			 const struct intel_crtc_state *crtc_state);
bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv);
void intel_init_ipc(struct drm_i915_private *dev_priv);
void intel_enable_ipc(struct drm_i915_private *dev_priv);

bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable);

struct intel_dbuf_state {
	struct intel_global_state base;

	struct skl_ddb_entry ddb[I915_MAX_PIPES];
	unsigned int weight[I915_MAX_PIPES];
	u8 slices[I915_MAX_PIPES];

	u8 enabled_slices;
	u8 active_pipes;
};
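/*
 * Illustrative note (hypothetical example, not the exact allocation
 * code): when several pipes end up with the same set of slices, the
 * shared ddb range is expected to be carved up roughly in proportion
 * to the per-pipe weights, along the lines of:
 *
 *	pipe_share = weight[pipe] / sum of weights of all sharing pipes
 *	ddb[pipe]  = that pipe's proportional [start, end) chunk of the
 *	             ddb blocks covered by the shared slices
 *
 * The resulting extent is what gets stored in ddb[] above.
 */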

int intel_dbuf_init(struct drm_i915_private *dev_priv);

struct intel_dbuf_state *
intel_atomic_get_dbuf_state(struct intel_atomic_state *state);

#define to_intel_dbuf_state(x) container_of((x), struct intel_dbuf_state, base)
#define intel_atomic_get_old_dbuf_state(state) \
	to_intel_dbuf_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->dbuf.obj))
#define intel_atomic_get_new_dbuf_state(state) \
	to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->dbuf.obj))
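/*
 * Usage sketch (hypothetical example): intel_atomic_get_dbuf_state()
 * locks the global dbuf object and adds its state to the atomic state;
 * after that the old/new accessors above can be used to see what
 * changed, along the lines of:
 *
 *	new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
 *	old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
 *
 *	if (new_dbuf_state &&
 *	    new_dbuf_state->enabled_slices != old_dbuf_state->enabled_slices)
 *		reprogram the dbuf slices around the plane update, see
 *		intel_dbuf_pre_plane_update()/intel_dbuf_post_plane_update()
 */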

int intel_dbuf_init(struct drm_i915_private *dev_priv);
void intel_dbuf_pre_plane_update(struct intel_atomic_state *state);
void intel_dbuf_post_plane_update(struct intel_atomic_state *state);

#endif /* __INTEL_PM_H__ */