Diffstat (limited to 'include')
51 files changed, 1621 insertions, 1223 deletions
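The bulk of this change moves the DisplayPort, DSC, HDCP, HDMI and SCDC helpers under include/drm/display/ and splits pure protocol/register definitions from the helper APIs that drag in i2c, delay and drm_connector. Not part of the diff itself, but as a rough sketch of what the split means for a consumer (the include list below is illustrative, not taken from any particular driver):

	/* Before this series: one header carried both the DPCD register
	 * definitions and the AUX/helper declarations. */
	/* #include <drm/dp/drm_dp_helper.h> */

	/* After: pure DPCD/protocol definitions only ... */
	#include <drm/display/drm_dp.h>

	/* ... while the AUX channel and other helper APIs (which pull in
	 * linux/i2c.h, linux/delay.h and drm_connector.h) live here. */
	#include <drm/display/drm_dp_helper.h>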
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h index 2a1f85f9a8a3..f668e75fbabe 100644 --- a/include/drm/bridge/dw_hdmi.h +++ b/include/drm/bridge/dw_hdmi.h @@ -143,6 +143,11 @@ struct dw_hdmi_plat_data { const struct drm_display_info *info, const struct drm_display_mode *mode); + /* Platform-specific audio enable/disable (optional) */ + void (*enable_audio)(struct dw_hdmi *hdmi, int channel, + int width, int rate, int non_pcm); + void (*disable_audio)(struct dw_hdmi *hdmi); + /* Vendor PHY support */ const struct dw_hdmi_phy_ops *phy_ops; const char *phy_name; @@ -173,6 +178,8 @@ void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense); int dw_hdmi_set_plugged_cb(struct dw_hdmi *hdmi, hdmi_codec_plugged_cb fn, struct device *codec_dev); +void dw_hdmi_set_sample_non_pcm(struct dw_hdmi *hdmi, unsigned int non_pcm); +void dw_hdmi_set_sample_width(struct dw_hdmi *hdmi, unsigned int width); void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate); void dw_hdmi_set_channel_count(struct dw_hdmi *hdmi, unsigned int cnt); void dw_hdmi_set_channel_status(struct dw_hdmi *hdmi, u8 *channel_status); @@ -187,9 +194,11 @@ void dw_hdmi_phy_i2c_set_addr(struct dw_hdmi *hdmi, u8 address); void dw_hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data, unsigned char addr); +void dw_hdmi_phy_gen1_reset(struct dw_hdmi *hdmi); + void dw_hdmi_phy_gen2_pddq(struct dw_hdmi *hdmi, u8 enable); void dw_hdmi_phy_gen2_txpwron(struct dw_hdmi *hdmi, u8 enable); -void dw_hdmi_phy_reset(struct dw_hdmi *hdmi); +void dw_hdmi_phy_gen2_reset(struct dw_hdmi *hdmi); enum drm_connector_status dw_hdmi_phy_read_hpd(struct dw_hdmi *hdmi, void *data); diff --git a/include/drm/dp/drm_dp_helper.h b/include/drm/display/drm_dp.h index 51e02cf75277..9e3aff7e68bb 100644 --- a/include/drm/dp/drm_dp_helper.h +++ b/include/drm/display/drm_dp.h @@ -20,17 +20,10 @@ * OF THIS SOFTWARE. */ -#ifndef _DRM_DP_HELPER_H_ -#define _DRM_DP_HELPER_H_ +#ifndef _DRM_DP_H_ +#define _DRM_DP_H_ -#include <linux/delay.h> -#include <linux/i2c.h> #include <linux/types.h> -#include <drm/drm_connector.h> - -struct drm_device; -struct drm_dp_aux; -struct drm_panel; /* * Unless otherwise noted, all values are from the DP 1.1a spec. Note that @@ -361,6 +354,7 @@ struct drm_panel; # define DP_PSR_IS_SUPPORTED 1 # define DP_PSR2_IS_SUPPORTED 2 /* eDP 1.4 */ # define DP_PSR2_WITH_Y_COORD_IS_SUPPORTED 3 /* eDP 1.4a */ +# define DP_PSR2_WITH_Y_COORD_ET_SUPPORTED 4 /* eDP 1.5, adopted eDP 1.4b SCR */ #define DP_PSR_CAPS 0x071 /* XXX 1.2? 
*/ # define DP_PSR_NO_TRAIN_ON_EXIT 1 @@ -375,6 +369,7 @@ struct drm_panel; # define DP_PSR_SETUP_TIME_SHIFT 1 # define DP_PSR2_SU_Y_COORDINATE_REQUIRED (1 << 4) /* eDP 1.4a */ # define DP_PSR2_SU_GRANULARITY_REQUIRED (1 << 5) /* eDP 1.4b */ +# define DP_PSR2_SU_AUX_FRAME_SYNC_NOT_NEEDED (1 << 6)/* eDP 1.5, adopted eDP 1.4b SCR */ #define DP_PSR2_SU_X_GRANULARITY 0x072 /* eDP 1.4b */ #define DP_PSR2_SU_Y_GRANULARITY 0x074 /* eDP 1.4b */ @@ -1520,16 +1515,6 @@ enum drm_dp_phy { #define DP_LINK_CONSTANT_N_VALUE 0x8000 #define DP_LINK_STATUS_SIZE 6 -bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE], - int lane_count); -bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE], - int lane_count); -u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE], - int lane); -u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE], - int lane); -u8 drm_dp_get_adjust_tx_ffe_preset(const u8 link_status[DP_LINK_STATUS_SIZE], - int lane); #define DP_BRANCH_OUI_HEADER_SIZE 0xc #define DP_RECEIVER_CAP_SIZE 0xf @@ -1539,31 +1524,6 @@ u8 drm_dp_get_adjust_tx_ffe_preset(const u8 link_status[DP_LINK_STATUS_SIZE], #define DP_LTTPR_COMMON_CAP_SIZE 8 #define DP_LTTPR_PHY_CAP_SIZE 3 -int drm_dp_read_clock_recovery_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE], - enum drm_dp_phy dp_phy, bool uhbr); -int drm_dp_read_channel_eq_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE], - enum drm_dp_phy dp_phy, bool uhbr); - -void drm_dp_link_train_clock_recovery_delay(const struct drm_dp_aux *aux, - const u8 dpcd[DP_RECEIVER_CAP_SIZE]); -void drm_dp_lttpr_link_train_clock_recovery_delay(void); -void drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux, - const u8 dpcd[DP_RECEIVER_CAP_SIZE]); -void drm_dp_lttpr_link_train_channel_eq_delay(const struct drm_dp_aux *aux, - const u8 caps[DP_LTTPR_PHY_CAP_SIZE]); - -int drm_dp_128b132b_read_aux_rd_interval(struct drm_dp_aux *aux); -bool drm_dp_128b132b_lane_channel_eq_done(const u8 link_status[DP_LINK_STATUS_SIZE], - int lane_count); -bool drm_dp_128b132b_lane_symbol_locked(const u8 link_status[DP_LINK_STATUS_SIZE], - int lane_count); -bool drm_dp_128b132b_eq_interlane_align_done(const u8 link_status[DP_LINK_STATUS_SIZE]); -bool drm_dp_128b132b_cds_interlane_align_done(const u8 link_status[DP_LINK_STATUS_SIZE]); -bool drm_dp_128b132b_link_training_failed(const u8 link_status[DP_LINK_STATUS_SIZE]); - -u8 drm_dp_link_rate_to_bw_code(int link_rate); -int drm_dp_bw_code_to_link_rate(u8 link_bw); - #define DP_SDP_AUDIO_TIMESTAMP 0x01 #define DP_SDP_AUDIO_STREAM 0x02 #define DP_SDP_EXTENSION 0x04 /* DP 1.1 */ @@ -1727,651 +1687,4 @@ enum dp_content_type { DP_CONTENT_TYPE_GAME = 0x04, }; -/** - * struct drm_dp_vsc_sdp - drm DP VSC SDP - * - * This structure represents a DP VSC SDP of drm - * It is based on DP 1.4 spec [Table 2-116: VSC SDP Header Bytes] and - * [Table 2-117: VSC SDP Payload for DB16 through DB18] - * - * @sdp_type: secondary-data packet type - * @revision: revision number - * @length: number of valid data bytes - * @pixelformat: pixel encoding format - * @colorimetry: colorimetry format - * @bpc: bit per color - * @dynamic_range: dynamic range information - * @content_type: CTA-861-G defines content types and expected processing by a sink device - */ -struct drm_dp_vsc_sdp { - unsigned char sdp_type; - unsigned char revision; - unsigned char length; - enum dp_pixelformat pixelformat; - enum dp_colorimetry colorimetry; - int bpc; - enum 
dp_dynamic_range dynamic_range; - enum dp_content_type content_type; -}; - -void drm_dp_vsc_sdp_log(const char *level, struct device *dev, - const struct drm_dp_vsc_sdp *vsc); - -int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE]); - -static inline int -drm_dp_max_link_rate(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) -{ - return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]); -} - -static inline u8 -drm_dp_max_lane_count(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) -{ - return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; -} - -static inline bool -drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) -{ - return dpcd[DP_DPCD_REV] >= 0x11 && - (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP); -} - -static inline bool -drm_dp_fast_training_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) -{ - return dpcd[DP_DPCD_REV] >= 0x11 && - (dpcd[DP_MAX_DOWNSPREAD] & DP_NO_AUX_HANDSHAKE_LINK_TRAINING); -} - -static inline bool -drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) -{ - return dpcd[DP_DPCD_REV] >= 0x12 && - dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED; -} - -static inline bool -drm_dp_max_downspread(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) -{ - return dpcd[DP_DPCD_REV] >= 0x11 || - dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5; -} - -static inline bool -drm_dp_tps4_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) -{ - return dpcd[DP_DPCD_REV] >= 0x14 && - dpcd[DP_MAX_DOWNSPREAD] & DP_TPS4_SUPPORTED; -} - -static inline u8 -drm_dp_training_pattern_mask(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) -{ - return (dpcd[DP_DPCD_REV] >= 0x14) ? DP_TRAINING_PATTERN_MASK_1_4 : - DP_TRAINING_PATTERN_MASK; -} - -static inline bool -drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) -{ - return dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT; -} - -/* DP/eDP DSC support */ -u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE], - bool is_edp); -u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]); -int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpc[DP_DSC_RECEIVER_CAP_SIZE], - u8 dsc_bpc[3]); - -static inline bool -drm_dp_sink_supports_dsc(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) -{ - return dsc_dpcd[DP_DSC_SUPPORT - DP_DSC_SUPPORT] & - DP_DSC_DECOMPRESSION_IS_SUPPORTED; -} - -static inline u16 -drm_edp_dsc_sink_output_bpp(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) -{ - return dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_LOW - DP_DSC_SUPPORT] | - (dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_HI - DP_DSC_SUPPORT] & - DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK << - DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT); -} - -static inline u32 -drm_dp_dsc_sink_max_slice_width(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) -{ - /* Max Slicewidth = Number of Pixels * 320 */ - return dsc_dpcd[DP_DSC_MAX_SLICE_WIDTH - DP_DSC_SUPPORT] * - DP_DSC_SLICE_WIDTH_MULTIPLIER; -} - -/* Forward Error Correction Support on DP 1.4 */ -static inline bool -drm_dp_sink_supports_fec(const u8 fec_capable) -{ - return fec_capable & DP_FEC_CAPABLE; -} - -static inline bool -drm_dp_channel_coding_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) -{ - return dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_8B10B; -} - -static inline bool -drm_dp_alternate_scrambler_reset_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) -{ - return dpcd[DP_EDP_CONFIGURATION_CAP] & - DP_ALTERNATE_SCRAMBLER_RESET_CAP; -} - -/* Ignore MSA timing for Adaptive Sync support on DP 1.4 */ -static inline bool -drm_dp_sink_can_do_video_without_timing_msa(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) -{ - return 
dpcd[DP_DOWN_STREAM_PORT_COUNT] & - DP_MSA_TIMING_PAR_IGNORED; -} - -/** - * drm_edp_backlight_supported() - Check an eDP DPCD for VESA backlight support - * @edp_dpcd: The DPCD to check - * - * Note that currently this function will return %false for panels which support various DPCD - * backlight features but which require the brightness be set through PWM, and don't support setting - * the brightness level via the DPCD. - * - * Returns: %True if @edp_dpcd indicates that VESA backlight controls are supported, %false - * otherwise - */ -static inline bool -drm_edp_backlight_supported(const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]) -{ - return !!(edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP); -} - -/* - * DisplayPort AUX channel - */ - -/** - * struct drm_dp_aux_msg - DisplayPort AUX channel transaction - * @address: address of the (first) register to access - * @request: contains the type of transaction (see DP_AUX_* macros) - * @reply: upon completion, contains the reply type of the transaction - * @buffer: pointer to a transmission or reception buffer - * @size: size of @buffer - */ -struct drm_dp_aux_msg { - unsigned int address; - u8 request; - u8 reply; - void *buffer; - size_t size; -}; - -struct cec_adapter; -struct edid; -struct drm_connector; - -/** - * struct drm_dp_aux_cec - DisplayPort CEC-Tunneling-over-AUX - * @lock: mutex protecting this struct - * @adap: the CEC adapter for CEC-Tunneling-over-AUX support. - * @connector: the connector this CEC adapter is associated with - * @unregister_work: unregister the CEC adapter - */ -struct drm_dp_aux_cec { - struct mutex lock; - struct cec_adapter *adap; - struct drm_connector *connector; - struct delayed_work unregister_work; -}; - -/** - * struct drm_dp_aux - DisplayPort AUX channel - * - * An AUX channel can also be used to transport I2C messages to a sink. A - * typical application of that is to access an EDID that's present in the sink - * device. The @transfer() function can also be used to execute such - * transactions. The drm_dp_aux_register() function registers an I2C adapter - * that can be passed to drm_probe_ddc(). Upon removal, drivers should call - * drm_dp_aux_unregister() to remove the I2C adapter. The I2C adapter uses long - * transfers by default; if a partial response is received, the adapter will - * drop down to the size given by the partial response for this transaction - * only. - */ -struct drm_dp_aux { - /** - * @name: user-visible name of this AUX channel and the - * I2C-over-AUX adapter. - * - * It's also used to specify the name of the I2C adapter. If set - * to %NULL, dev_name() of @dev will be used. - */ - const char *name; - - /** - * @ddc: I2C adapter that can be used for I2C-over-AUX - * communication - */ - struct i2c_adapter ddc; - - /** - * @dev: pointer to struct device that is the parent for this - * AUX channel. - */ - struct device *dev; - - /** - * @drm_dev: pointer to the &drm_device that owns this AUX channel. - * Beware, this may be %NULL before drm_dp_aux_register() has been - * called. - * - * It should be set to the &drm_device that will be using this AUX - * channel as early as possible. For many graphics drivers this should - * happen before drm_dp_aux_init(), however it's perfectly fine to set - * this field later so long as it's assigned before calling - * drm_dp_aux_register(). 
- */ - struct drm_device *drm_dev; - - /** - * @crtc: backpointer to the crtc that is currently using this - * AUX channel - */ - struct drm_crtc *crtc; - - /** - * @hw_mutex: internal mutex used for locking transfers. - * - * Note that if the underlying hardware is shared among multiple - * channels, the driver needs to do additional locking to - * prevent concurrent access. - */ - struct mutex hw_mutex; - - /** - * @crc_work: worker that captures CRCs for each frame - */ - struct work_struct crc_work; - - /** - * @crc_count: counter of captured frame CRCs - */ - u8 crc_count; - - /** - * @transfer: transfers a message representing a single AUX - * transaction. - * - * This is a hardware-specific implementation of how - * transactions are executed that the drivers must provide. - * - * A pointer to a &drm_dp_aux_msg structure describing the - * transaction is passed into this function. Upon success, the - * implementation should return the number of payload bytes that - * were transferred, or a negative error-code on failure. - * - * Helpers will propagate these errors, with the exception of - * the %-EBUSY error, which causes a transaction to be retried. - * On a short, helpers will return %-EPROTO to make it simpler - * to check for failure. - * - * The @transfer() function must only modify the reply field of - * the &drm_dp_aux_msg structure. The retry logic and i2c - * helpers assume this is the case. - * - * Also note that this callback can be called no matter the - * state @dev is in. Drivers that need that device to be powered - * to perform this operation will first need to make sure it's - * been properly enabled. - */ - ssize_t (*transfer)(struct drm_dp_aux *aux, - struct drm_dp_aux_msg *msg); - - /** - * @i2c_nack_count: Counts I2C NACKs, used for DP validation. - */ - unsigned i2c_nack_count; - /** - * @i2c_defer_count: Counts I2C DEFERs, used for DP validation. - */ - unsigned i2c_defer_count; - /** - * @cec: struct containing fields used for CEC-Tunneling-over-AUX. - */ - struct drm_dp_aux_cec cec; - /** - * @is_remote: Is this AUX CH actually using sideband messaging. - */ - bool is_remote; -}; - -ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, - void *buffer, size_t size); -ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset, - void *buffer, size_t size); - -/** - * drm_dp_dpcd_readb() - read a single byte from the DPCD - * @aux: DisplayPort AUX channel - * @offset: address of the register to read - * @valuep: location where the value of the register will be stored - * - * Returns the number of bytes transferred (1) on success, or a negative - * error code on failure. - */ -static inline ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux, - unsigned int offset, u8 *valuep) -{ - return drm_dp_dpcd_read(aux, offset, valuep, 1); -} - -/** - * drm_dp_dpcd_writeb() - write a single byte to the DPCD - * @aux: DisplayPort AUX channel - * @offset: address of the register to write - * @value: value to write to the register - * - * Returns the number of bytes transferred (1) on success, or a negative - * error code on failure. 
- */ -static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux, - unsigned int offset, u8 value) -{ - return drm_dp_dpcd_write(aux, offset, &value, 1); -} - -int drm_dp_read_dpcd_caps(struct drm_dp_aux *aux, - u8 dpcd[DP_RECEIVER_CAP_SIZE]); - -int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux, - u8 status[DP_LINK_STATUS_SIZE]); - -int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux, - enum drm_dp_phy dp_phy, - u8 link_status[DP_LINK_STATUS_SIZE]); - -bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux, - u8 real_edid_checksum); - -int drm_dp_read_downstream_info(struct drm_dp_aux *aux, - const u8 dpcd[DP_RECEIVER_CAP_SIZE], - u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS]); -bool drm_dp_downstream_is_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const u8 port_cap[4], u8 type); -bool drm_dp_downstream_is_tmds(const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const u8 port_cap[4], - const struct edid *edid); -int drm_dp_downstream_max_dotclock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const u8 port_cap[4]); -int drm_dp_downstream_max_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const u8 port_cap[4], - const struct edid *edid); -int drm_dp_downstream_min_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const u8 port_cap[4], - const struct edid *edid); -int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const u8 port_cap[4], - const struct edid *edid); -bool drm_dp_downstream_420_passthrough(const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const u8 port_cap[4]); -bool drm_dp_downstream_444_to_420_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const u8 port_cap[4]); -struct drm_display_mode *drm_dp_downstream_mode(struct drm_device *dev, - const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const u8 port_cap[4]); -int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]); -void drm_dp_downstream_debug(struct seq_file *m, - const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const u8 port_cap[4], - const struct edid *edid, - struct drm_dp_aux *aux); -enum drm_mode_subconnector -drm_dp_subconnector_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const u8 port_cap[4]); -void drm_dp_set_subconnector_property(struct drm_connector *connector, - enum drm_connector_status status, - const u8 *dpcd, - const u8 port_cap[4]); - -struct drm_dp_desc; -bool drm_dp_read_sink_count_cap(struct drm_connector *connector, - const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const struct drm_dp_desc *desc); -int drm_dp_read_sink_count(struct drm_dp_aux *aux); - -int drm_dp_read_lttpr_common_caps(struct drm_dp_aux *aux, - u8 caps[DP_LTTPR_COMMON_CAP_SIZE]); -int drm_dp_read_lttpr_phy_caps(struct drm_dp_aux *aux, - enum drm_dp_phy dp_phy, - u8 caps[DP_LTTPR_PHY_CAP_SIZE]); -int drm_dp_lttpr_count(const u8 cap[DP_LTTPR_COMMON_CAP_SIZE]); -int drm_dp_lttpr_max_link_rate(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE]); -int drm_dp_lttpr_max_lane_count(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE]); -bool drm_dp_lttpr_voltage_swing_level_3_supported(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]); -bool drm_dp_lttpr_pre_emphasis_level_3_supported(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]); - -void drm_dp_remote_aux_init(struct drm_dp_aux *aux); -void drm_dp_aux_init(struct drm_dp_aux *aux); -int drm_dp_aux_register(struct drm_dp_aux *aux); -void drm_dp_aux_unregister(struct drm_dp_aux *aux); - -int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc); -int drm_dp_stop_crc(struct drm_dp_aux *aux); - -struct drm_dp_dpcd_ident { - u8 oui[3]; - u8 device_id[6]; - u8 hw_rev; - u8 sw_major_rev; - u8 sw_minor_rev; -} __packed; - -/** - * struct drm_dp_desc 
- DP branch/sink device descriptor - * @ident: DP device identification from DPCD 0x400 (sink) or 0x500 (branch). - * @quirks: Quirks; use drm_dp_has_quirk() to query for the quirks. - */ -struct drm_dp_desc { - struct drm_dp_dpcd_ident ident; - u32 quirks; -}; - -int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc, - bool is_branch); - -/** - * enum drm_dp_quirk - Display Port sink/branch device specific quirks - * - * Display Port sink and branch devices in the wild have a variety of bugs, try - * to collect them here. The quirks are shared, but it's up to the drivers to - * implement workarounds for them. - */ -enum drm_dp_quirk { - /** - * @DP_DPCD_QUIRK_CONSTANT_N: - * - * The device requires main link attributes Mvid and Nvid to be limited - * to 16 bits. So will give a constant value (0x8000) for compatability. - */ - DP_DPCD_QUIRK_CONSTANT_N, - /** - * @DP_DPCD_QUIRK_NO_PSR: - * - * The device does not support PSR even if reports that it supports or - * driver still need to implement proper handling for such device. - */ - DP_DPCD_QUIRK_NO_PSR, - /** - * @DP_DPCD_QUIRK_NO_SINK_COUNT: - * - * The device does not set SINK_COUNT to a non-zero value. - * The driver should ignore SINK_COUNT during detection. Note that - * drm_dp_read_sink_count_cap() automatically checks for this quirk. - */ - DP_DPCD_QUIRK_NO_SINK_COUNT, - /** - * @DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD: - * - * The device supports MST DSC despite not supporting Virtual DPCD. - * The DSC caps can be read from the physical aux instead. - */ - DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD, - /** - * @DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS: - * - * The device supports a link rate of 3.24 Gbps (multiplier 0xc) despite - * the DP_MAX_LINK_RATE register reporting a lower max multiplier. - */ - DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS, -}; - -/** - * drm_dp_has_quirk() - does the DP device have a specific quirk - * @desc: Device descriptor filled by drm_dp_read_desc() - * @quirk: Quirk to query for - * - * Return true if DP device identified by @desc has @quirk. - */ -static inline bool -drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk) -{ - return desc->quirks & BIT(quirk); -} - -/** - * struct drm_edp_backlight_info - Probed eDP backlight info struct - * @pwmgen_bit_count: The pwmgen bit count - * @pwm_freq_pre_divider: The PWM frequency pre-divider value being used for this backlight, if any - * @max: The maximum backlight level that may be set - * @lsb_reg_used: Do we also write values to the DP_EDP_BACKLIGHT_BRIGHTNESS_LSB register? - * @aux_enable: Does the panel support the AUX enable cap? - * @aux_set: Does the panel support setting the brightness through AUX? - * - * This structure contains various data about an eDP backlight, which can be populated by using - * drm_edp_backlight_init(). 
- */ -struct drm_edp_backlight_info { - u8 pwmgen_bit_count; - u8 pwm_freq_pre_divider; - u16 max; - - bool lsb_reg_used : 1; - bool aux_enable : 1; - bool aux_set : 1; -}; - -int -drm_edp_backlight_init(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl, - u16 driver_pwm_freq_hz, const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE], - u16 *current_level, u8 *current_mode); -int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl, - u16 level); -int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl, - u16 level); -int drm_edp_backlight_disable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl); - -#if IS_ENABLED(CONFIG_DRM_KMS_HELPER) && (IS_BUILTIN(CONFIG_BACKLIGHT_CLASS_DEVICE) || \ - (IS_MODULE(CONFIG_DRM_KMS_HELPER) && IS_MODULE(CONFIG_BACKLIGHT_CLASS_DEVICE))) - -int drm_panel_dp_aux_backlight(struct drm_panel *panel, struct drm_dp_aux *aux); - -#else - -static inline int drm_panel_dp_aux_backlight(struct drm_panel *panel, - struct drm_dp_aux *aux) -{ - return 0; -} - -#endif - -#ifdef CONFIG_DRM_DP_CEC -void drm_dp_cec_irq(struct drm_dp_aux *aux); -void drm_dp_cec_register_connector(struct drm_dp_aux *aux, - struct drm_connector *connector); -void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux); -void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid); -void drm_dp_cec_unset_edid(struct drm_dp_aux *aux); -#else -static inline void drm_dp_cec_irq(struct drm_dp_aux *aux) -{ -} - -static inline void -drm_dp_cec_register_connector(struct drm_dp_aux *aux, - struct drm_connector *connector) -{ -} - -static inline void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux) -{ -} - -static inline void drm_dp_cec_set_edid(struct drm_dp_aux *aux, - const struct edid *edid) -{ -} - -static inline void drm_dp_cec_unset_edid(struct drm_dp_aux *aux) -{ -} - -#endif - -/** - * struct drm_dp_phy_test_params - DP Phy Compliance parameters - * @link_rate: Requested Link rate from DPCD 0x219 - * @num_lanes: Number of lanes requested by sing through DPCD 0x220 - * @phy_pattern: DP Phy test pattern from DPCD 0x248 - * @hbr2_reset: DP HBR2_COMPLIANCE_SCRAMBLER_RESET from DCPD 0x24A and 0x24B - * @custom80: DP Test_80BIT_CUSTOM_PATTERN from DPCDs 0x250 through 0x259 - * @enhanced_frame_cap: flag for enhanced frame capability. 
- */ -struct drm_dp_phy_test_params { - int link_rate; - u8 num_lanes; - u8 phy_pattern; - u8 hbr2_reset[2]; - u8 custom80[10]; - bool enhanced_frame_cap; -}; - -int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux, - struct drm_dp_phy_test_params *data); -int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux, - struct drm_dp_phy_test_params *data, u8 dp_rev); -int drm_dp_get_pcon_max_frl_bw(const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const u8 port_cap[4]); -int drm_dp_pcon_frl_prepare(struct drm_dp_aux *aux, bool enable_frl_ready_hpd); -bool drm_dp_pcon_is_frl_ready(struct drm_dp_aux *aux); -int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps, - u8 frl_mode); -int drm_dp_pcon_frl_configure_2(struct drm_dp_aux *aux, int max_frl_mask, - u8 frl_type); -int drm_dp_pcon_reset_frl_config(struct drm_dp_aux *aux); -int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux); - -bool drm_dp_pcon_hdmi_link_active(struct drm_dp_aux *aux); -int drm_dp_pcon_hdmi_link_mode(struct drm_dp_aux *aux, u8 *frl_trained_mask); -void drm_dp_pcon_hdmi_frl_link_error_count(struct drm_dp_aux *aux, - struct drm_connector *connector); -bool drm_dp_pcon_enc_is_dsc_1_2(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]); -int drm_dp_pcon_dsc_max_slices(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]); -int drm_dp_pcon_dsc_max_slice_width(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]); -int drm_dp_pcon_dsc_bpp_incr(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]); -int drm_dp_pcon_pps_default(struct drm_dp_aux *aux); -int drm_dp_pcon_pps_override_buf(struct drm_dp_aux *aux, u8 pps_buf[128]); -int drm_dp_pcon_pps_override_param(struct drm_dp_aux *aux, u8 pps_param[6]); -bool drm_dp_downstream_rgb_to_ycbcr_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const u8 port_cap[4], u8 color_spc); -int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc); - -#endif /* _DRM_DP_HELPER_H_ */ +#endif /* _DRM_DP_H_ */ diff --git a/include/drm/dp/drm_dp_aux_bus.h b/include/drm/display/drm_dp_aux_bus.h index 4f19b20b1dd6..4f19b20b1dd6 100644 --- a/include/drm/dp/drm_dp_aux_bus.h +++ b/include/drm/display/drm_dp_aux_bus.h diff --git a/include/drm/dp/drm_dp_dual_mode_helper.h b/include/drm/display/drm_dp_dual_mode_helper.h index 7ee482265087..7ee482265087 100644 --- a/include/drm/dp/drm_dp_dual_mode_helper.h +++ b/include/drm/display/drm_dp_dual_mode_helper.h diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h new file mode 100644 index 000000000000..dca40a045dd6 --- /dev/null +++ b/include/drm/display/drm_dp_helper.h @@ -0,0 +1,722 @@ +/* + * Copyright © 2008 Keith Packard + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. 
+ * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#ifndef _DRM_DP_HELPER_H_ +#define _DRM_DP_HELPER_H_ + +#include <linux/delay.h> +#include <linux/i2c.h> + +#include <drm/display/drm_dp.h> +#include <drm/drm_connector.h> + +struct drm_device; +struct drm_dp_aux; +struct drm_panel; + +bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count); +bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count); +u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane); +u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane); +u8 drm_dp_get_adjust_tx_ffe_preset(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane); + +int drm_dp_read_clock_recovery_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE], + enum drm_dp_phy dp_phy, bool uhbr); +int drm_dp_read_channel_eq_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE], + enum drm_dp_phy dp_phy, bool uhbr); + +void drm_dp_link_train_clock_recovery_delay(const struct drm_dp_aux *aux, + const u8 dpcd[DP_RECEIVER_CAP_SIZE]); +void drm_dp_lttpr_link_train_clock_recovery_delay(void); +void drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux, + const u8 dpcd[DP_RECEIVER_CAP_SIZE]); +void drm_dp_lttpr_link_train_channel_eq_delay(const struct drm_dp_aux *aux, + const u8 caps[DP_LTTPR_PHY_CAP_SIZE]); + +int drm_dp_128b132b_read_aux_rd_interval(struct drm_dp_aux *aux); +bool drm_dp_128b132b_lane_channel_eq_done(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count); +bool drm_dp_128b132b_lane_symbol_locked(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count); +bool drm_dp_128b132b_eq_interlane_align_done(const u8 link_status[DP_LINK_STATUS_SIZE]); +bool drm_dp_128b132b_cds_interlane_align_done(const u8 link_status[DP_LINK_STATUS_SIZE]); +bool drm_dp_128b132b_link_training_failed(const u8 link_status[DP_LINK_STATUS_SIZE]); + +u8 drm_dp_link_rate_to_bw_code(int link_rate); +int drm_dp_bw_code_to_link_rate(u8 link_bw); + +/** + * struct drm_dp_vsc_sdp - drm DP VSC SDP + * + * This structure represents a DP VSC SDP of drm + * It is based on DP 1.4 spec [Table 2-116: VSC SDP Header Bytes] and + * [Table 2-117: VSC SDP Payload for DB16 through DB18] + * + * @sdp_type: secondary-data packet type + * @revision: revision number + * @length: number of valid data bytes + * @pixelformat: pixel encoding format + * @colorimetry: colorimetry format + * @bpc: bit per color + * @dynamic_range: dynamic range information + * @content_type: CTA-861-G defines content types and expected processing by a sink device + */ +struct drm_dp_vsc_sdp { + unsigned char sdp_type; + unsigned char revision; + unsigned char length; + enum dp_pixelformat pixelformat; + enum dp_colorimetry colorimetry; + int bpc; + enum dp_dynamic_range dynamic_range; + enum dp_content_type content_type; +}; + +void drm_dp_vsc_sdp_log(const char *level, struct device *dev, + const struct drm_dp_vsc_sdp *vsc); + +int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE]); 
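Not part of the commit, but as a quick orientation for the VSC SDP helpers declared just above: a minimal sketch of filling a struct drm_dp_vsc_sdp and dumping it through drm_dp_vsc_sdp_log(). The revision/length/bpc values and the device pointer are illustrative assumptions only, not mandated by this header.

	#include <drm/display/drm_dp.h>
	#include <drm/display/drm_dp_helper.h>

	/* Illustrative sketch: log a hand-filled VSC SDP at debug level. */
	static void example_log_vsc_sdp(struct device *dev)
	{
		struct drm_dp_vsc_sdp vsc = {
			.sdp_type = DP_SDP_VSC,
			.revision = 0x05,	/* example: DP 1.4 style VSC revision */
			.length = 0x13,		/* example payload length */
			.pixelformat = DP_PIXELFORMAT_RGB,
			.colorimetry = DP_COLORIMETRY_DEFAULT,
			.bpc = 8,
			.dynamic_range = DP_DYNAMIC_RANGE_VESA,
			.content_type = DP_CONTENT_TYPE_NOT_DEFINED,
		};

		drm_dp_vsc_sdp_log(KERN_DEBUG, dev, &vsc);
	}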
+ +static inline int +drm_dp_max_link_rate(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]); +} + +static inline u8 +drm_dp_max_lane_count(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; +} + +static inline bool +drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DPCD_REV] >= 0x11 && + (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP); +} + +static inline bool +drm_dp_fast_training_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DPCD_REV] >= 0x11 && + (dpcd[DP_MAX_DOWNSPREAD] & DP_NO_AUX_HANDSHAKE_LINK_TRAINING); +} + +static inline bool +drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DPCD_REV] >= 0x12 && + dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED; +} + +static inline bool +drm_dp_max_downspread(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DPCD_REV] >= 0x11 || + dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5; +} + +static inline bool +drm_dp_tps4_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DPCD_REV] >= 0x14 && + dpcd[DP_MAX_DOWNSPREAD] & DP_TPS4_SUPPORTED; +} + +static inline u8 +drm_dp_training_pattern_mask(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return (dpcd[DP_DPCD_REV] >= 0x14) ? DP_TRAINING_PATTERN_MASK_1_4 : + DP_TRAINING_PATTERN_MASK; +} + +static inline bool +drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT; +} + +/* DP/eDP DSC support */ +u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE], + bool is_edp); +u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]); +int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpc[DP_DSC_RECEIVER_CAP_SIZE], + u8 dsc_bpc[3]); + +static inline bool +drm_dp_sink_supports_dsc(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) +{ + return dsc_dpcd[DP_DSC_SUPPORT - DP_DSC_SUPPORT] & + DP_DSC_DECOMPRESSION_IS_SUPPORTED; +} + +static inline u16 +drm_edp_dsc_sink_output_bpp(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) +{ + return dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_LOW - DP_DSC_SUPPORT] | + (dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_HI - DP_DSC_SUPPORT] & + DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK << + DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT); +} + +static inline u32 +drm_dp_dsc_sink_max_slice_width(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) +{ + /* Max Slicewidth = Number of Pixels * 320 */ + return dsc_dpcd[DP_DSC_MAX_SLICE_WIDTH - DP_DSC_SUPPORT] * + DP_DSC_SLICE_WIDTH_MULTIPLIER; +} + +/* Forward Error Correction Support on DP 1.4 */ +static inline bool +drm_dp_sink_supports_fec(const u8 fec_capable) +{ + return fec_capable & DP_FEC_CAPABLE; +} + +static inline bool +drm_dp_channel_coding_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_8B10B; +} + +static inline bool +drm_dp_alternate_scrambler_reset_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_EDP_CONFIGURATION_CAP] & + DP_ALTERNATE_SCRAMBLER_RESET_CAP; +} + +/* Ignore MSA timing for Adaptive Sync support on DP 1.4 */ +static inline bool +drm_dp_sink_can_do_video_without_timing_msa(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DOWN_STREAM_PORT_COUNT] & + DP_MSA_TIMING_PAR_IGNORED; +} + +/** + * drm_edp_backlight_supported() - Check an eDP DPCD for VESA backlight support + * @edp_dpcd: The DPCD to check + * + * Note that currently this function will return %false for panels which support 
various DPCD + * backlight features but which require the brightness be set through PWM, and don't support setting + * the brightness level via the DPCD. + * + * Returns: %True if @edp_dpcd indicates that VESA backlight controls are supported, %false + * otherwise + */ +static inline bool +drm_edp_backlight_supported(const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]) +{ + return !!(edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP); +} + +/* + * DisplayPort AUX channel + */ + +/** + * struct drm_dp_aux_msg - DisplayPort AUX channel transaction + * @address: address of the (first) register to access + * @request: contains the type of transaction (see DP_AUX_* macros) + * @reply: upon completion, contains the reply type of the transaction + * @buffer: pointer to a transmission or reception buffer + * @size: size of @buffer + */ +struct drm_dp_aux_msg { + unsigned int address; + u8 request; + u8 reply; + void *buffer; + size_t size; +}; + +struct cec_adapter; +struct edid; +struct drm_connector; + +/** + * struct drm_dp_aux_cec - DisplayPort CEC-Tunneling-over-AUX + * @lock: mutex protecting this struct + * @adap: the CEC adapter for CEC-Tunneling-over-AUX support. + * @connector: the connector this CEC adapter is associated with + * @unregister_work: unregister the CEC adapter + */ +struct drm_dp_aux_cec { + struct mutex lock; + struct cec_adapter *adap; + struct drm_connector *connector; + struct delayed_work unregister_work; +}; + +/** + * struct drm_dp_aux - DisplayPort AUX channel + * + * An AUX channel can also be used to transport I2C messages to a sink. A + * typical application of that is to access an EDID that's present in the sink + * device. The @transfer() function can also be used to execute such + * transactions. The drm_dp_aux_register() function registers an I2C adapter + * that can be passed to drm_probe_ddc(). Upon removal, drivers should call + * drm_dp_aux_unregister() to remove the I2C adapter. The I2C adapter uses long + * transfers by default; if a partial response is received, the adapter will + * drop down to the size given by the partial response for this transaction + * only. + */ +struct drm_dp_aux { + /** + * @name: user-visible name of this AUX channel and the + * I2C-over-AUX adapter. + * + * It's also used to specify the name of the I2C adapter. If set + * to %NULL, dev_name() of @dev will be used. + */ + const char *name; + + /** + * @ddc: I2C adapter that can be used for I2C-over-AUX + * communication + */ + struct i2c_adapter ddc; + + /** + * @dev: pointer to struct device that is the parent for this + * AUX channel. + */ + struct device *dev; + + /** + * @drm_dev: pointer to the &drm_device that owns this AUX channel. + * Beware, this may be %NULL before drm_dp_aux_register() has been + * called. + * + * It should be set to the &drm_device that will be using this AUX + * channel as early as possible. For many graphics drivers this should + * happen before drm_dp_aux_init(), however it's perfectly fine to set + * this field later so long as it's assigned before calling + * drm_dp_aux_register(). + */ + struct drm_device *drm_dev; + + /** + * @crtc: backpointer to the crtc that is currently using this + * AUX channel + */ + struct drm_crtc *crtc; + + /** + * @hw_mutex: internal mutex used for locking transfers. + * + * Note that if the underlying hardware is shared among multiple + * channels, the driver needs to do additional locking to + * prevent concurrent access. 
+ */ + struct mutex hw_mutex; + + /** + * @crc_work: worker that captures CRCs for each frame + */ + struct work_struct crc_work; + + /** + * @crc_count: counter of captured frame CRCs + */ + u8 crc_count; + + /** + * @transfer: transfers a message representing a single AUX + * transaction. + * + * This is a hardware-specific implementation of how + * transactions are executed that the drivers must provide. + * + * A pointer to a &drm_dp_aux_msg structure describing the + * transaction is passed into this function. Upon success, the + * implementation should return the number of payload bytes that + * were transferred, or a negative error-code on failure. + * + * Helpers will propagate these errors, with the exception of + * the %-EBUSY error, which causes a transaction to be retried. + * On a short, helpers will return %-EPROTO to make it simpler + * to check for failure. + * + * The @transfer() function must only modify the reply field of + * the &drm_dp_aux_msg structure. The retry logic and i2c + * helpers assume this is the case. + * + * Also note that this callback can be called no matter the + * state @dev is in. Drivers that need that device to be powered + * to perform this operation will first need to make sure it's + * been properly enabled. + */ + ssize_t (*transfer)(struct drm_dp_aux *aux, + struct drm_dp_aux_msg *msg); + + /** + * @i2c_nack_count: Counts I2C NACKs, used for DP validation. + */ + unsigned i2c_nack_count; + /** + * @i2c_defer_count: Counts I2C DEFERs, used for DP validation. + */ + unsigned i2c_defer_count; + /** + * @cec: struct containing fields used for CEC-Tunneling-over-AUX. + */ + struct drm_dp_aux_cec cec; + /** + * @is_remote: Is this AUX CH actually using sideband messaging. + */ + bool is_remote; +}; + +int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset); +ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, + void *buffer, size_t size); +ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset, + void *buffer, size_t size); + +/** + * drm_dp_dpcd_readb() - read a single byte from the DPCD + * @aux: DisplayPort AUX channel + * @offset: address of the register to read + * @valuep: location where the value of the register will be stored + * + * Returns the number of bytes transferred (1) on success, or a negative + * error code on failure. + */ +static inline ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux, + unsigned int offset, u8 *valuep) +{ + return drm_dp_dpcd_read(aux, offset, valuep, 1); +} + +/** + * drm_dp_dpcd_writeb() - write a single byte to the DPCD + * @aux: DisplayPort AUX channel + * @offset: address of the register to write + * @value: value to write to the register + * + * Returns the number of bytes transferred (1) on success, or a negative + * error code on failure. 
+ */ +static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux, + unsigned int offset, u8 value) +{ + return drm_dp_dpcd_write(aux, offset, &value, 1); +} + +int drm_dp_read_dpcd_caps(struct drm_dp_aux *aux, + u8 dpcd[DP_RECEIVER_CAP_SIZE]); + +int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux, + u8 status[DP_LINK_STATUS_SIZE]); + +int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux, + enum drm_dp_phy dp_phy, + u8 link_status[DP_LINK_STATUS_SIZE]); + +bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux, + u8 real_edid_checksum); + +int drm_dp_read_downstream_info(struct drm_dp_aux *aux, + const u8 dpcd[DP_RECEIVER_CAP_SIZE], + u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS]); +bool drm_dp_downstream_is_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], u8 type); +bool drm_dp_downstream_is_tmds(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], + const struct edid *edid); +int drm_dp_downstream_max_dotclock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]); +int drm_dp_downstream_max_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], + const struct edid *edid); +int drm_dp_downstream_min_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], + const struct edid *edid); +int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], + const struct edid *edid); +bool drm_dp_downstream_420_passthrough(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]); +bool drm_dp_downstream_444_to_420_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]); +struct drm_display_mode *drm_dp_downstream_mode(struct drm_device *dev, + const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]); +int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]); +void drm_dp_downstream_debug(struct seq_file *m, + const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], + const struct edid *edid, + struct drm_dp_aux *aux); +enum drm_mode_subconnector +drm_dp_subconnector_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]); +void drm_dp_set_subconnector_property(struct drm_connector *connector, + enum drm_connector_status status, + const u8 *dpcd, + const u8 port_cap[4]); + +struct drm_dp_desc; +bool drm_dp_read_sink_count_cap(struct drm_connector *connector, + const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const struct drm_dp_desc *desc); +int drm_dp_read_sink_count(struct drm_dp_aux *aux); + +int drm_dp_read_lttpr_common_caps(struct drm_dp_aux *aux, + const u8 dpcd[DP_RECEIVER_CAP_SIZE], + u8 caps[DP_LTTPR_COMMON_CAP_SIZE]); +int drm_dp_read_lttpr_phy_caps(struct drm_dp_aux *aux, + const u8 dpcd[DP_RECEIVER_CAP_SIZE], + enum drm_dp_phy dp_phy, + u8 caps[DP_LTTPR_PHY_CAP_SIZE]); +int drm_dp_lttpr_count(const u8 cap[DP_LTTPR_COMMON_CAP_SIZE]); +int drm_dp_lttpr_max_link_rate(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE]); +int drm_dp_lttpr_max_lane_count(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE]); +bool drm_dp_lttpr_voltage_swing_level_3_supported(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]); +bool drm_dp_lttpr_pre_emphasis_level_3_supported(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]); + +void drm_dp_remote_aux_init(struct drm_dp_aux *aux); +void drm_dp_aux_init(struct drm_dp_aux *aux); +int drm_dp_aux_register(struct drm_dp_aux *aux); +void drm_dp_aux_unregister(struct drm_dp_aux *aux); + +int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc); +int drm_dp_stop_crc(struct drm_dp_aux *aux); + +struct drm_dp_dpcd_ident { + u8 oui[3]; + u8 device_id[6]; + u8 hw_rev; + 
u8 sw_major_rev; + u8 sw_minor_rev; +} __packed; + +/** + * struct drm_dp_desc - DP branch/sink device descriptor + * @ident: DP device identification from DPCD 0x400 (sink) or 0x500 (branch). + * @quirks: Quirks; use drm_dp_has_quirk() to query for the quirks. + */ +struct drm_dp_desc { + struct drm_dp_dpcd_ident ident; + u32 quirks; +}; + +int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc, + bool is_branch); + +/** + * enum drm_dp_quirk - Display Port sink/branch device specific quirks + * + * Display Port sink and branch devices in the wild have a variety of bugs, try + * to collect them here. The quirks are shared, but it's up to the drivers to + * implement workarounds for them. + */ +enum drm_dp_quirk { + /** + * @DP_DPCD_QUIRK_CONSTANT_N: + * + * The device requires main link attributes Mvid and Nvid to be limited + * to 16 bits. So will give a constant value (0x8000) for compatability. + */ + DP_DPCD_QUIRK_CONSTANT_N, + /** + * @DP_DPCD_QUIRK_NO_PSR: + * + * The device does not support PSR even if reports that it supports or + * driver still need to implement proper handling for such device. + */ + DP_DPCD_QUIRK_NO_PSR, + /** + * @DP_DPCD_QUIRK_NO_SINK_COUNT: + * + * The device does not set SINK_COUNT to a non-zero value. + * The driver should ignore SINK_COUNT during detection. Note that + * drm_dp_read_sink_count_cap() automatically checks for this quirk. + */ + DP_DPCD_QUIRK_NO_SINK_COUNT, + /** + * @DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD: + * + * The device supports MST DSC despite not supporting Virtual DPCD. + * The DSC caps can be read from the physical aux instead. + */ + DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD, + /** + * @DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS: + * + * The device supports a link rate of 3.24 Gbps (multiplier 0xc) despite + * the DP_MAX_LINK_RATE register reporting a lower max multiplier. + */ + DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS, +}; + +/** + * drm_dp_has_quirk() - does the DP device have a specific quirk + * @desc: Device descriptor filled by drm_dp_read_desc() + * @quirk: Quirk to query for + * + * Return true if DP device identified by @desc has @quirk. + */ +static inline bool +drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk) +{ + return desc->quirks & BIT(quirk); +} + +/** + * struct drm_edp_backlight_info - Probed eDP backlight info struct + * @pwmgen_bit_count: The pwmgen bit count + * @pwm_freq_pre_divider: The PWM frequency pre-divider value being used for this backlight, if any + * @max: The maximum backlight level that may be set + * @lsb_reg_used: Do we also write values to the DP_EDP_BACKLIGHT_BRIGHTNESS_LSB register? + * @aux_enable: Does the panel support the AUX enable cap? + * @aux_set: Does the panel support setting the brightness through AUX? + * + * This structure contains various data about an eDP backlight, which can be populated by using + * drm_edp_backlight_init(). 
+ */ +struct drm_edp_backlight_info { + u8 pwmgen_bit_count; + u8 pwm_freq_pre_divider; + u16 max; + + bool lsb_reg_used : 1; + bool aux_enable : 1; + bool aux_set : 1; +}; + +int +drm_edp_backlight_init(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl, + u16 driver_pwm_freq_hz, const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE], + u16 *current_level, u8 *current_mode); +int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl, + u16 level); +int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl, + u16 level); +int drm_edp_backlight_disable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl); + +#if IS_ENABLED(CONFIG_DRM_KMS_HELPER) && (IS_BUILTIN(CONFIG_BACKLIGHT_CLASS_DEVICE) || \ + (IS_MODULE(CONFIG_DRM_KMS_HELPER) && IS_MODULE(CONFIG_BACKLIGHT_CLASS_DEVICE))) + +int drm_panel_dp_aux_backlight(struct drm_panel *panel, struct drm_dp_aux *aux); + +#else + +static inline int drm_panel_dp_aux_backlight(struct drm_panel *panel, + struct drm_dp_aux *aux) +{ + return 0; +} + +#endif + +#ifdef CONFIG_DRM_DP_CEC +void drm_dp_cec_irq(struct drm_dp_aux *aux); +void drm_dp_cec_register_connector(struct drm_dp_aux *aux, + struct drm_connector *connector); +void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux); +void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid); +void drm_dp_cec_unset_edid(struct drm_dp_aux *aux); +#else +static inline void drm_dp_cec_irq(struct drm_dp_aux *aux) +{ +} + +static inline void +drm_dp_cec_register_connector(struct drm_dp_aux *aux, + struct drm_connector *connector) +{ +} + +static inline void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux) +{ +} + +static inline void drm_dp_cec_set_edid(struct drm_dp_aux *aux, + const struct edid *edid) +{ +} + +static inline void drm_dp_cec_unset_edid(struct drm_dp_aux *aux) +{ +} + +#endif + +/** + * struct drm_dp_phy_test_params - DP Phy Compliance parameters + * @link_rate: Requested Link rate from DPCD 0x219 + * @num_lanes: Number of lanes requested by sing through DPCD 0x220 + * @phy_pattern: DP Phy test pattern from DPCD 0x248 + * @hbr2_reset: DP HBR2_COMPLIANCE_SCRAMBLER_RESET from DCPD 0x24A and 0x24B + * @custom80: DP Test_80BIT_CUSTOM_PATTERN from DPCDs 0x250 through 0x259 + * @enhanced_frame_cap: flag for enhanced frame capability. 
+ */ +struct drm_dp_phy_test_params { + int link_rate; + u8 num_lanes; + u8 phy_pattern; + u8 hbr2_reset[2]; + u8 custom80[10]; + bool enhanced_frame_cap; +}; + +int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux, + struct drm_dp_phy_test_params *data); +int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux, + struct drm_dp_phy_test_params *data, u8 dp_rev); +int drm_dp_get_pcon_max_frl_bw(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]); +int drm_dp_pcon_frl_prepare(struct drm_dp_aux *aux, bool enable_frl_ready_hpd); +bool drm_dp_pcon_is_frl_ready(struct drm_dp_aux *aux); +int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps, + u8 frl_mode); +int drm_dp_pcon_frl_configure_2(struct drm_dp_aux *aux, int max_frl_mask, + u8 frl_type); +int drm_dp_pcon_reset_frl_config(struct drm_dp_aux *aux); +int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux); + +bool drm_dp_pcon_hdmi_link_active(struct drm_dp_aux *aux); +int drm_dp_pcon_hdmi_link_mode(struct drm_dp_aux *aux, u8 *frl_trained_mask); +void drm_dp_pcon_hdmi_frl_link_error_count(struct drm_dp_aux *aux, + struct drm_connector *connector); +bool drm_dp_pcon_enc_is_dsc_1_2(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]); +int drm_dp_pcon_dsc_max_slices(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]); +int drm_dp_pcon_dsc_max_slice_width(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]); +int drm_dp_pcon_dsc_bpp_incr(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]); +int drm_dp_pcon_pps_default(struct drm_dp_aux *aux); +int drm_dp_pcon_pps_override_buf(struct drm_dp_aux *aux, u8 pps_buf[128]); +int drm_dp_pcon_pps_override_param(struct drm_dp_aux *aux, u8 pps_param[6]); +bool drm_dp_downstream_rgb_to_ycbcr_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], u8 color_spc); +int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc); + +#endif /* _DRM_DP_HELPER_H_ */ diff --git a/include/drm/dp/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h index 08276eb8c187..10adec068b7f 100644 --- a/include/drm/dp/drm_dp_mst_helper.h +++ b/include/drm/display/drm_dp_mst_helper.h @@ -23,7 +23,7 @@ #define _DRM_DP_MST_HELPER_H_ #include <linux/types.h> -#include <drm/dp/drm_dp_helper.h> +#include <drm/display/drm_dp_helper.h> #include <drm/drm_atomic.h> #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) diff --git a/include/drm/drm_dsc.h b/include/drm/display/drm_dsc.h index ca022e960dcc..bc90273d06a6 100644 --- a/include/drm/drm_dsc.h +++ b/include/drm/display/drm_dsc.h @@ -8,7 +8,7 @@ #ifndef DRM_DSC_H_ #define DRM_DSC_H_ -#include <drm/dp/drm_dp_helper.h> +#include <drm/display/drm_dp.h> /* VESA Display Stream Compression DSC 1.2 constants */ #define DSC_NUM_BUF_RANGES 15 @@ -602,10 +602,4 @@ struct drm_dsc_pps_infoframe { struct drm_dsc_picture_parameter_set pps_payload; } __packed; -void drm_dsc_dp_pps_header_init(struct dp_sdp_header *pps_header); -int drm_dsc_dp_rc_buffer_size(u8 rc_buffer_block_size, u8 rc_buffer_size); -void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_sdp, - const struct drm_dsc_config *dsc_cfg); -int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg); - #endif /* _DRM_DSC_H_ */ diff --git a/include/drm/display/drm_dsc_helper.h b/include/drm/display/drm_dsc_helper.h new file mode 100644 index 000000000000..8b41edbbabab --- /dev/null +++ b/include/drm/display/drm_dsc_helper.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: MIT + * Copyright (C) 2018 Intel Corp. 
+ * + * Authors: + * Manasi Navare <manasi.d.navare@intel.com> + */ + +#ifndef DRM_DSC_HELPER_H_ +#define DRM_DSC_HELPER_H_ + +#include <drm/display/drm_dsc.h> + +void drm_dsc_dp_pps_header_init(struct dp_sdp_header *pps_header); +int drm_dsc_dp_rc_buffer_size(u8 rc_buffer_block_size, u8 rc_buffer_size); +void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_sdp, + const struct drm_dsc_config *dsc_cfg); +int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg); + +#endif /* _DRM_DSC_HELPER_H_ */ + diff --git a/include/drm/drm_hdcp.h b/include/drm/display/drm_hdcp.h index 0b1111e3228e..96a99b1377c0 100644 --- a/include/drm/drm_hdcp.h +++ b/include/drm/display/drm_hdcp.h @@ -6,8 +6,8 @@ * Sean Paul <seanpaul@chromium.org> */ -#ifndef _DRM_HDCP_H_INCLUDED_ -#define _DRM_HDCP_H_INCLUDED_ +#ifndef _DRM_HDCP_H_ +#define _DRM_HDCP_H_ #include <linux/types.h> @@ -291,16 +291,6 @@ struct hdcp_srm_header { u8 srm_gen_no; } __packed; -struct drm_device; -struct drm_connector; - -int drm_hdcp_check_ksvs_revoked(struct drm_device *dev, - u8 *ksvs, u32 ksv_count); -int drm_connector_attach_content_protection_property( - struct drm_connector *connector, bool hdcp_content_type); -void drm_hdcp_update_content_protection(struct drm_connector *connector, - u64 val); - /* Content Type classification for HDCP2.2 vs others */ #define DRM_MODE_HDCP_CONTENT_TYPE0 0 #define DRM_MODE_HDCP_CONTENT_TYPE1 1 diff --git a/include/drm/display/drm_hdcp_helper.h b/include/drm/display/drm_hdcp_helper.h new file mode 100644 index 000000000000..8aaf87bf2735 --- /dev/null +++ b/include/drm/display/drm_hdcp_helper.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2017 Google, Inc. + * + * Authors: + * Sean Paul <seanpaul@chromium.org> + */ + +#ifndef _DRM_HDCP_HELPER_H_INCLUDED_ +#define _DRM_HDCP_HELPER_H_INCLUDED_ + +#include <drm/display/drm_hdcp.h> + +struct drm_device; +struct drm_connector; + +int drm_hdcp_check_ksvs_revoked(struct drm_device *dev, u8 *ksvs, u32 ksv_count); +int drm_connector_attach_content_protection_property(struct drm_connector *connector, + bool hdcp_content_type); +void drm_hdcp_update_content_protection(struct drm_connector *connector, u64 val); + +#endif diff --git a/include/drm/display/drm_hdmi_helper.h b/include/drm/display/drm_hdmi_helper.h new file mode 100644 index 000000000000..76d234826e22 --- /dev/null +++ b/include/drm/display/drm_hdmi_helper.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: MIT */ + +#ifndef DRM_HDMI_HELPER +#define DRM_HDMI_HELPER + +#include <linux/hdmi.h> + +struct drm_connector; +struct drm_connector_state; +struct drm_display_mode; + +void +drm_hdmi_avi_infoframe_colorimetry(struct hdmi_avi_infoframe *frame, + const struct drm_connector_state *conn_state); + +void +drm_hdmi_avi_infoframe_bars(struct hdmi_avi_infoframe *frame, + const struct drm_connector_state *conn_state); + +int +drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame, + const struct drm_connector_state *conn_state); + +void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame, + const struct drm_connector_state *conn_state); + +#endif diff --git a/include/drm/drm_scdc_helper.h b/include/drm/display/drm_scdc.h index 6a483533aae4..3d58f37e8ed8 100644 --- a/include/drm/drm_scdc_helper.h +++ b/include/drm/display/drm_scdc.h @@ -21,11 +21,8 @@ * DEALINGS IN THE SOFTWARE. 
*/ -#ifndef DRM_SCDC_HELPER_H -#define DRM_SCDC_HELPER_H - -#include <linux/i2c.h> -#include <linux/types.h> +#ifndef DRM_SCDC_H +#define DRM_SCDC_H #define SCDC_SINK_VERSION 0x01 @@ -88,49 +85,4 @@ #define SCDC_MANUFACTURER_SPECIFIC 0xde #define SCDC_MANUFACTURER_SPECIFIC_SIZE 34 -ssize_t drm_scdc_read(struct i2c_adapter *adapter, u8 offset, void *buffer, - size_t size); -ssize_t drm_scdc_write(struct i2c_adapter *adapter, u8 offset, - const void *buffer, size_t size); - -/** - * drm_scdc_readb - read a single byte from SCDC - * @adapter: I2C adapter - * @offset: offset of register to read - * @value: return location for the register value - * - * Reads a single byte from SCDC. This is a convenience wrapper around the - * drm_scdc_read() function. - * - * Returns: - * 0 on success or a negative error code on failure. - */ -static inline int drm_scdc_readb(struct i2c_adapter *adapter, u8 offset, - u8 *value) -{ - return drm_scdc_read(adapter, offset, value, sizeof(*value)); -} - -/** - * drm_scdc_writeb - write a single byte to SCDC - * @adapter: I2C adapter - * @offset: offset of register to read - * @value: return location for the register value - * - * Writes a single byte to SCDC. This is a convenience wrapper around the - * drm_scdc_write() function. - * - * Returns: - * 0 on success or a negative error code on failure. - */ -static inline int drm_scdc_writeb(struct i2c_adapter *adapter, u8 offset, - u8 value) -{ - return drm_scdc_write(adapter, offset, &value, sizeof(value)); -} - -bool drm_scdc_get_scrambling_status(struct i2c_adapter *adapter); - -bool drm_scdc_set_scrambling(struct i2c_adapter *adapter, bool enable); -bool drm_scdc_set_high_tmds_clock_ratio(struct i2c_adapter *adapter, bool set); #endif diff --git a/include/drm/display/drm_scdc_helper.h b/include/drm/display/drm_scdc_helper.h new file mode 100644 index 000000000000..ded01fd948b4 --- /dev/null +++ b/include/drm/display/drm_scdc_helper.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2015 NVIDIA Corporation. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef DRM_SCDC_HELPER_H +#define DRM_SCDC_HELPER_H + +#include <linux/types.h> + +#include <drm/display/drm_scdc.h> + +struct i2c_adapter; + +ssize_t drm_scdc_read(struct i2c_adapter *adapter, u8 offset, void *buffer, + size_t size); +ssize_t drm_scdc_write(struct i2c_adapter *adapter, u8 offset, + const void *buffer, size_t size); + +/** + * drm_scdc_readb - read a single byte from SCDC + * @adapter: I2C adapter + * @offset: offset of register to read + * @value: return location for the register value + * + * Reads a single byte from SCDC. This is a convenience wrapper around the + * drm_scdc_read() function. + * + * Returns: + * 0 on success or a negative error code on failure. + */ +static inline int drm_scdc_readb(struct i2c_adapter *adapter, u8 offset, + u8 *value) +{ + return drm_scdc_read(adapter, offset, value, sizeof(*value)); +} + +/** + * drm_scdc_writeb - write a single byte to SCDC + * @adapter: I2C adapter + * @offset: offset of register to read + * @value: return location for the register value + * + * Writes a single byte to SCDC. This is a convenience wrapper around the + * drm_scdc_write() function. + * + * Returns: + * 0 on success or a negative error code on failure. + */ +static inline int drm_scdc_writeb(struct i2c_adapter *adapter, u8 offset, + u8 value) +{ + return drm_scdc_write(adapter, offset, &value, sizeof(value)); +} + +bool drm_scdc_get_scrambling_status(struct i2c_adapter *adapter); + +bool drm_scdc_set_scrambling(struct i2c_adapter *adapter, bool enable); +bool drm_scdc_set_high_tmds_clock_ratio(struct i2c_adapter *adapter, bool set); + +#endif diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index 1701c2128a5c..0777725085df 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -227,6 +227,18 @@ struct drm_private_state_funcs { */ void (*atomic_destroy_state)(struct drm_private_obj *obj, struct drm_private_state *state); + + /** + * @atomic_print_state: + * + * If driver subclasses &struct drm_private_state, it should implement + * this optional hook for printing additional driver specific state. + * + * Do not call this directly, use drm_atomic_private_obj_print_state() + * instead. + */ + void (*atomic_print_state)(struct drm_printer *p, + const struct drm_private_state *state); }; /** @@ -311,14 +323,21 @@ struct drm_private_obj { /** * struct drm_private_state - base struct for driver private object state - * @state: backpointer to global drm_atomic_state * - * Currently only contains a backpointer to the overall atomic update, but in - * the future also might hold synchronization information similar to e.g. - * &drm_crtc.commit. + * Currently only contains a backpointer to the overall atomic update, + * and the relevant private object but in the future also might hold + * synchronization information similar to e.g. &drm_crtc.commit. 
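A minimal illustrative sketch, not taken from this patch, of how a driver-private object might wire up the new optional atomic_print_state hook together with the drm_private_state base documented here; struct my_obj_state, its sel field and my_obj_state_funcs are hypothetical, and the mandatory duplicate/destroy hooks are omitted for brevity.

struct my_obj_state {
        struct drm_private_state base;
        unsigned int sel;       /* hypothetical driver-specific field */
};

static void my_obj_atomic_print_state(struct drm_printer *p,
                                      const struct drm_private_state *state)
{
        const struct my_obj_state *s =
                container_of(state, struct my_obj_state, base);

        /* Reached via drm_atomic_private_obj_print_state(), never called directly. */
        drm_printf(p, "\tsel=%u\n", s->sel);
}

static const struct drm_private_state_funcs my_obj_state_funcs = {
        /* .atomic_duplicate_state and .atomic_destroy_state omitted in this sketch */
        .atomic_print_state = my_obj_atomic_print_state,
};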
*/ struct drm_private_state { + /** + * @state: backpointer to global drm_atomic_state + */ struct drm_atomic_state *state; + + /** + * @obj: backpointer to the private object + */ + struct drm_private_obj *obj; }; struct __drm_private_objs_state { diff --git a/include/drm/drm_atomic_uapi.h b/include/drm/drm_atomic_uapi.h index 8cec52ad1277..4c6d39d7bdb2 100644 --- a/include/drm/drm_atomic_uapi.h +++ b/include/drm/drm_atomic_uapi.h @@ -49,8 +49,6 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state, struct drm_crtc *crtc); void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state, struct drm_framebuffer *fb); -void drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state, - struct dma_fence *fence); int __must_check drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state, struct drm_crtc *crtc); diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index 5166186146f4..3ac4bf87f257 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -1784,9 +1784,6 @@ int drm_mode_create_aspect_ratio_property(struct drm_device *dev); int drm_mode_create_hdmi_colorspace_property(struct drm_connector *connector); int drm_mode_create_dp_colorspace_property(struct drm_connector *connector); int drm_mode_create_content_type_property(struct drm_device *dev); -void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame, - const struct drm_connector_state *conn_state); - int drm_mode_create_suggested_offset_properties(struct drm_device *dev); int drm_connector_set_path_property(struct drm_connector *connector, diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 144c495b99c4..c3204a58fb09 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -372,8 +372,8 @@ struct drm_connector; struct drm_connector_state; struct drm_display_mode; -int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads); -int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb); +int drm_edid_to_sad(const struct edid *edid, struct cea_sad **sads); +int drm_edid_to_speaker_allocation(const struct edid *edid, u8 **sadb); int drm_av_sync_delay(struct drm_connector *connector, const struct drm_display_mode *mode); @@ -401,23 +401,11 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame, const struct drm_display_mode *mode); void -drm_hdmi_avi_infoframe_colorimetry(struct hdmi_avi_infoframe *frame, - const struct drm_connector_state *conn_state); - -void -drm_hdmi_avi_infoframe_bars(struct hdmi_avi_infoframe *frame, - const struct drm_connector_state *conn_state); - -void drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame, const struct drm_connector *connector, const struct drm_display_mode *mode, enum hdmi_quantization_range rgb_quant_range); -int -drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame, - const struct drm_connector_state *conn_state); - /** * drm_eld_mnl - Get ELD monitor name length in bytes. 
* @eld: pointer to an eld memory structure with mnl set @@ -569,8 +557,8 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); int drm_add_override_edid_modes(struct drm_connector *connector); u8 drm_match_cea_mode(const struct drm_display_mode *to_match); -bool drm_detect_hdmi_monitor(struct edid *edid); -bool drm_detect_monitor_audio(struct edid *edid); +bool drm_detect_hdmi_monitor(const struct edid *edid); +bool drm_detect_monitor_audio(const struct edid *edid); enum hdmi_quantization_range drm_default_rgb_quant_range(const struct drm_display_mode *mode); int drm_add_modes_noedid(struct drm_connector *connector, @@ -578,11 +566,11 @@ int drm_add_modes_noedid(struct drm_connector *connector, void drm_set_preferred_mode(struct drm_connector *connector, int hpref, int vpref); -int drm_edid_header_is_valid(const u8 *raw_edid); +int drm_edid_header_is_valid(const void *edid); bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid, bool *edid_corrupt); bool drm_edid_is_valid(struct edid *edid); -void drm_edid_get_monitor_name(struct edid *edid, char *name, +void drm_edid_get_monitor_name(const struct edid *edid, char *name, int buflen); struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev, int hsize, int vsize, int fresh, diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index 3af4624368d8..329607ca65c0 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h @@ -229,8 +229,7 @@ void drm_fb_helper_fill_info(struct fb_info *info, struct drm_fb_helper *fb_helper, struct drm_fb_helper_surface_size *sizes); -void drm_fb_helper_deferred_io(struct fb_info *info, - struct list_head *pagelist); +void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagereflist); ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf, size_t count, loff_t *ppos); diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h index a3acb7ac3550..e0a73a1e2df7 100644 --- a/include/drm/drm_file.h +++ b/include/drm/drm_file.h @@ -248,7 +248,7 @@ struct drm_file { */ struct drm_master *master; - /** @master_lock: Serializes @master. */ + /** @master_lookup_lock: Serializes @master. */ spinlock_t master_lookup_lock; /** @pid: Process that opened this file. 
*/ diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h index 0b0937c0b2f6..55145eca0782 100644 --- a/include/drm/drm_format_helper.h +++ b/include/drm/drm_format_helper.h @@ -43,8 +43,7 @@ int drm_fb_blit_toio(void __iomem *dst, unsigned int dst_pitch, uint32_t dst_for const void *vmap, const struct drm_framebuffer *fb, const struct drm_rect *rect); -void drm_fb_xrgb8888_to_mono_reversed(void *dst, unsigned int dst_pitch, const void *src, - const struct drm_framebuffer *fb, - const struct drm_rect *clip); +void drm_fb_xrgb8888_to_mono(void *dst, unsigned int dst_pitch, const void *src, + const struct drm_framebuffer *fb, const struct drm_rect *clip); #endif /* __LINUX_DRM_FORMAT_HELPER_H */ diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index e2941cee14b6..9d7c61a122dc 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -407,11 +407,6 @@ int drm_gem_lock_reservations(struct drm_gem_object **objs, int count, struct ww_acquire_ctx *acquire_ctx); void drm_gem_unlock_reservations(struct drm_gem_object **objs, int count, struct ww_acquire_ctx *acquire_ctx); -int drm_gem_fence_array_add(struct xarray *fence_array, - struct dma_fence *fence); -int drm_gem_fence_array_add_implicit(struct xarray *fence_array, - struct drm_gem_object *obj, - bool write); int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, u32 handle, u64 *offset); diff --git a/include/drm/drm_managed.h b/include/drm/drm_managed.h index b45c6fbf53ac..359883942612 100644 --- a/include/drm/drm_managed.h +++ b/include/drm/drm_managed.h @@ -8,6 +8,7 @@ #include <linux/types.h> struct drm_device; +struct mutex; typedef void (*drmres_release_t)(struct drm_device *dev, void *res); @@ -104,4 +105,6 @@ char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp); void drmm_kfree(struct drm_device *dev, void *data); +int drmm_mutex_init(struct drm_device *dev, struct mutex *lock); + #endif diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h index 147e51b6d241..51e09a1a106a 100644 --- a/include/drm/drm_mipi_dsi.h +++ b/include/drm/drm_mipi_dsi.h @@ -137,6 +137,8 @@ struct mipi_dsi_host *of_find_mipi_dsi_host_by_node(struct device_node *node); #define MIPI_DSI_CLOCK_NON_CONTINUOUS BIT(10) /* transmit data in low power */ #define MIPI_DSI_MODE_LPM BIT(11) +/* transmit data ending at the same time for all lanes within one hsync */ +#define MIPI_DSI_HS_PKT_END_ALIGNED BIT(12) enum mipi_dsi_pixel_format { MIPI_DSI_FMT_RGB888, diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h index 2fa6b2c33b3f..a80ae9639e96 100644 --- a/include/drm/drm_modes.h +++ b/include/drm/drm_modes.h @@ -492,6 +492,8 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags); void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src); +void drm_mode_init(struct drm_display_mode *dst, + const struct drm_display_mode *src); struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, const struct drm_display_mode *mode); bool drm_mode_match(const struct drm_display_mode *mode1, diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index fdfa9f37ce05..fafa70ac1337 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -1384,7 +1384,7 @@ struct drm_mode_config_helper_funcs { * starting to commit the update to the hardware. 
* * After the atomic update is committed to the hardware this hook needs - * to call drm_atomic_helper_commit_hw_done(). Then wait for the upate + * to call drm_atomic_helper_commit_hw_done(). Then wait for the update * to be executed by the hardware, for example using * drm_atomic_helper_wait_for_vblanks() or * drm_atomic_helper_wait_for_flip_done(), and then clean up the old diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h index 1ba2d424a53f..d279ee455f01 100644 --- a/include/drm/drm_panel.h +++ b/include/drm/drm_panel.h @@ -179,6 +179,13 @@ struct drm_panel { * Panel entry in registry. */ struct list_head list; + + /** + * @dsc: + * + * Panel DSC pps payload to be sent + */ + struct drm_dsc_config *dsc; }; void drm_panel_init(struct drm_panel *panel, struct device *dev, diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index 2628c7cde2da..89ea54652e87 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -74,9 +74,7 @@ struct drm_plane_state { * * Optional fence to wait for before scanning out @fb. The core atomic * code will set this when userspace is using explicit fencing. Do not - * write this field directly for a driver's implicit fence, use - * drm_atomic_set_fence_for_plane() to ensure that an explicit fence is - * preserved. + * write this field directly for a driver's implicit fence. * * Drivers should store any implicit fence in this from their * &drm_plane_helper_funcs.prepare_fb callback. See drm_gem_plane_helper_prepare_fb() diff --git a/include/drm/drm_writeback.h b/include/drm/drm_writeback.h index 9697d2714d2a..17e576c80169 100644 --- a/include/drm/drm_writeback.h +++ b/include/drm/drm_writeback.h @@ -30,6 +30,8 @@ struct drm_writeback_connector { * @drm_writeback_connector control the behaviour of the @encoder * by passing the @enc_funcs parameter to drm_writeback_connector_init() * function. + * For users of drm_writeback_connector_init_with_encoder(), this field + * is not valid as the encoder is managed within their drivers. */ struct drm_encoder encoder; @@ -150,7 +152,14 @@ int drm_writeback_connector_init(struct drm_device *dev, struct drm_writeback_connector *wb_connector, const struct drm_connector_funcs *con_funcs, const struct drm_encoder_helper_funcs *enc_helper_funcs, - const u32 *formats, int n_formats); + const u32 *formats, int n_formats, + u32 possible_crtcs); + +int drm_writeback_connector_init_with_encoder(struct drm_device *dev, + struct drm_writeback_connector *wb_connector, + struct drm_encoder *enc, + const struct drm_connector_funcs *con_funcs, const u32 *formats, + int n_formats); int drm_writeback_set_fb(struct drm_connector_state *conn_state, struct drm_framebuffer *fb); diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 944f83ef9f2e..0fca8f38bee4 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -270,6 +270,7 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f); * @sched: the scheduler instance on which this job is scheduled. * @s_fence: contains the fences for the scheduling of job. * @finish_cb: the callback for the finished fence. + * @work: Helper to reschedule job kill to different context. * @id: a unique id assigned to each job scheduled on the scheduler. * @karma: increment on every hang caused by this job.
If this exceeds the hang * limit of the scheduler then the job is marked guilty and will not diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index 6722005884db..7adce327c1c2 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -26,8 +26,7 @@ #ifndef _I915_DRM_H_ #define _I915_DRM_H_ -#include <drm/i915_pciids.h> -#include <uapi/drm/i915_drm.h> +#include <linux/types.h> /* For use by IPS driver */ unsigned long i915_read_mch_val(void); diff --git a/include/drm/i915_mei_hdcp_interface.h b/include/drm/i915_mei_hdcp_interface.h index 702f613243bb..f441cbcd95a4 100644 --- a/include/drm/i915_mei_hdcp_interface.h +++ b/include/drm/i915_mei_hdcp_interface.h @@ -11,7 +11,7 @@ #include <linux/mutex.h> #include <linux/device.h> -#include <drm/drm_hdcp.h> +#include <drm/display/drm_hdcp.h> /** * enum hdcp_port_type - HDCP port implementation type defined by ME FW diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index 533890dc9da1..283dadfbb4db 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -679,6 +679,39 @@ INTEL_VGA_DEVICE(0xA782, info), \ INTEL_VGA_DEVICE(0xA783, info), \ INTEL_VGA_DEVICE(0xA788, info), \ - INTEL_VGA_DEVICE(0xA789, info) + INTEL_VGA_DEVICE(0xA789, info), \ + INTEL_VGA_DEVICE(0xA78A, info), \ + INTEL_VGA_DEVICE(0xA78B, info) + +/* RPL-P */ +#define INTEL_RPLP_IDS(info) \ + INTEL_VGA_DEVICE(0xA720, info), \ + INTEL_VGA_DEVICE(0xA721, info), \ + INTEL_VGA_DEVICE(0xA7A0, info), \ + INTEL_VGA_DEVICE(0xA7A1, info), \ + INTEL_VGA_DEVICE(0xA7A8, info), \ + INTEL_VGA_DEVICE(0xA7A9, info) + +/* DG2 */ +#define INTEL_DG2_G10_IDS(info) \ + INTEL_VGA_DEVICE(0x5690, info), \ + INTEL_VGA_DEVICE(0x5691, info), \ + INTEL_VGA_DEVICE(0x5692, info) + +#define INTEL_DG2_G11_IDS(info) \ + INTEL_VGA_DEVICE(0x5693, info), \ + INTEL_VGA_DEVICE(0x5694, info), \ + INTEL_VGA_DEVICE(0x5695, info), \ + INTEL_VGA_DEVICE(0x56B0, info) + +#define INTEL_DG2_G12_IDS(info) \ + INTEL_VGA_DEVICE(0x5696, info), \ + INTEL_VGA_DEVICE(0x5697, info), \ + INTEL_VGA_DEVICE(0x56B2, info) + +#define INTEL_DG2_IDS(info) \ + INTEL_DG2_G10_IDS(info), \ + INTEL_DG2_G11_IDS(info), \ + INTEL_DG2_G12_IDS(info) #endif /* _I915_PCIIDS_H */ diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 155b19ee12fb..2d524f8b0802 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -55,8 +55,6 @@ struct ttm_placement; struct ttm_place; -struct ttm_lru_bulk_move; - /** * enum ttm_bo_type * @@ -94,10 +92,8 @@ struct ttm_tt; * @ttm: TTM structure holding system pages. * @evicted: Whether the object was evicted without user-space knowing. * @deleted: True if the object is only a zombie and already deleted. - * @lru: List head for the lru list. * @ddestroy: List head for the delayed destroy list. * @swap: List head for swap LRU list. - * @moving: Fence set when BO is moving * @offset: The current GPU offset, which can have different meanings * depending on the memory type. For SYSTEM type memory, it should be 0. * @cur_placement: Hint of current placement. @@ -138,19 +134,18 @@ struct ttm_buffer_object { struct ttm_resource *resource; struct ttm_tt *ttm; bool deleted; + struct ttm_lru_bulk_move *bulk_move; /** * Members protected by the bdev::lru_lock. */ - struct list_head lru; struct list_head ddestroy; /** * Members protected by a bo reservation. 
*/ - struct dma_fence *moving; unsigned priority; unsigned pin_count; @@ -291,30 +286,9 @@ int ttm_bo_validate(struct ttm_buffer_object *bo, */ void ttm_bo_put(struct ttm_buffer_object *bo); -/** - * ttm_bo_move_to_lru_tail - * - * @bo: The buffer object. - * @mem: Resource object. - * @bulk: optional bulk move structure to remember BO positions - * - * Move this BO to the tail of all lru lists used to lookup and reserve an - * object. This function must be called with struct ttm_global::lru_lock - * held, and is used to make a BO less likely to be considered for eviction. - */ -void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, - struct ttm_resource *mem, - struct ttm_lru_bulk_move *bulk); - -/** - * ttm_bo_bulk_move_lru_tail - * - * @bulk: bulk move structure - * - * Bulk move BOs to the LRU tail, only valid to use when driver makes sure that - * BO order never changes. Should be called with ttm_global::lru_lock held. - */ -void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk); +void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo); +void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo, + struct ttm_lru_bulk_move *bulk); /** * ttm_bo_lock_delayed_workqueue @@ -540,34 +514,8 @@ ssize_t ttm_bo_io(struct ttm_device *bdev, struct file *filp, int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx, gfp_t gfp_flags); -/** - * ttm_bo_pin - Pin the buffer object. - * @bo: The buffer object to pin - * - * Make sure the buffer is not evicted any more during memory pressure. - */ -static inline void ttm_bo_pin(struct ttm_buffer_object *bo) -{ - dma_resv_assert_held(bo->base.resv); - WARN_ON_ONCE(!kref_read(&bo->kref)); - ++bo->pin_count; -} - -/** - * ttm_bo_unpin - Unpin the buffer object. - * @bo: The buffer object to unpin - * - * Allows the buffer object to be evicted again during memory pressure. - */ -static inline void ttm_bo_unpin(struct ttm_buffer_object *bo) -{ - dma_resv_assert_held(bo->base.resv); - WARN_ON_ONCE(!kref_read(&bo->kref)); - if (bo->pin_count) - --bo->pin_count; - else - WARN_ON_ONCE(true); -} +void ttm_bo_pin(struct ttm_buffer_object *bo); +void ttm_bo_unpin(struct ttm_buffer_object *bo); int ttm_mem_evict_first(struct ttm_device *bdev, struct ttm_resource_manager *man, diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 5f087575194b..897b88f0bd59 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -45,33 +45,6 @@ #include "ttm_tt.h" #include "ttm_pool.h" -/** - * struct ttm_lru_bulk_move_pos - * - * @first: first BO in the bulk move range - * @last: last BO in the bulk move range - * - * Positions for a lru bulk move. - */ -struct ttm_lru_bulk_move_pos { - struct ttm_buffer_object *first; - struct ttm_buffer_object *last; -}; - -/** - * struct ttm_lru_bulk_move - * - * @tt: first/last lru entry for BOs in the TT domain - * @vram: first/last lru entry for BOs in the VRAM domain - * @swap: first/last lru entry for BOs on the swap list - * - * Helper structure for bulk moves on the LRU list. 
- */ -struct ttm_lru_bulk_move { - struct ttm_lru_bulk_move_pos tt[TTM_MAX_BO_PRIORITY]; - struct ttm_lru_bulk_move_pos vram[TTM_MAX_BO_PRIORITY]; -}; - /* * ttm_bo.c */ @@ -182,7 +155,7 @@ static inline void ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo) { spin_lock(&bo->bdev->lru_lock); - ttm_bo_move_to_lru_tail(bo, bo->resource, NULL); + ttm_bo_move_to_lru_tail(bo); spin_unlock(&bo->bdev->lru_lock); } @@ -272,7 +245,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, struct ttm_resource *new_mem); /** - * ttm_bo_move_accel_cleanup. + * ttm_bo_move_sync_cleanup. * * @bo: A pointer to a struct ttm_buffer_object. * @new_mem: struct ttm_resource indicating where to move. @@ -280,13 +253,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, * Special case of ttm_bo_move_accel_cleanup where the bo is guaranteed * by the caller to be idle. Typically used after memcpy buffer moves. */ -static inline void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo, - struct ttm_resource *new_mem) -{ - int ret = ttm_bo_move_accel_cleanup(bo, NULL, true, false, new_mem); - - WARN_ON(ret); -} +void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo, + struct ttm_resource *new_mem); /** * ttm_bo_pipeline_gutting. diff --git a/include/drm/ttm/ttm_device.h b/include/drm/ttm/ttm_device.h index 0a4ddec78d8f..95b3c04b1ab9 100644 --- a/include/drm/ttm/ttm_device.h +++ b/include/drm/ttm/ttm_device.h @@ -30,8 +30,6 @@ #include <drm/ttm/ttm_resource.h> #include <drm/ttm/ttm_pool.h> -#define TTM_NUM_MEM_TYPES 8 - struct ttm_device; struct ttm_placement; struct ttm_buffer_object; @@ -201,15 +199,6 @@ struct ttm_device_funcs { void *buf, int len, int write); /** - * struct ttm_bo_driver member del_from_lru_notify - * - * @bo: the buffer object deleted from lru - * - * notify driver that a BO was deleted from LRU. - */ - void (*del_from_lru_notify)(struct ttm_buffer_object *bo); - - /** * Notify the driver that we're about to release a BO * * @bo: BO that is about to be released diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h index 0f1d55262c68..441653693970 100644 --- a/include/drm/ttm/ttm_resource.h +++ b/include/drm/ttm/ttm_resource.h @@ -26,14 +26,17 @@ #define _TTM_RESOURCE_H_ #include <linux/types.h> +#include <linux/list.h> #include <linux/mutex.h> #include <linux/iosys-map.h> #include <linux/dma-fence.h> + #include <drm/drm_print.h> #include <drm/ttm/ttm_caching.h> #include <drm/ttm/ttm_kmap_iter.h> #define TTM_MAX_BO_PRIORITY 4U +#define TTM_NUM_MEM_TYPES 8 struct ttm_device; struct ttm_resource_manager; @@ -178,6 +181,47 @@ struct ttm_resource { uint32_t placement; struct ttm_bus_placement bus; struct ttm_buffer_object *bo; + + /** + * @lru: Least recently used list, see &ttm_resource_manager.lru + */ + struct list_head lru; +}; + +/** + * struct ttm_resource_cursor + * + * @priority: the current priority + * + * Cursor to iterate over the resources in a manager. + */ +struct ttm_resource_cursor { + unsigned int priority; +}; + +/** + * struct ttm_lru_bulk_move_pos + * + * @first: first res in the bulk move range + * @last: last res in the bulk move range + * + * Range of resources for a lru bulk move. + */ +struct ttm_lru_bulk_move_pos { + struct ttm_resource *first; + struct ttm_resource *last; +}; + +/** + * struct ttm_lru_bulk_move + * + * @pos: first/last lru entry for resources in the each domain/priority + * + * Container for the current bulk move state. Should be used with + * ttm_lru_bulk_move_init() and ttm_bo_set_bulk_move(). 
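A speculative usage sketch, not part of this patch, for the bulk-move container described above, assuming a hypothetical per-VM structure my_vm that embeds the bulk-move state; the locking shown (BO reservation for ttm_bo_set_bulk_move(), the device LRU lock around ttm_lru_bulk_move_tail()) mirrors the conventions of the previous bulk-move helpers.

struct my_vm {
        struct ttm_lru_bulk_move lru_bulk_move; /* hypothetical per-VM bulk-move state */
};

/* Track a BO in the VM's bulk move; called with the BO reserved. */
static void my_vm_track_bo(struct my_vm *vm, struct ttm_buffer_object *bo)
{
        dma_resv_assert_held(bo->base.resv);
        ttm_bo_set_bulk_move(bo, &vm->lru_bulk_move);
}

/* Bump every tracked resource to the LRU tail in one go. */
static void my_vm_bump_lru(struct my_vm *vm, struct ttm_device *bdev)
{
        spin_lock(&bdev->lru_lock);
        ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
        spin_unlock(&bdev->lru_lock);
}

In this sketch ttm_lru_bulk_move_init(&vm->lru_bulk_move) would be called once when my_vm is created, before any buffer object is added to it.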
+ */ +struct ttm_lru_bulk_move { + struct ttm_lru_bulk_move_pos pos[TTM_NUM_MEM_TYPES][TTM_MAX_BO_PRIORITY]; }; /** @@ -266,6 +310,15 @@ ttm_resource_manager_cleanup(struct ttm_resource_manager *man) man->move = NULL; } +void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk); +void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk, + struct ttm_resource *res); +void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk, + struct ttm_resource *res); +void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk); + +void ttm_resource_move_to_lru_tail(struct ttm_resource *res); + void ttm_resource_init(struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_resource *res); @@ -292,6 +345,26 @@ uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man); void ttm_resource_manager_debug(struct ttm_resource_manager *man, struct drm_printer *p); +struct ttm_resource * +ttm_resource_manager_first(struct ttm_resource_manager *man, + struct ttm_resource_cursor *cursor); +struct ttm_resource * +ttm_resource_manager_next(struct ttm_resource_manager *man, + struct ttm_resource_cursor *cursor, + struct ttm_resource *res); + +/** + * ttm_resource_manager_for_each_res - iterate over all resources + * @man: the resource manager + * @cursor: struct ttm_resource_cursor for the current position + * @res: the current resource + * + * Iterate over all the evictable resources in a resource manager. + */ +#define ttm_resource_manager_for_each_res(man, cursor, res) \ + for (res = ttm_resource_manager_first(man, cursor); res; \ + res = ttm_resource_manager_next(man, cursor, res)) + struct ttm_kmap_iter * ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io, struct io_mapping *iomap, @@ -308,4 +381,8 @@ ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io, void ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io, struct ttm_device *bdev, struct ttm_resource *mem); + +void ttm_resource_manager_create_debugfs(struct ttm_resource_manager *man, + struct dentry * parent, + const char *name); #endif diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h index f20832139815..17a0310e8aaa 100644 --- a/include/drm/ttm/ttm_tt.h +++ b/include/drm/ttm/ttm_tt.h @@ -140,6 +140,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc); * @bo: The buffer object we create the ttm for. * @page_flags: Page flags as identified by TTM_TT_FLAG_XX flags. * @caching: the desired caching state of the pages + * @extra_pages: Extra pages needed for the driver. * * Create a struct ttm_tt to back data with system memory pages. * No pages are actually allocated. @@ -147,7 +148,8 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc); * NULL: Out of memory. 
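A hedged sketch, not from this patch, of a driver ttm_tt backend adapted to the extra_pages argument that ttm_tt_init() gains just below; my_ttm_tt_create is hypothetical, and a driver with no private tail pages simply passes 0.

static struct ttm_tt *my_ttm_tt_create(struct ttm_buffer_object *bo,
                                       uint32_t page_flags)
{
        struct ttm_tt *tt;

        tt = kzalloc(sizeof(*tt), GFP_KERNEL);
        if (!tt)
                return NULL;

        /* No driver-private tail pages in this sketch, so extra_pages is 0. */
        if (ttm_tt_init(tt, bo, page_flags, ttm_cached, 0)) {
                kfree(tt);
                return NULL;
        }

        return tt;
}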
*/ int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo, - uint32_t page_flags, enum ttm_caching caching); + uint32_t page_flags, enum ttm_caching caching, + unsigned long extra_pages); int ttm_sg_tt_init(struct ttm_tt *ttm_dma, struct ttm_buffer_object *bo, uint32_t page_flags, enum ttm_caching caching); diff --git a/include/dt-bindings/soc/rockchip,vop2.h b/include/dt-bindings/soc/rockchip,vop2.h new file mode 100644 index 000000000000..6e66a802b96a --- /dev/null +++ b/include/dt-bindings/soc/rockchip,vop2.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ + +#ifndef __DT_BINDINGS_ROCKCHIP_VOP2_H +#define __DT_BINDINGS_ROCKCHIP_VOP2_H + +#define ROCKCHIP_VOP2_EP_RGB0 1 +#define ROCKCHIP_VOP2_EP_HDMI0 2 +#define ROCKCHIP_VOP2_EP_EDP0 3 +#define ROCKCHIP_VOP2_EP_MIPI0 4 +#define ROCKCHIP_VOP2_EP_LVDS0 5 +#define ROCKCHIP_VOP2_EP_MIPI1 6 +#define ROCKCHIP_VOP2_EP_LVDS1 7 + +#endif /* __DT_BINDINGS_ROCKCHIP_VOP2_H */ diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 2097760e8e95..71731796c8c3 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -393,27 +393,30 @@ struct dma_buf { * e.g. exposed in `Implicit Fence Poll Support`_ must follow the * below rules. * - * - Drivers must add a shared fence through dma_resv_add_shared_fence() - * for anything the userspace API considers a read access. This highly - * depends upon the API and window system. + * - Drivers must add a read fence through dma_resv_add_fence() with the + * DMA_RESV_USAGE_READ flag for anything the userspace API considers a + * read access. This highly depends upon the API and window system. * - * - Similarly drivers must set the exclusive fence through - * dma_resv_add_excl_fence() for anything the userspace API considers - * write access. + * - Similarly drivers must add a write fence through + * dma_resv_add_fence() with the DMA_RESV_USAGE_WRITE flag for + * anything the userspace API considers write access. * - * - Drivers may just always set the exclusive fence, since that only + * - Drivers may just always add a write fence, since that only * causes unecessarily synchronization, but no correctness issues. * * - Some drivers only expose a synchronous userspace API with no * pipelining across drivers. These do not set any fences for their * access. An example here is v4l. * + * - Driver should use dma_resv_usage_rw() when retrieving fences as + * dependency for implicit synchronization. + * * DYNAMIC IMPORTER RULES: * * Dynamic importers, see dma_buf_attachment_is_dynamic(), have * additional constraints on how they set up fences: * - * - Dynamic importers must obey the exclusive fence and wait for it to + * - Dynamic importers must obey the write fences and wait for them to * signal before allowing access to the buffer's underlying storage * through the device. * @@ -423,10 +426,9 @@ struct dma_buf { * * IMPORTANT: * - * All drivers must obey the struct dma_resv rules, specifically the - * rules for updating fences, see &dma_resv.fence_excl and - * &dma_resv.fence. If these dependency rules are broken access tracking - * can be lost resulting in use after free issues. + * All drivers and memory management related functions must obey the + * struct dma_resv rules, specifically the rules for updating and + * obeying fences. See enum dma_resv_usage for further descriptions. 
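To make the fence rules above concrete, here is a hedged sketch of an exporter publishing a job fence with the usage-based API this series introduces; my_publish_fence and its is_write parameter are hypothetical.

static int my_publish_fence(struct dma_resv *resv, struct dma_fence *fence,
                            bool is_write)
{
        int ret;

        ret = dma_resv_lock(resv, NULL);
        if (ret)
                return ret;

        /* Slots must be reserved up front; reservation can fail, adding cannot. */
        ret = dma_resv_reserve_fences(resv, 1);
        if (!ret)
                dma_resv_add_fence(resv, fence,
                                   is_write ? DMA_RESV_USAGE_WRITE :
                                              DMA_RESV_USAGE_READ);

        dma_resv_unlock(resv);
        return ret;
}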
*/ struct dma_resv *resv; diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index afdfdfac729f..c8ccbc94d5d2 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -47,24 +47,89 @@ extern struct ww_class reservation_ww_class; +struct dma_resv_list; + /** - * struct dma_resv_list - a list of shared fences - * @rcu: for internal use - * @shared_count: table of shared fences - * @shared_max: for growing shared fence table - * @shared: shared fence table + * enum dma_resv_usage - how the fences from a dma_resv obj are used + * + * This enum describes the different use cases for a dma_resv object and + * controls which fences are returned when queried. + * + * An important fact is that there is the order KERNEL<WRITE<READ<BOOKKEEP and + * when the dma_resv object is asked for fences for one use case the fences + * for the lower use case are returned as well. + * + * For example when asking for WRITE fences then the KERNEL fences are returned + * as well. Similar when asked for READ fences then both WRITE and KERNEL + * fences are returned as well. */ -struct dma_resv_list { - struct rcu_head rcu; - u32 shared_count, shared_max; - struct dma_fence __rcu *shared[]; +enum dma_resv_usage { + /** + * @DMA_RESV_USAGE_KERNEL: For in kernel memory management only. + * + * This should only be used for things like copying or clearing memory + * with a DMA hardware engine for the purpose of kernel memory + * management. + * + * Drivers *always* must wait for those fences before accessing the + * resource protected by the dma_resv object. The only exception for + * that is when the resource is known to be locked down in place by + * pinning it previously. + */ + DMA_RESV_USAGE_KERNEL, + + /** + * @DMA_RESV_USAGE_WRITE: Implicit write synchronization. + * + * This should only be used for userspace command submissions which add + * an implicit write dependency. + */ + DMA_RESV_USAGE_WRITE, + + /** + * @DMA_RESV_USAGE_READ: Implicit read synchronization. + * + * This should only be used for userspace command submissions which add + * an implicit read dependency. + */ + DMA_RESV_USAGE_READ, + + /** + * @DMA_RESV_USAGE_BOOKKEEP: No implicit sync. + * + * This should be used by submissions which don't want to participate in + * implicit synchronization. + * + * The most common case are preemption fences as well as page table + * updates and their TLB flushes. + */ + DMA_RESV_USAGE_BOOKKEEP }; /** + * dma_resv_usage_rw - helper for implicit sync + * @write: true if we create a new implicit sync write + * + * This returns the implicit synchronization usage for write or read accesses, + * see enum dma_resv_usage and &dma_buf.resv. + */ +static inline enum dma_resv_usage dma_resv_usage_rw(bool write) +{ + /* This looks confusing at first sight, but is indeed correct. + * + * The rational is that new write operations needs to wait for the + * existing read and write operations to finish. + * But a new read operation only needs to wait for the existing write + * operations to finish. + */ + return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE; +} + +/** * struct dma_resv - a reservation object manages fences for a buffer * - * There are multiple uses for this, with sometimes slightly different rules in - * how the fence slots are used. + * This is a container for dma_fence objects which needs to handle multiple use + * cases. 
* * One use is to synchronize cross-driver access to a struct dma_buf, either for * dynamic buffer management or just to handle implicit synchronization between @@ -91,62 +156,16 @@ struct dma_resv { struct ww_mutex lock; /** - * @seq: + * @fences: * - * Sequence count for managing RCU read-side synchronization, allows - * read-only access to @fence_excl and @fence while ensuring we take a - * consistent snapshot. - */ - seqcount_ww_mutex_t seq; - - /** - * @fence_excl: - * - * The exclusive fence, if there is one currently. + * Array of fences which where added to the dma_resv object * - * There are two ways to update this fence: - * - * - First by calling dma_resv_add_excl_fence(), which replaces all - * fences attached to the reservation object. To guarantee that no - * fences are lost, this new fence must signal only after all previous - * fences, both shared and exclusive, have signalled. In some cases it - * is convenient to achieve that by attaching a struct dma_fence_array - * with all the new and old fences. - * - * - Alternatively the fence can be set directly, which leaves the - * shared fences unchanged. To guarantee that no fences are lost, this - * new fence must signal only after the previous exclusive fence has - * signalled. Since the shared fences are staying intact, it is not - * necessary to maintain any ordering against those. If semantically - * only a new access is added without actually treating the previous - * one as a dependency the exclusive fences can be strung together - * using struct dma_fence_chain. - * - * Note that actual semantics of what an exclusive or shared fence mean - * is defined by the user, for reservation objects shared across drivers - * see &dma_buf.resv. - */ - struct dma_fence __rcu *fence_excl; - - /** - * @fence: - * - * List of current shared fences. - * - * There are no ordering constraints of shared fences against the - * exclusive fence slot. If a waiter needs to wait for all access, it - * has to wait for both sets of fences to signal. - * - * A new fence is added by calling dma_resv_add_shared_fence(). Since - * this often needs to be done past the point of no return in command + * A new fence is added by calling dma_resv_add_fence(). Since this + * often needs to be done past the point of no return in command * submission it cannot fail, and therefore sufficient slots need to be - * reserved by calling dma_resv_reserve_shared(). - * - * Note that actual semantics of what an exclusive or shared fence mean - * is defined by the user, for reservation objects shared across drivers - * see &dma_buf.resv. + * reserved by calling dma_resv_reserve_fences(). */ - struct dma_resv_list __rcu *fence; + struct dma_resv_list __rcu *fences; }; /** @@ -165,14 +184,14 @@ struct dma_resv_iter { /** @obj: The dma_resv object we iterate over */ struct dma_resv *obj; - /** @all_fences: If all fences should be returned */ - bool all_fences; + /** @usage: Return fences with this usage or lower. 
*/ + enum dma_resv_usage usage; /** @fence: the currently handled fence */ struct dma_fence *fence; - /** @seq: sequence number to check for modifications */ - unsigned int seq; + /** @fence_usage: the usage of the current fence */ + enum dma_resv_usage fence_usage; /** @index: index into the shared fences */ unsigned int index; @@ -180,8 +199,8 @@ struct dma_resv_iter { /** @fences: the shared fences; private, *MUST* not dereference */ struct dma_resv_list *fences; - /** @shared_count: number of shared fences */ - unsigned int shared_count; + /** @num_fences: number of fences */ + unsigned int num_fences; /** @is_restarted: true if this is the first returned fence */ bool is_restarted; @@ -196,14 +215,14 @@ struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor); * dma_resv_iter_begin - initialize a dma_resv_iter object * @cursor: The dma_resv_iter object to initialize * @obj: The dma_resv object which we want to iterate over - * @all_fences: If all fences should be returned or just the exclusive one + * @usage: controls which fences to include, see enum dma_resv_usage. */ static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor, struct dma_resv *obj, - bool all_fences) + enum dma_resv_usage usage) { cursor->obj = obj; - cursor->all_fences = all_fences; + cursor->usage = usage; cursor->fence = NULL; } @@ -220,14 +239,15 @@ static inline void dma_resv_iter_end(struct dma_resv_iter *cursor) } /** - * dma_resv_iter_is_exclusive - test if the current fence is the exclusive one + * dma_resv_iter_usage - Return the usage of the current fence * @cursor: the cursor of the current position * - * Returns true if the currently returned fence is the exclusive one. + * Returns the usage of the currently processed fence. */ -static inline bool dma_resv_iter_is_exclusive(struct dma_resv_iter *cursor) +static inline enum dma_resv_usage +dma_resv_iter_usage(struct dma_resv_iter *cursor) { - return cursor->index == 0; + return cursor->fence_usage; } /** @@ -264,7 +284,7 @@ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor) * dma_resv_for_each_fence - fence iterator * @cursor: a struct dma_resv_iter pointer * @obj: a dma_resv object pointer - * @all_fences: true if all fences should be returned + * @usage: controls which fences to return * @fence: the current fence * * Iterate over the fences in a struct dma_resv object while holding the @@ -273,8 +293,8 @@ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor) * valid as long as the lock is held and so no extra reference to the fence is * taken. 
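A hedged sketch of the locked iterator with its new usage argument (the signature change follows just below), as a driver might use it to gather implicit-sync dependencies; my_job and my_add_job_dependency are hypothetical stand-ins for a driver's job structure and dependency helper.

static int my_gather_deps(struct my_job *job, struct dma_resv *resv,
                          bool is_write)
{
        struct dma_resv_iter cursor;
        struct dma_fence *fence;
        int ret = 0;

        dma_resv_assert_held(resv);

        /* Writes depend on READ and below; reads only on WRITE and below. */
        dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(is_write),
                                fence) {
                ret = my_add_job_dependency(job, fence);
                if (ret)
                        break;
        }

        return ret;
}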
*/ -#define dma_resv_for_each_fence(cursor, obj, all_fences, fence) \ - for (dma_resv_iter_begin(cursor, obj, all_fences), \ +#define dma_resv_for_each_fence(cursor, obj, usage, fence) \ + for (dma_resv_iter_begin(cursor, obj, usage), \ fence = dma_resv_iter_first(cursor); fence; \ fence = dma_resv_iter_next(cursor)) @@ -282,9 +302,9 @@ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor) #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base) #ifdef CONFIG_DEBUG_MUTEXES -void dma_resv_reset_shared_max(struct dma_resv *obj); +void dma_resv_reset_max_fences(struct dma_resv *obj); #else -static inline void dma_resv_reset_shared_max(struct dma_resv *obj) {} +static inline void dma_resv_reset_max_fences(struct dma_resv *obj) {} #endif /** @@ -430,51 +450,26 @@ static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj) */ static inline void dma_resv_unlock(struct dma_resv *obj) { - dma_resv_reset_shared_max(obj); + dma_resv_reset_max_fences(obj); ww_mutex_unlock(&obj->lock); } -/** - * dma_resv_excl_fence - return the object's exclusive fence - * @obj: the reservation object - * - * Returns the exclusive fence (if any). Caller must either hold the objects - * through dma_resv_lock() or the RCU read side lock through rcu_read_lock(), - * or one of the variants of each - * - * RETURNS - * The exclusive fence or NULL - */ -static inline struct dma_fence * -dma_resv_excl_fence(struct dma_resv *obj) -{ - return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj)); -} - -/** - * dma_resv_shared_list - get the reservation object's shared fence list - * @obj: the reservation object - * - * Returns the shared fence list. Caller must either hold the objects - * through dma_resv_lock() or the RCU read side lock through rcu_read_lock(), - * or one of the variants of each - */ -static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj) -{ - return rcu_dereference_check(obj->fence, dma_resv_held(obj)); -} - void dma_resv_init(struct dma_resv *obj); void dma_resv_fini(struct dma_resv *obj); -int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences); -void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence); -void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence); -int dma_resv_get_fences(struct dma_resv *obj, bool write, +int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences); +void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence, + enum dma_resv_usage usage); +void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context, + struct dma_fence *fence, + enum dma_resv_usage usage); +int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage, unsigned int *num_fences, struct dma_fence ***fences); +int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage, + struct dma_fence **fence); int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src); -long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr, - unsigned long timeout); -bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all); +long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage, + bool intr, unsigned long timeout); +bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage); void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq); #endif /* _LINUX_RESERVATION_H */ diff --git a/include/linux/efi.h b/include/linux/efi.h index db424f3dc3f2..7d9b0bb47eb3 
100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -1349,11 +1349,7 @@ static inline struct efi_mokvar_table_entry *efi_mokvar_entry_find( } #endif -#ifdef CONFIG_SYSFB extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt); -#else -static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt) { } -#endif struct linux_efi_coco_secret_area { u64 base_pa; diff --git a/include/linux/fb.h b/include/linux/fb.h index 9a77ab615c36..2892145468c9 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -201,12 +201,19 @@ struct fb_pixmap { }; #ifdef CONFIG_FB_DEFERRED_IO +struct fb_deferred_io_pageref { + struct page *page; + unsigned long offset; + /* private */ + struct list_head list; +}; + struct fb_deferred_io { /* delay between mkwrite and deferred handler */ unsigned long delay; - bool sort_pagelist; /* sort pagelist by offset */ - struct mutex lock; /* mutex that protects the page list */ - struct list_head pagelist; /* list of touched pages */ + bool sort_pagereflist; /* sort pagelist by offset */ + struct mutex lock; /* mutex that protects the pageref list */ + struct list_head pagereflist; /* list of pagerefs for touched pages */ /* callback */ void (*first_io)(struct fb_info *info); void (*deferred_io)(struct fb_info *info, struct list_head *pagelist); @@ -450,7 +457,6 @@ struct fb_info { struct fb_var_screeninfo var; /* Current var */ struct fb_fix_screeninfo fix; /* Current fix */ struct fb_monspecs monspecs; /* Current Monitor specs */ - struct work_struct queue; /* Framebuffer event queue */ struct fb_pixmap pixmap; /* Image hardware mapper */ struct fb_pixmap sprite; /* Cursor hardware mapper */ struct fb_cmap cmap; /* Current cmap */ @@ -469,6 +475,8 @@ struct fb_info { #endif #ifdef CONFIG_FB_DEFERRED_IO struct delayed_work deferred_work; + unsigned long npagerefs; + struct fb_deferred_io_pageref *pagerefs; struct fb_deferred_io *fbdefio; #endif @@ -612,7 +620,6 @@ extern int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, const char *name); extern int remove_conflicting_framebuffers(struct apertures_struct *a, const char *name, bool primary); -extern bool is_firmware_framebuffer(struct apertures_struct *a); extern int fb_prepare_logo(struct fb_info *fb_info, int rotate); extern int fb_show_logo(struct fb_info *fb_info, int rotate); extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size); @@ -662,7 +669,7 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch, /* drivers/video/fb_defio.c */ int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma); -extern void fb_deferred_io_init(struct fb_info *info); +extern int fb_deferred_io_init(struct fb_info *info); extern void fb_deferred_io_open(struct fb_info *info, struct inode *inode, struct file *file); diff --git a/include/linux/host1x.h b/include/linux/host1x.h index e8dc5bc41f79..c0bf4e581fe9 100644 --- a/include/linux/host1x.h +++ b/include/linux/host1x.h @@ -31,6 +31,11 @@ u64 host1x_get_dma_mask(struct host1x *host1x); * struct host1x_bo_cache - host1x buffer object cache * @mappings: list of mappings * @lock: synchronizes accesses to the list of mappings + * + * Note that entries are not periodically evicted from this cache and instead need to be + * explicitly released. This is used primarily for DRM/KMS where the cache's reference is + * released when the last reference to a buffer object represented by a mapping in this + * cache is dropped. 
*/ struct host1x_bo_cache { struct list_head mappings; @@ -81,6 +86,7 @@ struct host1x_client_ops { * @parent: pointer to parent structure * @usecount: reference count for this structure * @lock: mutex for mutually exclusive concurrency + * @cache: host1x buffer object cache */ struct host1x_client { struct list_head list; diff --git a/include/linux/mdev.h b/include/linux/mdev.h index 15d03f6532d0..bb539794f54a 100644 --- a/include/linux/mdev.h +++ b/include/linux/mdev.h @@ -15,7 +15,6 @@ struct mdev_type; struct mdev_device { struct device dev; guid_t uuid; - void *driver_data; struct list_head next; struct mdev_type *type; bool active; @@ -30,74 +29,6 @@ unsigned int mdev_get_type_group_id(struct mdev_device *mdev); unsigned int mtype_get_type_group_id(struct mdev_type *mtype); struct device *mtype_get_parent_dev(struct mdev_type *mtype); -/** - * struct mdev_parent_ops - Structure to be registered for each parent device to - * register the device to mdev module. - * - * @owner: The module owner. - * @device_driver: Which device driver to probe() on newly created devices - * @dev_attr_groups: Attributes of the parent device. - * @mdev_attr_groups: Attributes of the mediated device. - * @supported_type_groups: Attributes to define supported types. It is mandatory - * to provide supported types. - * @create: Called to allocate basic resources in parent device's - * driver for a particular mediated device. It is - * mandatory to provide create ops. - * @mdev: mdev_device structure on of mediated device - * that is being created - * Returns integer: success (0) or error (< 0) - * @remove: Called to free resources in parent device's driver for - * a mediated device. It is mandatory to provide 'remove' - * ops. - * @mdev: mdev_device device structure which is being - * destroyed - * Returns integer: success (0) or error (< 0) - * @read: Read emulation callback - * @mdev: mediated device structure - * @buf: read buffer - * @count: number of bytes to read - * @ppos: address. - * Retuns number on bytes read on success or error. - * @write: Write emulation callback - * @mdev: mediated device structure - * @buf: write buffer - * @count: number of bytes to be written - * @ppos: address. - * Retuns number on bytes written on success or error. - * @ioctl: IOCTL callback - * @mdev: mediated device structure - * @cmd: ioctl command - * @arg: arguments to ioctl - * @mmap: mmap callback - * @mdev: mediated device structure - * @vma: vma structure - * @request: request callback to release device - * @mdev: mediated device structure - * @count: request sequence number - * Parent device that support mediated device should be registered with mdev - * module with mdev_parent_ops structure. 
- **/ -struct mdev_parent_ops { - struct module *owner; - struct mdev_driver *device_driver; - const struct attribute_group **dev_attr_groups; - const struct attribute_group **mdev_attr_groups; - struct attribute_group **supported_type_groups; - - int (*create)(struct mdev_device *mdev); - int (*remove)(struct mdev_device *mdev); - int (*open_device)(struct mdev_device *mdev); - void (*close_device)(struct mdev_device *mdev); - ssize_t (*read)(struct mdev_device *mdev, char __user *buf, - size_t count, loff_t *ppos); - ssize_t (*write)(struct mdev_device *mdev, const char __user *buf, - size_t count, loff_t *ppos); - long (*ioctl)(struct mdev_device *mdev, unsigned int cmd, - unsigned long arg); - int (*mmap)(struct mdev_device *mdev, struct vm_area_struct *vma); - void (*request)(struct mdev_device *mdev, unsigned int count); -}; - /* interface for exporting mdev supported type attributes */ struct mdev_type_attribute { struct attribute attr; @@ -122,23 +53,18 @@ struct mdev_type_attribute mdev_type_attr_##_name = \ * struct mdev_driver - Mediated device driver * @probe: called when new device created * @remove: called when device removed + * @supported_type_groups: Attributes to define supported types. It is mandatory + * to provide supported types. * @driver: device driver structure * **/ struct mdev_driver { int (*probe)(struct mdev_device *dev); void (*remove)(struct mdev_device *dev); + struct attribute_group **supported_type_groups; struct device_driver driver; }; -static inline void *mdev_get_drvdata(struct mdev_device *mdev) -{ - return mdev->driver_data; -} -static inline void mdev_set_drvdata(struct mdev_device *mdev, void *data) -{ - mdev->driver_data = data; -} static inline const guid_t *mdev_uuid(struct mdev_device *mdev) { return &mdev->uuid; @@ -146,7 +72,7 @@ static inline const guid_t *mdev_uuid(struct mdev_device *mdev) extern struct bus_type mdev_bus_type; -int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops); +int mdev_register_device(struct device *dev, struct mdev_driver *mdev_driver); void mdev_unregister_device(struct device *dev); int mdev_register_driver(struct mdev_driver *drv); diff --git a/include/linux/mei_aux.h b/include/linux/mei_aux.h new file mode 100644 index 000000000000..587f25128848 --- /dev/null +++ b/include/linux/mei_aux.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022, Intel Corporation. All rights reserved. + */ +#ifndef _LINUX_MEI_AUX_H +#define _LINUX_MEI_AUX_H + +#include <linux/auxiliary_bus.h> + +struct mei_aux_device { + struct auxiliary_device aux_dev; + int irq; + struct resource bar; +}; + +#define auxiliary_dev_to_mei_aux_dev(auxiliary_dev) \ + container_of(auxiliary_dev, struct mei_aux_device, aux_dev) + +#endif /* _LINUX_MEI_AUX_H */ diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 37ded6b8fee6..3926e9027947 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -17,7 +17,6 @@ #include <linux/kcsan-checks.h> #include <linux/lockdep.h> #include <linux/mutex.h> -#include <linux/ww_mutex.h> #include <linux/preempt.h> #include <linux/spinlock.h> @@ -164,7 +163,7 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s) * static initializer or init function. This enables lockdep to validate * that the write side critical section is properly serialized. * - * LOCKNAME: raw_spinlock, spinlock, rwlock, mutex, or ww_mutex. 
+ * LOCKNAME: raw_spinlock, spinlock, rwlock or mutex */ /* @@ -184,7 +183,6 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s) #define seqcount_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, spinlock) #define seqcount_rwlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, rwlock) #define seqcount_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, mutex) -#define seqcount_ww_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, ww_mutex) /* * SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers @@ -277,7 +275,6 @@ SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, s->lock, raw_s SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, s->lock, spin, spin_lock(s->lock)) SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, s->lock, read, read_lock(s->lock)) SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock, mutex, mutex_lock(s->lock)) -SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mutex, ww_mutex_lock(s->lock, NULL)) /* * SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t @@ -304,8 +301,7 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu __seqprop_case((s), raw_spinlock, prop), \ __seqprop_case((s), spinlock, prop), \ __seqprop_case((s), rwlock, prop), \ - __seqprop_case((s), mutex, prop), \ - __seqprop_case((s), ww_mutex, prop)) + __seqprop_case((s), mutex, prop)) #define seqprop_ptr(s) __seqprop(s, ptr) #define seqprop_sequence(s) __seqprop(s, sequence) diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 1d65c1fbc4ec..9a1d210d135d 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -1150,7 +1150,9 @@ struct drm_amdgpu_info_video_caps { #define AMDGPU_FAMILY_RV 142 /* Raven */ #define AMDGPU_FAMILY_NV 143 /* Navi10 */ #define AMDGPU_FAMILY_VGH 144 /* Van Gogh */ +#define AMDGPU_FAMILY_GC_11_0_0 145 /* GC 11.0.0 */ #define AMDGPU_FAMILY_YC 146 /* Yellow Carp */ +#define AMDGPU_FAMILY_GC_11_0_1 148 /* GC 11.0.1 */ #define AMDGPU_FAMILY_GC_10_3_6 149 /* GC 10.3.6 */ #define AMDGPU_FAMILY_GC_10_3_7 151 /* GC 10.3.7 */ diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index fc0c1454d275..f1972154a594 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -573,6 +573,53 @@ extern "C" { #define I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC fourcc_mod_code(INTEL, 8) /* + * Intel Tile 4 layout + * + * This is a tiled layout using 4KB tiles in a row-major layout. It has the same + * shape as Tile Y at two granularities: 4KB (128B x 32) and 64B (16B x 4). It + * only differs from Tile Y at the 256B granularity in between. At this + * granularity, Tile Y has a shape of 16B x 32 rows, but this tiling has a shape + * of 64B x 8 rows. + */ +#define I915_FORMAT_MOD_4_TILED fourcc_mod_code(INTEL, 9) + +/* + * Intel color control surfaces (CCS) for DG2 render compression. + * + * The main surface is Tile 4 and at plane index 0. The CCS data is stored + * outside of the GEM object in a reserved memory area dedicated for the + * storage of the CCS data for all RC/RC_CC/MC compressible GEM objects. The + * main surface pitch is required to be a multiple of four Tile 4 widths. + */ +#define I915_FORMAT_MOD_4_TILED_DG2_RC_CCS fourcc_mod_code(INTEL, 10) + +/* + * Intel color control surfaces (CCS) for DG2 media compression. + * + * The main surface is Tile 4 and at plane index 0. For semi-planar formats + * like NV12, the Y and UV planes are Tile 4 and are located at plane indices + * 0 and 1, respectively. 
The CCS for all planes are stored outside of the + * GEM object in a reserved memory area dedicated for the storage of the + * CCS data for all RC/RC_CC/MC compressible GEM objects. The main surface + * pitch is required to be a multiple of four Tile 4 widths. + */ +#define I915_FORMAT_MOD_4_TILED_DG2_MC_CCS fourcc_mod_code(INTEL, 11) + +/* + * Intel Color Control Surface with Clear Color (CCS) for DG2 render compression. + * + * The main surface is Tile 4 and at plane index 0. The CCS data is stored + * outside of the GEM object in a reserved memory area dedicated for the + * storage of the CCS data for all RC/RC_CC/MC compressible GEM objects. The + * main surface pitch is required to be a multiple of four Tile 4 widths. The + * clear color is stored at plane index 1 and the pitch should be ignored. The + * format of the 256 bits of clear color data matches the one used for the + * I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC modifier, see its description + * for details. + */ +#define I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC fourcc_mod_code(INTEL, 12) + +/* * Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks * * Macroblocks are laid in a Z-shape, and each pixel data is following the @@ -609,6 +656,28 @@ extern "C" { */ #define DRM_FORMAT_MOD_QCOM_COMPRESSED fourcc_mod_code(QCOM, 1) +/* + * Qualcomm Tiled Format + * + * Similar to DRM_FORMAT_MOD_QCOM_COMPRESSED but not compressed. + * Implementation may be platform and base-format specific. + * + * Each macrotile consists of m x n (mostly 4 x 4) tiles. + * Pixel data pitch/stride is aligned with macrotile width. + * Pixel data height is aligned with macrotile height. + * Entire pixel data buffer is aligned with 4k(bytes). + */ +#define DRM_FORMAT_MOD_QCOM_TILED3 fourcc_mod_code(QCOM, 3) + +/* + * Qualcomm Alternate Tiled Format + * + * Alternate tiled format typically only used within GMEM. + * Implementation may be platform and base-format specific. + */ +#define DRM_FORMAT_MOD_QCOM_TILED2 fourcc_mod_code(QCOM, 2) + + /* Vivante framebuffer modifiers */ /* diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 05c3642aaece..a2def7b27009 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -154,25 +154,77 @@ enum i915_mocs_table_index { I915_MOCS_CACHED, }; -/* +/** + * enum drm_i915_gem_engine_class - uapi engine type enumeration + * * Different engines serve different roles, and there may be more than one - * engine serving each role. enum drm_i915_gem_engine_class provides a - * classification of the role of the engine, which may be used when requesting - * operations to be performed on a certain subset of engines, or for providing - * information about that group. + * engine serving each role. This enum provides a classification of the role + * of the engine, which may be used when requesting operations to be performed + * on a certain subset of engines, or for providing information about that + * group. */ enum drm_i915_gem_engine_class { + /** + * @I915_ENGINE_CLASS_RENDER: + * + * Render engines support instructions used for 3D, Compute (GPGPU), + * and programmable media workloads. These instructions fetch data and + * dispatch individual work items to threads that operate in parallel. + * The threads run small programs (called "kernels" or "shaders") on + * the GPU's execution units (EUs). 
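The DG2 Tile 4 and CCS modifiers added above are meant to be passed by userspace when creating framebuffers. A minimal sketch using libdrm's drmModeAddFB2WithModifiers(), assuming headers new enough to carry the DG2 defines and a single BO that holds both the Tile 4 main surface and the 256-bit clear-color block (fd, bo_handle, pitch, cc_offset and the 4096x2160 size are placeholders); per the comment above, the clear color goes in plane 1 and its pitch is ignored:

#include <stdint.h>
#include <drm_fourcc.h>
#include <xf86drmMode.h>

/* Build a two-plane framebuffer for a render-compressed XRGB8888 surface:
 * plane 0 is the Tile 4 main surface, plane 1 is the clear-color block. */
static uint32_t create_dg2_rc_cc_fb(int fd, uint32_t bo_handle,
                                    uint32_t pitch, uint32_t cc_offset)
{
        uint32_t fb_id = 0;
        uint32_t handles[4] = { bo_handle, bo_handle };
        uint32_t pitches[4] = { pitch, pitch };      /* plane 1 pitch is ignored */
        uint32_t offsets[4] = { 0, cc_offset };
        uint64_t modifiers[4] = {
                I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC,
                I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC,
        };

        if (drmModeAddFB2WithModifiers(fd, 4096, 2160, DRM_FORMAT_XRGB8888,
                                       handles, pitches, offsets, modifiers,
                                       &fb_id, DRM_MODE_FB_MODIFIERS))
                return 0;

        return fb_id;
}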
+ */ I915_ENGINE_CLASS_RENDER = 0, + + /** + * @I915_ENGINE_CLASS_COPY: + * + * Copy engines (also referred to as "blitters") support instructions + * that move blocks of data from one location in memory to another, + * or that fill a specified location of memory with fixed data. + * Copy engines can perform pre-defined logical or bitwise operations + * on the source, destination, or pattern data. + */ I915_ENGINE_CLASS_COPY = 1, + + /** + * @I915_ENGINE_CLASS_VIDEO: + * + * Video engines (also referred to as "bit stream decode" (BSD) or + * "vdbox") support instructions that perform fixed-function media + * decode and encode. + */ I915_ENGINE_CLASS_VIDEO = 2, + + /** + * @I915_ENGINE_CLASS_VIDEO_ENHANCE: + * + * Video enhancement engines (also referred to as "vebox") support + * instructions related to image enhancement. + */ I915_ENGINE_CLASS_VIDEO_ENHANCE = 3, - /* should be kept compact */ + /** + * @I915_ENGINE_CLASS_COMPUTE: + * + * Compute engines support a subset of the instructions available + * on render engines: compute engines support Compute (GPGPU) and + * programmable media workloads, but do not support the 3D pipeline. + */ + I915_ENGINE_CLASS_COMPUTE = 4, + + /* Values in this enum should be kept compact. */ + /** + * @I915_ENGINE_CLASS_INVALID: + * + * Placeholder value to represent an invalid engine class assignment. + */ I915_ENGINE_CLASS_INVALID = -1 }; -/* +/** + * struct i915_engine_class_instance - Engine class/instance identifier + * * There may be more than one engine fulfilling any role within the system. * Each engine of a class is given a unique instance number and therefore * any engine can be specified by its class:instance tuplet. APIs that allow @@ -180,10 +232,21 @@ enum drm_i915_gem_engine_class { * for this identification. */ struct i915_engine_class_instance { - __u16 engine_class; /* see enum drm_i915_gem_engine_class */ - __u16 engine_instance; + /** + * @engine_class: + * + * Engine class from enum drm_i915_gem_engine_class + */ + __u16 engine_class; #define I915_ENGINE_CLASS_INVALID_NONE -1 #define I915_ENGINE_CLASS_INVALID_VIRTUAL -2 + + /** + * @engine_instance: + * + * Engine instance. + */ + __u16 engine_instance; }; /** @@ -2657,24 +2720,65 @@ enum drm_i915_perf_record_type { DRM_I915_PERF_RECORD_MAX /* non-ABI */ }; -/* +/** + * struct drm_i915_perf_oa_config + * * Structure to upload perf dynamic configuration into the kernel. */ struct drm_i915_perf_oa_config { - /** String formatted like "%08x-%04x-%04x-%04x-%012x" */ + /** + * @uuid: + * + * String formatted like "%\08x-%\04x-%\04x-%\04x-%\012x" + */ char uuid[36]; + /** + * @n_mux_regs: + * + * Number of mux regs in &mux_regs_ptr. + */ __u32 n_mux_regs; + + /** + * @n_boolean_regs: + * + * Number of boolean regs in &boolean_regs_ptr. + */ __u32 n_boolean_regs; + + /** + * @n_flex_regs: + * + * Number of flex regs in &flex_regs_ptr. + */ __u32 n_flex_regs; - /* - * These fields are pointers to tuples of u32 values (register address, - * value). For example the expected length of the buffer pointed by - * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs). + /** + * @mux_regs_ptr: + * + * Pointer to tuples of u32 values (register address, value) for mux + * registers. Expected length of buffer is (2 * sizeof(u32) * + * &n_mux_regs). */ __u64 mux_regs_ptr; + + /** + * @boolean_regs_ptr: + * + * Pointer to tuples of u32 values (register address, value) for mux + * registers. Expected length of buffer is (2 * sizeof(u32) * + * &n_boolean_regs). 
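Engines are addressed through the class:instance tuple documented above, and the new compute class plugs into the same scheme. A minimal sketch of building such identifiers (the instance numbers are illustrative; real ones come from an engine-info query):

#include <drm/i915_drm.h>

/* Identify the first render engine and the second compute engine.  The same
 * tuple is used wherever the uAPI expects a struct i915_engine_class_instance,
 * e.g. engine queries or context engine maps. */
static const struct i915_engine_class_instance rcs0 = {
        .engine_class    = I915_ENGINE_CLASS_RENDER,
        .engine_instance = 0,
};

static const struct i915_engine_class_instance ccs1 = {
        .engine_class    = I915_ENGINE_CLASS_COMPUTE,  /* added in this patch */
        .engine_instance = 1,
};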
+ */ __u64 boolean_regs_ptr; + + /** + * @flex_regs_ptr: + * + * Pointer to tuples of u32 values (register address, value) for mux + * registers. Expected length of buffer is (2 * sizeof(u32) * + * &n_flex_regs). + */ __u64 flex_regs_ptr; }; @@ -2685,12 +2789,24 @@ struct drm_i915_perf_oa_config { * @data_ptr is also depends on the specific @query_id. */ struct drm_i915_query_item { - /** @query_id: The id for this query */ + /** + * @query_id: + * + * The id for this query. Currently accepted query IDs are: + * - %DRM_I915_QUERY_TOPOLOGY_INFO (see struct drm_i915_query_topology_info) + * - %DRM_I915_QUERY_ENGINE_INFO (see struct drm_i915_engine_info) + * - %DRM_I915_QUERY_PERF_CONFIG (see struct drm_i915_query_perf_config) + * - %DRM_I915_QUERY_MEMORY_REGIONS (see struct drm_i915_query_memory_regions) + * - %DRM_I915_QUERY_HWCONFIG_BLOB (see `GuC HWCONFIG blob uAPI`) + * - %DRM_I915_QUERY_GEOMETRY_SUBSLICES (see struct drm_i915_query_topology_info) + */ __u64 query_id; -#define DRM_I915_QUERY_TOPOLOGY_INFO 1 -#define DRM_I915_QUERY_ENGINE_INFO 2 -#define DRM_I915_QUERY_PERF_CONFIG 3 -#define DRM_I915_QUERY_MEMORY_REGIONS 4 +#define DRM_I915_QUERY_TOPOLOGY_INFO 1 +#define DRM_I915_QUERY_ENGINE_INFO 2 +#define DRM_I915_QUERY_PERF_CONFIG 3 +#define DRM_I915_QUERY_MEMORY_REGIONS 4 +#define DRM_I915_QUERY_HWCONFIG_BLOB 5 +#define DRM_I915_QUERY_GEOMETRY_SUBSLICES 6 /* Must be kept compact -- no holes and well documented */ /** @@ -2706,14 +2822,17 @@ struct drm_i915_query_item { /** * @flags: * - * When query_id == DRM_I915_QUERY_TOPOLOGY_INFO, must be 0. + * When &query_id == %DRM_I915_QUERY_TOPOLOGY_INFO, must be 0. * - * When query_id == DRM_I915_QUERY_PERF_CONFIG, must be one of the + * When &query_id == %DRM_I915_QUERY_PERF_CONFIG, must be one of the * following: * - * - DRM_I915_QUERY_PERF_CONFIG_LIST - * - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID - * - DRM_I915_QUERY_PERF_CONFIG_FOR_UUID + * - %DRM_I915_QUERY_PERF_CONFIG_LIST + * - %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID + * - %DRM_I915_QUERY_PERF_CONFIG_FOR_UUID + * + * When &query_id == %DRM_I915_QUERY_GEOMETRY_SUBSLICES must contain + * a struct i915_engine_class_instance that references a render engine. */ __u32 flags; #define DRM_I915_QUERY_PERF_CONFIG_LIST 1 @@ -2771,66 +2890,112 @@ struct drm_i915_query { __u64 items_ptr; }; -/* - * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO : - * - * data: contains the 3 pieces of information : - * - * - the slice mask with one bit per slice telling whether a slice is - * available. The availability of slice X can be queried with the following - * formula : - * - * (data[X / 8] >> (X % 8)) & 1 - * - * - the subslice mask for each slice with one bit per subslice telling - * whether a subslice is available. Gen12 has dual-subslices, which are - * similar to two gen11 subslices. For gen12, this array represents dual- - * subslices. The availability of subslice Y in slice X can be queried - * with the following formula : - * - * (data[subslice_offset + - * X * subslice_stride + - * Y / 8] >> (Y % 8)) & 1 - * - * - the EU mask for each subslice in each slice with one bit per EU telling - * whether an EU is available. 
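The query items above follow the usual i915 two-pass pattern: a first DRM_IOCTL_I915_QUERY call with a zero length reports the required buffer size, and a second call fills the caller-allocated buffer. A minimal sketch for the new DRM_I915_QUERY_GEOMETRY_SUBSLICES id, which, as documented above, takes the render engine to inspect in the item's flags; the engine identifier is built as in the previous sketch, fd is assumed to be an open i915 DRM fd, and error handling is elided:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static struct drm_i915_query_topology_info *
query_geometry_subslices(int fd, struct i915_engine_class_instance rcs)
{
        struct drm_i915_query_item item = {
                .query_id = DRM_I915_QUERY_GEOMETRY_SUBSLICES,
        };
        struct drm_i915_query query = {
                .num_items = 1,
                .items_ptr = (uintptr_t)&item,
        };
        struct drm_i915_query_topology_info *info;

        /* The class:instance pair fits exactly in the 32-bit flags field. */
        memcpy(&item.flags, &rcs, sizeof(rcs));

        ioctl(fd, DRM_IOCTL_I915_QUERY, &query);  /* pass 1: sets item.length */
        info = calloc(1, item.length);
        item.data_ptr = (uintptr_t)info;
        ioctl(fd, DRM_IOCTL_I915_QUERY, &query);  /* pass 2: writes the data */

        return info;
}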
The availability of EU Z in subslice Y in - * slice X can be queried with the following formula : +/** + * struct drm_i915_query_topology_info * - * (data[eu_offset + - * (X * max_subslices + Y) * eu_stride + - * Z / 8] >> (Z % 8)) & 1 + * Describes slice/subslice/EU information queried by + * %DRM_I915_QUERY_TOPOLOGY_INFO */ struct drm_i915_query_topology_info { - /* + /** + * @flags: + * * Unused for now. Must be cleared to zero. */ __u16 flags; + /** + * @max_slices: + * + * The number of bits used to express the slice mask. + */ __u16 max_slices; + + /** + * @max_subslices: + * + * The number of bits used to express the subslice mask. + */ __u16 max_subslices; + + /** + * @max_eus_per_subslice: + * + * The number of bits in the EU mask that correspond to a single + * subslice's EUs. + */ __u16 max_eus_per_subslice; - /* + /** + * @subslice_offset: + * * Offset in data[] at which the subslice masks are stored. */ __u16 subslice_offset; - /* + /** + * @subslice_stride: + * * Stride at which each of the subslice masks for each slice are * stored. */ __u16 subslice_stride; - /* + /** + * @eu_offset: + * * Offset in data[] at which the EU masks are stored. */ __u16 eu_offset; - /* + /** + * @eu_stride: + * * Stride at which each of the EU masks for each subslice are stored. */ __u16 eu_stride; + /** + * @data: + * + * Contains 3 pieces of information : + * + * - The slice mask with one bit per slice telling whether a slice is + * available. The availability of slice X can be queried with the + * following formula : + * + * .. code:: c + * + * (data[X / 8] >> (X % 8)) & 1 + * + * Starting with Xe_HP platforms, Intel hardware no longer has + * traditional slices so i915 will always report a single slice + * (hardcoded slicemask = 0x1) which contains all of the platform's + * subslices. I.e., the mask here does not reflect any of the newer + * hardware concepts such as "gslices" or "cslices" since userspace + * is capable of inferring those from the subslice mask. + * + * - The subslice mask for each slice with one bit per subslice telling + * whether a subslice is available. Starting with Gen12 we use the + * term "subslice" to refer to what the hardware documentation + * describes as a "dual-subslices." The availability of subslice Y + * in slice X can be queried with the following formula : + * + * .. code:: c + * + * (data[subslice_offset + X * subslice_stride + Y / 8] >> (Y % 8)) & 1 + * + * - The EU mask for each subslice in each slice, with one bit per EU + * telling whether an EU is available. The availability of EU Z in + * subslice Y in slice X can be queried with the following formula : + * + * .. code:: c + * + * (data[eu_offset + + * (X * max_subslices + Y) * eu_stride + + * Z / 8 + * ] >> (Z % 8)) & 1 + */ __u8 data[]; }; @@ -2951,52 +3116,68 @@ struct drm_i915_query_engine_info { struct drm_i915_engine_info engines[]; }; -/* - * Data written by the kernel with query DRM_I915_QUERY_PERF_CONFIG. +/** + * struct drm_i915_query_perf_config + * + * Data written by the kernel with query %DRM_I915_QUERY_PERF_CONFIG and + * %DRM_I915_QUERY_GEOMETRY_SUBSLICES. */ struct drm_i915_query_perf_config { union { - /* - * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets - * this fields to the number of configurations available. + /** + * @n_configs: + * + * When &drm_i915_query_item.flags == + * %DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets this fields to + * the number of configurations available. 
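The three formulas in the data[] documentation above translate directly into code. A minimal sketch that decodes a topology blob returned by DRM_I915_QUERY_TOPOLOGY_INFO or by the geometry-subslices query sketched earlier (the printing is only for illustration):

#include <stdint.h>
#include <stdio.h>
#include <drm/i915_drm.h>

/* Walk the slice mask, the per-slice subslice masks and the per-subslice
 * EU masks exactly as laid out in the kernel-doc above. */
static void dump_topology(const struct drm_i915_query_topology_info *info)
{
        const uint8_t *data = info->data;

        for (int s = 0; s < info->max_slices; s++) {
                if (!((data[s / 8] >> (s % 8)) & 1))
                        continue;
                printf("slice %d:\n", s);

                for (int ss = 0; ss < info->max_subslices; ss++) {
                        int avail = (data[info->subslice_offset +
                                          s * info->subslice_stride +
                                          ss / 8] >> (ss % 8)) & 1;
                        if (!avail)
                                continue;

                        int eus = 0;
                        for (int eu = 0; eu < info->max_eus_per_subslice; eu++)
                                eus += (data[info->eu_offset +
                                             (s * info->max_subslices + ss) * info->eu_stride +
                                             eu / 8] >> (eu % 8)) & 1;

                        printf("  subslice %d: %d EUs\n", ss, eus);
                }
        }
}

The same walk applies to the geometry-subslices result, since that query reuses struct drm_i915_query_topology_info.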
*/ __u64 n_configs; - /* - * When query_id == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID, - * i915 will use the value in this field as configuration - * identifier to decide what data to write into config_ptr. + /** + * @config: + * + * When &drm_i915_query_item.flags == + * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID, i915 will use the + * value in this field as configuration identifier to decide + * what data to write into config_ptr. */ __u64 config; - /* - * When query_id == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID, - * i915 will use the value in this field as configuration - * identifier to decide what data to write into config_ptr. + /** + * @uuid: + * + * When &drm_i915_query_item.flags == + * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID, i915 will use the + * value in this field as configuration identifier to decide + * what data to write into config_ptr. * * String formatted like "%08x-%04x-%04x-%04x-%012x" */ char uuid[36]; }; - /* + /** + * @flags: + * * Unused for now. Must be cleared to zero. */ __u32 flags; - /* - * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 will - * write an array of __u64 of configuration identifiers. + /** + * @data: * - * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA, i915 will - * write a struct drm_i915_perf_oa_config. If the following fields of - * drm_i915_perf_oa_config are set not set to 0, i915 will write into - * the associated pointers the values of submitted when the + * When &drm_i915_query_item.flags == %DRM_I915_QUERY_PERF_CONFIG_LIST, + * i915 will write an array of __u64 of configuration identifiers. + * + * When &drm_i915_query_item.flags == %DRM_I915_QUERY_PERF_CONFIG_DATA, + * i915 will write a struct drm_i915_perf_oa_config. If the following + * fields of struct drm_i915_perf_oa_config are not set to 0, i915 will + * write into the associated pointers the values of submitted when the * configuration was created : * - * - n_mux_regs - * - n_boolean_regs - * - n_flex_regs + * - &drm_i915_perf_oa_config.n_mux_regs + * - &drm_i915_perf_oa_config.n_boolean_regs + * - &drm_i915_perf_oa_config.n_flex_regs */ __u8 data[]; }; @@ -3135,6 +3316,16 @@ struct drm_i915_query_memory_regions { }; /** + * DOC: GuC HWCONFIG blob uAPI + * + * The GuC produces a blob with information about the current device. + * i915 reads this blob from GuC and makes it available via this uAPI. + * + * The format and meaning of the blob content are documented in the + * Programmer's Reference Manual. + */ + +/** * struct drm_i915_gem_create_ext - Existing gem_create behaviour, with added * extension support using struct i915_user_extension. * diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h index 07efc8033492..3c7b097c4e3d 100644 --- a/include/uapi/drm/msm_drm.h +++ b/include/uapi/drm/msm_drm.h @@ -82,6 +82,10 @@ struct drm_msm_timespec { #define MSM_PARAM_FAULTS 0x09 /* RO */ #define MSM_PARAM_SUSPENDS 0x0a /* RO */ #define MSM_PARAM_SYSPROF 0x0b /* WO: 1 preserves perfcntrs, 2 also disables suspend */ +#define MSM_PARAM_COMM 0x0c /* WO: override for task->comm */ +#define MSM_PARAM_CMDLINE 0x0d /* WO: override for task cmdline */ +#define MSM_PARAM_VA_START 0x0e /* RO: start of valid GPU iova range */ +#define MSM_PARAM_VA_SIZE 0x0f /* RO: size of valid GPU iova range (bytes) */ /* For backwards compat. 
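Reading data back works the same way for the perf-config query documented above. A minimal sketch of the DRM_I915_QUERY_PERF_CONFIG_LIST flavour, where i915 fills n_configs and an array of __u64 identifiers in data[]; fd is a placeholder for an open i915 DRM fd and error handling is elided:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static void list_oa_configs(int fd)
{
        struct drm_i915_query_item item = {
                .query_id = DRM_I915_QUERY_PERF_CONFIG,
                .flags = DRM_I915_QUERY_PERF_CONFIG_LIST,
        };
        struct drm_i915_query query = {
                .num_items = 1,
                .items_ptr = (uintptr_t)&item,
        };
        struct drm_i915_query_perf_config *list;
        uint64_t id;

        ioctl(fd, DRM_IOCTL_I915_QUERY, &query);  /* pass 1: sets item.length */
        list = calloc(1, item.length);
        item.data_ptr = (uintptr_t)list;
        ioctl(fd, DRM_IOCTL_I915_QUERY, &query);  /* pass 2: writes the list */

        for (uint64_t i = 0; i < list->n_configs; i++) {
                /* data[] is not guaranteed to be 8-byte aligned, so copy out. */
                memcpy(&id, list->data + i * sizeof(id), sizeof(id));
                printf("OA config id %llu\n", (unsigned long long)id);
        }
        free(list);
}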
The original support for preemption was based on * a single ring per priority level so # of priority levels equals the # @@ -95,6 +99,8 @@ struct drm_msm_param { __u32 pipe; /* in, MSM_PIPE_x */ __u32 param; /* in, MSM_PARAM_x */ __u64 value; /* out (get_param) or in (set_param) */ + __u32 len; /* zero for non-pointer params */ + __u32 pad; /* must be zero */ }; /* @@ -131,6 +137,7 @@ struct drm_msm_gem_new { #define MSM_INFO_GET_IOVA 0x01 /* get iova, returned by value */ #define MSM_INFO_SET_NAME 0x02 /* set the debug name (by pointer) */ #define MSM_INFO_GET_NAME 0x03 /* get debug name, returned by pointer */ +#define MSM_INFO_SET_IOVA 0x04 /* set the iova, passed by value */ struct drm_msm_gem_info { __u32 handle; /* in */ diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h index 8277644c1144..26549c86a91f 100644 --- a/include/uapi/drm/vmwgfx_drm.h +++ b/include/uapi/drm/vmwgfx_drm.h @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA + * Copyright © 2009-2022 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -92,6 +92,12 @@ extern "C" { * * DRM_VMW_PARAM_SM5 * SM5 support is enabled. + * + * DRM_VMW_PARAM_GL43 + * SM5.1+GL4.3 support is enabled. + * + * DRM_VMW_PARAM_DEVICE_ID + * PCI ID of the underlying SVGA device. */ #define DRM_VMW_PARAM_NUM_STREAMS 0 @@ -111,6 +117,7 @@ extern "C" { #define DRM_VMW_PARAM_SM4_1 14 #define DRM_VMW_PARAM_SM5 15 #define DRM_VMW_PARAM_GL43 16 +#define DRM_VMW_PARAM_DEVICE_ID 17 /** * enum drm_vmw_handle_type - handle type for ref ioctls |
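The new VA-range params and MSM_INFO_SET_IOVA above are designed to work together: userspace can discover the valid iova window and then pin a buffer at an address it manages itself. A minimal sketch, with fd and bo_handle as placeholders and error handling elided; the address used here is only illustrative:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/msm_drm.h>

/* Query the start of the valid GPU iova window, then ask the kernel to map
 * the BO at a caller-chosen address inside that window. */
static int pin_bo_at_base(int fd, uint32_t bo_handle)
{
        struct drm_msm_param va_start = {
                .pipe  = MSM_PIPE_3D0,
                .param = MSM_PARAM_VA_START,
                /* .len stays zero: not a pointer param */
        };
        struct drm_msm_gem_info set_iova = {
                .handle = bo_handle,
                .info   = MSM_INFO_SET_IOVA,
        };

        ioctl(fd, DRM_IOCTL_MSM_GET_PARAM, &va_start);

        set_iova.value = va_start.value;  /* illustrative: start of the range */
        return ioctl(fd, DRM_IOCTL_MSM_GEM_INFO, &set_iova);
}

In practice the chosen address must lie inside [MSM_PARAM_VA_START, MSM_PARAM_VA_START + MSM_PARAM_VA_SIZE) and be set before the kernel assigns its own mapping for the buffer; this sketch simply reuses the start of the reported range.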