Diffstat (limited to 'drivers/gpu'): 42 files changed, 680 insertions, 392 deletions
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index db0f2a476c23..3f20f77afdd2 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -218,7 +218,7 @@ static struct cmn2asic_mapping sienna_cichlid_workload_map[PP_SMC_POWER_PROFILE_ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT), WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT), WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT), - WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_CUSTOM_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT), WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), }; diff --git a/drivers/gpu/drm/bridge/cadence/Kconfig b/drivers/gpu/drm/bridge/cadence/Kconfig index 511d67b16d14..ef8c230e0f62 100644 --- a/drivers/gpu/drm/bridge/cadence/Kconfig +++ b/drivers/gpu/drm/bridge/cadence/Kconfig @@ -13,7 +13,7 @@ config DRM_CDNS_MHDP8546 if DRM_CDNS_MHDP8546 config DRM_CDNS_MHDP8546_J721E - depends on ARCH_K3_J721E_SOC || COMPILE_TEST + depends on ARCH_K3 || COMPILE_TEST bool "J721E Cadence DPI/DP wrapper support" default y help diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 748df1cacd2b..0c79a9ba48bb 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -2327,12 +2327,6 @@ static enum drm_connector_status dw_hdmi_detect(struct dw_hdmi *hdmi) { enum drm_connector_status result; - mutex_lock(&hdmi->mutex); - hdmi->force = DRM_FORCE_UNSPECIFIED; - dw_hdmi_update_power(hdmi); - dw_hdmi_update_phy_mask(hdmi); - mutex_unlock(&hdmi->mutex); - result = hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data); mutex_lock(&hdmi->mutex); diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index 6417f374b923..951d5f708e92 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -1,7 +1,8 @@ # SPDX-License-Identifier: GPL-2.0-only config DRM_EXYNOS tristate "DRM Support for Samsung SoC Exynos Series" - depends on OF && DRM && (ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST) + depends on OF && DRM && COMMON_CLK + depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST depends on MMU select DRM_KMS_HELPER select VIDEOMODE_HELPERS diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c index 15eb3770d817..361e3a0c5ab6 100644 --- a/drivers/gpu/drm/gma500/psb_irq.c +++ b/drivers/gpu/drm/gma500/psb_irq.c @@ -347,6 +347,7 @@ int psb_irq_postinstall(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; unsigned long irqflags; + unsigned int i; spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); @@ -359,20 +360,12 @@ int psb_irq_postinstall(struct drm_device *dev) PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R); PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); - if (dev->vblank[0].enabled) - psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE); - else - psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE); - - if (dev->vblank[1].enabled) - psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE); - else - psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE); - - if (dev->vblank[2].enabled) - psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE); - else - 
psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE); + for (i = 0; i < dev->num_crtcs; ++i) { + if (dev->vblank[i].enabled) + psb_enable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE); + else + psb_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE); + } if (dev_priv->ops->hotplug_enable) dev_priv->ops->hotplug_enable(dev, true); @@ -385,6 +378,7 @@ void psb_irq_uninstall(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; unsigned long irqflags; + unsigned int i; spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); @@ -393,14 +387,10 @@ void psb_irq_uninstall(struct drm_device *dev) PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); - if (dev->vblank[0].enabled) - psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE); - - if (dev->vblank[1].enabled) - psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE); - - if (dev->vblank[2].enabled) - psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE); + for (i = 0; i < dev->num_crtcs; ++i) { + if (dev->vblank[i].enabled) + psb_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE); + } dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG | _PSB_IRQ_MSVDX_FLAG | diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index ba26545392bc..53a00cf3fa32 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -18748,7 +18748,7 @@ int intel_modeset_init(struct drm_i915_private *i915) */ ret = intel_initial_commit(&i915->drm); if (ret) - return ret; + drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret); intel_overlay_setup(i915); diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 3896d08c4177..2165398d2c7c 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -615,7 +615,7 @@ static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, return 0; } /* Also take into account max slice width */ - min_slice_count = min_t(u8, min_slice_count, + min_slice_count = max_t(u8, min_slice_count, DIV_ROUND_UP(mode_hdisplay, max_slice_width)); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 1904e6e5ea64..b07dc1156a0e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -3097,7 +3097,7 @@ static void retire_requests(struct intel_timeline *tl, struct i915_request *end) break; } -static void eb_request_add(struct i915_execbuffer *eb) +static int eb_request_add(struct i915_execbuffer *eb, int err) { struct i915_request *rq = eb->request; struct intel_timeline * const tl = i915_request_timeline(rq); @@ -3118,6 +3118,7 @@ static void eb_request_add(struct i915_execbuffer *eb) /* Serialise with context_close via the add_to_timeline */ i915_request_set_error_once(rq, -ENOENT); __i915_request_skip(rq); + err = -ENOENT; /* override any transient errors */ } __i915_request_queue(rq, &attr); @@ -3127,6 +3128,8 @@ static void eb_request_add(struct i915_execbuffer *eb) retire_requests(tl, prev); mutex_unlock(&tl->mutex); + + return err; } static const i915_user_extension_fn execbuf_extensions[] = { @@ -3332,7 +3335,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, err = eb_submit(&eb, batch); err_request: i915_request_get(eb.request); - eb_request_add(&eb); + err = eb_request_add(&eb, err); if (eb.fences) signal_fence_array(&eb); diff --git 
a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c index d8b206e53660..a24cc1ff08a0 100644 --- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c @@ -30,18 +30,21 @@ #include "i915_trace.h" #include "intel_breadcrumbs.h" #include "intel_context.h" +#include "intel_engine_pm.h" #include "intel_gt_pm.h" #include "intel_gt_requests.h" -static void irq_enable(struct intel_engine_cs *engine) +static bool irq_enable(struct intel_engine_cs *engine) { if (!engine->irq_enable) - return; + return false; /* Caller disables interrupts */ spin_lock(&engine->gt->irq_lock); engine->irq_enable(engine); spin_unlock(&engine->gt->irq_lock); + + return true; } static void irq_disable(struct intel_engine_cs *engine) @@ -57,12 +60,11 @@ static void irq_disable(struct intel_engine_cs *engine) static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b) { - lockdep_assert_held(&b->irq_lock); - - if (!b->irq_engine || b->irq_armed) - return; - - if (!intel_gt_pm_get_if_awake(b->irq_engine->gt)) + /* + * Since we are waiting on a request, the GPU should be busy + * and should have its own rpm reference. + */ + if (GEM_WARN_ON(!intel_gt_pm_get_if_awake(b->irq_engine->gt))) return; /* @@ -73,25 +75,24 @@ static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b) */ WRITE_ONCE(b->irq_armed, true); - /* - * Since we are waiting on a request, the GPU should be busy - * and should have its own rpm reference. This is tracked - * by i915->gt.awake, we can forgo holding our own wakref - * for the interrupt as before i915->gt.awake is released (when - * the driver is idle) we disarm the breadcrumbs. - */ - - if (!b->irq_enabled++) - irq_enable(b->irq_engine); + /* Requests may have completed before we could enable the interrupt. 
*/ + if (!b->irq_enabled++ && irq_enable(b->irq_engine)) + irq_work_queue(&b->irq_work); } -static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b) +static void intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b) { - lockdep_assert_held(&b->irq_lock); - - if (!b->irq_engine || !b->irq_armed) + if (!b->irq_engine) return; + spin_lock(&b->irq_lock); + if (!b->irq_armed) + __intel_breadcrumbs_arm_irq(b); + spin_unlock(&b->irq_lock); +} + +static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b) +{ GEM_BUG_ON(!b->irq_enabled); if (!--b->irq_enabled) irq_disable(b->irq_engine); @@ -100,20 +101,37 @@ static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b) intel_gt_pm_put_async(b->irq_engine->gt); } +static void intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b) +{ + spin_lock(&b->irq_lock); + if (b->irq_armed) + __intel_breadcrumbs_disarm_irq(b); + spin_unlock(&b->irq_lock); +} + static void add_signaling_context(struct intel_breadcrumbs *b, struct intel_context *ce) { - intel_context_get(ce); - list_add_tail(&ce->signal_link, &b->signalers); - if (list_is_first(&ce->signal_link, &b->signalers)) - __intel_breadcrumbs_arm_irq(b); + lockdep_assert_held(&ce->signal_lock); + + spin_lock(&b->signalers_lock); + list_add_rcu(&ce->signal_link, &b->signalers); + spin_unlock(&b->signalers_lock); } -static void remove_signaling_context(struct intel_breadcrumbs *b, +static bool remove_signaling_context(struct intel_breadcrumbs *b, struct intel_context *ce) { - list_del(&ce->signal_link); - intel_context_put(ce); + lockdep_assert_held(&ce->signal_lock); + + if (!list_empty(&ce->signals)) + return false; + + spin_lock(&b->signalers_lock); + list_del_rcu(&ce->signal_link); + spin_unlock(&b->signalers_lock); + + return true; } static inline bool __request_completed(const struct i915_request *rq) @@ -174,73 +192,103 @@ static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl) intel_engine_add_retire(b->irq_engine, tl); } -static bool __signal_request(struct i915_request *rq, struct list_head *signals) +static bool __signal_request(struct i915_request *rq) { - clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags); + GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)); if (!__dma_fence_signal(&rq->fence)) { i915_request_put(rq); return false; } - list_add_tail(&rq->signal_link, signals); return true; } +static struct llist_node * +slist_add(struct llist_node *node, struct llist_node *head) +{ + node->next = head; + return node; +} + static void signal_irq_work(struct irq_work *work) { struct intel_breadcrumbs *b = container_of(work, typeof(*b), irq_work); const ktime_t timestamp = ktime_get(); - struct intel_context *ce, *cn; - struct list_head *pos, *next; - LIST_HEAD(signal); - - spin_lock(&b->irq_lock); + struct llist_node *signal, *sn; + struct intel_context *ce; - if (list_empty(&b->signalers)) - __intel_breadcrumbs_disarm_irq(b); + signal = NULL; + if (unlikely(!llist_empty(&b->signaled_requests))) + signal = llist_del_all(&b->signaled_requests); - list_splice_init(&b->signaled_requests, &signal); + /* + * Keep the irq armed until the interrupt after all listeners are gone. + * + * Enabling/disabling the interrupt is rather costly, roughly a couple + * of hundred microseconds. If we are proactive and enable/disable + * the interrupt around every request that wants a breadcrumb, we + * quickly drown in the extra orders of magnitude of latency imposed + * on request submission. 
+ * + * So we try to be lazy, and keep the interrupts enabled until no + * more listeners appear within a breadcrumb interrupt interval (that + * is until a request completes that no one cares about). The + * observation is that listeners come in batches, and will often + * listen to a bunch of requests in succession. Though note on icl+, + * interrupts are always enabled due to concerns with rc6 being + * dysfunctional with per-engine interrupt masking. + * + * We also try to avoid raising too many interrupts, as they may + * be generated by userspace batches and it is unfortunately rather + * too easy to drown the CPU under a flood of GPU interrupts. Thus + * whenever no one appears to be listening, we turn off the interrupts. + * Fewer interrupts should conserve power -- at the very least, fewer + * interrupt draw less ire from other users of the system and tools + * like powertop. + */ + if (!signal && READ_ONCE(b->irq_armed) && list_empty(&b->signalers)) + intel_breadcrumbs_disarm_irq(b); - list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) { - GEM_BUG_ON(list_empty(&ce->signals)); + rcu_read_lock(); + list_for_each_entry_rcu(ce, &b->signalers, signal_link) { + struct i915_request *rq; - list_for_each_safe(pos, next, &ce->signals) { - struct i915_request *rq = - list_entry(pos, typeof(*rq), signal_link); + list_for_each_entry_rcu(rq, &ce->signals, signal_link) { + bool release; - GEM_BUG_ON(!check_signal_order(ce, rq)); if (!__request_completed(rq)) break; + if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, + &rq->fence.flags)) + break; + /* * Queue for execution after dropping the signaling * spinlock as the callback chain may end up adding * more signalers to the same context or engine. */ - __signal_request(rq, &signal); - } + spin_lock(&ce->signal_lock); + list_del_rcu(&rq->signal_link); + release = remove_signaling_context(b, ce); + spin_unlock(&ce->signal_lock); - /* - * We process the list deletion in bulk, only using a list_add - * (not list_move) above but keeping the status of - * rq->signal_link known with the I915_FENCE_FLAG_SIGNAL bit. 
- */ - if (!list_is_first(pos, &ce->signals)) { - /* Advance the list to the first incomplete request */ - __list_del_many(&ce->signals, pos); - if (&ce->signals == pos) { /* now empty */ + if (__signal_request(rq)) + /* We own signal_node now, xfer to local list */ + signal = slist_add(&rq->signal_node, signal); + + if (release) { add_retire(b, ce->timeline); - remove_signaling_context(b, ce); + intel_context_put(ce); } } } + rcu_read_unlock(); - spin_unlock(&b->irq_lock); - - list_for_each_safe(pos, next, &signal) { + llist_for_each_safe(signal, sn, signal) { struct i915_request *rq = - list_entry(pos, typeof(*rq), signal_link); + llist_entry(signal, typeof(*rq), signal_node); struct list_head cb_list; spin_lock(&rq->lock); @@ -251,6 +299,9 @@ static void signal_irq_work(struct irq_work *work) i915_request_put(rq); } + + if (!READ_ONCE(b->irq_armed) && !list_empty(&b->signalers)) + intel_breadcrumbs_arm_irq(b); } struct intel_breadcrumbs * @@ -262,14 +313,15 @@ intel_breadcrumbs_create(struct intel_engine_cs *irq_engine) if (!b) return NULL; - spin_lock_init(&b->irq_lock); + b->irq_engine = irq_engine; + + spin_lock_init(&b->signalers_lock); INIT_LIST_HEAD(&b->signalers); - INIT_LIST_HEAD(&b->signaled_requests); + init_llist_head(&b->signaled_requests); + spin_lock_init(&b->irq_lock); init_irq_work(&b->irq_work, signal_irq_work); - b->irq_engine = irq_engine; - return b; } @@ -292,27 +344,28 @@ void intel_breadcrumbs_reset(struct intel_breadcrumbs *b) void intel_breadcrumbs_park(struct intel_breadcrumbs *b) { - unsigned long flags; - - if (!READ_ONCE(b->irq_armed)) - return; - - spin_lock_irqsave(&b->irq_lock, flags); - __intel_breadcrumbs_disarm_irq(b); - spin_unlock_irqrestore(&b->irq_lock, flags); - - if (!list_empty(&b->signalers)) - irq_work_queue(&b->irq_work); + /* Kick the work once more to drain the signalers */ + irq_work_sync(&b->irq_work); + while (unlikely(READ_ONCE(b->irq_armed))) { + local_irq_disable(); + signal_irq_work(&b->irq_work); + local_irq_enable(); + cond_resched(); + } + GEM_BUG_ON(!list_empty(&b->signalers)); } void intel_breadcrumbs_free(struct intel_breadcrumbs *b) { + irq_work_sync(&b->irq_work); + GEM_BUG_ON(!list_empty(&b->signalers)); + GEM_BUG_ON(b->irq_armed); kfree(b); } -static void insert_breadcrumb(struct i915_request *rq, - struct intel_breadcrumbs *b) +static void insert_breadcrumb(struct i915_request *rq) { + struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs; struct intel_context *ce = rq->context; struct list_head *pos; @@ -327,12 +380,14 @@ static void insert_breadcrumb(struct i915_request *rq, * its signal completion. */ if (__request_completed(rq)) { - if (__signal_request(rq, &b->signaled_requests)) + if (__signal_request(rq) && + llist_add(&rq->signal_node, &b->signaled_requests)) irq_work_queue(&b->irq_work); return; } if (list_empty(&ce->signals)) { + intel_context_get(ce); add_signaling_context(b, ce); pos = &ce->signals; } else { @@ -358,18 +413,22 @@ static void insert_breadcrumb(struct i915_request *rq, break; } } - list_add(&rq->signal_link, pos); + list_add_rcu(&rq->signal_link, pos); GEM_BUG_ON(!check_signal_order(ce, rq)); + GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags)); set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags); - /* Check after attaching to irq, interrupt may have already fired. 
*/ - if (__request_completed(rq)) - irq_work_queue(&b->irq_work); + /* + * Defer enabling the interrupt to after HW submission and recheck + * the request as it may have completed and raised the interrupt as + * we were attaching it into the lists. + */ + irq_work_queue(&b->irq_work); } bool i915_request_enable_breadcrumb(struct i915_request *rq) { - struct intel_breadcrumbs *b; + struct intel_context *ce = rq->context; /* Serialises with i915_request_retire() using rq->lock */ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags)) @@ -384,67 +443,30 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq) if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) return true; - /* - * rq->engine is locked by rq->engine->active.lock. That however - * is not known until after rq->engine has been dereferenced and - * the lock acquired. Hence we acquire the lock and then validate - * that rq->engine still matches the lock we hold for it. - * - * Here, we are using the breadcrumb lock as a proxy for the - * rq->engine->active.lock, and we know that since the breadcrumb - * will be serialised within i915_request_submit/i915_request_unsubmit, - * the engine cannot change while active as long as we hold the - * breadcrumb lock on that engine. - * - * From the dma_fence_enable_signaling() path, we are outside of the - * request submit/unsubmit path, and so we must be more careful to - * acquire the right lock. - */ - b = READ_ONCE(rq->engine)->breadcrumbs; - spin_lock(&b->irq_lock); - while (unlikely(b != READ_ONCE(rq->engine)->breadcrumbs)) { - spin_unlock(&b->irq_lock); - b = READ_ONCE(rq->engine)->breadcrumbs; - spin_lock(&b->irq_lock); - } - - /* - * Now that we are finally serialised with request submit/unsubmit, - * [with b->irq_lock] and with i915_request_retire() [via checking - * SIGNALED with rq->lock] confirm the request is indeed active. If - * it is no longer active, the breadcrumb will be attached upon - * i915_request_submit(). - */ + spin_lock(&ce->signal_lock); if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) - insert_breadcrumb(rq, b); - - spin_unlock(&b->irq_lock); + insert_breadcrumb(rq); + spin_unlock(&ce->signal_lock); return true; } void i915_request_cancel_breadcrumb(struct i915_request *rq) { - struct intel_breadcrumbs *b = rq->engine->breadcrumbs; + struct intel_context *ce = rq->context; + bool release; - /* - * We must wait for b->irq_lock so that we know the interrupt handler - * has released its reference to the intel_context and has completed - * the DMA_FENCE_FLAG_SIGNALED_BIT/I915_FENCE_FLAG_SIGNAL dance (if - * required). 
- */ - spin_lock(&b->irq_lock); - if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) { - struct intel_context *ce = rq->context; + if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) + return; - list_del(&rq->signal_link); - if (list_empty(&ce->signals)) - remove_signaling_context(b, ce); + spin_lock(&ce->signal_lock); + list_del_rcu(&rq->signal_link); + release = remove_signaling_context(rq->engine->breadcrumbs, ce); + spin_unlock(&ce->signal_lock); + if (release) + intel_context_put(ce); - clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags); - i915_request_put(rq); - } - spin_unlock(&b->irq_lock); + i915_request_put(rq); } static void print_signals(struct intel_breadcrumbs *b, struct drm_printer *p) @@ -454,18 +476,17 @@ static void print_signals(struct intel_breadcrumbs *b, struct drm_printer *p) drm_printf(p, "Signals:\n"); - spin_lock_irq(&b->irq_lock); - list_for_each_entry(ce, &b->signalers, signal_link) { - list_for_each_entry(rq, &ce->signals, signal_link) { + rcu_read_lock(); + list_for_each_entry_rcu(ce, &b->signalers, signal_link) { + list_for_each_entry_rcu(rq, &ce->signals, signal_link) drm_printf(p, "\t[%llx:%llx%s] @ %dms\n", rq->fence.context, rq->fence.seqno, i915_request_completed(rq) ? "!" : i915_request_started(rq) ? "*" : "", jiffies_to_msecs(jiffies - rq->emitted_jiffies)); - } } - spin_unlock_irq(&b->irq_lock); + rcu_read_unlock(); } void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine, diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h index 8e53b9942695..a74bb3062bd8 100644 --- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h @@ -29,18 +29,16 @@ * the overhead of waking that client is much preferred. 
*/ struct intel_breadcrumbs { - spinlock_t irq_lock; /* protects the lists used in hardirq context */ - /* Not all breadcrumbs are attached to physical HW */ struct intel_engine_cs *irq_engine; + spinlock_t signalers_lock; /* protects the list of signalers */ struct list_head signalers; - struct list_head signaled_requests; + struct llist_head signaled_requests; + spinlock_t irq_lock; /* protects the interrupt from hardirq context */ struct irq_work irq_work; /* for use from inside irq_lock */ - unsigned int irq_enabled; - bool irq_armed; }; diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 92a3f25c4006..349e7fa1488d 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -25,11 +25,18 @@ static struct intel_context *intel_context_alloc(void) return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL); } -void intel_context_free(struct intel_context *ce) +static void rcu_context_free(struct rcu_head *rcu) { + struct intel_context *ce = container_of(rcu, typeof(*ce), rcu); + kmem_cache_free(global.slab_ce, ce); } +void intel_context_free(struct intel_context *ce) +{ + call_rcu(&ce->rcu, rcu_context_free); +} + struct intel_context * intel_context_create(struct intel_engine_cs *engine) { @@ -356,8 +363,7 @@ static int __intel_context_active(struct i915_active *active) } void -intel_context_init(struct intel_context *ce, - struct intel_engine_cs *engine) +intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine) { GEM_BUG_ON(!engine->cops); GEM_BUG_ON(!engine->gt->vm); @@ -373,7 +379,8 @@ intel_context_init(struct intel_context *ce, ce->vm = i915_vm_get(engine->gt->vm); - INIT_LIST_HEAD(&ce->signal_link); + /* NB ce->signal_link/lock is used under RCU */ + spin_lock_init(&ce->signal_lock); INIT_LIST_HEAD(&ce->signals); mutex_init(&ce->pin_mutex); diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 552cb57a2e8c..52fa9c132746 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -25,6 +25,7 @@ DECLARE_EWMA(runtime, 3, 8); struct i915_gem_context; struct i915_gem_ww_ctx; struct i915_vma; +struct intel_breadcrumbs; struct intel_context; struct intel_ring; @@ -44,7 +45,16 @@ struct intel_context_ops { }; struct intel_context { - struct kref ref; + /* + * Note: Some fields may be accessed under RCU. + * + * Unless otherwise noted a field can safely be assumed to be protected + * by strong reference counting. + */ + union { + struct kref ref; /* no kref_get_unless_zero()! */ + struct rcu_head rcu; + }; struct intel_engine_cs *engine; struct intel_engine_cs *inflight; @@ -54,8 +64,15 @@ struct intel_context { struct i915_address_space *vm; struct i915_gem_context __rcu *gem_context; - struct list_head signal_link; - struct list_head signals; + /* + * @signal_lock protects the list of requests that need signaling, + * @signals. While there are any requests that need signaling, + * we add the context to the breadcrumbs worker, and remove it + * upon completion/cancellation of the last request. 
+ */ + struct list_head signal_link; /* Accessed under RCU */ + struct list_head signals; /* Guarded by signal_lock */ + spinlock_t signal_lock; /* protects signals, the list of requests */ struct i915_vma *state; struct intel_ring *ring; diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 8a51c1c3a091..7614a3d24fca 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -182,6 +182,7 @@ struct virtual_engine { struct intel_engine_cs base; struct intel_context context; + struct rcu_work rcu; /* * We allow only a single request through the virtual engine at a time @@ -2837,6 +2838,9 @@ static void __execlists_hold(struct i915_request *rq) static bool execlists_hold(struct intel_engine_cs *engine, struct i915_request *rq) { + if (i915_request_on_hold(rq)) + return false; + spin_lock_irq(&engine->active.lock); if (i915_request_completed(rq)) { /* too late! */ @@ -3220,8 +3224,10 @@ static void execlists_submission_tasklet(unsigned long data) spin_unlock_irqrestore(&engine->active.lock, flags); /* Recheck after serialising with direct-submission */ - if (unlikely(timeout && preempt_timeout(engine))) + if (unlikely(timeout && preempt_timeout(engine))) { + cancel_timer(&engine->execlists.preempt); execlists_reset(engine, "preemption time out"); + } } } @@ -5480,44 +5486,90 @@ static struct list_head *virtual_queue(struct virtual_engine *ve) return &ve->base.execlists.default_priolist.requests[0]; } -static void virtual_context_destroy(struct kref *kref) +static void rcu_virtual_context_destroy(struct work_struct *wrk) { struct virtual_engine *ve = - container_of(kref, typeof(*ve), context.ref); + container_of(wrk, typeof(*ve), rcu.work); unsigned int n; - GEM_BUG_ON(!list_empty(virtual_queue(ve))); - GEM_BUG_ON(ve->request); GEM_BUG_ON(ve->context.inflight); + /* Preempt-to-busy may leave a stale request behind. */ + if (unlikely(ve->request)) { + struct i915_request *old; + + spin_lock_irq(&ve->base.active.lock); + + old = fetch_and_zero(&ve->request); + if (old) { + GEM_BUG_ON(!i915_request_completed(old)); + __i915_request_submit(old); + i915_request_put(old); + } + + spin_unlock_irq(&ve->base.active.lock); + } + + /* + * Flush the tasklet in case it is still running on another core. + * + * This needs to be done before we remove ourselves from the siblings' + * rbtrees as in the case it is running in parallel, it may reinsert + * the rb_node into a sibling. + */ + tasklet_kill(&ve->base.execlists.tasklet); + + /* Decouple ourselves from the siblings, no more access allowed. 
*/ for (n = 0; n < ve->num_siblings; n++) { struct intel_engine_cs *sibling = ve->siblings[n]; struct rb_node *node = &ve->nodes[sibling->id].rb; - unsigned long flags; if (RB_EMPTY_NODE(node)) continue; - spin_lock_irqsave(&sibling->active.lock, flags); + spin_lock_irq(&sibling->active.lock); /* Detachment is lazily performed in the execlists tasklet */ if (!RB_EMPTY_NODE(node)) rb_erase_cached(node, &sibling->execlists.virtual); - spin_unlock_irqrestore(&sibling->active.lock, flags); + spin_unlock_irq(&sibling->active.lock); } GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet)); + GEM_BUG_ON(!list_empty(virtual_queue(ve))); if (ve->context.state) __execlists_context_fini(&ve->context); intel_context_fini(&ve->context); + intel_breadcrumbs_free(ve->base.breadcrumbs); intel_engine_free_request_pool(&ve->base); kfree(ve->bonds); kfree(ve); } +static void virtual_context_destroy(struct kref *kref) +{ + struct virtual_engine *ve = + container_of(kref, typeof(*ve), context.ref); + + GEM_BUG_ON(!list_empty(&ve->context.signals)); + + /* + * When destroying the virtual engine, we have to be aware that + * it may still be in use from an hardirq/softirq context causing + * the resubmission of a completed request (background completion + * due to preempt-to-busy). Before we can free the engine, we need + * to flush the submission code and tasklets that are still potentially + * accessing the engine. Flushing the tasklets requires process context, + * and since we can guard the resubmit onto the engine with an RCU read + * lock, we can delegate the free of the engine to an RCU worker. + */ + INIT_RCU_WORK(&ve->rcu, rcu_virtual_context_destroy); + queue_rcu_work(system_wq, &ve->rcu); +} + static void virtual_engine_initial_hint(struct virtual_engine *ve) { int swp; diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c index 254873e1646e..ab6870242e18 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.c +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -59,8 +59,7 @@ struct drm_i915_mocs_table { #define _L3_CACHEABILITY(value) ((value) << 4) /* Helper defines */ -#define GEN9_NUM_MOCS_ENTRIES 62 /* 62 out of 64 - 63 & 64 are reserved. */ -#define GEN11_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */ +#define GEN9_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */ /* (e)LLC caching options */ /* @@ -131,7 +130,19 @@ static const struct drm_i915_mocs_entry skl_mocs_table[] = { GEN9_MOCS_ENTRIES, MOCS_ENTRY(I915_MOCS_CACHED, LE_3_WB | LE_TC_2_LLC_ELLC | LE_LRUM(3), - L3_3_WB) + L3_3_WB), + + /* + * mocs:63 + * - used by the L3 for all of its evictions. + * Thus it is expected to allow LLC cacheability to enable coherent + * flows to be maintained. + * - used to force L3 uncachable cycles. + * Thus it is expected to make the surface L3 uncacheable. 
+ */ + MOCS_ENTRY(63, + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), + L3_1_UC) }; /* NOTE: the LE_TGT_CACHE is not used on Broxton */ @@ -349,15 +360,15 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915, if (IS_DG1(i915)) { table->size = ARRAY_SIZE(dg1_mocs_table); table->table = dg1_mocs_table; - table->n_entries = GEN11_NUM_MOCS_ENTRIES; + table->n_entries = GEN9_NUM_MOCS_ENTRIES; } else if (INTEL_GEN(i915) >= 12) { table->size = ARRAY_SIZE(tgl_mocs_table); table->table = tgl_mocs_table; - table->n_entries = GEN11_NUM_MOCS_ENTRIES; + table->n_entries = GEN9_NUM_MOCS_ENTRIES; } else if (IS_GEN(i915, 11)) { table->size = ARRAY_SIZE(icl_mocs_table); table->table = icl_mocs_table; - table->n_entries = GEN11_NUM_MOCS_ENTRIES; + table->n_entries = GEN9_NUM_MOCS_ENTRIES; } else if (IS_GEN9_BC(i915) || IS_CANNONLAKE(i915)) { table->size = ARRAY_SIZE(skl_mocs_table); table->n_entries = GEN9_NUM_MOCS_ENTRIES; diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index 0d88f17799ff..b629eeb14002 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -897,6 +897,10 @@ void intel_rps_park(struct intel_rps *rps) adj = -2; rps->last_adj = adj; rps->cur_freq = max_t(int, rps->cur_freq + adj, rps->min_freq); + if (rps->cur_freq < rps->efficient_freq) { + rps->cur_freq = rps->efficient_freq; + rps->last_adj = 0; + } GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq); } diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index fed9503a7c4e..adc9a8ea410a 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -131,8 +131,10 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa) return; } - if (wal->list) + if (wal->list) { memcpy(list, wal->list, sizeof(*wa) * wal->count); + kfree(wal->list); + } wal->list = list; } diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c index f011ea42487e..5982b62f913d 100644 --- a/drivers/gpu/drm/i915/gt/shmem_utils.c +++ b/drivers/gpu/drm/i915/gt/shmem_utils.c @@ -73,7 +73,7 @@ void *shmem_pin_map(struct file *file) mapping_set_unevictable(file->f_mapping); return vaddr; err_page: - while (--i >= 0) + while (i--) put_page(pages[i]); kvfree(pages); return NULL; @@ -103,10 +103,13 @@ static int __shmem_rw(struct file *file, loff_t off, return PTR_ERR(page); vaddr = kmap(page); - if (write) + if (write) { memcpy(vaddr + offset_in_page(off), ptr, this); - else + set_page_dirty(page); + } else { memcpy(ptr, vaddr + offset_in_page(off), this); + } + mark_page_accessed(page); kunmap(page); put_page(page); diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 5c1fcac260d3..a15f87539657 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -164,7 +164,7 @@ static unsigned char virtual_dp_monitor_edid[GVT_EDID_NUM][EDID_SIZE] = { /* let the virtual display supports DP1.2 */ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = { - 0x12, 0x014, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 + 0x12, 0x014, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; static void emulate_monitor_status_change(struct intel_vgpu *vgpu) diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index d830b6c65284..60f1a386dd06 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -829,8 +829,10 @@ static int 
intel_vgpu_open(struct mdev_device *mdev) /* Take a module reference as mdev core doesn't take * a reference for vendor driver. */ - if (!try_module_get(THIS_MODULE)) + if (!try_module_get(THIS_MODULE)) { + ret = -ENODEV; goto undo_group; + } ret = kvmgt_guest_init(mdev); if (ret) diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 1c8e63f84134..e49944fde333 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -439,7 +439,8 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, if (IS_BROADWELL(dev_priv)) ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B); - else + /* FixMe: Re-enable APL/BXT once vfio_edid enabled */ + else if (!IS_BROXTON(dev_priv)) ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D); if (ret) goto out_clean_sched_policy; diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 16b721080195..620b6fab2c5c 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -176,7 +176,9 @@ struct i915_request { struct intel_context *context; struct intel_ring *ring; struct intel_timeline __rcu *timeline; + struct list_head signal_link; + struct llist_node signal_node; /* * The rcu epoch of when this request was allocated. Used to judiciously diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c index 23a6132c5f4e..412e21604a05 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -211,8 +211,8 @@ static int igt_gem_ww_ctx(void *arg) return PTR_ERR(obj); obj2 = i915_gem_object_create_internal(i915, PAGE_SIZE); - if (IS_ERR(obj)) { - err = PTR_ERR(obj); + if (IS_ERR(obj2)) { + err = PTR_ERR(obj2); goto put1; } diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 64bbb8288249..e424a6d1a68c 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -2293,8 +2293,10 @@ static int perf_request_latency(void *arg) struct intel_context *ce; ce = intel_context_create(engine); - if (IS_ERR(ce)) + if (IS_ERR(ce)) { + err = PTR_ERR(ce); goto out; + } err = intel_context_pin(ce); if (err) { @@ -2467,8 +2469,10 @@ static int perf_series_engines(void *arg) struct intel_context *ce; ce = intel_context_create(engine); - if (IS_ERR(ce)) + if (IS_ERR(ce)) { + err = PTR_ERR(ce); goto out; + } err = intel_context_pin(ce); if (err) { diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c index 870626e04ec0..9d25181bd7e2 100644 --- a/drivers/gpu/drm/mcde/mcde_drv.c +++ b/drivers/gpu/drm/mcde/mcde_drv.c @@ -413,7 +413,13 @@ static int mcde_probe(struct platform_device *pdev) match); if (ret) { dev_err(dev, "failed to add component master\n"); - goto clk_disable; + /* + * The EPOD regulator is already disabled at this point so some + * special errorpath code is needed + */ + clk_disable_unprepare(mcde->mcde_clk); + regulator_disable(mcde->vana); + return ret; } return 0; diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 4a188a942c38..65fd99c528af 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -444,7 +444,10 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi) u32 horizontal_sync_active_byte; u32 horizontal_backporch_byte; u32 horizontal_frontporch_byte; + u32 horizontal_front_back_byte; + u32 data_phy_cycles_byte; u32 
dsi_tmp_buf_bpp, data_phy_cycles; + u32 delta; struct mtk_phy_timing *timing = &dsi->phy_timing; struct videomode *vm = &dsi->vm; @@ -466,50 +469,30 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi) horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10); if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) - horizontal_backporch_byte = vm->hback_porch * dsi_tmp_buf_bpp; + horizontal_backporch_byte = vm->hback_porch * dsi_tmp_buf_bpp - 10; else horizontal_backporch_byte = (vm->hback_porch + vm->hsync_len) * - dsi_tmp_buf_bpp; + dsi_tmp_buf_bpp - 10; data_phy_cycles = timing->lpx + timing->da_hs_prepare + - timing->da_hs_zero + timing->da_hs_exit; - - if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) { - if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp > - data_phy_cycles * dsi->lanes + 18) { - horizontal_frontporch_byte = - vm->hfront_porch * dsi_tmp_buf_bpp - - (data_phy_cycles * dsi->lanes + 18) * - vm->hfront_porch / - (vm->hfront_porch + vm->hback_porch); - - horizontal_backporch_byte = - horizontal_backporch_byte - - (data_phy_cycles * dsi->lanes + 18) * - vm->hback_porch / - (vm->hfront_porch + vm->hback_porch); - } else { - DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n"); - horizontal_frontporch_byte = vm->hfront_porch * - dsi_tmp_buf_bpp; - } + timing->da_hs_zero + timing->da_hs_exit + 3; + + delta = dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST ? 18 : 12; + + horizontal_frontporch_byte = vm->hfront_porch * dsi_tmp_buf_bpp; + horizontal_front_back_byte = horizontal_frontporch_byte + horizontal_backporch_byte; + data_phy_cycles_byte = data_phy_cycles * dsi->lanes + delta; + + if (horizontal_front_back_byte > data_phy_cycles_byte) { + horizontal_frontporch_byte -= data_phy_cycles_byte * + horizontal_frontporch_byte / + horizontal_front_back_byte; + + horizontal_backporch_byte -= data_phy_cycles_byte * + horizontal_backporch_byte / + horizontal_front_back_byte; } else { - if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp > - data_phy_cycles * dsi->lanes + 12) { - horizontal_frontporch_byte = - vm->hfront_porch * dsi_tmp_buf_bpp - - (data_phy_cycles * dsi->lanes + 12) * - vm->hfront_porch / - (vm->hfront_porch + vm->hback_porch); - horizontal_backporch_byte = horizontal_backporch_byte - - (data_phy_cycles * dsi->lanes + 12) * - vm->hback_porch / - (vm->hfront_porch + vm->hback_porch); - } else { - DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n"); - horizontal_frontporch_byte = vm->hfront_porch * - dsi_tmp_buf_bpp; - } + DRM_WARN("HFP + HBP less than d-phy, FPS will under 60Hz\n"); } writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC); diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c index a6b3d6e84c52..3e1bb0aefb87 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_kms.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c @@ -22,6 +22,7 @@ #include <drm/drm_fb_cma_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_gem_cma_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_plane.h> #include <drm/drm_plane_helper.h> #include <drm/drm_vblank.h> @@ -494,11 +495,13 @@ static bool mxsfb_format_mod_supported(struct drm_plane *plane, } static const struct drm_plane_helper_funcs mxsfb_plane_primary_helper_funcs = { + .prepare_fb = drm_gem_fb_prepare_fb, .atomic_check = mxsfb_plane_atomic_check, .atomic_update = mxsfb_plane_primary_atomic_update, }; static const struct drm_plane_helper_funcs mxsfb_plane_overlay_helper_funcs = { + .prepare_fb = drm_gem_fb_prepare_fb, .atomic_check = 
mxsfb_plane_atomic_check, .atomic_update = mxsfb_plane_overlay_atomic_update, }; diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index b111fe24a06b..36d6b6093d16 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -455,7 +455,7 @@ nv50_outp_get_old_connector(struct nouveau_encoder *outp, * DAC *****************************************************************************/ static void -nv50_dac_disable(struct drm_encoder *encoder) +nv50_dac_disable(struct drm_encoder *encoder, struct drm_atomic_state *state) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nv50_core *core = nv50_disp(encoder->dev)->core; @@ -467,7 +467,7 @@ nv50_dac_disable(struct drm_encoder *encoder) } static void -nv50_dac_enable(struct drm_encoder *encoder) +nv50_dac_enable(struct drm_encoder *encoder, struct drm_atomic_state *state) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); @@ -525,8 +525,8 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) static const struct drm_encoder_helper_funcs nv50_dac_help = { .atomic_check = nv50_outp_atomic_check, - .enable = nv50_dac_enable, - .disable = nv50_dac_disable, + .atomic_enable = nv50_dac_enable, + .atomic_disable = nv50_dac_disable, .detect = nv50_dac_detect }; @@ -1055,7 +1055,7 @@ nv50_dp_bpc_to_depth(unsigned int bpc) } static void -nv50_msto_enable(struct drm_encoder *encoder) +nv50_msto_enable(struct drm_encoder *encoder, struct drm_atomic_state *state) { struct nv50_head *head = nv50_head(encoder->crtc); struct nv50_head_atom *armh = nv50_head_atom(head->base.base.state); @@ -1101,7 +1101,7 @@ nv50_msto_enable(struct drm_encoder *encoder) } static void -nv50_msto_disable(struct drm_encoder *encoder) +nv50_msto_disable(struct drm_encoder *encoder, struct drm_atomic_state *state) { struct nv50_msto *msto = nv50_msto(encoder); struct nv50_mstc *mstc = msto->mstc; @@ -1118,8 +1118,8 @@ nv50_msto_disable(struct drm_encoder *encoder) static const struct drm_encoder_helper_funcs nv50_msto_help = { - .disable = nv50_msto_disable, - .enable = nv50_msto_enable, + .atomic_disable = nv50_msto_disable, + .atomic_enable = nv50_msto_enable, .atomic_check = nv50_msto_atomic_check, }; @@ -1645,8 +1645,7 @@ nv50_sor_disable(struct drm_encoder *encoder, } static void -nv50_sor_enable(struct drm_encoder *encoder, - struct drm_atomic_state *state) +nv50_sor_enable(struct drm_encoder *encoder, struct drm_atomic_state *state) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); @@ -1873,7 +1872,7 @@ nv50_pior_atomic_check(struct drm_encoder *encoder, } static void -nv50_pior_disable(struct drm_encoder *encoder) +nv50_pior_disable(struct drm_encoder *encoder, struct drm_atomic_state *state) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nv50_core *core = nv50_disp(encoder->dev)->core; @@ -1885,7 +1884,7 @@ nv50_pior_disable(struct drm_encoder *encoder) } static void -nv50_pior_enable(struct drm_encoder *encoder) +nv50_pior_enable(struct drm_encoder *encoder, struct drm_atomic_state *state) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); @@ -1921,14 +1920,14 @@ nv50_pior_enable(struct drm_encoder *encoder) } core->func->pior->ctrl(core, nv_encoder->or, ctrl, asyh); - nv_encoder->crtc = 
encoder->crtc; + nv_encoder->crtc = &nv_crtc->base; } static const struct drm_encoder_helper_funcs nv50_pior_help = { .atomic_check = nv50_pior_atomic_check, - .enable = nv50_pior_enable, - .disable = nv50_pior_disable, + .atomic_enable = nv50_pior_enable, + .atomic_disable = nv50_pior_disable, }; static void diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 62a4fdffd0ae..1386b0fc1640 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1131,8 +1131,8 @@ retry: } reg->bus.offset = handle; - ret = 0; } + ret = 0; break; default: ret = -EINVAL; diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 6f21f36719fc..8b4b3688c7ae 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -532,11 +532,13 @@ static void nouveau_connector_set_edid(struct nouveau_connector *nv_connector, struct edid *edid) { - struct edid *old_edid = nv_connector->edid; + if (nv_connector->edid != edid) { + struct edid *old_edid = nv_connector->edid; - drm_connector_update_edid_property(&nv_connector->base, edid); - kfree(old_edid); - nv_connector->edid = edid; + drm_connector_update_edid_property(&nv_connector->base, edid); + kfree(old_edid); + nv_connector->edid = edid; + } } static enum drm_connector_status @@ -669,8 +671,10 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force) /* Try retrieving EDID via DDC */ if (!drm->vbios.fp_no_ddc) { status = nouveau_connector_detect(connector, force); - if (status == connector_status_connected) + if (status == connector_status_connected) { + edid = nv_connector->edid; goto out; + } } /* On some laptops (Sony, i'm looking at you) there appears to diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 787d05eefd9c..c88cbb85f101 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -572,8 +572,10 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan, NV_PRINTK(err, cli, "validating bo list\n"); validate_fini(op, chan, NULL, NULL); return ret; + } else if (ret > 0) { + *apply_relocs = true; } - *apply_relocs = ret; + return 0; } @@ -676,7 +678,6 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli, nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data); } - u_free(reloc); return ret; } @@ -886,9 +887,10 @@ out: break; } } - u_free(reloc); } out_prevalid: + if (!IS_ERR(reloc)) + u_free(reloc); u_free(bo); u_free(push); diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index 033fd30074b0..282e4c837cd9 100644 --- a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -195,8 +195,7 @@ static void sdi_bridge_mode_set(struct drm_bridge *bridge, sdi->pixelclock = adjusted_mode->clock * 1000; } -static void sdi_bridge_enable(struct drm_bridge *bridge, - struct drm_bridge_state *bridge_state) +static void sdi_bridge_enable(struct drm_bridge *bridge) { struct sdi_device *sdi = drm_bridge_to_sdi(bridge); struct dispc_clock_info dispc_cinfo; @@ -259,8 +258,7 @@ err_get_dispc: regulator_disable(sdi->vdds_sdi_reg); } -static void sdi_bridge_disable(struct drm_bridge *bridge, - struct drm_bridge_state *bridge_state) +static void sdi_bridge_disable(struct drm_bridge *bridge) { struct sdi_device *sdi = drm_bridge_to_sdi(bridge); @@ -278,8 +276,8 @@ static const struct drm_bridge_funcs sdi_bridge_funcs = { .mode_valid = 
sdi_bridge_mode_valid, .mode_fixup = sdi_bridge_mode_fixup, .mode_set = sdi_bridge_mode_set, - .atomic_enable = sdi_bridge_enable, - .atomic_disable = sdi_bridge_disable, + .enable = sdi_bridge_enable, + .disable = sdi_bridge_disable, }; static void sdi_bridge_init(struct sdi_device *sdi) diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c index e95fdfb16b6c..ba0b3ead150f 100644 --- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c @@ -629,7 +629,7 @@ static int acx565akm_probe(struct spi_device *spi) lcd->spi = spi; mutex_init(&lcd->mutex); - lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW); + lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(lcd->reset_gpio)) { dev_err(&spi->dev, "failed to get reset GPIO\n"); return PTR_ERR(lcd->reset_gpio); diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c index 8658ef82d937..654bc52d9ff3 100644 --- a/drivers/gpu/drm/rockchip/rockchip_lvds.c +++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c @@ -544,7 +544,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master, struct device_node *port, *endpoint; int ret = 0, child_count = 0; const char *name; - u32 endpoint_id; + u32 endpoint_id = 0; lvds->drm_dev = drm_dev; port = of_graph_get_port_by_id(dev->of_node, 1); diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c index 77497b45f9a2..55960cbb1019 100644 --- a/drivers/gpu/drm/sun4i/sun4i_backend.c +++ b/drivers/gpu/drm/sun4i/sun4i_backend.c @@ -814,9 +814,15 @@ static int sun4i_backend_bind(struct device *dev, struct device *master, * * XXX(hch): this has no business in a driver and needs to move * to the device tree. + * + * If we have two subsequent calls to dma_direct_set_offset + * returns -EINVAL. Unfortunately, this happens when we have two + * backends in the system, and will result in the driver + * reporting an error while it has been setup properly before. + * Ignore EINVAL, but it should really be removed eventually. 
*/ ret = dma_direct_set_offset(drm->dev, PHYS_OFFSET, 0, SZ_4G); - if (ret) + if (ret && ret != -EINVAL) return ret; } diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c index d4c08043dd81..92add2cef2e7 100644 --- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c +++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c @@ -208,6 +208,7 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master, phy_node = of_parse_phandle(dev->of_node, "phys", 0); if (!phy_node) { dev_err(dev, "Can't found PHY phandle\n"); + ret = -EINVAL; goto err_disable_clk_tmds; } diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 19ffb0626505..e45c8414e2a3 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -90,7 +90,7 @@ static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp) if (!fpriv) return -ENOMEM; - idr_init(&fpriv->contexts); + idr_init_base(&fpriv->contexts, 1); mutex_init(&fpriv->lock); filp->driver_priv = fpriv; diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c index 5a4fd0dbf4cf..47d26b5d9945 100644 --- a/drivers/gpu/drm/tegra/output.c +++ b/drivers/gpu/drm/tegra/output.c @@ -129,7 +129,6 @@ int tegra_output_probe(struct tegra_output *output) if (!output->ddc) { err = -EPROBE_DEFER; - of_node_put(ddc); return err; } } diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index e88a17c2937f..cc2aa2308a51 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -397,7 +397,6 @@ struct tegra_sor; struct tegra_sor_ops { const char *name; int (*probe)(struct tegra_sor *sor); - int (*remove)(struct tegra_sor *sor); void (*audio_enable)(struct tegra_sor *sor); void (*audio_disable)(struct tegra_sor *sor); }; @@ -2942,6 +2941,24 @@ static const struct drm_encoder_helper_funcs tegra_sor_dp_helpers = { .atomic_check = tegra_sor_encoder_atomic_check, }; +static void tegra_sor_disable_regulator(void *data) +{ + struct regulator *reg = data; + + regulator_disable(reg); +} + +static int tegra_sor_enable_regulator(struct tegra_sor *sor, struct regulator *reg) +{ + int err; + + err = regulator_enable(reg); + if (err) + return err; + + return devm_add_action_or_reset(sor->dev, tegra_sor_disable_regulator, reg); +} + static int tegra_sor_hdmi_probe(struct tegra_sor *sor) { int err; @@ -2953,7 +2970,7 @@ static int tegra_sor_hdmi_probe(struct tegra_sor *sor) return PTR_ERR(sor->avdd_io_supply); } - err = regulator_enable(sor->avdd_io_supply); + err = tegra_sor_enable_regulator(sor, sor->avdd_io_supply); if (err < 0) { dev_err(sor->dev, "failed to enable AVDD I/O supply: %d\n", err); @@ -2967,7 +2984,7 @@ static int tegra_sor_hdmi_probe(struct tegra_sor *sor) return PTR_ERR(sor->vdd_pll_supply); } - err = regulator_enable(sor->vdd_pll_supply); + err = tegra_sor_enable_regulator(sor, sor->vdd_pll_supply); if (err < 0) { dev_err(sor->dev, "failed to enable VDD PLL supply: %d\n", err); @@ -2981,7 +2998,7 @@ static int tegra_sor_hdmi_probe(struct tegra_sor *sor) return PTR_ERR(sor->hdmi_supply); } - err = regulator_enable(sor->hdmi_supply); + err = tegra_sor_enable_regulator(sor, sor->hdmi_supply); if (err < 0) { dev_err(sor->dev, "failed to enable HDMI supply: %d\n", err); return err; @@ -2992,19 +3009,9 @@ static int tegra_sor_hdmi_probe(struct tegra_sor *sor) return 0; } -static int tegra_sor_hdmi_remove(struct tegra_sor *sor) -{ - regulator_disable(sor->hdmi_supply); - regulator_disable(sor->vdd_pll_supply); - regulator_disable(sor->avdd_io_supply); - - return 
0; -} - static const struct tegra_sor_ops tegra_sor_hdmi_ops = { .name = "HDMI", .probe = tegra_sor_hdmi_probe, - .remove = tegra_sor_hdmi_remove, .audio_enable = tegra_sor_hdmi_audio_enable, .audio_disable = tegra_sor_hdmi_audio_disable, }; @@ -3017,7 +3024,7 @@ static int tegra_sor_dp_probe(struct tegra_sor *sor) if (IS_ERR(sor->avdd_io_supply)) return PTR_ERR(sor->avdd_io_supply); - err = regulator_enable(sor->avdd_io_supply); + err = tegra_sor_enable_regulator(sor, sor->avdd_io_supply); if (err < 0) return err; @@ -3025,25 +3032,16 @@ static int tegra_sor_dp_probe(struct tegra_sor *sor) if (IS_ERR(sor->vdd_pll_supply)) return PTR_ERR(sor->vdd_pll_supply); - err = regulator_enable(sor->vdd_pll_supply); + err = tegra_sor_enable_regulator(sor, sor->vdd_pll_supply); if (err < 0) return err; return 0; } -static int tegra_sor_dp_remove(struct tegra_sor *sor) -{ - regulator_disable(sor->vdd_pll_supply); - regulator_disable(sor->avdd_io_supply); - - return 0; -} - static const struct tegra_sor_ops tegra_sor_dp_ops = { .name = "DP", .probe = tegra_sor_dp_probe, - .remove = tegra_sor_dp_remove, }; static int tegra_sor_init(struct host1x_client *client) @@ -3145,6 +3143,7 @@ static int tegra_sor_init(struct host1x_client *client) if (err < 0) { dev_err(sor->dev, "failed to deassert SOR reset: %d\n", err); + clk_disable_unprepare(sor->clk); return err; } @@ -3152,12 +3151,17 @@ static int tegra_sor_init(struct host1x_client *client) } err = clk_prepare_enable(sor->clk_safe); - if (err < 0) + if (err < 0) { + clk_disable_unprepare(sor->clk); return err; + } err = clk_prepare_enable(sor->clk_dp); - if (err < 0) + if (err < 0) { + clk_disable_unprepare(sor->clk_safe); + clk_disable_unprepare(sor->clk); return err; + } return 0; } @@ -3764,17 +3768,16 @@ static int tegra_sor_probe(struct platform_device *pdev) return err; err = tegra_output_probe(&sor->output); - if (err < 0) { - dev_err(&pdev->dev, "failed to probe output: %d\n", err); - return err; - } + if (err < 0) + return dev_err_probe(&pdev->dev, err, + "failed to probe output\n"); if (sor->ops && sor->ops->probe) { err = sor->ops->probe(sor); if (err < 0) { dev_err(&pdev->dev, "failed to probe %s: %d\n", sor->ops->name, err); - goto output; + goto remove; } } @@ -3955,9 +3958,6 @@ unregister: rpm_disable: pm_runtime_disable(&pdev->dev); remove: - if (sor->ops && sor->ops->remove) - sor->ops->remove(sor); -output: tegra_output_remove(&sor->output); return err; } @@ -3976,12 +3976,6 @@ static int tegra_sor_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); - if (sor->ops && sor->ops->remove) { - err = sor->ops->remove(sor); - if (err < 0) - dev_err(&pdev->dev, "failed to remove SOR: %d\n", err); - } - tegra_output_remove(&sor->output); return 0; diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index 137c382256d5..43a1af110b3e 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -219,6 +219,7 @@ struct vc4_dev { struct drm_modeset_lock ctm_state_lock; struct drm_private_obj ctm_manager; + struct drm_private_obj hvs_channels; struct drm_private_obj load_tracker; /* List of vc4_debugfs_info_entry for adding to debugfs once @@ -531,6 +532,9 @@ struct vc4_crtc_state { unsigned int top; unsigned int bottom; } margins; + + /* Transitional state below, only valid during atomic commits */ + bool update_muxing; }; #define VC4_HVS_CHANNEL_DISABLED ((unsigned int)-1) diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index b80eb9d3d9d5..555106220578 100644 --- 
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -759,12 +759,54 @@ static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder)
 {
 }
 
+#define WIFI_2_4GHz_CH1_MIN_FREQ	2400000000ULL
+#define WIFI_2_4GHz_CH1_MAX_FREQ	2422000000ULL
+
+static int vc4_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
+					 struct drm_crtc_state *crtc_state,
+					 struct drm_connector_state *conn_state)
+{
+	struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+	struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+	unsigned long long pixel_rate = mode->clock * 1000;
+	unsigned long long tmds_rate;
+
+	if (vc4_hdmi->variant->unsupported_odd_h_timings &&
+	    ((mode->hdisplay % 2) || (mode->hsync_start % 2) ||
+	     (mode->hsync_end % 2) || (mode->htotal % 2)))
+		return -EINVAL;
+
+	/*
+	 * The 1440p@60 pixel rate is in the same range as the first
+	 * WiFi channel (between 2.4GHz and 2.422GHz with 22MHz
+	 * bandwidth). Slightly lower the frequency to bring it out of
+	 * the WiFi range.
+	 */
+	tmds_rate = pixel_rate * 10;
+	if (vc4_hdmi->disable_wifi_frequencies &&
+	    (tmds_rate >= WIFI_2_4GHz_CH1_MIN_FREQ &&
+	     tmds_rate <= WIFI_2_4GHz_CH1_MAX_FREQ)) {
+		mode->clock = 238560;
+		pixel_rate = mode->clock * 1000;
+	}
+
+	if (pixel_rate > vc4_hdmi->variant->max_pixel_clock)
+		return -EINVAL;
+
+	return 0;
+}
+
 static enum drm_mode_status
 vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder,
 			    const struct drm_display_mode *mode)
 {
 	struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
 
+	if (vc4_hdmi->variant->unsupported_odd_h_timings &&
+	    ((mode->hdisplay % 2) || (mode->hsync_start % 2) ||
+	     (mode->hsync_end % 2) || (mode->htotal % 2)))
+		return MODE_H_ILLEGAL;
+
 	if ((mode->clock * 1000) > vc4_hdmi->variant->max_pixel_clock)
 		return MODE_CLOCK_HIGH;
 
@@ -772,6 +814,7 @@ vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder,
 }
 
 static const struct drm_encoder_helper_funcs vc4_hdmi_encoder_helper_funcs = {
+	.atomic_check = vc4_hdmi_encoder_atomic_check,
 	.mode_valid = vc4_hdmi_encoder_mode_valid,
 	.disable = vc4_hdmi_encoder_disable,
 	.enable = vc4_hdmi_encoder_enable,
@@ -1693,6 +1736,9 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
 		vc4_hdmi->hpd_active_low = hpd_gpio_flags & OF_GPIO_ACTIVE_LOW;
 	}
 
+	vc4_hdmi->disable_wifi_frequencies =
+		of_property_read_bool(dev->of_node, "wifi-2.4ghz-coexistence");
+
 	pm_runtime_enable(dev);
 
 	drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
@@ -1816,6 +1862,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = {
 		PHY_LANE_2,
 		PHY_LANE_CK,
 	},
+	.unsupported_odd_h_timings = true,
 
 	.init_resources = vc5_hdmi_init_resources,
 	.csc_setup = vc5_hdmi_csc_setup,
@@ -1841,6 +1888,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = {
 		PHY_LANE_CK,
 		PHY_LANE_2,
 	},
+	.unsupported_odd_h_timings = true,
 
 	.init_resources = vc5_hdmi_init_resources,
 	.csc_setup = vc5_hdmi_csc_setup,
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index 63c6f8bddf1d..0526a9cf608a 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -62,6 +62,9 @@ struct vc4_hdmi_variant {
 	 */
 	enum vc4_hdmi_phy_channel phy_lane_mapping[4];
 
+	/* The BCM2711 cannot deal with odd horizontal pixel timings */
+	bool unsupported_odd_h_timings;
+
 	/* Callback to get the resources (memory region, interrupts,
 	 * clocks, etc) for that variant.
 	 */
@@ -139,6 +142,14 @@ struct vc4_hdmi {
 	int hpd_gpio;
 	bool hpd_active_low;
 
+	/*
+	 * On some systems (like the RPi4), some modes are in the same
+	 * frequency range as the WiFi channels (1440p@60Hz, for
+	 * example). Should we take evasive action because the system
+	 * has a WiFi adapter?
+	 */
+	bool disable_wifi_frequencies;
+
 	struct cec_adapter *cec_adap;
 	struct cec_msg cec_rx_msg;
 	bool cec_tx_ok;
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 2b951cae04ad..ba310c0ab5f6 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -24,6 +24,8 @@
 #include "vc4_drv.h"
 #include "vc4_regs.h"
 
+#define HVS_NUM_CHANNELS 3
+
 struct vc4_ctm_state {
 	struct drm_private_state base;
 	struct drm_color_ctm *ctm;
@@ -35,6 +37,17 @@ static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
 	return container_of(priv, struct vc4_ctm_state, base);
 }
 
+struct vc4_hvs_state {
+	struct drm_private_state base;
+	unsigned int unassigned_channels;
+};
+
+static struct vc4_hvs_state *
+to_vc4_hvs_state(struct drm_private_state *priv)
+{
+	return container_of(priv, struct vc4_hvs_state, base);
+}
+
 struct vc4_load_tracker_state {
 	struct drm_private_state base;
 	u64 hvs_load;
@@ -113,7 +126,7 @@ static int vc4_ctm_obj_init(struct vc4_dev *vc4)
 	drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager,
 				    &ctm_state->base,
 				    &vc4_ctm_state_funcs);
 
-	return drmm_add_action(&vc4->base, vc4_ctm_obj_fini, NULL);
+	return drmm_add_action_or_reset(&vc4->base, vc4_ctm_obj_fini, NULL);
 }
 
 /* Converts a DRM S31.32 value to the HW S0.9 format. */
@@ -169,6 +182,19 @@ vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
 		  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
 }
 
+static struct vc4_hvs_state *
+vc4_hvs_get_global_state(struct drm_atomic_state *state)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
+	struct drm_private_state *priv_state;
+
+	priv_state = drm_atomic_get_private_obj_state(state, &vc4->hvs_channels);
+	if (IS_ERR(priv_state))
+		return ERR_CAST(priv_state);
+
+	return to_vc4_hvs_state(priv_state);
+}
+
 static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
 				     struct drm_atomic_state *state)
 {
@@ -213,10 +239,7 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
 {
 	struct drm_crtc_state *crtc_state;
 	struct drm_crtc *crtc;
-	unsigned char dsp2_mux = 0;
-	unsigned char dsp3_mux = 3;
-	unsigned char dsp4_mux = 3;
-	unsigned char dsp5_mux = 3;
+	unsigned char mux;
 	unsigned int i;
 	u32 reg;
 
@@ -224,50 +247,59 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
 		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
 		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
 
-		if (!crtc_state->active)
+		if (!vc4_state->update_muxing)
 			continue;
 
 		switch (vc4_crtc->data->hvs_output) {
 		case 2:
-			dsp2_mux = (vc4_state->assigned_channel == 2) ? 0 : 1;
+			mux = (vc4_state->assigned_channel == 2) ? 0 : 1;
+			reg = HVS_READ(SCALER_DISPECTRL);
+			HVS_WRITE(SCALER_DISPECTRL,
+				  (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
+				  VC4_SET_FIELD(mux, SCALER_DISPECTRL_DSP2_MUX));
 			break;
 		case 3:
-			dsp3_mux = vc4_state->assigned_channel;
+			if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
+				mux = 3;
+			else
+				mux = vc4_state->assigned_channel;
+
+			reg = HVS_READ(SCALER_DISPCTRL);
+			HVS_WRITE(SCALER_DISPCTRL,
+				  (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
+				  VC4_SET_FIELD(mux, SCALER_DISPCTRL_DSP3_MUX));
 			break;
 		case 4:
-			dsp4_mux = vc4_state->assigned_channel;
+			if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
+				mux = 3;
+			else
+				mux = vc4_state->assigned_channel;
+
+			reg = HVS_READ(SCALER_DISPEOLN);
+			HVS_WRITE(SCALER_DISPEOLN,
+				  (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
+				  VC4_SET_FIELD(mux, SCALER_DISPEOLN_DSP4_MUX));
+
 			break;
 		case 5:
-			dsp5_mux = vc4_state->assigned_channel;
+			if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
+				mux = 3;
+			else
+				mux = vc4_state->assigned_channel;
+
+			reg = HVS_READ(SCALER_DISPDITHER);
+			HVS_WRITE(SCALER_DISPDITHER,
+				  (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
+				  VC4_SET_FIELD(mux, SCALER_DISPDITHER_DSP5_MUX));
 			break;
 		default:
 			break;
 		}
 	}
-
-	reg = HVS_READ(SCALER_DISPECTRL);
-	HVS_WRITE(SCALER_DISPECTRL,
-		  (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
-		  VC4_SET_FIELD(dsp2_mux, SCALER_DISPECTRL_DSP2_MUX));
-
-	reg = HVS_READ(SCALER_DISPCTRL);
-	HVS_WRITE(SCALER_DISPCTRL,
-		  (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
-		  VC4_SET_FIELD(dsp3_mux, SCALER_DISPCTRL_DSP3_MUX));
-
-	reg = HVS_READ(SCALER_DISPEOLN);
-	HVS_WRITE(SCALER_DISPEOLN,
-		  (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
-		  VC4_SET_FIELD(dsp4_mux, SCALER_DISPEOLN_DSP4_MUX));
-
-	reg = HVS_READ(SCALER_DISPDITHER);
-	HVS_WRITE(SCALER_DISPDITHER,
-		  (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
-		  VC4_SET_FIELD(dsp5_mux, SCALER_DISPDITHER_DSP5_MUX));
 }
 
 static void
@@ -657,53 +689,123 @@ static int vc4_load_tracker_obj_init(struct vc4_dev *vc4)
 				    &load_state->base,
 				    &vc4_load_tracker_state_funcs);
 
-	return drmm_add_action(&vc4->base, vc4_load_tracker_obj_fini, NULL);
+	return drmm_add_action_or_reset(&vc4->base, vc4_load_tracker_obj_fini, NULL);
 }
 
-#define NUM_OUTPUTS 6
-#define NUM_CHANNELS 3
+static struct drm_private_state *
+vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
+{
+	struct vc4_hvs_state *old_state = to_vc4_hvs_state(obj->state);
+	struct vc4_hvs_state *state;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return NULL;
 
-static int
-vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
+	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+
+	state->unassigned_channels = old_state->unassigned_channels;
+
+	return &state->base;
+}
+
+static void vc4_hvs_channels_destroy_state(struct drm_private_obj *obj,
+					   struct drm_private_state *state)
 {
-	unsigned long unassigned_channels = GENMASK(NUM_CHANNELS - 1, 0);
-	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
-	struct drm_crtc *crtc;
-	int i, ret;
+	struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
 
-	/*
-	 * Since the HVS FIFOs are shared across all the pixelvalves and
-	 * the TXP (and thus all the CRTCs), we need to pull the current
-	 * state of all the enabled CRTCs so that an update to a single
-	 * CRTC still keeps the previous FIFOs enabled and assigned to
-	 * the same CRTCs, instead of evaluating only the CRTC being
-	 * modified.
-	 */
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		struct drm_crtc_state *crtc_state;
+	kfree(hvs_state);
+}
 
-		if (!crtc->state->enable)
-			continue;
+static const struct drm_private_state_funcs vc4_hvs_state_funcs = {
+	.atomic_duplicate_state = vc4_hvs_channels_duplicate_state,
+	.atomic_destroy_state = vc4_hvs_channels_destroy_state,
+};
 
-		crtc_state = drm_atomic_get_crtc_state(state, crtc);
-		if (IS_ERR(crtc_state))
-			return PTR_ERR(crtc_state);
-	}
+static void vc4_hvs_channels_obj_fini(struct drm_device *dev, void *unused)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+	drm_atomic_private_obj_fini(&vc4->hvs_channels);
+}
+
+static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4)
+{
+	struct vc4_hvs_state *state;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+
+	state->unassigned_channels = GENMASK(HVS_NUM_CHANNELS - 1, 0);
+	drm_atomic_private_obj_init(&vc4->base, &vc4->hvs_channels,
+				    &state->base,
+				    &vc4_hvs_state_funcs);
+
+	return drmm_add_action_or_reset(&vc4->base, vc4_hvs_channels_obj_fini, NULL);
+}
+
+/*
+ * The BCM2711 HVS has up to 7 outputs connected to the pixelvalves and
+ * the TXP (and therefore all the CRTCs found on that platform).
+ *
+ * The naive (and our initial) implementation would just iterate over
+ * all the active CRTCs, try to find a suitable FIFO, and then remove it
+ * from the pool of available FIFOs. However, there are a few corner
+ * cases that need to be considered:
+ *
+ * - When running in a dual-display setup (so with two CRTCs involved),
+ *   we can update the state of a single CRTC (for example by changing
+ *   its mode using xrandr under X11) without affecting the other. In
+ *   this case, the other CRTC wouldn't be in the state at all, so we
+ *   need to consider all the running CRTCs in the DRM device to assign
+ *   a FIFO, not just the one in the state.
+ *
+ * - To fix the above, we can't use drm_atomic_get_crtc_state on all
+ *   enabled CRTCs to pull their CRTC state into the global state, since
+ *   a page flip would then start waiting for their vblank to complete.
+ *   Since we don't have a guarantee that they are actually active, that
+ *   vblank might never happen, and shouldn't even be considered if we
+ *   want to do a page flip on a single CRTC. That can be tested by
+ *   doing a modetest -v first on HDMI1 and then on HDMI0.
+ *
+ * - Since the pixelvalve needs to be disabled and re-enabled whenever
+ *   its FIFO changes, we should keep the FIFO assigned for as long as
+ *   the CRTC is enabled, only considering it free again once that CRTC
+ *   has been disabled. This can be tested by booting X11 on a single
+ *   display, and changing the resolution down and then back up.
+ */
+static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
+				      struct drm_atomic_state *state)
+{
+	struct vc4_hvs_state *hvs_new_state;
+	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+	struct drm_crtc *crtc;
+	unsigned int i;
+
+	hvs_new_state = vc4_hvs_get_global_state(state);
+	if (!hvs_new_state)
+		return -EINVAL;
 
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+		struct vc4_crtc_state *old_vc4_crtc_state =
+			to_vc4_crtc_state(old_crtc_state);
 		struct vc4_crtc_state *new_vc4_crtc_state =
 			to_vc4_crtc_state(new_crtc_state);
 		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
 		unsigned int matching_channels;
 
-		if (old_crtc_state->enable && !new_crtc_state->enable)
-			new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
-
-		if (!new_crtc_state->enable)
+		/* Nothing to do here, let's skip it */
+		if (old_crtc_state->enable == new_crtc_state->enable)
 			continue;
 
-		if (new_vc4_crtc_state->assigned_channel != VC4_HVS_CHANNEL_DISABLED) {
-			unassigned_channels &= ~BIT(new_vc4_crtc_state->assigned_channel);
+		/* Muxing will need to be modified, mark it as such */
+		new_vc4_crtc_state->update_muxing = true;
+
+		/* If we're disabling our CRTC, we put back our channel */
+		if (!new_crtc_state->enable) {
+			hvs_new_state->unassigned_channels |= BIT(old_vc4_crtc_state->assigned_channel);
+			new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
 			continue;
 		}
 
@@ -731,17 +833,29 @@ vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
 		 * the future, we will need to have something smarter,
 		 * but it works so far.
 		 */
-		matching_channels = unassigned_channels & vc4_crtc->data->hvs_available_channels;
+		matching_channels = hvs_new_state->unassigned_channels & vc4_crtc->data->hvs_available_channels;
 		if (matching_channels) {
 			unsigned int channel = ffs(matching_channels) - 1;
 
 			new_vc4_crtc_state->assigned_channel = channel;
-			unassigned_channels &= ~BIT(channel);
+			hvs_new_state->unassigned_channels &= ~BIT(channel);
 		} else {
 			return -EINVAL;
 		}
 	}
 
+	return 0;
+}
+
+static int
+vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
+{
+	int ret;
+
+	ret = vc4_pv_muxing_atomic_check(dev, state);
+	if (ret)
+		return ret;
+
 	ret = vc4_ctm_atomic_check(dev, state);
 	if (ret < 0)
 		return ret;
@@ -808,6 +922,10 @@ int vc4_kms_load(struct drm_device *dev)
 	if (ret)
 		return ret;
 
+	ret = vc4_hvs_channels_obj_init(vc4);
+	if (ret)
+		return ret;
+
 	drm_mode_config_reset(dev);
 
 	drm_kms_helper_poll_init(dev);