| author | Linus Torvalds <torvalds@linux-foundation.org> | 2011-03-24 17:27:20 -0700 |
| --- | --- | --- |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-03-24 17:27:20 -0700 |
| commit | e285c1746accb80620e511f9c72e9893beeedc0e (patch) | |
| tree | 9d9a67a306ff19d6ef6c3982c1bc00cecc69bd70 | |
| parent | 6c5103890057b1bb781b26b7aae38d33e4c517d8 (diff) | |
| parent | 51eab416c9b4b3ed16553d405ec4a5f67daa34cf (diff) | |
Merge branch 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
drm/vblank: update recently added vbl interface to be more future proof.
drm radeon: Return -EINVAL on wrong pm sysfs access
drm/radeon/kms: fix hardcoded EDID handling
Revert "drm/i915: Don't save/restore hardware status page address register"
drm/i915: Avoid unmapping pages from a NULL address space
drm/i915: Fix use after free within tracepoint
drm/i915: Restore missing command flush before interrupt on BLT ring
drm/i915: Disable pagefaults along execbuffer relocation fast path
drm/i915: Fix computation of pitch for dumb bo creator
drm/i915: report correct render clock frequencies on SNB
drm/i915/dp: Correct the order of deletion for ghost eDP devices
drm/i915: Fix tiling corruption from pipelined fencing
drm/i915: Re-enable self-refresh
drm/i915: Prevent racy removal of request from client list
drm/i915: skip redundant operations whilst enabling pipes and planes
drm/i915: Remove surplus POSTING_READs before wait_for_vblank
drm/radeon/kms: prefer legacy pll algo for tv-out
drm: check for modesetting on modeset ioctls
drm/kernel: vblank wait on crtc > 1
drm: Fix use-after-free in drm_gem_vm_close()
| mode | file | lines changed |
| --- | --- | --- |
| -rw-r--r-- | drivers/gpu/drm/drm_crtc.c | 51 |
| -rw-r--r-- | drivers/gpu/drm/drm_gem.c | 5 |
| -rw-r--r-- | drivers/gpu/drm/drm_ioctl.c | 3 |
| -rw-r--r-- | drivers/gpu/drm/drm_irq.c | 15 |
| -rw-r--r-- | drivers/gpu/drm/i915/i915_debugfs.c | 8 |
| -rw-r--r-- | drivers/gpu/drm/i915/i915_gem.c | 70 |
| -rw-r--r-- | drivers/gpu/drm/i915/i915_gem_execbuffer.c | 21 |
| -rw-r--r-- | drivers/gpu/drm/i915/intel_display.c | 39 |
| -rw-r--r-- | drivers/gpu/drm/i915/intel_dp.c | 4 |
| -rw-r--r-- | drivers/gpu/drm/i915/intel_ringbuffer.c | 109 |
| -rw-r--r-- | drivers/gpu/drm/radeon/atombios_crtc.c | 6 |
| -rw-r--r-- | drivers/gpu/drm/radeon/radeon_combios.c | 21 |
| -rw-r--r-- | drivers/gpu/drm/radeon/radeon_connectors.c | 30 |
| -rw-r--r-- | drivers/gpu/drm/radeon/radeon_mode.h | 1 |
| -rw-r--r-- | drivers/gpu/drm/radeon/radeon_pm.c | 8 |
| -rw-r--r-- | include/drm/drm.h | 4 |

16 files changed, 250 insertions(+), 145 deletions(-)
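
The user-visible interface change in this merge is the new DRM_CAP_VBLANK_HIGH_CRTC capability and the _DRM_VBLANK_HIGH_CRTC_MASK bits added to include/drm/drm.h and wired up in drm_ioctl.c and drm_irq.c in the diff below. Purely as an illustration — not part of the merge — here is a minimal sketch of how a client might encode a CRTC index above 1 into the vblank-wait type field; the helper name and error handling are ours, and it assumes the drm.h uapi header from this tree (or libdrm) plus an already-open DRM fd:

```c
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>        /* struct drm_get_cap, union drm_wait_vblank, ioctl numbers */

/* Wait for the next vblank on an arbitrary CRTC index, using the new
 * high-crtc bits when the kernel advertises DRM_CAP_VBLANK_HIGH_CRTC.
 * Illustrative sketch only; "fd" is an already-open DRM device node. */
static int wait_vblank_on_crtc(int fd, unsigned int crtc)
{
    struct drm_get_cap cap = { .capability = DRM_CAP_VBLANK_HIGH_CRTC };
    union drm_wait_vblank vbl;
    unsigned int type = _DRM_VBLANK_RELATIVE;

    if (crtc > 1) {
        /* CRTCs beyond the second must go into the new bit field (bits 1-5). */
        if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap) || !cap.value)
            return -1;      /* kernel predates the high-crtc interface */
        type |= (crtc << _DRM_VBLANK_HIGH_CRTC_SHIFT) &
                _DRM_VBLANK_HIGH_CRTC_MASK;
    } else if (crtc == 1) {
        /* The pre-existing encoding for the second CRTC still works. */
        type |= _DRM_VBLANK_SECONDARY;
    }

    memset(&vbl, 0, sizeof(vbl));
    vbl.request.type = type;
    vbl.request.sequence = 1;   /* one vblank from now */

    return ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);
}
```

This mirrors the fallback logic in drm_wait_vblank(): if the high-crtc bits are zero, the kernel still honours _DRM_VBLANK_SECONDARY, so older clients keep working.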
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 4c95b5fd9df3..799e1490cf24 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -1073,6 +1073,9 @@ int drm_mode_getresources(struct drm_device *dev, void *data, uint32_t __user *encoder_id; struct drm_mode_group *mode_group; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); /* @@ -1244,6 +1247,9 @@ int drm_mode_getcrtc(struct drm_device *dev, struct drm_mode_object *obj; int ret = 0; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, crtc_resp->crtc_id, @@ -1312,6 +1318,9 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, uint64_t __user *prop_values; uint32_t __user *encoder_ptr; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo)); DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id); @@ -1431,6 +1440,9 @@ int drm_mode_getencoder(struct drm_device *dev, void *data, struct drm_encoder *encoder; int ret = 0; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, enc_resp->encoder_id, DRM_MODE_OBJECT_ENCODER); @@ -1486,6 +1498,9 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, int ret = 0; int i; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, crtc_req->crtc_id, DRM_MODE_OBJECT_CRTC); @@ -1603,6 +1618,9 @@ int drm_mode_cursor_ioctl(struct drm_device *dev, struct drm_crtc *crtc; int ret = 0; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + if (!req->flags) { DRM_ERROR("no operation set\n"); return -EINVAL; @@ -1667,6 +1685,9 @@ int drm_mode_addfb(struct drm_device *dev, struct drm_framebuffer *fb; int ret = 0; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + if ((config->min_width > r->width) || (r->width > config->max_width)) { DRM_ERROR("mode new framebuffer width not within limits\n"); return -EINVAL; @@ -1724,6 +1745,9 @@ int drm_mode_rmfb(struct drm_device *dev, int ret = 0; int found = 0; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB); /* TODO check that we realy get a framebuffer back. 
*/ @@ -1780,6 +1804,9 @@ int drm_mode_getfb(struct drm_device *dev, struct drm_framebuffer *fb; int ret = 0; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB); if (!obj) { @@ -1813,6 +1840,9 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev, int num_clips; int ret = 0; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB); if (!obj) { @@ -1996,6 +2026,9 @@ int drm_mode_attachmode_ioctl(struct drm_device *dev, struct drm_mode_modeinfo *umode = &mode_cmd->mode; int ret = 0; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR); @@ -2042,6 +2075,9 @@ int drm_mode_detachmode_ioctl(struct drm_device *dev, struct drm_mode_modeinfo *umode = &mode_cmd->mode; int ret = 0; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR); @@ -2211,6 +2247,9 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev, uint64_t __user *values_ptr; uint32_t __user *blob_length_ptr; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY); if (!obj) { @@ -2333,6 +2372,9 @@ int drm_mode_getblob_ioctl(struct drm_device *dev, int ret = 0; void *blob_ptr; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB); if (!obj) { @@ -2393,6 +2435,9 @@ int drm_mode_connector_property_set_ioctl(struct drm_device *dev, int ret = -EINVAL; int i; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR); @@ -2509,6 +2554,9 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev, int size; int ret = 0; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) { @@ -2560,6 +2608,9 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev, int size; int ret = 0; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) { diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 57ce27c9a747..74e4ff578017 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -499,11 +499,12 @@ EXPORT_SYMBOL(drm_gem_vm_open); void drm_gem_vm_close(struct vm_area_struct *vma) { struct drm_gem_object *obj = vma->vm_private_data; + struct drm_device *dev = obj->dev; - mutex_lock(&obj->dev->struct_mutex); + mutex_lock(&dev->struct_mutex); drm_vm_close_locked(vma); drm_gem_object_unreference(obj); - mutex_unlock(&obj->dev->struct_mutex); + mutex_unlock(&dev->struct_mutex); } EXPORT_SYMBOL(drm_gem_vm_close); diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 7f6912a16761..904d7e9c8e47 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ 
b/drivers/gpu/drm/drm_ioctl.c @@ -280,6 +280,9 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv) if (dev->driver->dumb_create) req->value = 1; break; + case DRM_CAP_VBLANK_HIGH_CRTC: + req->value = 1; + break; default: return -EINVAL; } diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index a34ef97d3c81..741457bd1c46 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -1125,7 +1125,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, { union drm_wait_vblank *vblwait = data; int ret = 0; - unsigned int flags, seq, crtc; + unsigned int flags, seq, crtc, high_crtc; if ((!drm_dev_to_irq(dev)) || (!dev->irq_enabled)) return -EINVAL; @@ -1134,16 +1134,21 @@ int drm_wait_vblank(struct drm_device *dev, void *data, return -EINVAL; if (vblwait->request.type & - ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) { + ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK | + _DRM_VBLANK_HIGH_CRTC_MASK)) { DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n", vblwait->request.type, - (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)); + (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK | + _DRM_VBLANK_HIGH_CRTC_MASK)); return -EINVAL; } flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK; - crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0; - + high_crtc = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK); + if (high_crtc) + crtc = high_crtc >> _DRM_VBLANK_HIGH_CRTC_SHIFT; + else + crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0; if (crtc >= dev->num_crtcs) return -EINVAL; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 09e0327fc6ce..87c8e29465e3 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -892,7 +892,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) seq_printf(m, "Render p-state limit: %d\n", rp_state_limits & 0xff); seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >> - GEN6_CAGF_SHIFT) * 100); + GEN6_CAGF_SHIFT) * 50); seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & GEN6_CURICONT_MASK); seq_printf(m, "RP CUR UP: %dus\n", rpcurup & @@ -908,15 +908,15 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) max_freq = (rp_state_cap & 0xff0000) >> 16; seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", - max_freq * 100); + max_freq * 50); max_freq = (rp_state_cap & 0xff00) >> 8; seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", - max_freq * 100); + max_freq * 50); max_freq = rp_state_cap & 0xff; seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", - max_freq * 100); + max_freq * 50); __gen6_gt_force_wake_put(dev_priv); } else { diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c4c2855d002d..7ce3f353af33 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -224,7 +224,7 @@ i915_gem_dumb_create(struct drm_file *file, struct drm_mode_create_dumb *args) { /* have to work out size/pitch and return them */ - args->pitch = ALIGN(args->width & ((args->bpp + 1) / 8), 64); + args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64); args->size = args->pitch * args->height; return i915_gem_create(file, dev, args->size, &args->handle); @@ -1356,9 +1356,10 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj) if (!obj->fault_mappable) return; - unmap_mapping_range(obj->base.dev->dev_mapping, - (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT, - obj->base.size, 1); + if (obj->base.dev->dev_mapping) + 
unmap_mapping_range(obj->base.dev->dev_mapping, + (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT, + obj->base.size, 1); obj->fault_mappable = false; } @@ -1796,8 +1797,10 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) return; spin_lock(&file_priv->mm.lock); - list_del(&request->client_list); - request->file_priv = NULL; + if (request->file_priv) { + list_del(&request->client_list); + request->file_priv = NULL; + } spin_unlock(&file_priv->mm.lock); } @@ -2217,13 +2220,18 @@ i915_gem_flush_ring(struct intel_ring_buffer *ring, { int ret; + if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0) + return 0; + trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains); ret = ring->flush(ring, invalidate_domains, flush_domains); if (ret) return ret; - i915_gem_process_flushing_list(ring, flush_domains); + if (flush_domains & I915_GEM_GPU_DOMAINS) + i915_gem_process_flushing_list(ring, flush_domains); + return 0; } @@ -2579,8 +2587,23 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj, reg = &dev_priv->fence_regs[obj->fence_reg]; list_move_tail(®->lru_list, &dev_priv->mm.fence_list); - if (!obj->fenced_gpu_access && !obj->last_fenced_seqno) - pipelined = NULL; + if (obj->tiling_changed) { + ret = i915_gem_object_flush_fence(obj, pipelined); + if (ret) + return ret; + + if (!obj->fenced_gpu_access && !obj->last_fenced_seqno) + pipelined = NULL; + + if (pipelined) { + reg->setup_seqno = + i915_gem_next_request_seqno(pipelined); + obj->last_fenced_seqno = reg->setup_seqno; + obj->last_fenced_ring = pipelined; + } + + goto update; + } if (!pipelined) { if (reg->setup_seqno) { @@ -2599,31 +2622,6 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj, ret = i915_gem_object_flush_fence(obj, pipelined); if (ret) return ret; - } else if (obj->tiling_changed) { - if (obj->fenced_gpu_access) { - if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { - ret = i915_gem_flush_ring(obj->ring, - 0, obj->base.write_domain); - if (ret) - return ret; - } - - obj->fenced_gpu_access = false; - } - } - - if (!obj->fenced_gpu_access && !obj->last_fenced_seqno) - pipelined = NULL; - BUG_ON(!pipelined && reg->setup_seqno); - - if (obj->tiling_changed) { - if (pipelined) { - reg->setup_seqno = - i915_gem_next_request_seqno(pipelined); - obj->last_fenced_seqno = reg->setup_seqno; - obj->last_fenced_ring = pipelined; - } - goto update; } return 0; @@ -3606,6 +3604,8 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj) return; } + trace_i915_gem_object_destroy(obj); + if (obj->base.map_list.map) i915_gem_free_mmap_offset(obj); @@ -3615,8 +3615,6 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj) kfree(obj->page_cpu_valid); kfree(obj->bit_17); kfree(obj); - - trace_i915_gem_object_destroy(obj); } void i915_gem_free_object(struct drm_gem_object *gem_obj) diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 7ff7f933ddf1..20a4cc5b818f 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -367,6 +367,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, uint32_t __iomem *reloc_entry; void __iomem *reloc_page; + /* We can't wait for rendering with pagefaults disabled */ + if (obj->active && in_atomic()) + return -EFAULT; + ret = i915_gem_object_set_to_gtt_domain(obj, 1); if (ret) return ret; @@ -440,15 +444,24 @@ i915_gem_execbuffer_relocate(struct drm_device *dev, struct list_head *objects) { 
struct drm_i915_gem_object *obj; - int ret; - + int ret = 0; + + /* This is the fast path and we cannot handle a pagefault whilst + * holding the struct mutex lest the user pass in the relocations + * contained within a mmaped bo. For in such a case we, the page + * fault handler would call i915_gem_fault() and we would try to + * acquire the struct mutex again. Obviously this is bad and so + * lockdep complains vehemently. + */ + pagefault_disable(); list_for_each_entry(obj, objects, exec_list) { ret = i915_gem_execbuffer_relocate_object(obj, eb); if (ret) - return ret; + break; } + pagefault_enable(); - return 0; + return ret; } static int diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3106c0dc8389..432fc04c6bff 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1516,9 +1516,10 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, reg = PIPECONF(pipe); val = I915_READ(reg); - val |= PIPECONF_ENABLE; - I915_WRITE(reg, val); - POSTING_READ(reg); + if (val & PIPECONF_ENABLE) + return; + + I915_WRITE(reg, val | PIPECONF_ENABLE); intel_wait_for_vblank(dev_priv->dev, pipe); } @@ -1552,9 +1553,10 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv, reg = PIPECONF(pipe); val = I915_READ(reg); - val &= ~PIPECONF_ENABLE; - I915_WRITE(reg, val); - POSTING_READ(reg); + if ((val & PIPECONF_ENABLE) == 0) + return; + + I915_WRITE(reg, val & ~PIPECONF_ENABLE); intel_wait_for_pipe_off(dev_priv->dev, pipe); } @@ -1577,9 +1579,10 @@ static void intel_enable_plane(struct drm_i915_private *dev_priv, reg = DSPCNTR(plane); val = I915_READ(reg); - val |= DISPLAY_PLANE_ENABLE; - I915_WRITE(reg, val); - POSTING_READ(reg); + if (val & DISPLAY_PLANE_ENABLE) + return; + + I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE); intel_wait_for_vblank(dev_priv->dev, pipe); } @@ -1610,9 +1613,10 @@ static void intel_disable_plane(struct drm_i915_private *dev_priv, reg = DSPCNTR(plane); val = I915_READ(reg); - val &= ~DISPLAY_PLANE_ENABLE; - I915_WRITE(reg, val); - POSTING_READ(reg); + if ((val & DISPLAY_PLANE_ENABLE) == 0) + return; + + I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE); intel_flush_display_plane(dev_priv, plane); intel_wait_for_vblank(dev_priv->dev, pipe); } @@ -1769,7 +1773,6 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) return; I915_WRITE(DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN); - POSTING_READ(DPFC_CONTROL); intel_wait_for_vblank(dev, intel_crtc->pipe); } @@ -1861,7 +1864,6 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) return; I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN); - POSTING_READ(ILK_DPFC_CONTROL); intel_wait_for_vblank(dev, intel_crtc->pipe); } @@ -3883,10 +3885,7 @@ static bool g4x_compute_srwm(struct drm_device *dev, display, cursor); } -static inline bool single_plane_enabled(unsigned int mask) -{ - return mask && (mask & -mask) == 0; -} +#define single_plane_enabled(mask) is_power_of_2(mask) static void g4x_update_wm(struct drm_device *dev) { @@ -5777,7 +5776,6 @@ static void intel_increase_pllclock(struct drm_crtc *crtc) dpll &= ~DISPLAY_RATE_SELECT_FPA1; I915_WRITE(dpll_reg, dpll); - POSTING_READ(dpll_reg); intel_wait_for_vblank(dev, pipe); dpll = I915_READ(dpll_reg); @@ -5821,7 +5819,6 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc) dpll |= DISPLAY_RATE_SELECT_FPA1; I915_WRITE(dpll_reg, dpll); - dpll = I915_READ(dpll_reg); intel_wait_for_vblank(dev, pipe); dpll = 
I915_READ(dpll_reg); if (!(dpll & DISPLAY_RATE_SELECT_FPA1)) @@ -6933,7 +6930,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); if (pcu_mbox & (1<<31)) { /* OC supported */ max_freq = pcu_mbox & 0xff; - DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 100); + DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50); } /* In units of 100MHz */ diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index d29e33f815d7..0daefca5cbb8 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1957,9 +1957,9 @@ intel_dp_init(struct drm_device *dev, int output_reg) DP_NO_AUX_HANDSHAKE_LINK_TRAINING; } else { /* if this fails, presume the device is a ghost */ - DRM_ERROR("failed to retrieve link info\n"); - intel_dp_destroy(&intel_connector->base); + DRM_INFO("failed to retrieve link info, disabling eDP\n"); intel_dp_encoder_destroy(&intel_dp->base.base); + intel_dp_destroy(&intel_connector->base); return; } } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 789c47801ba8..e9e6f71418a4 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -65,62 +65,60 @@ render_ring_flush(struct intel_ring_buffer *ring, u32 cmd; int ret; - if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) { + /* + * read/write caches: + * + * I915_GEM_DOMAIN_RENDER is always invalidated, but is + * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is + * also flushed at 2d versus 3d pipeline switches. + * + * read-only caches: + * + * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if + * MI_READ_FLUSH is set, and is always flushed on 965. + * + * I915_GEM_DOMAIN_COMMAND may not exist? + * + * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is + * invalidated when MI_EXE_FLUSH is set. + * + * I915_GEM_DOMAIN_VERTEX, which exists on 965, is + * invalidated with every MI_FLUSH. + * + * TLBs: + * + * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND + * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and + * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER + * are flushed at any MI_FLUSH. + */ + + cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; + if ((invalidate_domains|flush_domains) & + I915_GEM_DOMAIN_RENDER) + cmd &= ~MI_NO_WRITE_FLUSH; + if (INTEL_INFO(dev)->gen < 4) { /* - * read/write caches: - * - * I915_GEM_DOMAIN_RENDER is always invalidated, but is - * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is - * also flushed at 2d versus 3d pipeline switches. - * - * read-only caches: - * - * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if - * MI_READ_FLUSH is set, and is always flushed on 965. - * - * I915_GEM_DOMAIN_COMMAND may not exist? - * - * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is - * invalidated when MI_EXE_FLUSH is set. - * - * I915_GEM_DOMAIN_VERTEX, which exists on 965, is - * invalidated with every MI_FLUSH. - * - * TLBs: - * - * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND - * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and - * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER - * are flushed at any MI_FLUSH. + * On the 965, the sampler cache always gets flushed + * and this bit is reserved. 
*/ + if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) + cmd |= MI_READ_FLUSH; + } + if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) + cmd |= MI_EXE_FLUSH; - cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; - if ((invalidate_domains|flush_domains) & - I915_GEM_DOMAIN_RENDER) - cmd &= ~MI_NO_WRITE_FLUSH; - if (INTEL_INFO(dev)->gen < 4) { - /* - * On the 965, the sampler cache always gets flushed - * and this bit is reserved. - */ - if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) - cmd |= MI_READ_FLUSH; - } - if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) - cmd |= MI_EXE_FLUSH; - - if (invalidate_domains & I915_GEM_DOMAIN_COMMAND && - (IS_G4X(dev) || IS_GEN5(dev))) - cmd |= MI_INVALIDATE_ISP; + if (invalidate_domains & I915_GEM_DOMAIN_COMMAND && + (IS_G4X(dev) || IS_GEN5(dev))) + cmd |= MI_INVALIDATE_ISP; - ret = intel_ring_begin(ring, 2); - if (ret) - return ret; + ret = intel_ring_begin(ring, 2); + if (ret) + return ret; - intel_ring_emit(ring, cmd); - intel_ring_emit(ring, MI_NOOP); - intel_ring_advance(ring); - } + intel_ring_emit(ring, cmd); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); return 0; } @@ -568,9 +566,6 @@ bsd_ring_flush(struct intel_ring_buffer *ring, { int ret; - if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) - return 0; - ret = intel_ring_begin(ring, 2); if (ret) return ret; @@ -1056,9 +1051,6 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring, uint32_t cmd; int ret; - if (((invalidate | flush) & I915_GEM_GPU_DOMAINS) == 0) - return 0; - ret = intel_ring_begin(ring, 4); if (ret) return ret; @@ -1230,9 +1222,6 @@ static int blt_ring_flush(struct intel_ring_buffer *ring, uint32_t cmd; int ret; - if (((invalidate | flush) & I915_GEM_DOMAIN_RENDER) == 0) - return 0; - ret = blt_ring_begin(ring, 4); if (ret) return ret; diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 3cd3234ba0af..10e41af6b026 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -957,7 +957,11 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode /* adjust pixel clock as needed */ adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss); - if (ASIC_IS_AVIVO(rdev)) + if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) + /* TV seems to prefer the legacy algo on some boards */ + radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, + &ref_div, &post_div); + else if (ASIC_IS_AVIVO(rdev)) radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, &ref_div, &post_div); else diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index cf7c8d5b4ec2..cf602e2d0718 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -448,7 +448,7 @@ static uint16_t combios_get_table_offset(struct drm_device *dev, bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) { - int edid_info; + int edid_info, size; struct edid *edid; unsigned char *raw; edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE); @@ -456,11 +456,12 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) return false; raw = rdev->bios + edid_info; - edid = kmalloc(EDID_LENGTH * (raw[0x7e] + 1), GFP_KERNEL); + size = EDID_LENGTH * (raw[0x7e] + 1); + edid = kmalloc(size, GFP_KERNEL); if (edid == NULL) return false; - memcpy((unsigned char *)edid, raw, EDID_LENGTH * (raw[0x7e] + 1)); + 
memcpy((unsigned char *)edid, raw, size); if (!drm_edid_is_valid(edid)) { kfree(edid); @@ -468,6 +469,7 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) } rdev->mode_info.bios_hardcoded_edid = edid; + rdev->mode_info.bios_hardcoded_edid_size = size; return true; } @@ -475,8 +477,17 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) struct edid * radeon_bios_get_hardcoded_edid(struct radeon_device *rdev) { - if (rdev->mode_info.bios_hardcoded_edid) - return rdev->mode_info.bios_hardcoded_edid; + struct edid *edid; + + if (rdev->mode_info.bios_hardcoded_edid) { + edid = kmalloc(rdev->mode_info.bios_hardcoded_edid_size, GFP_KERNEL); + if (edid) { + memcpy((unsigned char *)edid, + (unsigned char *)rdev->mode_info.bios_hardcoded_edid, + rdev->mode_info.bios_hardcoded_edid_size); + return edid; + } + } return NULL; } diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 28c7961cd19b..2ef6d5135064 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -633,6 +633,8 @@ static int radeon_vga_mode_valid(struct drm_connector *connector, static enum drm_connector_status radeon_vga_detect(struct drm_connector *connector, bool force) { + struct drm_device *dev = connector->dev; + struct radeon_device *rdev = dev->dev_private; struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct drm_encoder *encoder; struct drm_encoder_helper_funcs *encoder_funcs; @@ -683,6 +685,17 @@ radeon_vga_detect(struct drm_connector *connector, bool force) if (ret == connector_status_connected) ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true); + + /* RN50 and some RV100 asics in servers often have a hardcoded EDID in the + * vbios to deal with KVMs. If we have one and are not able to detect a monitor + * by other means, assume the CRT is connected and use that EDID. + */ + if ((!rdev->is_atom_bios) && + (ret == connector_status_disconnected) && + rdev->mode_info.bios_hardcoded_edid_size) { + ret = connector_status_connected; + } + radeon_connector_update_scratch_regs(connector, ret); return ret; } @@ -794,6 +807,8 @@ static int radeon_dvi_get_modes(struct drm_connector *connector) static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connector, bool force) { + struct drm_device *dev = connector->dev; + struct radeon_device *rdev = dev->dev_private; struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct drm_encoder *encoder = NULL; struct drm_encoder_helper_funcs *encoder_funcs; @@ -833,8 +848,6 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) * you don't really know what's connected to which port as both are digital. */ if (radeon_connector->shared_ddc && (ret == connector_status_connected)) { - struct drm_device *dev = connector->dev; - struct radeon_device *rdev = dev->dev_private; struct drm_connector *list_connector; struct radeon_connector *list_radeon_connector; list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) { @@ -899,6 +912,19 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true); } + /* RN50 and some RV100 asics in servers often have a hardcoded EDID in the + * vbios to deal with KVMs. If we have one and are not able to detect a monitor + * by other means, assume the DFP is connected and use that EDID. 
In most + * cases the DVI port is actually a virtual KVM port connected to the service + * processor. + */ + if ((!rdev->is_atom_bios) && + (ret == connector_status_disconnected) && + rdev->mode_info.bios_hardcoded_edid_size) { + radeon_connector->use_digital = true; + ret = connector_status_connected; + } + out: /* updated in get modes as well since we need to know if it's analog or digital */ radeon_connector_update_scratch_regs(connector, ret); diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index e4582814bb78..9c57538231d5 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -239,6 +239,7 @@ struct radeon_mode_info { struct drm_property *underscan_vborder_property; /* hardcoded DFP edid from BIOS */ struct edid *bios_hardcoded_edid; + int bios_hardcoded_edid_size; /* pointer to fbdev info structure */ struct radeon_fbdev *rfbdev; diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 2aed03bde4b2..08de669e025a 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -365,12 +365,14 @@ static ssize_t radeon_set_pm_profile(struct device *dev, else if (strncmp("high", buf, strlen("high")) == 0) rdev->pm.profile = PM_PROFILE_HIGH; else { - DRM_ERROR("invalid power profile!\n"); + count = -EINVAL; goto fail; } radeon_pm_update_profile(rdev); radeon_pm_set_clocks(rdev); - } + } else + count = -EINVAL; + fail: mutex_unlock(&rdev->pm.mutex); @@ -413,7 +415,7 @@ static ssize_t radeon_set_pm_method(struct device *dev, mutex_unlock(&rdev->pm.mutex); cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); } else { - DRM_ERROR("invalid power method!\n"); + count = -EINVAL; goto fail; } radeon_pm_compute_clocks(rdev); diff --git a/include/drm/drm.h b/include/drm/drm.h index 9ac431396176..4be33b4ca2f8 100644 --- a/include/drm/drm.h +++ b/include/drm/drm.h @@ -463,12 +463,15 @@ struct drm_irq_busid { enum drm_vblank_seq_type { _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */ _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */ + /* bits 1-6 are reserved for high crtcs */ + _DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e, _DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */ _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */ _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */ _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */ _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking, unsupported */ }; +#define _DRM_VBLANK_HIGH_CRTC_SHIFT 1 #define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE) #define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \ @@ -753,6 +756,7 @@ struct drm_event_vblank { }; #define DRM_CAP_DUMB_BUFFER 0x1 +#define DRM_CAP_VBLANK_HIGH_CRTC 0x2 /* typedef area */ #ifndef __KERNEL__ |
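
For reference, the i915_gem_dumb_create() hunk above replaces a stray `&` and a bad rounding term: the intended arithmetic is bytes-per-pixel rounded up, multiplied by the width, then aligned to 64 bytes. The small standalone check below restates that computation outside the kernel; the helper name and test values are ours, not part of the patch:

```c
#include <assert.h>
#include <stdint.h>

/* Mirror of the corrected kernel computation:
 *   pitch = ALIGN(width * ((bpp + 7) / 8), 64);
 *   size  = pitch * height;
 * The old code used "width & ((bpp + 1) / 8)", which yields a nonsense
 * pitch (e.g. width & 4 for 32 bpp) instead of a row length in bytes. */
static uint32_t dumb_pitch(uint32_t width, uint32_t bpp)
{
    uint32_t cpp = (bpp + 7) / 8;       /* bytes per pixel, rounded up */
    return (width * cpp + 63) & ~63u;   /* round up to a 64-byte boundary */
}

int main(void)
{
    /* 1920 wide at 32 bpp: 1920 * 4 = 7680, already 64-byte aligned. */
    assert(dumb_pitch(1920, 32) == 7680);
    /* 1366 wide at 32 bpp: 5464 bytes rounds up to 5504. */
    assert(dumb_pitch(1366, 32) == 5504);
    return 0;
}
```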