Diffstat (limited to 'drivers/gpu')
47 files changed, 546 insertions, 444 deletions
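A large share of the churn below is mechanical: call sites that wrapped drm_gem_object_unreference() in dev->struct_mutex are converted to the new drm_gem_object_unreference_unlocked() helper, whose final-free path handles struct_mutex internally when a driver only provides a locked free hook (see the drm_gem_object_free_unlocked() hunk in drm_gem.c below). A minimal before/after sketch of that recurring pattern, illustrative only and not itself part of the diff:

    /* before: caller had to take struct_mutex just to drop a reference */
    mutex_lock(&dev->struct_mutex);
    drm_gem_object_unreference(obj);
    mutex_unlock(&dev->struct_mutex);

    /* after: must be called without struct_mutex held */
    drm_gem_object_unreference_unlocked(obj);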
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 7d0f00a935f..f2aaf39be39 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -836,11 +836,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) mode_changed = true; } else if (set->fb == NULL) { mode_changed = true; - } else if ((set->fb->bits_per_pixel != - set->crtc->fb->bits_per_pixel) || - set->fb->depth != set->crtc->fb->depth) - fb_changed = true; - else + } else fb_changed = true; } diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 766c46875a2..f3c58e2bd75 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -125,28 +125,28 @@ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0), - DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH), - - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW) + DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED), + + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, 
drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED) }; #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index f665b05592f..ab6c9733041 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -598,6 +598,50 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev, return mode; } +/* + * EDID is delightfully ambiguous about how interlaced modes are to be + * encoded. Our internal representation is of frame height, but some + * HDTV detailed timings are encoded as field height. + * + * The format list here is from CEA, in frame size. Technically we + * should be checking refresh rate too. Whatever. 
+ */ +static void +drm_mode_do_interlace_quirk(struct drm_display_mode *mode, + struct detailed_pixel_timing *pt) +{ + int i; + static const struct { + int w, h; + } cea_interlaced[] = { + { 1920, 1080 }, + { 720, 480 }, + { 1440, 480 }, + { 2880, 480 }, + { 720, 576 }, + { 1440, 576 }, + { 2880, 576 }, + }; + static const int n_sizes = + sizeof(cea_interlaced)/sizeof(cea_interlaced[0]); + + if (!(pt->misc & DRM_EDID_PT_INTERLACED)) + return; + + for (i = 0; i < n_sizes; i++) { + if ((mode->hdisplay == cea_interlaced[i].w) && + (mode->vdisplay == cea_interlaced[i].h / 2)) { + mode->vdisplay *= 2; + mode->vsync_start *= 2; + mode->vsync_end *= 2; + mode->vtotal *= 2; + mode->vtotal |= 1; + } + } + + mode->flags |= DRM_MODE_FLAG_INTERLACE; +} + /** * drm_mode_detailed - create a new mode from an EDID detailed timing section * @dev: DRM device (needed to create new mode) @@ -680,8 +724,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, drm_mode_set_name(mode); - if (pt->misc & DRM_EDID_PT_INTERLACED) - mode->flags |= DRM_MODE_FLAG_INTERLACE; + drm_mode_do_interlace_quirk(mode, pt); if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) { pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE; diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 0f9e90552dc..50549703584 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -27,6 +27,7 @@ * Dave Airlie <airlied@linux.ie> * Jesse Barnes <jesse.barnes@intel.com> */ +#include <linux/kernel.h> #include <linux/sysrq.h> #include <linux/fb.h> #include "drmP.h" @@ -50,21 +51,6 @@ int drm_fb_helper_add_connector(struct drm_connector *connector) } EXPORT_SYMBOL(drm_fb_helper_add_connector); -static int my_atoi(const char *name) -{ - int val = 0; - - for (;; name++) { - switch (*name) { - case '0' ... 
'9': - val = 10*val+(*name-'0'); - break; - default: - return val; - } - } -} - /** * drm_fb_helper_connector_parse_command_line - parse command line for connector * @connector - connector to parse line for @@ -111,7 +97,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con namelen = i; if (!refresh_specified && !bpp_specified && !yres_specified) { - refresh = my_atoi(&name[i+1]); + refresh = simple_strtol(&name[i+1], NULL, 10); refresh_specified = 1; if (cvt || rb) cvt = 0; @@ -121,7 +107,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con case '-': namelen = i; if (!bpp_specified && !yres_specified) { - bpp = my_atoi(&name[i+1]); + bpp = simple_strtol(&name[i+1], NULL, 10); bpp_specified = 1; if (cvt || rb) cvt = 0; @@ -130,7 +116,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con break; case 'x': if (!yres_specified) { - yres = my_atoi(&name[i+1]); + yres = simple_strtol(&name[i+1], NULL, 10); yres_specified = 1; } else goto done; @@ -170,7 +156,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con } } if (i < 0 && yres_specified) { - xres = my_atoi(name); + xres = simple_strtol(name, NULL, 10); res_specified = 1; } done: @@ -694,7 +680,7 @@ int drm_fb_helper_set_par(struct fb_info *info) int i; if (var->pixclock != 0) { - DRM_ERROR("PIXEL CLCOK SET\n"); + DRM_ERROR("PIXEL CLOCK SET\n"); return -EINVAL; } diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 8bf3770f294..aa89d4b0b4c 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -192,9 +192,7 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle) idr_remove(&filp->object_idr, handle); spin_unlock(&filp->table_lock); - mutex_lock(&dev->struct_mutex); - drm_gem_object_handle_unreference(obj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_handle_unreference_unlocked(obj); return 0; } @@ -325,9 +323,7 @@ again: } err: - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(obj); return ret; } @@ -358,9 +354,7 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data, return -ENOENT; ret = drm_gem_handle_create(file_priv, obj, &handle); - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(obj); if (ret) return ret; @@ -390,7 +384,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data) { struct drm_gem_object *obj = ptr; - drm_gem_object_handle_unreference(obj); + drm_gem_object_handle_unreference_unlocked(obj); return 0; } @@ -403,16 +397,25 @@ drm_gem_object_release_handle(int id, void *ptr, void *data) void drm_gem_release(struct drm_device *dev, struct drm_file *file_private) { - mutex_lock(&dev->struct_mutex); idr_for_each(&file_private->object_idr, &drm_gem_object_release_handle, NULL); idr_destroy(&file_private->object_idr); - mutex_unlock(&dev->struct_mutex); +} + +static void +drm_gem_object_free_common(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + fput(obj->filp); + atomic_dec(&dev->object_count); + atomic_sub(obj->size, &dev->object_memory); + kfree(obj); } /** * Called after the last reference to the object has been lost. 
+ * Must be called holding struct_mutex * * Frees the object */ @@ -427,14 +430,40 @@ drm_gem_object_free(struct kref *kref) if (dev->driver->gem_free_object != NULL) dev->driver->gem_free_object(obj); - fput(obj->filp); - atomic_dec(&dev->object_count); - atomic_sub(obj->size, &dev->object_memory); - kfree(obj); + drm_gem_object_free_common(obj); } EXPORT_SYMBOL(drm_gem_object_free); /** + * Called after the last reference to the object has been lost. + * Must be called without holding struct_mutex + * + * Frees the object + */ +void +drm_gem_object_free_unlocked(struct kref *kref) +{ + struct drm_gem_object *obj = (struct drm_gem_object *) kref; + struct drm_device *dev = obj->dev; + + if (dev->driver->gem_free_object_unlocked != NULL) + dev->driver->gem_free_object_unlocked(obj); + else if (dev->driver->gem_free_object != NULL) { + mutex_lock(&dev->struct_mutex); + dev->driver->gem_free_object(obj); + mutex_unlock(&dev->struct_mutex); + } + + drm_gem_object_free_common(obj); +} +EXPORT_SYMBOL(drm_gem_object_free_unlocked); + +static void drm_gem_object_ref_bug(struct kref *list_kref) +{ + BUG(); +} + +/** * Called after the last handle to the object has been closed * * Removes any name for the object. Note that this must be @@ -458,8 +487,10 @@ drm_gem_object_handle_free(struct kref *kref) /* * The object name held a reference to this object, drop * that now. + * + * This cannot be the last reference, since the handle holds one too. */ - drm_gem_object_unreference(obj); + kref_put(&obj->refcount, drm_gem_object_ref_bug); } else spin_unlock(&dev->object_name_lock); @@ -477,11 +508,8 @@ EXPORT_SYMBOL(drm_gem_vm_open); void drm_gem_vm_close(struct vm_area_struct *vma) { struct drm_gem_object *obj = vma->vm_private_data; - struct drm_device *dev = obj->dev; - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(obj); } EXPORT_SYMBOL(drm_gem_vm_close); diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index cdec3297712..2ac074c8f5d 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -405,7 +405,8 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm, wasted += alignment - tmp; } - if (entry->size >= size + wasted) { + if (entry->size >= size + wasted && + (entry->start + wasted + size) <= end) { if (!best_match) return entry; if (entry->size < best_size) { diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 89f1cb86c32..742bd8f738c 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -176,6 +176,8 @@ MODULE_DEVICE_TABLE(pci, pciidlist); static int i915_drm_freeze(struct drm_device *dev) { + struct drm_i915_private *dev_priv = dev->dev_private; + pci_save_state(dev->pdev); /* If KMS is active, we do the leavevt stuff here */ @@ -191,17 +193,12 @@ static int i915_drm_freeze(struct drm_device *dev) i915_save_state(dev); - return 0; -} - -static void i915_drm_suspend(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - intel_opregion_free(dev, 1); /* Modeset on resume, not lid events */ dev_priv->modeset_on_lid = 0; + + return 0; } static int i915_suspend(struct drm_device *dev, pm_message_t state) @@ -221,8 +218,6 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state) if (error) return error; - i915_drm_suspend(dev); - if (state.event == PM_EVENT_SUSPEND) { /* Shut down the device */ pci_disable_device(dev->pdev); @@ -237,6 +232,10 @@
static int i915_drm_thaw(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int error = 0; + i915_restore_state(dev); + + intel_opregion_init(dev, 1); + /* KMS EnterVT equivalent */ if (drm_core_check_feature(dev, DRIVER_MODESET)) { mutex_lock(&dev->struct_mutex); @@ -263,10 +262,6 @@ static int i915_resume(struct drm_device *dev) pci_set_master(dev->pdev); - i915_restore_state(dev); - - intel_opregion_init(dev, 1); - return i915_drm_thaw(dev); } @@ -423,8 +418,6 @@ static int i915_pm_suspend(struct device *dev) if (error) return error; - i915_drm_suspend(drm_dev); - pci_disable_device(pdev); pci_set_power_state(pdev, PCI_D3hot); @@ -464,13 +457,8 @@ static int i915_pm_poweroff(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); - int error; - - error = i915_drm_freeze(drm_dev); - if (!error) - i915_drm_suspend(drm_dev); - return error; + return i915_drm_freeze(drm_dev); } const struct dev_pm_ops i915_pm_ops = { diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 59dcce054d1..b5df30ca0fa 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -128,9 +128,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, return -ENOMEM; ret = drm_gem_handle_create(file_priv, obj, &handle); - mutex_lock(&dev->struct_mutex); - drm_gem_object_handle_unreference(obj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_handle_unreference_unlocked(obj); if (ret) return ret; @@ -488,7 +486,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, */ if (args->offset > obj->size || args->size > obj->size || args->offset + args->size > obj->size) { - drm_gem_object_unreference(obj); + drm_gem_object_unreference_unlocked(obj); return -EINVAL; } @@ -501,7 +499,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, file_priv); } - drm_gem_object_unreference(obj); + drm_gem_object_unreference_unlocked(obj); return ret; } @@ -961,7 +959,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, */ if (args->offset > obj->size || args->size > obj->size || args->offset + args->size > obj->size) { - drm_gem_object_unreference(obj); + drm_gem_object_unreference_unlocked(obj); return -EINVAL; } @@ -995,7 +993,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, DRM_INFO("pwrite failed %d\n", ret); #endif - drm_gem_object_unreference(obj); + drm_gem_object_unreference_unlocked(obj); return ret; } @@ -1138,9 +1136,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, PROT_READ | PROT_WRITE, MAP_SHARED, args->offset); up_write(¤t->mm->mmap_sem); - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(obj); if (IS_ERR((void *)addr)) return addr; diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index ba247d1f9bc..20653776965 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -283,9 +283,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, obj_priv = obj->driver_private; if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) { - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(obj); return -EINVAL; } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index a483f41e9f2..1b5cd833bc7 100644 --- 
a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3569,11 +3569,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, intel_crtc->cursor_bo = bo; return 0; -fail: - mutex_lock(&dev->struct_mutex); fail_locked: - drm_gem_object_unreference(bo); mutex_unlock(&dev->struct_mutex); +fail: + drm_gem_object_unreference_unlocked(bo); return ret; } @@ -4513,9 +4512,7 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) intelfb_remove(dev, fb); drm_framebuffer_cleanup(fb); - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(intel_fb->obj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(intel_fb->obj); kfree(intel_fb); } @@ -4578,9 +4575,7 @@ intel_user_framebuffer_create(struct drm_device *dev, ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj); if (ret) { - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(obj); return NULL; } diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 1740577c617..93031a75d11 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -636,6 +636,13 @@ static const struct dmi_system_id bad_lid_status[] = { DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"), }, }, + { + .ident = "Clevo M5x0N", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."), + DMI_MATCH(DMI_BOARD_NAME, "M5x0N"), + }, + }, { } }; diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index f3086fd4773..c3fa406912b 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -1164,7 +1164,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, out_unlock: mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->mode_config.mutex); - drm_gem_object_unreference(new_bo); + drm_gem_object_unreference_unlocked(new_bo); kfree(params); return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 2cd0fad17da..0e9cd1d4913 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -5861,13 +5861,12 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table, struct drm_nouveau_private *dev_priv = dev->dev_private; struct nvbios *bios = &dev_priv->VBIOS; struct init_exec iexec = { true, false }; - unsigned long flags; - spin_lock_irqsave(&bios->lock, flags); + mutex_lock(&bios->lock); bios->display.output = dcbent; parse_init_table(bios, table, &iexec); bios->display.output = NULL; - spin_unlock_irqrestore(&bios->lock, flags); + mutex_unlock(&bios->lock); } static bool NVInitVBIOS(struct drm_device *dev) @@ -5876,7 +5875,7 @@ static bool NVInitVBIOS(struct drm_device *dev) struct nvbios *bios = &dev_priv->VBIOS; memset(bios, 0, sizeof(struct nvbios)); - spin_lock_init(&bios->lock); + mutex_init(&bios->lock); bios->dev = dev; if (!NVShadowVBIOS(dev, bios->data)) diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h index 68446fd4146..fd94bd6dc26 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.h +++ b/drivers/gpu/drm/nouveau/nouveau_bios.h @@ -205,7 +205,7 @@ struct nvbios { struct drm_device *dev; struct nouveau_bios_info pub; - spinlock_t lock; + struct mutex lock; uint8_t data[NV_PROM_SIZE]; unsigned int length; diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index dfc94391d71..cf1c5c0a0ab 
100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -39,11 +39,8 @@ nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb) if (drm_fb->fbdev) nouveau_fbcon_remove(dev, drm_fb); - if (fb->nvbo) { - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(fb->nvbo->gem); - mutex_unlock(&dev->struct_mutex); - } + if (fb->nvbo) + drm_gem_object_unreference_unlocked(fb->nvbo->gem); drm_framebuffer_cleanup(drm_fb); kfree(fb); diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 5445cefdd03..1c15ef37b71 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -583,6 +583,7 @@ struct drm_nouveau_private { uint64_t vm_end; struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; int vm_vram_pt_nr; + uint64_t vram_sys_base; /* the mtrr covering the FB */ int fb_mtrr; diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index ea879a2efef..d48c59cdefe 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -401,10 +401,8 @@ nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb) unregister_framebuffer(info); nouveau_bo_unmap(nouveau_fb->nvbo); - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(nouveau_fb->nvbo->gem); + drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); nouveau_fb->nvbo = NULL; - mutex_unlock(&dev->struct_mutex); if (par) drm_fb_helper_free(&par->helper); framebuffer_release(info); diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 70cc30803e3..34063c56189 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -167,12 +167,10 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data, ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); out: - mutex_lock(&dev->struct_mutex); - drm_gem_object_handle_unreference(nvbo->gem); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_handle_unreference_unlocked(nvbo->gem); if (ret) - drm_gem_object_unreference(nvbo->gem); + drm_gem_object_unreference_unlocked(nvbo->gem); return ret; } @@ -865,9 +863,7 @@ nouveau_gem_ioctl_pin(struct drm_device *dev, void *data, req->domain = NOUVEAU_GEM_DOMAIN_VRAM; out: - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(gem); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(gem); return ret; } @@ -891,9 +887,7 @@ nouveau_gem_ioctl_unpin(struct drm_device *dev, void *data, ret = nouveau_bo_unpin(nouveau_gem_object(gem)); - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(gem); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(gem); return ret; } @@ -935,9 +929,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data, } out: - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(gem); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(gem); return ret; } @@ -965,9 +957,7 @@ nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data, ret = 0; out: - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(gem); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(gem); return ret; } @@ -986,9 +976,7 @@ nouveau_gem_ioctl_info(struct drm_device *dev, void *data, return -EINVAL; ret = nouveau_gem_info(gem, req); - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(gem); - 
mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(gem); return ret; } diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 8f3a12f614e..2dc09dbd817 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -285,53 +285,50 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size, uint32_t flags, uint64_t phys) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj **pgt; - unsigned psz, pfl, pages; - - if (virt >= dev_priv->vm_gart_base && - (virt + size) < (dev_priv->vm_gart_base + dev_priv->vm_gart_size)) { - psz = 12; - pgt = &dev_priv->gart_info.sg_ctxdma; - pfl = 0x21; - virt -= dev_priv->vm_gart_base; - } else - if (virt >= dev_priv->vm_vram_base && - (virt + size) < (dev_priv->vm_vram_base + dev_priv->vm_vram_size)) { - psz = 16; - pgt = dev_priv->vm_vram_pt; - pfl = 0x01; - virt -= dev_priv->vm_vram_base; - } else { - NV_ERROR(dev, "Invalid address: 0x%16llx-0x%16llx\n", - virt, virt + size - 1); - return -EINVAL; - } + struct nouveau_gpuobj *pgt; + unsigned block; + int i; - pages = size >> psz; + virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1; + size = (size >> 16) << 1; + + phys |= ((uint64_t)flags << 32); + phys |= 1; + if (dev_priv->vram_sys_base) { + phys += dev_priv->vram_sys_base; + phys |= 0x30; + } dev_priv->engine.instmem.prepare_access(dev, true); - if (flags & 0x80000000) { - while (pages--) { - struct nouveau_gpuobj *pt = pgt[virt >> 29]; - unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1; + while (size) { + unsigned offset_h = upper_32_bits(phys); + unsigned offset_l = lower_32_bits(phys); + unsigned pte, end; + + for (i = 7; i >= 0; i--) { + block = 1 << (i + 1); + if (size >= block && !(virt & (block - 1))) + break; + } + offset_l |= (i << 7); - nv_wo32(dev, pt, pte++, 0x00000000); - nv_wo32(dev, pt, pte++, 0x00000000); + phys += block << 15; + size -= block; - virt += (1 << psz); - } - } else { - while (pages--) { - struct nouveau_gpuobj *pt = pgt[virt >> 29]; - unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1; - unsigned offset_h = upper_32_bits(phys) & 0xff; - unsigned offset_l = lower_32_bits(phys); + while (block) { + pgt = dev_priv->vm_vram_pt[virt >> 14]; + pte = virt & 0x3ffe; - nv_wo32(dev, pt, pte++, offset_l | pfl); - nv_wo32(dev, pt, pte++, offset_h | flags); + end = pte + block; + if (end > 16384) + end = 16384; + block -= (end - pte); + virt += (end - pte); - phys += (1 << psz); - virt += (1 << psz); + while (pte < end) { + nv_wo32(dev, pgt, pte++, offset_l); + nv_wo32(dev, pgt, pte++, offset_h); + } } } dev_priv->engine.instmem.finish_access(dev); @@ -356,7 +353,41 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size, void nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) { - nv50_mem_vm_bind_linear(dev, virt, size, 0x80000000, 0); + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *pgt; + unsigned pages, pte, end; + + virt -= dev_priv->vm_vram_base; + pages = (size >> 16) << 1; + + dev_priv->engine.instmem.prepare_access(dev, true); + while (pages) { + pgt = dev_priv->vm_vram_pt[virt >> 29]; + pte = (virt & 0x1ffe0000ULL) >> 15; + + end = pte + pages; + if (end > 16384) + end = 16384; + pages -= (end - pte); + virt += (end - pte) << 15; + + while (pte < end) + nv_wo32(dev, pgt, pte++, 0); + } + dev_priv->engine.instmem.finish_access(dev); + + nv_wr32(dev, 0x100c80, 0x00050001); + if (!nv_wait(0x100c80, 
0x00000001, 0x00000000)) { + NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); + NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); + return; + } + + nv_wr32(dev, 0x100c80, 0x00000001); + if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { + NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); + NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); + } } /* diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c index d99dc087f9b..9537f3e3011 100644 --- a/drivers/gpu/drm/nouveau/nouveau_notifier.c +++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c @@ -61,11 +61,8 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan) chan->notifier_bo = ntfy; out_err: - if (ret) { - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(ntfy->gem); - mutex_unlock(&dev->struct_mutex); - } + if (ret) + drm_gem_object_unreference_unlocked(ntfy->gem); return ret; } @@ -81,8 +78,8 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan) nouveau_bo_unmap(chan->notifier_bo); mutex_lock(&dev->struct_mutex); nouveau_bo_unpin(chan->notifier_bo); - drm_gem_object_unreference(chan->notifier_bo->gem); mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(chan->notifier_bo->gem); nouveau_mem_takedown(&chan->notifier_heap); } diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c index d2f143ed97c..a1d1ebb073d 100644 --- a/drivers/gpu/drm/nouveau/nv04_crtc.c +++ b/drivers/gpu/drm/nouveau/nv04_crtc.c @@ -926,9 +926,7 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset); nv_crtc->cursor.show(nv_crtc, true); out: - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(gem); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(gem); return ret; } diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c index d0e038d2894..1d73b15d70d 100644 --- a/drivers/gpu/drm/nouveau/nv04_dac.c +++ b/drivers/gpu/drm/nouveau/nv04_dac.c @@ -119,7 +119,7 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; - uint8_t saved_seq1, saved_pi, saved_rpc1; + uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode; uint8_t saved_palette0[3], saved_palette_mask; uint32_t saved_rtest_ctrl, saved_rgen_ctrl; int i; @@ -135,6 +135,9 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder, /* only implemented for head A for now */ NVSetOwner(dev, 0); + saved_cr_mode = NVReadVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX); + NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode | 0x80); + saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX); NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20); @@ -203,6 +206,7 @@ out: NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi); NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1); NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1); + NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode); if (blue == 0x18) { NV_INFO(dev, "Load detected on head A\n"); diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c index 58b917c3341..21ac6e49b6e 100644 --- a/drivers/gpu/drm/nouveau/nv17_tv.c +++ b/drivers/gpu/drm/nouveau/nv17_tv.c @@ -579,6 +579,8 @@ static void nv17_tv_restore(struct drm_encoder *encoder) nouveau_encoder(encoder)->restore.output); nv17_tv_state_load(dev, 
&to_tv_enc(encoder)->saved_state); + + nouveau_encoder(encoder)->last_dpms = NV_DPMS_CLEARED; } static int nv17_tv_create_resources(struct drm_encoder *encoder, diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index d1a651e3400..cfabeb974a5 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c @@ -358,9 +358,7 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, nv_crtc->cursor.show(nv_crtc, true); out: - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(gem); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(gem); return ret; } diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index 94400f777e7..f0dc4e36ef0 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -76,6 +76,11 @@ nv50_instmem_init(struct drm_device *dev) for (i = 0x1700; i <= 0x1710; i += 4) priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i); + if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) + dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12; + else + dev_priv->vram_sys_base = 0; + /* Reserve the last MiB of VRAM, we should probably try to avoid * setting up the below tables over the top of the VBIOS image at * some point. @@ -172,16 +177,28 @@ nv50_instmem_init(struct drm_device *dev) * We map the entire fake channel into the start of the PRAMIN BAR */ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000, - 0, &priv->pramin_pt); + 0, &priv->pramin_pt); if (ret) return ret; - for (i = 0, v = c_offset; i < pt_size; i += 8, v += 0x1000) { - if (v < (c_offset + c_size)) - BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1); - else - BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009); + v = c_offset | 1; + if (dev_priv->vram_sys_base) { + v += dev_priv->vram_sys_base; + v |= 0x30; + } + + i = 0; + while (v < dev_priv->vram_sys_base + c_offset + c_size) { + BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v); + BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000); + v += 0x1000; + i += 8; + } + + while (i < pt_size) { + BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000000); BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000); + i += 8; } BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63); @@ -416,7 +433,9 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; - uint32_t pte, pte_end, vram; + struct nouveau_gpuobj *pramin_pt = priv->pramin_pt->gpuobj; + uint32_t pte, pte_end; + uint64_t vram; if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound) return -EINVAL; @@ -424,20 +443,24 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n", gpuobj->im_pramin->start, gpuobj->im_pramin->size); - pte = (gpuobj->im_pramin->start >> 12) << 3; - pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte; + pte = (gpuobj->im_pramin->start >> 12) << 1; + pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; vram = gpuobj->im_backing_start; NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n", gpuobj->im_pramin->start, pte, pte_end); NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start); + vram |= 1; + if (dev_priv->vram_sys_base) { + vram += dev_priv->vram_sys_base; + vram |= 0x30; + } + dev_priv->engine.instmem.prepare_access(dev, true); while (pte < pte_end) { - 
nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1); - nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000); - - pte += 8; + nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram)); + nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram)); vram += NV50_INSTMEM_PAGE_SIZE; } dev_priv->engine.instmem.finish_access(dev); @@ -470,14 +493,13 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) if (gpuobj->im_bound == 0) return -EINVAL; - pte = (gpuobj->im_pramin->start >> 12) << 3; - pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte; + pte = (gpuobj->im_pramin->start >> 12) << 1; + pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; dev_priv->engine.instmem.prepare_access(dev, true); while (pte < pte_end) { - nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009); - nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000); - pte += 8; + nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000); + nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000); } dev_priv->engine.instmem.finish_access(dev); diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index e3b44562d26..d75788feac6 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c @@ -24,6 +24,7 @@ #include <linux/module.h> #include <linux/sched.h> +#include <asm/unaligned.h> #define ATOM_DEBUG @@ -212,7 +213,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr, case ATOM_ARG_PS: idx = U8(*ptr); (*ptr)++; - val = le32_to_cpu(ctx->ps[idx]); + /* get_unaligned_le32 avoids unaligned accesses from atombios + * tables, noticed on a DEC Alpha. */ + val = get_unaligned_le32((u32 *)&ctx->ps[idx]); if (print) DEBUG("PS[0x%02X,0x%04X]", idx, val); break; @@ -640,7 +643,7 @@ static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg) uint8_t count = U8((*ptr)++); SDEBUG(" count: %d\n", count); if (arg == ATOM_UNIT_MICROSEC) - schedule_timeout_uninterruptible(usecs_to_jiffies(count)); + udelay(count); else schedule_timeout_uninterruptible(msecs_to_jiffies(count)); } @@ -878,8 +881,6 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg) uint8_t attr = U8((*ptr)++), shift; uint32_t saved, dst; int dptr = *ptr; - attr &= 0x38; - attr |= atom_def_dst[attr >> 3] << 6; SDEBUG(" dst: "); dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); shift = atom_get_src(ctx, attr, ptr); @@ -894,8 +895,6 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg) uint8_t attr = U8((*ptr)++), shift; uint32_t saved, dst; int dptr = *ptr; - attr &= 0x38; - attr |= atom_def_dst[attr >> 3] << 6; SDEBUG(" dst: "); dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); shift = atom_get_src(ctx, attr, ptr); diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index b32eeea5bb8..99915a682d5 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -350,7 +350,7 @@ retry: atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); if (args.ucReplyStatus && !args.ucDataOutLen) { - if (args.ucReplyStatus == 0x20 && retry_count < 10) + if (args.ucReplyStatus == 0x20 && retry_count++ < 10) goto retry; DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n", req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3], diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index af1c3ca8a4c..446b765ac72 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ 
b/drivers/gpu/drm/radeon/r600_blit_kms.c @@ -543,9 +543,6 @@ int r600_vb_ib_get(struct radeon_device *rdev) void r600_vb_ib_put(struct radeon_device *rdev) { radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence); - mutex_lock(&rdev->ib_pool.mutex); - list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs); - mutex_unlock(&rdev->ib_pool.mutex); radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); } diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c index 6d5a711c2e9..75bcf35a093 100644 --- a/drivers/gpu/drm/radeon/r600_cp.c +++ b/drivers/gpu/drm/radeon/r600_cp.c @@ -1428,9 +1428,12 @@ static void r700_gfx_init(struct drm_device *dev, gb_tiling_config |= R600_BANK_SWAPS(1); - backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes, - dev_priv->r600_max_backends, - (0xff << dev_priv->r600_max_backends) & 0xff); + if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740) + backend_map = 0x28; + else + backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes, + dev_priv->r600_max_backends, + (0xff << dev_priv->r600_max_backends) & 0xff); gb_tiling_config |= R600_BACKEND_MAP(backend_map); cc_gc_shader_pipe_config = diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index f57480ba135..c0356bb193e 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -96,6 +96,7 @@ extern int radeon_audio; * symbol; */ #define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */ +/* RADEON_IB_POOL_SIZE must be a power of 2 */ #define RADEON_IB_POOL_SIZE 16 #define RADEON_DEBUGFS_MAX_NUM_FILES 32 #define RADEONFB_CONN_LIMIT 4 @@ -363,11 +364,12 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev); */ struct radeon_ib { struct list_head list; - unsigned long idx; + unsigned idx; uint64_t gpu_addr; struct radeon_fence *fence; - uint32_t *ptr; + uint32_t *ptr; uint32_t length_dw; + bool free; }; /* @@ -377,10 +379,9 @@ struct radeon_ib { struct radeon_ib_pool { struct mutex mutex; struct radeon_bo *robj; - struct list_head scheduled_ibs; struct radeon_ib ibs[RADEON_IB_POOL_SIZE]; bool ready; - DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE); + unsigned head_id; }; struct radeon_cp { diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 2dcda611587..4d8831548a5 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -206,6 +206,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, *connector_type = DRM_MODE_CONNECTOR_DVID; } + /* Asrock RS600 board lists the DVI port as HDMI */ + if ((dev->pdev->device == 0x7941) && + (dev->pdev->subsystem_vendor == 0x1849) && + (dev->pdev->subsystem_device == 0x7941)) { + if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) && + (supported_device == ATOM_DEVICE_DFP3_SUPPORT)) + *connector_type = DRM_MODE_CONNECTOR_DVID; + } + /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */ if ((dev->pdev->device == 0x7941) && (dev->pdev->subsystem_vendor == 0x147b) && diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 23818854001..65f81942f39 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -780,7 +780,7 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect * connected and the DVI port disconnected. If the edid doesn't * say HDMI, vice versa. 
*/ - if (radeon_connector->shared_ddc && connector_status_connected) { + if (radeon_connector->shared_ddc && (ret == connector_status_connected)) { struct drm_device *dev = connector->dev; struct drm_connector *list_connector; struct radeon_connector *list_radeon_connector; @@ -1060,8 +1060,7 @@ radeon_add_atom_connector(struct drm_device *dev, return; } if (radeon_connector->ddc_bus && i2c_bus->valid) { - if (memcmp(&radeon_connector->ddc_bus->rec, i2c_bus, - sizeof(struct radeon_i2c_bus_rec)) == 0) { + if (radeon_connector->ddc_bus->rec.i2c_id == i2c_bus->i2c_id) { radeon_connector->shared_ddc = true; shared_ddc = true; } diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 1190148cf5e..70ba02ed772 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -86,7 +86,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p) &p->validated); } } - return radeon_bo_list_validate(&p->validated, p->ib->fence); + return radeon_bo_list_validate(&p->validated); } int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) @@ -189,18 +189,13 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error) { unsigned i; - if (error && parser->ib) { - radeon_bo_list_unvalidate(&parser->validated, - parser->ib->fence); - } else { - radeon_bo_list_unreserve(&parser->validated); + if (!error && parser->ib) { + radeon_bo_list_fence(&parser->validated, parser->ib->fence); } + radeon_bo_list_unreserve(&parser->validated); for (i = 0; i < parser->nrelocs; i++) { - if (parser->relocs[i].gobj) { - mutex_lock(&parser->rdev->ddev->struct_mutex); - drm_gem_object_unreference(parser->relocs[i].gobj); - mutex_unlock(&parser->rdev->ddev->struct_mutex); - } + if (parser->relocs[i].gobj) + drm_gem_object_unreference_unlocked(parser->relocs[i].gobj); } kfree(parser->track); kfree(parser->relocs); diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index 28772a37009..6f4a5534a99 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c @@ -169,17 +169,13 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc, unpin: if (radeon_crtc->cursor_bo) { radeon_gem_object_unpin(radeon_crtc->cursor_bo); - mutex_lock(&crtc->dev->struct_mutex); - drm_gem_object_unreference(radeon_crtc->cursor_bo); - mutex_unlock(&crtc->dev->struct_mutex); + drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo); } radeon_crtc->cursor_bo = obj; return 0; fail: - mutex_lock(&crtc->dev->struct_mutex); - drm_gem_object_unreference(obj); - mutex_unlock(&crtc->dev->struct_mutex); + drm_gem_object_unreference_unlocked(obj); return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 7e17a362b54..3db82550562 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -679,11 +679,8 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) if (fb->fbdev) radeonfb_remove(dev, fb); - if (radeon_fb->obj) { - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(radeon_fb->obj); - mutex_unlock(&dev->struct_mutex); - } + if (radeon_fb->obj) + drm_gem_object_unreference_unlocked(radeon_fb->obj); drm_framebuffer_cleanup(fb); kfree(radeon_fb); } diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index e13785282a8..c57ad606504 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h @@ -106,9 +106,10 @@ 
* 1.29- R500 3D cmd buffer support * 1.30- Add support for occlusion queries * 1.31- Add support for num Z pipes from GET_PARAM + * 1.32- fixes for rv740 setup */ #define DRIVER_MAJOR 1 -#define DRIVER_MINOR 31 +#define DRIVER_MINOR 32 #define DRIVER_PATCHLEVEL 0 enum radeon_cp_microcode_version { diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index db8e9a355a0..ef92d147d8f 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -69,9 +69,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size, if (r != -ERESTARTSYS) DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n", size, initial_domain, alignment, r); - mutex_lock(&rdev->ddev->struct_mutex); - drm_gem_object_unreference(gobj); - mutex_unlock(&rdev->ddev->struct_mutex); + drm_gem_object_unreference_unlocked(gobj); return r; } gobj->driver_private = robj; @@ -202,14 +200,10 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data, } r = drm_gem_handle_create(filp, gobj, &handle); if (r) { - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(gobj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(gobj); return r; } - mutex_lock(&dev->struct_mutex); - drm_gem_object_handle_unreference(gobj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_handle_unreference_unlocked(gobj); args->handle = handle; return 0; } @@ -236,9 +230,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain); - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(gobj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(gobj); return r; } @@ -255,9 +247,7 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data, } robj = gobj->driver_private; args->addr_ptr = radeon_bo_mmap_offset(robj); - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(gobj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(gobj); return 0; } @@ -288,9 +278,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, default: break; } - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(gobj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(gobj); return r; } @@ -311,9 +299,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, /* callback hw specific functions if any */ if (robj->rdev->asic->ioctl_wait_idle) robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj); - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(gobj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(gobj); return r; } @@ -331,9 +317,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, return -EINVAL; robj = gobj->driver_private; r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch); - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(gobj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(gobj); return r; } @@ -356,8 +340,6 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch); radeon_bo_unreserve(rbo); out: - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(gobj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(gobj); return r; } diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 
d72a71bff21..f1da370928e 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -306,11 +306,10 @@ void radeon_bo_list_unreserve(struct list_head *head) } } -int radeon_bo_list_validate(struct list_head *head, void *fence) +int radeon_bo_list_validate(struct list_head *head) { struct radeon_bo_list *lobj; struct radeon_bo *bo; - struct radeon_fence *old_fence = NULL; int r; r = radeon_bo_list_reserve(head); @@ -334,32 +333,27 @@ int radeon_bo_list_validate(struct list_head *head, void *fence) } lobj->gpu_offset = radeon_bo_gpu_offset(bo); lobj->tiling_flags = bo->tiling_flags; - if (fence) { - old_fence = (struct radeon_fence *)bo->tbo.sync_obj; - bo->tbo.sync_obj = radeon_fence_ref(fence); - bo->tbo.sync_obj_arg = NULL; - } - if (old_fence) { - radeon_fence_unref(&old_fence); - } } return 0; } -void radeon_bo_list_unvalidate(struct list_head *head, void *fence) +void radeon_bo_list_fence(struct list_head *head, void *fence) { struct radeon_bo_list *lobj; - struct radeon_fence *old_fence; - - if (fence) - list_for_each_entry(lobj, head, list) { - old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj); - if (old_fence == fence) { - lobj->bo->tbo.sync_obj = NULL; - radeon_fence_unref(&old_fence); - } + struct radeon_bo *bo; + struct radeon_fence *old_fence = NULL; + + list_for_each_entry(lobj, head, list) { + bo = lobj->bo; + spin_lock(&bo->tbo.lock); + old_fence = (struct radeon_fence *)bo->tbo.sync_obj; + bo->tbo.sync_obj = radeon_fence_ref(fence); + bo->tbo.sync_obj_arg = NULL; + spin_unlock(&bo->tbo.lock); + if (old_fence) { + radeon_fence_unref(&old_fence); } - radeon_bo_list_unreserve(head); + } } int radeon_bo_fbdev_mmap(struct radeon_bo *bo, diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index a02f18011ad..7ab43de1e24 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h @@ -156,8 +156,8 @@ extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj, struct list_head *head); extern int radeon_bo_list_reserve(struct list_head *head); extern void radeon_bo_list_unreserve(struct list_head *head); -extern int radeon_bo_list_validate(struct list_head *head, void *fence); -extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence); +extern int radeon_bo_list_validate(struct list_head *head); +extern void radeon_bo_list_fence(struct list_head *head, void *fence); extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo, struct vm_area_struct *vma); extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo, diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 4d12b2d17b4..6579eb4c1f2 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -41,68 +41,55 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib) { struct radeon_fence *fence; struct radeon_ib *nib; - unsigned long i; - int r = 0; + int r = 0, i, c; *ib = NULL; r = radeon_fence_create(rdev, &fence); if (r) { - DRM_ERROR("failed to create fence for new IB\n"); + dev_err(rdev->dev, "failed to create fence for new IB\n"); return r; } mutex_lock(&rdev->ib_pool.mutex); - i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE); - if (i < RADEON_IB_POOL_SIZE) { - set_bit(i, rdev->ib_pool.alloc_bm); - rdev->ib_pool.ibs[i].length_dw = 0; - *ib = &rdev->ib_pool.ibs[i]; - mutex_unlock(&rdev->ib_pool.mutex); - goto out; + for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < 
RADEON_IB_POOL_SIZE; c++, i++) {
+		i &= (RADEON_IB_POOL_SIZE - 1);
+		if (rdev->ib_pool.ibs[i].free) {
+			nib = &rdev->ib_pool.ibs[i];
+			break;
+		}
 	}
-	if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
-		/* we go do nothings here */
+	if (nib == NULL) {
+		/* This should never happen, it means we allocated all
+		 * IB and haven't scheduled one yet, return EBUSY to
+		 * userspace hoping that on ioctl recall we get better
+		 * luck
+		 */
+		dev_err(rdev->dev, "no free indirect buffer !\n");
 		mutex_unlock(&rdev->ib_pool.mutex);
-		DRM_ERROR("all IB allocated none scheduled.\n");
-		r = -EINVAL;
-		goto out;
+		radeon_fence_unref(&fence);
+		return -EBUSY;
 	}
-	/* get the first ib on the scheduled list */
-	nib = list_entry(rdev->ib_pool.scheduled_ibs.next,
-			 struct radeon_ib, list);
-	if (nib->fence == NULL) {
-		/* we go do nothings here */
+	rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+	nib->free = false;
+	if (nib->fence) {
 		mutex_unlock(&rdev->ib_pool.mutex);
-		DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
-		r = -EINVAL;
-		goto out;
-	}
-	mutex_unlock(&rdev->ib_pool.mutex);
-
-	r = radeon_fence_wait(nib->fence, false);
-	if (r) {
-		DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
-			  (unsigned long)nib->gpu_addr, nib->length_dw);
-		DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n");
-		goto out;
+		r = radeon_fence_wait(nib->fence, false);
+		if (r) {
+			dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
+				nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
+			mutex_lock(&rdev->ib_pool.mutex);
+			nib->free = true;
+			mutex_unlock(&rdev->ib_pool.mutex);
+			radeon_fence_unref(&fence);
+			return r;
+		}
+		mutex_lock(&rdev->ib_pool.mutex);
 	}
 	radeon_fence_unref(&nib->fence);
-
+	nib->fence = fence;
 	nib->length_dw = 0;
-
-	/* scheduled list is accessed here */
-	mutex_lock(&rdev->ib_pool.mutex);
-	list_del(&nib->list);
-	INIT_LIST_HEAD(&nib->list);
 	mutex_unlock(&rdev->ib_pool.mutex);
-
 	*ib = nib;
-out:
-	if (r) {
-		radeon_fence_unref(&fence);
-	} else {
-		(*ib)->fence = fence;
-	}
-	return r;
+	return 0;
 }
 
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
@@ -113,19 +100,10 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
 	if (tmp == NULL) {
 		return;
 	}
-	mutex_lock(&rdev->ib_pool.mutex);
-	if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
-		/* IB is scheduled & not signaled don't do anythings */
-		mutex_unlock(&rdev->ib_pool.mutex);
-		return;
-	}
-	list_del(&tmp->list);
-	INIT_LIST_HEAD(&tmp->list);
-	if (tmp->fence)
+	if (!tmp->fence->emited)
 		radeon_fence_unref(&tmp->fence);
-
-	tmp->length_dw = 0;
-	clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
+	mutex_lock(&rdev->ib_pool.mutex);
+	tmp->free = true;
 	mutex_unlock(&rdev->ib_pool.mutex);
 }
 
@@ -135,7 +113,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 
 	if (!ib->length_dw || !rdev->cp.ready) {
 		/* TODO: Nothings in the ib we should report. */
-		DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
+		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
 		return -EINVAL;
 	}
 
@@ -148,7 +126,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 	radeon_ring_ib_execute(rdev, ib);
 	radeon_fence_emit(rdev, ib->fence);
 	mutex_lock(&rdev->ib_pool.mutex);
-	list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
+	/* once scheduled IB is considered free and protected by the fence */
+	ib->free = true;
 	mutex_unlock(&rdev->ib_pool.mutex);
 	radeon_ring_unlock_commit(rdev);
 	return 0;
@@ -164,7 +143,6 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
 	if (rdev->ib_pool.robj)
 		return 0;
 	/* Allocate 1M object buffer */
-	INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
 	r = radeon_bo_create(rdev, NULL,  RADEON_IB_POOL_SIZE*64*1024,
 			     true, RADEON_GEM_DOMAIN_GTT,
 			     &rdev->ib_pool.robj);
@@ -195,9 +173,9 @@
 		rdev->ib_pool.ibs[i].ptr = ptr + offset;
 		rdev->ib_pool.ibs[i].idx = i;
 		rdev->ib_pool.ibs[i].length_dw = 0;
-		INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list);
+		rdev->ib_pool.ibs[i].free = true;
 	}
-	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
+	rdev->ib_pool.head_id = 0;
 	rdev->ib_pool.ready = true;
 	DRM_INFO("radeon: ib pool ready.\n");
 	if (radeon_debugfs_ib_init(rdev)) {
@@ -214,7 +192,6 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
 		return;
 	}
 	mutex_lock(&rdev->ib_pool.mutex);
-	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
 	if (rdev->ib_pool.robj) {
 		r = radeon_bo_reserve(rdev->ib_pool.robj, false);
 		if (likely(r == 0)) {
@@ -363,7 +340,7 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
 	if (ib == NULL) {
 		return 0;
 	}
-	seq_printf(m, "IB %04lu\n", ib->idx);
+	seq_printf(m, "IB %04u\n", ib->idx);
 	seq_printf(m, "IB fence %p\n", ib->fence);
 	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
 	for (i = 0; i < ib->length_dw; i++) {
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 5943d561fd1..03021674d09 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -549,9 +549,12 @@ static void rv770_gpu_init(struct radeon_device *rdev)
 
 	gb_tiling_config |= BANK_SWAPS(1);
 
-	backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
-							rdev->config.rv770.max_backends,
-							(0xff << rdev->config.rv770.max_backends) & 0xff);
+	if (rdev->family == CHIP_RV740)
+		backend_map = 0x28;
+	else
+		backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
+								rdev->config.rv770.max_backends,
+								(0xff << rdev->config.rv770.max_backends) & 0xff);
 	gb_tiling_config |= BACKEND_MAP(backend_map);
 
 	cc_gc_shader_pipe_config =
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 1a3e909b7bb..c7320ce4567 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1020,6 +1020,12 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
 			     struct ttm_mem_reg *mem)
 {
 	int i;
+	struct drm_mm_node *node = mem->mm_node;
+
+	if (node && placement->lpfn != 0 &&
+	    (node->start < placement->fpfn ||
+	     node->start + node->size > placement->lpfn))
+		return -1;
 
 	for (i = 0; i < placement->num_placement; i++) {
 		if ((placement->placement[i] & mem->placement &
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index e2123af7775..a759170763b 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -196,14 +196,15 @@ EXPORT_SYMBOL(ttm_tt_populate);
 
 #ifdef CONFIG_X86
 static inline int ttm_tt_set_page_caching(struct page *p,
-					  enum ttm_caching_state c_state)
+					  enum ttm_caching_state c_old,
+					  enum ttm_caching_state c_new)
 {
 	int ret = 0;
 
 	if (PageHighMem(p))
 		return 0;
 
-	if (get_page_memtype(p) != -1) {
+	if (c_old != tt_cached) {
 		/* p isn't in the default caching state, set it to
 		 * writeback first to free its current memtype.
 		 */
@@ -212,16 +213,17 @@ static inline int ttm_tt_set_page_caching(struct page *p,
 			return ret;
 	}
 
-	if (c_state == tt_wc)
+	if (c_new == tt_wc)
 		ret = set_memory_wc((unsigned long) page_address(p), 1);
-	else if (c_state == tt_uncached)
+	else if (c_new == tt_uncached)
 		ret = set_pages_uc(p, 1);
 
 	return ret;
 }
 #else /* CONFIG_X86 */
 static inline int ttm_tt_set_page_caching(struct page *p,
-					  enum ttm_caching_state c_state)
+					  enum ttm_caching_state c_old,
+					  enum ttm_caching_state c_new)
 {
 	return 0;
 }
@@ -254,7 +256,9 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
 	for (i = 0; i < ttm->num_pages; ++i) {
 		cur_page = ttm->pages[i];
 		if (likely(cur_page != NULL)) {
-			ret = ttm_tt_set_page_caching(cur_page, c_state);
+			ret = ttm_tt_set_page_caching(cur_page,
+						      ttm->caching_state,
+						      c_state);
 			if (unlikely(ret != 0))
 				goto out_err;
 		}
@@ -268,7 +272,7 @@ out_err:
 	for (j = 0; j < i; ++j) {
 		cur_page = ttm->pages[j];
 		if (likely(cur_page != NULL)) {
-			(void)ttm_tt_set_page_caching(cur_page,
+			(void)ttm_tt_set_page_caching(cur_page, c_state,
 						      ttm->caching_state);
 		}
 	}
@@ -476,7 +480,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
 	void *from_virtual;
 	void *to_virtual;
 	int i;
-	int ret;
+	int ret = -ENOMEM;
 
 	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
 		ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
@@ -495,8 +499,10 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
 
 	for (i = 0; i < ttm->num_pages; ++i) {
 		from_page = read_mapping_page(swap_space, i, NULL);
-		if (IS_ERR(from_page))
+		if (IS_ERR(from_page)) {
+			ret = PTR_ERR(from_page);
 			goto out_err;
+		}
 		to_page = __ttm_tt_get_page(ttm, i);
 		if (unlikely(to_page == NULL))
 			goto out_err;
@@ -519,7 +525,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
 	return 0;
 out_err:
 	ttm_tt_free_alloced_pages(ttm);
-	return -ENOMEM;
+	return ret;
 }
 
 int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
@@ -531,6 +537,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
 	void *from_virtual;
 	void *to_virtual;
 	int i;
+	int ret = -ENOMEM;
 
 	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
 	BUG_ON(ttm->caching_state != tt_cached);
@@ -553,7 +560,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
 						 0);
 		if (unlikely(IS_ERR(swap_storage))) {
 			printk(KERN_ERR "Failed allocating swap storage.\n");
-			return -ENOMEM;
+			return PTR_ERR(swap_storage);
 		}
 	} else
 		swap_storage = persistant_swap_storage;
@@ -565,9 +572,10 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
 		if (unlikely(from_page == NULL))
 			continue;
 		to_page = read_mapping_page(swap_space, i, NULL);
-		if (unlikely(to_page == NULL))
+		if (unlikely(IS_ERR(to_page))) {
+			ret = PTR_ERR(to_page);
 			goto out_err;
-
+		}
 		preempt_disable();
 		from_virtual = kmap_atomic(from_page, KM_USER0);
 		to_virtual = kmap_atomic(to_page, KM_USER1);
@@ -591,5 +599,5 @@ out_err:
 	if (!persistant_swap_storage)
 		fput(swap_storage);
 
-	return -ENOMEM;
+	return ret;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index a6e8f687fa6..0c9c0811f42 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -348,22 +348,19 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		 */
 		DRM_INFO("It appears like vesafb is loaded. "
-			 "Ignore above error if any. Entering stealth mode.\n");
+			 "Ignore above error if any.\n");
 		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
 		if (unlikely(ret != 0)) {
 			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
 			goto out_no_device;
 		}
-		vmw_kms_init(dev_priv);
-		vmw_overlay_init(dev_priv);
-	} else {
-		ret = vmw_request_device(dev_priv);
-		if (unlikely(ret != 0))
-			goto out_no_device;
-		vmw_kms_init(dev_priv);
-		vmw_overlay_init(dev_priv);
-		vmw_fb_init(dev_priv);
 	}
+	ret = vmw_request_device(dev_priv);
+	if (unlikely(ret != 0))
+		goto out_no_device;
+	vmw_kms_init(dev_priv);
+	vmw_overlay_init(dev_priv);
+	vmw_fb_init(dev_priv);
 
 	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
 	register_pm_notifier(&dev_priv->pm_nb);
 
@@ -406,17 +403,15 @@ static int vmw_driver_unload(struct drm_device *dev)
 
 	unregister_pm_notifier(&dev_priv->pm_nb);
 
-	if (!dev_priv->stealth) {
-		vmw_fb_close(dev_priv);
-		vmw_kms_close(dev_priv);
-		vmw_overlay_close(dev_priv);
-		vmw_release_device(dev_priv);
-		pci_release_regions(dev->pdev);
-	} else {
-		vmw_kms_close(dev_priv);
-		vmw_overlay_close(dev_priv);
+	vmw_fb_close(dev_priv);
+	vmw_kms_close(dev_priv);
+	vmw_overlay_close(dev_priv);
+	vmw_release_device(dev_priv);
+	if (dev_priv->stealth)
 		pci_release_region(dev->pdev, 2);
-	}
+	else
+		pci_release_regions(dev->pdev);
+
 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
 		drm_irq_uninstall(dev_priv->dev);
 	if (dev->devname == vmw_devname)
@@ -585,11 +580,6 @@ static int vmw_master_set(struct drm_device *dev,
 	int ret = 0;
 
 	DRM_INFO("Master set.\n");
-	if (dev_priv->stealth) {
-		ret = vmw_request_device(dev_priv);
-		if (unlikely(ret != 0))
-			return ret;
-	}
 
 	if (active) {
 		BUG_ON(active != &dev_priv->fbdev_master);
@@ -649,18 +639,11 @@ static void vmw_master_drop(struct drm_device *dev,
 
 	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
 
-	if (dev_priv->stealth) {
-		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
-		if (unlikely(ret != 0))
-			DRM_ERROR("Unable to clean VRAM on master drop.\n");
-		vmw_release_device(dev_priv);
-	}
 
 	dev_priv->active_master = &dev_priv->fbdev_master;
 	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
 	ttm_vt_unlock(&dev_priv->fbdev_master.lock);
 
-	if (!dev_priv->stealth)
-		vmw_fb_on(dev_priv);
+	vmw_fb_on(dev_priv);
 }
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index d69caf92ffe..0897359b3e4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -182,25 +182,19 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
 	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
 }
 
-static int vmw_cmd_dma(struct vmw_private *dev_priv,
-		       struct vmw_sw_context *sw_context,
-		       SVGA3dCmdHeader *header)
+static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   SVGAGuestPtr *ptr,
+				   struct vmw_dma_buffer **vmw_bo_p)
 {
-	uint32_t handle;
 	struct vmw_dma_buffer *vmw_bo = NULL;
 	struct ttm_buffer_object *bo;
-	struct vmw_surface *srf = NULL;
-	struct vmw_dma_cmd {
-		SVGA3dCmdHeader header;
-		SVGA3dCmdSurfaceDMA dma;
-	} *cmd;
+	uint32_t handle = ptr->gmrId;
 	struct vmw_relocation *reloc;
-	int ret;
 	uint32_t cur_validate_node;
 	struct ttm_validate_buffer *val_buf;
+	int ret;
 
-	cmd = container_of(header, struct vmw_dma_cmd, header);
-	handle = cmd->dma.guest.ptr.gmrId;
 	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use GMR region.\n");
@@ -209,14 +203,14 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 	bo = &vmw_bo->base;
 
 	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
-		DRM_ERROR("Max number of DMA commands per submission"
+		DRM_ERROR("Max number relocations per submission"
 			  " exceeded\n");
 		ret = -EINVAL;
 		goto out_no_reloc;
 	}
 
 	reloc = &sw_context->relocs[sw_context->cur_reloc++];
-	reloc->location = &cmd->dma.guest.ptr;
+	reloc->location = ptr;
 
 	cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
 	if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
@@ -234,7 +228,89 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
 		++sw_context->cur_val_buf;
 	}
+	*vmw_bo_p = vmw_bo;
+	return 0;
+
+out_no_reloc:
+	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_p = NULL;
+	return ret;
+}
+
+static int vmw_cmd_end_query(struct vmw_private *dev_priv,
+			     struct vmw_sw_context *sw_context,
+			     SVGA3dCmdHeader *header)
+{
+	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdEndQuery q;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_query_cmd, header);
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+				      &cmd->q.guestResult,
+				      &vmw_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	vmw_dmabuf_unreference(&vmw_bo);
+	return 0;
+}
+static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
+			      struct vmw_sw_context *sw_context,
+			      SVGA3dCmdHeader *header)
+{
+	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdWaitForQuery q;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_query_cmd, header);
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+				      &cmd->q.guestResult,
+				      &vmw_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	vmw_dmabuf_unreference(&vmw_bo);
+	return 0;
+}
+
+
+static int vmw_cmd_dma(struct vmw_private *dev_priv,
+		       struct vmw_sw_context *sw_context,
+		       SVGA3dCmdHeader *header)
+{
+	struct vmw_dma_buffer *vmw_bo = NULL;
+	struct ttm_buffer_object *bo;
+	struct vmw_surface *srf = NULL;
+	struct vmw_dma_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSurfaceDMA dma;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_dma_cmd, header);
+	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+				      &cmd->dma.guest.ptr,
+				      &vmw_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	bo = &vmw_bo->base;
 	ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
 					     cmd->dma.host.sid, &srf);
 	if (ret) {
@@ -379,8 +455,8 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
 	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
 	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_cid_check),
+	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
+	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
 	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
 		    &vmw_cmd_blt_surf_screen_check)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 4f4f6432be8..a93367041cd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -559,6 +559,9 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
 	info->pixmap.scan_align = 1;
 #endif
 
+	info->aperture_base = vmw_priv->vram_start;
+	info->aperture_size = vmw_priv->vram_size;
+
 	/*
 	 * Dirty & Deferred IO
 	 */
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 24b56dc5459..2f6cf69ecb3 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -961,7 +961,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
 		remaining -= 7;
 		pr_devel("client 0x%p called 'target'\n", priv);
 		/* if target is default */
-		if (!strncmp(kbuf, "default", 7))
+		if (!strncmp(curr_pos, "default", 7))
 			pdev = pci_dev_get(vga_default_device());
 		else {
 			if (!vga_pci_str_to_vars(curr_pos, remaining,
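
A note on the radeon_ring.c rework above: the scheduled-IB list and allocation bitmap are replaced by a plain ring. Each IB carries a free flag, head_id records where the last allocation stopped, and the scan index wraps with a mask, which is why RADEON_IB_POOL_SIZE must be a power of two. Below is a minimal userspace sketch of that allocation strategy; the names are illustrative, not the driver's structures, and the real code additionally waits on the slot's fence before reusing a still-busy slot.

/* ring_alloc.c - sketch of the radeon-style IB ring allocator.
 * POOL_SIZE must be a power of two so wrap-around is a mask, not a modulo. */
#include <stdbool.h>

#define POOL_SIZE 16

struct ib_slot {
	bool free;			/* false while the GPU may still read it */
};

static struct ib_slot pool[POOL_SIZE];
static unsigned int head_id;		/* next candidate after the last grant */

static void ib_pool_init(void)
{
	unsigned int i;

	for (i = 0; i < POOL_SIZE; i++)
		pool[i].free = true;	/* mirrors radeon_ib_pool_init() */
	head_id = 0;
}

/* Scan at most POOL_SIZE slots starting at head_id; -1 mirrors -EBUSY. */
static int ib_alloc(void)
{
	unsigned int i, c;

	for (i = head_id, c = 0; c < POOL_SIZE; c++, i++) {
		i &= POOL_SIZE - 1;	/* wrap around the ring */
		if (pool[i].free) {
			pool[i].free = false;
			head_id = (i + 1) & (POOL_SIZE - 1);
			return (int)i;
		}
	}
	return -1;			/* all slots busy */
}

static void ib_release(unsigned int idx)
{
	pool[idx].free = true;		/* the driver does this once the fence protects the slot */
}

Starting each scan at head_id makes allocation round-robin: a just-freed slot is the last candidate to be handed out again, which gives the GPU the longest possible time to retire work that still references it.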
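
The ttm_bo.c hunk teaches ttm_bo_mem_compat() to reject a buffer whose existing allocation falls outside the placement's page window, so the buffer is moved rather than wrongly treated as already compatible. The containment test in isolation, with simplified stand-ins for the TTM structures (lpfn == 0 meaning no upper bound follows the patch):

/* Simplified stand-ins for drm_mm_node and ttm_placement. */
struct mm_node {
	unsigned long start;		/* first page of the allocation */
	unsigned long size;		/* length in pages */
};

struct page_window {
	unsigned long fpfn;		/* first allowed page frame */
	unsigned long lpfn;		/* end of the allowed range, 0 = unlimited */
};

/* Returns 1 when the node sits entirely inside the window. */
static int node_in_window(const struct mm_node *node,
			  const struct page_window *win)
{
	if (win->lpfn == 0)
		return 1;
	return node->start >= win->fpfn &&
	       node->start + node->size <= win->lpfn;
}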
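
In ttm_tt.c, ttm_tt_set_page_caching() now receives the old caching state together with the new one instead of calling get_page_memtype(), and treats any page not in the default cached state as holding a memtype that must be released via writeback before the new attribute is applied. This also makes the rollback path in ttm_tt_set_caching() natural: on error it simply calls the helper with the two states swapped. A skeleton of the transition logic, where the set_* helpers are stubs for set_pages_wb()/set_memory_wc()/set_pages_uc():

enum caching_state { tt_cached, tt_wc, tt_uncached };

/* Stubs standing in for the x86 PAT helpers. */
static int set_wb(void) { return 0; }
static int set_wc(void) { return 0; }
static int set_uc(void) { return 0; }

static int set_page_caching(enum caching_state c_old,
			    enum caching_state c_new)
{
	int ret = 0;

	if (c_old != tt_cached) {
		/* Leave the current memtype via writeback first. */
		ret = set_wb();
		if (ret)
			return ret;
	}
	if (c_new == tt_wc)
		ret = set_wc();
	else if (c_new == tt_uncached)
		ret = set_uc();
	return ret;
}

/* Rollback after a partial failure is the same call with the states
 * swapped: set_page_caching(c_new, c_old). */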
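
The swap fixes in ttm_tt.c all apply one error-propagation pattern: ret starts as -ENOMEM to cover the plain NULL-page failures, is overwritten with the callee's specific errno via PTR_ERR() wherever one exists, and the shared out_err label returns ret instead of a hard-coded -ENOMEM; likewise the shmem setup failure now forwards PTR_ERR(swap_storage). The pattern in miniature, with a hypothetical fetch_page() standing in for read_mapping_page():

#include <errno.h>
#include <stddef.h>

static char dummy;

/* Hypothetical stand-in: returns NULL on failure and stores a specific
 * errno in *err, or 0 when the failure carries no detail. */
static void *fetch_page(int i, int *err)
{
	if (i == 3) {
		*err = -EIO;		/* a failure with a specific code */
		return NULL;
	}
	*err = 0;
	return &dummy;
}

static int copy_pages(int npages)
{
	int ret = -ENOMEM;		/* default for failures without a code */
	int i, err;

	for (i = 0; i < npages; i++) {
		void *page = fetch_page(i, &err);

		if (page == NULL) {
			if (err)
				ret = err;	/* keep the specific error */
			goto out_err;
		}
		/* ... copy the page ... */
	}
	return 0;
out_err:
	/* undo any partial work, then report the best error we have */
	return ret;
}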
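
Finally, the vmwgfx_execbuf.c change is an extract-and-reuse refactor: the GMR lookup, relocation recording, and validation bookkeeping that vmw_cmd_dma() did inline move into vmw_translate_guest_ptr(), and SVGA_3D_CMD_END_QUERY / SVGA_3D_CMD_WAIT_FOR_QUERY switch from a bare context-ID check to verifiers that also translate the command's guestResult pointer. The dispatch shape behind the VMW_CMD_DEF table, reduced to a sketch with illustrative names:

struct cmd_header { unsigned int id; };

typedef int (*cmd_func)(const struct cmd_header *hdr);

static int check_cid(const struct cmd_header *hdr)
{
	(void)hdr;			/* context-ID check only */
	return 0;
}

static int check_query(const struct cmd_header *hdr)
{
	/* First the cid check, then translate the result pointer. */
	int ret = check_cid(hdr);

	if (ret)
		return ret;
	/* ... look up, relocate, and validate the guest pointer here ... */
	return 0;
}

enum { CMD_BEGIN_QUERY, CMD_END_QUERY, CMD_WAIT_FOR_QUERY, CMD_MAX };

static const cmd_func cmd_funcs[CMD_MAX] = {
	[CMD_BEGIN_QUERY]    = check_cid,
	[CMD_END_QUERY]      = check_query,	/* was check_cid before the fix */
	[CMD_WAIT_FOR_QUERY] = check_query,
};

static int verify(const struct cmd_header *hdr)
{
	if (hdr->id >= CMD_MAX || !cmd_funcs[hdr->id])
		return -1;
	return cmd_funcs[hdr->id](hdr);
}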