From eaf99c749d43ae74ac7ffece5512f3c73f01dfd2 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 6 Aug 2014 10:08:32 +0200 Subject: drm: Perform cmdline mode parsing during connector initialisation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit i915.ko has a custom fbdev initialisation routine that aims to preserve the current mode set by the BIOS, unless overruled by the user. The user's wishes are determined by what, if any, mode is specified on the command line (via the video= parameter). However, that command line mode is first parsed by drm_fb_helper_initial_config() which is called after i915.ko's custom initial_config() as a fallback method. So in order for us to honour it, we need to move the cmdline parser earlier. If we perform the connector cmdline parsing as soon as we initialise the connector, that cmdline mode and forced status is then available even if the fbdev helper is not compiled in or never called. We also then expose the cmdline user mode in the connector mode lists. v2: Rebase after connector->name upheaval. v3: Adapt mga200 to look for the cmdline mode in the new place. Nicely simplifies things while at that. v4: Fix checkpatch. v5: Select FB_CMDLINE to adapt to the changed fbdev patch. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=73154 Signed-off-by: Chris Wilson (v2) Cc: Jesse Barnes Cc: Ville Syrjälä Cc: Daniel Vetter Reviewed-by: Jesse Barnes (v2) Cc: dri-devel@lists.freedesktop.org Cc: Julia Lemire Cc: Dave Airlie Signed-off-by: Daniel Vetter --- include/drm/drm_crtc.h | 1 + include/drm/drm_fb_helper.h | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index f1105d0da05..c530b4920a0 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -548,6 +548,7 @@ struct drm_connector { void *helper_private; /* forced on connector */ + struct drm_cmdline_mode cmdline_mode; enum drm_connector_force force; bool override_edid; uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER]; diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index bfd329d613c..f4ad254e348 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h @@ -77,7 +77,6 @@ struct drm_fb_helper_funcs { struct drm_fb_helper_connector { struct drm_connector *connector; - struct drm_cmdline_mode cmdline_mode; }; struct drm_fb_helper { -- cgit v1.2.3-70-g09d2 From 4ed0ce3d0bccd74416ba6beb33a8a79d1617e97b Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 6 Aug 2014 14:49:53 +0300 Subject: drm: Disable vblank interrupt immediately when drm_vblank_offdelay<0 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make drm_vblank_put() disable the vblank interrupt immediately when the refcount drops to zero and drm_vblank_offdelay<0. v2: Preserve the current drm_vblank_offdelay==0 'never disable' behaviur Reviewed-by: Matt Roper Signed-off-by: Ville Syrjälä Signed-off-by: Daniel Vetter --- Documentation/DocBook/drm.tmpl | 1 + drivers/gpu/drm/drm_drv.c | 4 ++-- drivers/gpu/drm/drm_irq.c | 11 +++++++---- include/drm/drmP.h | 2 +- 4 files changed, 11 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl index 1d3756d3176..55923d00bd5 100644 --- a/Documentation/DocBook/drm.tmpl +++ b/Documentation/DocBook/drm.tmpl @@ -3386,6 +3386,7 @@ void (*disable_vblank) (struct drm_device *dev, int crtc); by scheduling a timer. 
The delay is accessible through the vblankoffdelay module parameter or the drm_vblank_offdelay global variable and expressed in milliseconds. Its default value is 5000 ms. + Zero means never disable, and a negative value means disable immediately. When a vertical blanking interrupt occurs drivers only need to call the diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 92bc6b1d964..db03e16ca81 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -39,7 +39,7 @@ unsigned int drm_debug = 0; /* 1 to enable debug output */ EXPORT_SYMBOL(drm_debug); -unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */ +int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */ unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */ @@ -53,7 +53,7 @@ MODULE_AUTHOR(CORE_AUTHOR); MODULE_DESCRIPTION(CORE_DESC); MODULE_LICENSE("GPL and additional rights"); MODULE_PARM_DESC(debug, "Enable debug output"); -MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]"); +MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)"); MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]"); MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps"); diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index b2428cb0c64..99145c4d536 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -993,10 +993,13 @@ void drm_vblank_put(struct drm_device *dev, int crtc) BUG_ON(atomic_read(&vblank->refcount) == 0); /* Last user schedules interrupt disable */ - if (atomic_dec_and_test(&vblank->refcount) && - (drm_vblank_offdelay > 0)) - mod_timer(&vblank->disable_timer, - jiffies + ((drm_vblank_offdelay * HZ)/1000)); + if (atomic_dec_and_test(&vblank->refcount)) { + if (drm_vblank_offdelay < 0) + vblank_disable_fn((unsigned long)vblank); + else if (drm_vblank_offdelay > 0) + mod_timer(&vblank->disable_timer, + jiffies + ((drm_vblank_offdelay * HZ)/1000)); + } } EXPORT_SYMBOL(drm_vblank_put); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index a5764638208..24b32d453c6 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1345,7 +1345,7 @@ extern void drm_put_dev(struct drm_device *dev); extern void drm_unplug_dev(struct drm_device *dev); extern unsigned int drm_debug; -extern unsigned int drm_vblank_offdelay; +extern int drm_vblank_offdelay; extern unsigned int drm_timestamp_precision; extern unsigned int drm_timestamp_monotonic; -- cgit v1.2.3-70-g09d2 From 00185e667009dda907887a4f84fbd02c6e651a49 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 6 Aug 2014 14:49:54 +0300 Subject: drm: Add dev->vblank_disable_immediate flag MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a flag to drm_device which will cause the vblank code to bypass the disable timer and always disable the vblank interrupt immediately when the last reference is dropped. 
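For illustration only (not part of this series): a minimal sketch of how a driver could opt in during load. The foo_load() function is hypothetical; drm_vblank_init(), drm_vblank_on()/drm_vblank_off() and the new vblank_disable_immediate field are the real interfaces involved.

static int foo_load(struct drm_device *dev, unsigned long flags)
{
        int ret;

        ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
        if (ret)
                return ret;

        /*
         * Only safe because this (hypothetical) hardware has a reliable
         * vblank counter and the driver calls drm_vblank_on()/off()
         * around modesets, so no cooked-counter state can be lost.
         */
        dev->vblank_disable_immediate = true;

        return 0;
}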
v2: Add some notes about the flag to the kernel doc Reviewed-by: Matt Roper Reviewed-by: Daniel Vetter Signed-off-by: Ville Syrjälä Signed-off-by: Daniel Vetter --- Documentation/DocBook/drm.tmpl | 6 ++++++ drivers/gpu/drm/drm_irq.c | 2 +- include/drm/drmP.h | 10 ++++++++++ 3 files changed, 17 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl index 55923d00bd5..583edbffff1 100644 --- a/Documentation/DocBook/drm.tmpl +++ b/Documentation/DocBook/drm.tmpl @@ -3387,6 +3387,12 @@ void (*disable_vblank) (struct drm_device *dev, int crtc); module parameter or the drm_vblank_offdelay global variable and expressed in milliseconds. Its default value is 5000 ms. Zero means never disable, and a negative value means disable immediately. + Drivers may override the behaviour by setting the + drm_device + vblank_disable_immediate flag, which when set + causes vblank interrupts to be disabled immediately regardless of the + drm_vblank_offdelay value. The flag should only be set if there's a + properly working hardware vblank counter present. When a vertical blanking interrupt occurs drivers only need to call the diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 99145c4d536..8dbcc3f892d 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -994,7 +994,7 @@ void drm_vblank_put(struct drm_device *dev, int crtc) /* Last user schedules interrupt disable */ if (atomic_dec_and_test(&vblank->refcount)) { - if (drm_vblank_offdelay < 0) + if (dev->vblank_disable_immediate || drm_vblank_offdelay < 0) vblank_disable_fn((unsigned long)vblank); else if (drm_vblank_offdelay > 0) mod_timer(&vblank->disable_timer, diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 24b32d453c6..17a5c10474b 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1074,6 +1074,16 @@ struct drm_device { */ bool vblank_disable_allowed; + /* + * If true, vblank interrupt will be disabled immediately when the + * refcount drops to zero, as opposed to via the vblank disable + * timer. + * This can be set to true it the hardware has a working vblank + * counter and the driver uses drm_vblank_on() and drm_vblank_off() + * appropriately. + */ + bool vblank_disable_immediate; + /* array of size num_crtcs */ struct drm_vblank_crtc *vblank; -- cgit v1.2.3-70-g09d2 From 020178a1bcadf20b9d057988984f374c905d542e Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 22 May 2014 19:36:03 +0300 Subject: drm: Add drm_crtc_vblank_waitqueue() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a small static inline helper to grab the vblank wait queue based on the drm_crtc. This is useful for drivers to do internal vblank waits using wait_event() & co. 
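For illustration (not part of this patch), a driver-internal wait built on the helper might look like the sketch below; foo_wait_for_pending_flip() and foo_flip_pending() are invented, the condition being assumed to be updated from the vblank interrupt handler, while drm_crtc_vblank_get()/put(), wait_event_timeout() and the new drm_crtc_vblank_waitqueue() are real.

static void foo_wait_for_pending_flip(struct drm_crtc *crtc)
{
        long timeout;

        if (WARN_ON(drm_crtc_vblank_get(crtc)))
                return;

        /* Condition is cleared from the vblank interrupt handler. */
        timeout = wait_event_timeout(*drm_crtc_vblank_waitqueue(crtc),
                                     !foo_flip_pending(crtc),
                                     msecs_to_jiffies(50));
        if (timeout == 0)
                DRM_ERROR("timed out waiting for pending flip\n");

        drm_crtc_vblank_put(crtc);
}

Holding a vblank reference for the duration of the wait keeps the interrupt enabled so the waitqueue is actually woken.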
v2: Pimp commit message (Daniel) Add kernel doc (Daniel) Suggested-by: Daniel Vetter Signed-off-by: Ville Syrjälä Signed-off-by: Daniel Vetter --- Documentation/DocBook/drm.tmpl | 1 + include/drm/drmP.h | 11 +++++++++++ 2 files changed, 12 insertions(+) (limited to 'include') diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl index 1d3756d3176..97275948937 100644 --- a/Documentation/DocBook/drm.tmpl +++ b/Documentation/DocBook/drm.tmpl @@ -3400,6 +3400,7 @@ void (*disable_vblank) (struct drm_device *dev, int crtc); Vertical Blanking and Interrupt Handling Functions Reference !Edrivers/gpu/drm/drm_irq.c +!Iinclude/drm/drmP.h drm_crtc_vblank_waitqueue diff --git a/include/drm/drmP.h b/include/drm/drmP.h index d3d9be6b83e..bb44c1ee557 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1344,6 +1344,17 @@ extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, extern void drm_calc_timestamping_constants(struct drm_crtc *crtc, const struct drm_display_mode *mode); +/** + * drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC + * @crtc: which CRTC's vblank waitqueue to retrieve + * + * This function returns a pointer to the vblank waitqueue for the CRTC. + * Drivers can use this to implement vblank waits using wait_event() & co. + */ +static inline wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc) +{ + return &crtc->dev->vblank[drm_crtc_index(crtc)].queue; +} /* Modesetting support */ extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc); -- cgit v1.2.3-70-g09d2 From 2a297cce2e775812e9d6ca84c3ab92cee5c38e25 Mon Sep 17 00:00:00 2001 From: Sonika Jindal Date: Tue, 5 Aug 2014 11:26:54 +0530 Subject: drm: Add rotation_property to mode_config MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Sonika Jindal Reviewed-by: Ville Syrjälä Acked-by: Dave Airlie Signed-off-by: Daniel Vetter --- include/drm/drm_crtc.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index f1105d0da05..62f73bdbcc4 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -821,6 +821,7 @@ struct drm_mode_config { struct drm_property *dpms_property; struct drm_property *path_property; struct drm_property *plane_type_property; + struct drm_property *rotation_property; /* DVI-I properties */ struct drm_property *dvi_i_subconnector_property; -- cgit v1.2.3-70-g09d2 From 10f637bf292ba501f9b9e9df6dfe21d8fa521fbd Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 29 Jul 2014 13:47:11 +0200 Subject: drm: Add drm_plane/connector_index In the atomic state we'll have an array of states for crtcs, planes and connectors and need to be able to at them by their index. We already have a drm_crtc_index function so add the missing ones for planes and connectors. If it later on turns out that the list walking is too expensive we can add the index to the relevant modeset objects. Rob Clark doesn't like the loops too much, but we can always add an obj->idx parameter later on. And for now reiterating is actually safer since nowadays we have hotpluggable connectors (thanks to DP MST). v2: Fix embarrassing copypasta fail in kerneldoc and header declarations, spotted by Matt Roper. 
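As a sketch of the intended use (the foo_* structures are hypothetical stand-ins for the future atomic state; only drm_plane_index() itself comes from this patch):

struct foo_global_state {
        /* one slot per plane, indexed by drm_plane_index() */
        struct foo_plane_state **plane_states;
};

static struct foo_plane_state *
foo_get_plane_state(struct foo_global_state *state, struct drm_plane *plane)
{
        return state->plane_states[drm_plane_index(plane)];
}

drm_connector_index() is meant to be used the same way for per-connector state.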
Cc: Matt Roper Reviewed-by: Matt Roper Signed-off-by: Daniel Vetter --- drivers/gpu/drm/drm_crtc.c | 46 ++++++++++++++++++++++++++++++++++++++++++++++ include/drm/drm_crtc.h | 2 ++ 2 files changed, 48 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 66d3bfb8d26..f3ef461deeb 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -1021,6 +1021,29 @@ void drm_connector_cleanup(struct drm_connector *connector) } EXPORT_SYMBOL(drm_connector_cleanup); +/** + * drm_connector_index - find the index of a registered connector + * @connector: connector to find index for + * + * Given a registered connector, return the index of that connector within a DRM + * device's list of connectors. + */ +unsigned int drm_connector_index(struct drm_connector *connector) +{ + unsigned int index = 0; + struct drm_connector *tmp; + + list_for_each_entry(tmp, &connector->dev->mode_config.connector_list, head) { + if (tmp == connector) + return index; + + index++; + } + + BUG(); +} +EXPORT_SYMBOL(drm_connector_index); + /** * drm_connector_register - register a connector * @connector: the connector to register @@ -1325,6 +1348,29 @@ void drm_plane_cleanup(struct drm_plane *plane) } EXPORT_SYMBOL(drm_plane_cleanup); +/** + * drm_plane_index - find the index of a registered plane + * @plane: plane to find index for + * + * Given a registered plane, return the index of that CRTC within a DRM + * device's list of planes. + */ +unsigned int drm_plane_index(struct drm_plane *plane) +{ + unsigned int index = 0; + struct drm_plane *tmp; + + list_for_each_entry(tmp, &plane->dev->mode_config.plane_list, head) { + if (tmp == plane) + return index; + + index++; + } + + BUG(); +} +EXPORT_SYMBOL(drm_plane_index); + /** * drm_plane_force_disable - Forcibly disable a plane * @plane: plane to disable diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index c530b4920a0..9f18e7022ab 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -904,6 +904,7 @@ int drm_connector_register(struct drm_connector *connector); void drm_connector_unregister(struct drm_connector *connector); extern void drm_connector_cleanup(struct drm_connector *connector); +extern unsigned int drm_connector_index(struct drm_connector *connector); /* helper to unplug all connectors from sysfs for device */ extern void drm_connector_unplug_all(struct drm_device *dev); @@ -943,6 +944,7 @@ extern int drm_plane_init(struct drm_device *dev, const uint32_t *formats, uint32_t format_count, bool is_primary); extern void drm_plane_cleanup(struct drm_plane *plane); +extern unsigned int drm_plane_index(struct drm_plane *plane); extern void drm_plane_force_disable(struct drm_plane *plane); extern int drm_crtc_check_viewport(const struct drm_crtc *crtc, int x, int y, -- cgit v1.2.3-70-g09d2 From a6a8bb848d5ca40bc0eb708ddeb23df2b0eca1fb Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 25 Jul 2014 17:47:18 +0200 Subject: drm: Move modeset_lock_all helpers to drm_modeset_lock.[hc] Somehow we've forgotten about this little bit of OCD. 
Reviewed-by: Dave Airlie Reviewed-by: Matt Roper Signed-off-by: Daniel Vetter --- drivers/gpu/drm/drm_crtc.c | 95 -------------------------------------- drivers/gpu/drm/drm_modeset_lock.c | 95 ++++++++++++++++++++++++++++++++++++++ include/drm/drm_crtc.h | 4 -- include/drm/drm_modeset_lock.h | 5 ++ 4 files changed, 100 insertions(+), 99 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index f3ef461deeb..caaa01f3b35 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -45,101 +45,6 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, struct drm_mode_fb_cmd2 *r, struct drm_file *file_priv); -/** - * drm_modeset_lock_all - take all modeset locks - * @dev: drm device - * - * This function takes all modeset locks, suitable where a more fine-grained - * scheme isn't (yet) implemented. Locks must be dropped with - * drm_modeset_unlock_all. - */ -void drm_modeset_lock_all(struct drm_device *dev) -{ - struct drm_mode_config *config = &dev->mode_config; - struct drm_modeset_acquire_ctx *ctx; - int ret; - - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); - if (WARN_ON(!ctx)) - return; - - mutex_lock(&config->mutex); - - drm_modeset_acquire_init(ctx, 0); - -retry: - ret = drm_modeset_lock(&config->connection_mutex, ctx); - if (ret) - goto fail; - ret = drm_modeset_lock_all_crtcs(dev, ctx); - if (ret) - goto fail; - - WARN_ON(config->acquire_ctx); - - /* now we hold the locks, so now that it is safe, stash the - * ctx for drm_modeset_unlock_all(): - */ - config->acquire_ctx = ctx; - - drm_warn_on_modeset_not_all_locked(dev); - - return; - -fail: - if (ret == -EDEADLK) { - drm_modeset_backoff(ctx); - goto retry; - } -} -EXPORT_SYMBOL(drm_modeset_lock_all); - -/** - * drm_modeset_unlock_all - drop all modeset locks - * @dev: device - * - * This function drop all modeset locks taken by drm_modeset_lock_all. - */ -void drm_modeset_unlock_all(struct drm_device *dev) -{ - struct drm_mode_config *config = &dev->mode_config; - struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx; - - if (WARN_ON(!ctx)) - return; - - config->acquire_ctx = NULL; - drm_modeset_drop_locks(ctx); - drm_modeset_acquire_fini(ctx); - - kfree(ctx); - - mutex_unlock(&dev->mode_config.mutex); -} -EXPORT_SYMBOL(drm_modeset_unlock_all); - -/** - * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked - * @dev: device - * - * Useful as a debug assert. - */ -void drm_warn_on_modeset_not_all_locked(struct drm_device *dev) -{ - struct drm_crtc *crtc; - - /* Locking is currently fubar in the panic handler. */ - if (oops_in_progress) - return; - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) - WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); - - WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); - WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); -} -EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked); - /* Avoid boilerplate. I'm tired of typing. */ #define DRM_ENUM_NAME_FN(fnname, list) \ const char *fnname(int val) \ diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c index 0dc57d5ecd1..73e6534fd0a 100644 --- a/drivers/gpu/drm/drm_modeset_lock.c +++ b/drivers/gpu/drm/drm_modeset_lock.c @@ -56,6 +56,101 @@ */ +/** + * drm_modeset_lock_all - take all modeset locks + * @dev: drm device + * + * This function takes all modeset locks, suitable where a more fine-grained + * scheme isn't (yet) implemented. Locks must be dropped with + * drm_modeset_unlock_all. 
+ */ +void drm_modeset_lock_all(struct drm_device *dev) +{ + struct drm_mode_config *config = &dev->mode_config; + struct drm_modeset_acquire_ctx *ctx; + int ret; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (WARN_ON(!ctx)) + return; + + mutex_lock(&config->mutex); + + drm_modeset_acquire_init(ctx, 0); + +retry: + ret = drm_modeset_lock(&config->connection_mutex, ctx); + if (ret) + goto fail; + ret = drm_modeset_lock_all_crtcs(dev, ctx); + if (ret) + goto fail; + + WARN_ON(config->acquire_ctx); + + /* now we hold the locks, so now that it is safe, stash the + * ctx for drm_modeset_unlock_all(): + */ + config->acquire_ctx = ctx; + + drm_warn_on_modeset_not_all_locked(dev); + + return; + +fail: + if (ret == -EDEADLK) { + drm_modeset_backoff(ctx); + goto retry; + } +} +EXPORT_SYMBOL(drm_modeset_lock_all); + +/** + * drm_modeset_unlock_all - drop all modeset locks + * @dev: device + * + * This function drop all modeset locks taken by drm_modeset_lock_all. + */ +void drm_modeset_unlock_all(struct drm_device *dev) +{ + struct drm_mode_config *config = &dev->mode_config; + struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx; + + if (WARN_ON(!ctx)) + return; + + config->acquire_ctx = NULL; + drm_modeset_drop_locks(ctx); + drm_modeset_acquire_fini(ctx); + + kfree(ctx); + + mutex_unlock(&dev->mode_config.mutex); +} +EXPORT_SYMBOL(drm_modeset_unlock_all); + +/** + * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked + * @dev: device + * + * Useful as a debug assert. + */ +void drm_warn_on_modeset_not_all_locked(struct drm_device *dev) +{ + struct drm_crtc *crtc; + + /* Locking is currently fubar in the panic handler. */ + if (oops_in_progress) + return; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) + WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); + + WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); + WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); +} +EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked); + /** * drm_modeset_acquire_init - initialize acquire context * @ctx: the acquire context diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 9f18e7022ab..a11d73422e7 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -218,10 +218,6 @@ struct drm_property { struct list_head enum_blob_list; }; -void drm_modeset_lock_all(struct drm_device *dev); -void drm_modeset_unlock_all(struct drm_device *dev); -void drm_warn_on_modeset_not_all_locked(struct drm_device *dev); - struct drm_crtc; struct drm_connector; struct drm_encoder; diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h index 402aa7a6a05..cf61e857bc0 100644 --- a/include/drm/drm_modeset_lock.h +++ b/include/drm/drm_modeset_lock.h @@ -120,6 +120,11 @@ int drm_modeset_lock_interruptible(struct drm_modeset_lock *lock, void drm_modeset_unlock(struct drm_modeset_lock *lock); struct drm_device; + +void drm_modeset_lock_all(struct drm_device *dev); +void drm_modeset_unlock_all(struct drm_device *dev); +void drm_warn_on_modeset_not_all_locked(struct drm_device *dev); + int drm_modeset_lock_all_crtcs(struct drm_device *dev, struct drm_modeset_acquire_ctx *ctx); -- cgit v1.2.3-70-g09d2 From d059f652e73c35678d28d4cd09ab2cec89696af9 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 25 Jul 2014 18:07:40 +0200 Subject: drm: Handle legacy per-crtc locking with full acquire ctx So drivers using the atomic interfaces expect that they can acquire additional locks internal to the driver as-needed. 
Examples would be locks to protect shared state like shared display PLLs. Unfortunately the legacy ioctls assume that all locking is fully done by the drm core. Now for those paths which grab all locks we already have to keep around an acquire context in dev->mode_config. Helper functions that implement legacy interfaces in terms of atomic support can therefore grab this acquire contexts and reuse it. The only interfaces left are the cursor and pageflip ioctls. So add functions to grab the crtc lock these need using an acquire context and preserve it for atomic drivers to reuse. v2: - Fixup comments&kerneldoc. - Drop the WARNING from modeset_lock_all_crtcs since that can be used in legacy paths with crtc locking. v3: Fix a type on the kerneldoc Dave spotted. Cc: Dave Airlie Reviewed-by: Dave Airlie Reviewed-by: Matt Roper Signed-off-by: Daniel Vetter --- drivers/gpu/drm/drm_crtc.c | 8 ++-- drivers/gpu/drm/drm_modeset_lock.c | 84 ++++++++++++++++++++++++++++++++++++++ include/drm/drm_crtc.h | 6 +++ include/drm/drm_modeset_lock.h | 5 +++ 4 files changed, 99 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index caaa01f3b35..ab121b6d980 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -2801,7 +2801,7 @@ static int drm_mode_cursor_common(struct drm_device *dev, if (crtc->cursor) return drm_mode_cursor_universal(crtc, req, file_priv); - drm_modeset_lock(&crtc->mutex, NULL); + drm_modeset_lock_crtc(crtc); if (req->flags & DRM_MODE_CURSOR_BO) { if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) { ret = -ENXIO; @@ -2825,7 +2825,7 @@ static int drm_mode_cursor_common(struct drm_device *dev, } } out: - drm_modeset_unlock(&crtc->mutex); + drm_modeset_unlock_crtc(crtc); return ret; @@ -4561,7 +4561,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, if (!crtc) return -ENOENT; - drm_modeset_lock(&crtc->mutex, NULL); + drm_modeset_lock_crtc(crtc); if (crtc->primary->fb == NULL) { /* The framebuffer is currently unbound, presumably * due to a hotplug event, that userspace has not @@ -4645,7 +4645,7 @@ out: drm_framebuffer_unreference(fb); if (old_fb) drm_framebuffer_unreference(old_fb); - drm_modeset_unlock(&crtc->mutex); + drm_modeset_unlock_crtc(crtc); return ret; } diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c index 73e6534fd0a..4753c8bd5ab 100644 --- a/drivers/gpu/drm/drm_modeset_lock.c +++ b/drivers/gpu/drm/drm_modeset_lock.c @@ -129,6 +129,90 @@ void drm_modeset_unlock_all(struct drm_device *dev) } EXPORT_SYMBOL(drm_modeset_unlock_all); +/** + * drm_modeset_lock_crtc - lock crtc with hidden acquire ctx + * @crtc: drm crtc + * + * This function locks the given crtc using a hidden acquire context. This is + * necessary so that drivers internally using the atomic interfaces can grab + * further locks with the lock acquire context. 
+ */ +void drm_modeset_lock_crtc(struct drm_crtc *crtc) +{ + struct drm_modeset_acquire_ctx *ctx; + int ret; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (WARN_ON(!ctx)) + return; + + drm_modeset_acquire_init(ctx, 0); + +retry: + ret = drm_modeset_lock(&crtc->mutex, ctx); + if (ret) + goto fail; + + WARN_ON(crtc->acquire_ctx); + + /* now we hold the locks, so now that it is safe, stash the + * ctx for drm_modeset_unlock_crtc(): + */ + crtc->acquire_ctx = ctx; + + return; + +fail: + if (ret == -EDEADLK) { + drm_modeset_backoff(ctx); + goto retry; + } +} +EXPORT_SYMBOL(drm_modeset_lock_crtc); + +/** + * drm_modeset_legacy_acquire_ctx - find acquire ctx for legacy ioctls + * crtc: drm crtc + * + * Legacy ioctl operations like cursor updates or page flips only have per-crtc + * locking, and store the acquire ctx in the corresponding crtc. All other + * legacy operations take all locks and use a global acquire context. This + * function grabs the right one. + */ +struct drm_modeset_acquire_ctx * +drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc) +{ + if (crtc->acquire_ctx) + return crtc->acquire_ctx; + + WARN_ON(!crtc->dev->mode_config.acquire_ctx); + + return crtc->dev->mode_config.acquire_ctx; +} +EXPORT_SYMBOL(drm_modeset_legacy_acquire_ctx); + +/** + * drm_modeset_unlock_crtc - drop crtc lock + * @crtc: drm crtc + * + * This drops the crtc lock acquire with drm_modeset_lock_crtc() and all other + * locks acquired through the hidden context. + */ +void drm_modeset_unlock_crtc(struct drm_crtc *crtc) +{ + struct drm_modeset_acquire_ctx *ctx = crtc->acquire_ctx; + + if (WARN_ON(!ctx)) + return; + + crtc->acquire_ctx = NULL; + drm_modeset_drop_locks(ctx); + drm_modeset_acquire_fini(ctx); + + kfree(ctx); +} +EXPORT_SYMBOL(drm_modeset_unlock_crtc); + /** * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked * @dev: device diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index a11d73422e7..508817bae53 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -371,6 +371,12 @@ struct drm_crtc { void *helper_private; struct drm_object_properties properties; + + /* + * For legacy crtc ioctls so that atomic drivers can get at the locking + * acquire context. + */ + struct drm_modeset_acquire_ctx *acquire_ctx; }; diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h index cf61e857bc0..d38e1508f11 100644 --- a/include/drm/drm_modeset_lock.h +++ b/include/drm/drm_modeset_lock.h @@ -120,10 +120,15 @@ int drm_modeset_lock_interruptible(struct drm_modeset_lock *lock, void drm_modeset_unlock(struct drm_modeset_lock *lock); struct drm_device; +struct drm_crtc; void drm_modeset_lock_all(struct drm_device *dev); void drm_modeset_unlock_all(struct drm_device *dev); +void drm_modeset_lock_crtc(struct drm_crtc *crtc); +void drm_modeset_unlock_crtc(struct drm_crtc *crtc); void drm_warn_on_modeset_not_all_locked(struct drm_device *dev); +struct drm_modeset_acquire_ctx * +drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc); int drm_modeset_lock_all_crtcs(struct drm_device *dev, struct drm_modeset_acquire_ctx *ctx); -- cgit v1.2.3-70-g09d2 From 3d30a59bfcb7c96d4aacdb053c2ccc49394b2311 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 27 Jul 2014 13:42:42 +0200 Subject: drm: Move ->old_fb from crtc to plane Atomic implemenations for legacy ioctls must be able to drop locks. Which doesn't cause havoc since we only do that while constructing the new state, so no driver or hardware state change has happened. 
The only troubling bit is the fb refcounting the core does - if someone else has snuck in then it might potentially unref an outdated framebuffer. To fix that move the old_fb temporary storage into struct drm_plane for all ioctls, so that the atomic helpers can update it. v2: Fix up the error case handling as suggested by Matt Roper and just grab locks uncoditionally - there's no point in optimizing the locking for when userspace gets it wrong. Cc: Matt Roper Cc: Dave Airlie Reviewed-by: Matt Roper Signed-off-by: Daniel Vetter --- drivers/gpu/drm/drm_crtc.c | 46 ++++++++++++++++++++++++---------------------- include/drm/drm_crtc.h | 8 ++++---- 2 files changed, 28 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index ab121b6d980..cacb460a714 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -1287,19 +1287,21 @@ EXPORT_SYMBOL(drm_plane_index); */ void drm_plane_force_disable(struct drm_plane *plane) { - struct drm_framebuffer *old_fb = plane->fb; int ret; - if (!old_fb) + if (!plane->fb) return; + plane->old_fb = plane->fb; ret = plane->funcs->disable_plane(plane); if (ret) { DRM_ERROR("failed to disable plane with busy fb\n"); + plane->old_fb = NULL; return; } /* disconnect the plane from the fb and crtc: */ - __drm_framebuffer_unreference(old_fb); + __drm_framebuffer_unreference(plane->old_fb); + plane->old_fb = NULL; plane->fb = NULL; plane->crtc = NULL; } @@ -2275,23 +2277,21 @@ static int setplane_internal(struct drm_plane *plane, uint32_t src_w, uint32_t src_h) { struct drm_device *dev = plane->dev; - struct drm_framebuffer *old_fb = NULL; int ret = 0; unsigned int fb_width, fb_height; int i; + drm_modeset_lock_all(dev); /* No fb means shut it down */ if (!fb) { - drm_modeset_lock_all(dev); - old_fb = plane->fb; + plane->old_fb = plane->fb; ret = plane->funcs->disable_plane(plane); if (!ret) { plane->crtc = NULL; plane->fb = NULL; } else { - old_fb = NULL; + plane->old_fb = NULL; } - drm_modeset_unlock_all(dev); goto out; } @@ -2331,8 +2331,7 @@ static int setplane_internal(struct drm_plane *plane, goto out; } - drm_modeset_lock_all(dev); - old_fb = plane->fb; + plane->old_fb = plane->fb; ret = plane->funcs->update_plane(plane, crtc, fb, crtc_x, crtc_y, crtc_w, crtc_h, src_x, src_y, src_w, src_h); @@ -2341,15 +2340,16 @@ static int setplane_internal(struct drm_plane *plane, plane->fb = fb; fb = NULL; } else { - old_fb = NULL; + plane->old_fb = NULL; } - drm_modeset_unlock_all(dev); out: if (fb) drm_framebuffer_unreference(fb); - if (old_fb) - drm_framebuffer_unreference(old_fb); + if (plane->old_fb) + drm_framebuffer_unreference(plane->old_fb); + plane->old_fb = NULL; + drm_modeset_unlock_all(dev); return ret; @@ -2456,7 +2456,7 @@ int drm_mode_set_config_internal(struct drm_mode_set *set) * crtcs. Atomic modeset will have saner semantics ... 
*/ list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) - tmp->old_fb = tmp->primary->fb; + tmp->primary->old_fb = tmp->primary->fb; fb = set->fb; @@ -2469,8 +2469,9 @@ int drm_mode_set_config_internal(struct drm_mode_set *set) list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) { if (tmp->primary->fb) drm_framebuffer_reference(tmp->primary->fb); - if (tmp->old_fb) - drm_framebuffer_unreference(tmp->old_fb); + if (tmp->primary->old_fb) + drm_framebuffer_unreference(tmp->primary->old_fb); + tmp->primary->old_fb = NULL; } return ret; @@ -4545,7 +4546,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, { struct drm_mode_crtc_page_flip *page_flip = data; struct drm_crtc *crtc; - struct drm_framebuffer *fb = NULL, *old_fb = NULL; + struct drm_framebuffer *fb = NULL; struct drm_pending_vblank_event *e = NULL; unsigned long flags; int ret = -EINVAL; @@ -4617,7 +4618,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, (void (*) (struct drm_pending_event *)) kfree; } - old_fb = crtc->primary->fb; + crtc->primary->old_fb = crtc->primary->fb; ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags); if (ret) { if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) { @@ -4627,7 +4628,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, kfree(e); } /* Keep the old fb, don't unref it. */ - old_fb = NULL; + crtc->primary->old_fb = NULL; } else { /* * Warn if the driver hasn't properly updated the crtc->fb @@ -4643,8 +4644,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, out: if (fb) drm_framebuffer_unreference(fb); - if (old_fb) - drm_framebuffer_unreference(old_fb); + if (crtc->primary->old_fb) + drm_framebuffer_unreference(crtc->primary->old_fb); + crtc->primary->old_fb = NULL; drm_modeset_unlock_crtc(crtc); return ret; diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 508817bae53..279565aa0c3 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -341,10 +341,6 @@ struct drm_crtc { int cursor_x; int cursor_y; - /* Temporary tracking of the old fb while a modeset is ongoing. Used - * by drm_mode_set_config_internal to implement correct refcounting. */ - struct drm_framebuffer *old_fb; - bool enabled; /* Requested mode from modesetting. */ @@ -623,6 +619,10 @@ struct drm_plane { struct drm_crtc *crtc; struct drm_framebuffer *fb; + /* Temporary tracking of the old fb while a modeset is ongoing. Used + * by drm_mode_set_config_internal to implement correct refcounting. */ + struct drm_framebuffer *old_fb; + const struct drm_plane_funcs *funcs; struct drm_object_properties properties; -- cgit v1.2.3-70-g09d2 From cb597bb3a2fbfc871cc1c703fb330d247bd21394 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 27 Jul 2014 19:09:33 +0200 Subject: drm: trylock modest locking for fbdev panics In the fbdev code we want to do trylocks only to avoid deadlocks and other ugly issues. Thus far we've only grabbed the overall modeset lock, but that already failed to exclude a pile of potential concurrent operations. With proper atomic support this will be worse. So add a trylock mode to the modeset locking code which attempts all locks only with trylocks, if possible. We need to track this in the locking functions themselves and can't restrict this to drivers since driver-private w/w mutexes must be treated the same way. There's still the issue that other driver private locks aren't handled here at all, but well can't have everything. With this we will at least not regress, even once atomic allows lots of concurrent kms activity. 
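For illustration, a panic-path helper outside the fbdev code could follow the same pattern; foo_restore_outputs() is hypothetical, __drm_modeset_lock_all() and drm_modeset_unlock_all() are the interfaces added/used below.

static int foo_force_restore(struct drm_device *dev)
{
        int ret;

        /* Trylock mode: never sleep, bail out rather than deadlock. */
        if (__drm_modeset_lock_all(dev, true) != 0)
                return -EBUSY;

        ret = foo_restore_outputs(dev);

        drm_modeset_unlock_all(dev);
        return ret;
}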
Aside: We should move the acquire context to stack-based allocation in the callers to get rid of that awful WARN_ON(kmalloc_failed) control flow which just blows up when memory is short. But that's material for separate patches. v2: - Fix logic inversion fumble in the fb helper. - Add proper kerneldoc. Reviewed-by: Matt Roper Signed-off-by: Daniel Vetter --- drivers/gpu/drm/drm_fb_helper.c | 10 +++---- drivers/gpu/drm/drm_modeset_lock.c | 56 ++++++++++++++++++++++++++++++-------- include/drm/drm_modeset_lock.h | 6 ++++ 3 files changed, 55 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 3a6b6635e3f..7b7b9565188 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -365,11 +365,11 @@ static bool drm_fb_helper_force_kernel_mode(void) if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) continue; - /* NOTE: we use lockless flag below to avoid grabbing other - * modeset locks. So just trylock the underlying mutex - * directly: + /* + * NOTE: Use trylock mode to avoid deadlocks and sleeping in + * panic context. */ - if (!mutex_trylock(&dev->mode_config.mutex)) { + if (__drm_modeset_lock_all(dev, true) != 0) { error = true; continue; } @@ -378,7 +378,7 @@ static bool drm_fb_helper_force_kernel_mode(void) if (ret) error = true; - mutex_unlock(&dev->mode_config.mutex); + drm_modeset_unlock_all(dev); } return error; } diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c index 4753c8bd5ab..5280b64a023 100644 --- a/drivers/gpu/drm/drm_modeset_lock.c +++ b/drivers/gpu/drm/drm_modeset_lock.c @@ -57,26 +57,37 @@ /** - * drm_modeset_lock_all - take all modeset locks - * @dev: drm device + * __drm_modeset_lock_all - internal helper to grab all modeset locks + * @dev: DRM device + * @trylock: trylock mode for atomic contexts * - * This function takes all modeset locks, suitable where a more fine-grained - * scheme isn't (yet) implemented. Locks must be dropped with - * drm_modeset_unlock_all. + * This is a special version of drm_modeset_lock_all() which can also be used in + * atomic contexts. Then @trylock must be set to true. + * + * Returns: + * 0 on success or negative error code on failure. */ -void drm_modeset_lock_all(struct drm_device *dev) +int __drm_modeset_lock_all(struct drm_device *dev, + bool trylock) { struct drm_mode_config *config = &dev->mode_config; struct drm_modeset_acquire_ctx *ctx; int ret; - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); - if (WARN_ON(!ctx)) - return; + ctx = kzalloc(sizeof(*ctx), + trylock ? GFP_ATOMIC : GFP_KERNEL); + if (!ctx) + return -ENOMEM; - mutex_lock(&config->mutex); + if (trylock) { + if (!mutex_trylock(&config->mutex)) + return -EBUSY; + } else { + mutex_lock(&config->mutex); + } drm_modeset_acquire_init(ctx, 0); + ctx->trylock_only = trylock; retry: ret = drm_modeset_lock(&config->connection_mutex, ctx); @@ -95,13 +106,29 @@ retry: drm_warn_on_modeset_not_all_locked(dev); - return; + return 0; fail: if (ret == -EDEADLK) { drm_modeset_backoff(ctx); goto retry; } + + return ret; +} +EXPORT_SYMBOL(__drm_modeset_lock_all); + +/** + * drm_modeset_lock_all - take all modeset locks + * @dev: drm device + * + * This function takes all modeset locks, suitable where a more fine-grained + * scheme isn't (yet) implemented. Locks must be dropped with + * drm_modeset_unlock_all. 
+ */ +void drm_modeset_lock_all(struct drm_device *dev) +{ + WARN_ON(__drm_modeset_lock_all(dev, false) != 0); } EXPORT_SYMBOL(drm_modeset_lock_all); @@ -287,7 +314,12 @@ static inline int modeset_lock(struct drm_modeset_lock *lock, WARN_ON(ctx->contended); - if (interruptible && slow) { + if (ctx->trylock_only) { + if (!ww_mutex_trylock(&lock->mutex)) + return -EBUSY; + else + return 0; + } else if (interruptible && slow) { ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx); } else if (interruptible) { ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx); diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h index d38e1508f11..a3f736d2438 100644 --- a/include/drm/drm_modeset_lock.h +++ b/include/drm/drm_modeset_lock.h @@ -53,6 +53,11 @@ struct drm_modeset_acquire_ctx { * list of held locks (drm_modeset_lock) */ struct list_head locked; + + /** + * Trylock mode, use only for panic handlers! + */ + bool trylock_only; }; /** @@ -123,6 +128,7 @@ struct drm_device; struct drm_crtc; void drm_modeset_lock_all(struct drm_device *dev); +int __drm_modeset_lock_all(struct drm_device *dev, bool trylock); void drm_modeset_unlock_all(struct drm_device *dev); void drm_modeset_lock_crtc(struct drm_crtc *crtc); void drm_modeset_unlock_crtc(struct drm_crtc *crtc); -- cgit v1.2.3-70-g09d2 From 2a0d7cfd9482ca4c10a4d8794791760a6a7ce40c Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 29 Jul 2014 15:32:37 +0200 Subject: drm: Add a plane->reset hook In general having this can't hurt, and the atomic helpers will need it to be able to reset the state objects properly. The overall idea is to reset in the order pixels flow, so planes -> crtcs -> encoders -> connectors. v2: Squash in fixup from Ville to correctly deference struct drm_plane instead of drm_crtc when walking the plane list. Fixes an oops in driver init and resume. Reviewed-by: Matt Roper Signed-off-by: Daniel Vetter --- drivers/gpu/drm/drm_crtc.c | 5 +++++ include/drm/drm_crtc.h | 1 + 2 files changed, 6 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index cacb460a714..285e62a134b 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -4663,9 +4663,14 @@ out: void drm_mode_config_reset(struct drm_device *dev) { struct drm_crtc *crtc; + struct drm_plane *plane; struct drm_encoder *encoder; struct drm_connector *connector; + list_for_each_entry(plane, &dev->mode_config.plane_list, head) + if (plane->funcs->reset) + plane->funcs->reset(plane); + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) if (crtc->funcs->reset) crtc->funcs->reset(crtc); diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 279565aa0c3..2c1f58d6957 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -581,6 +581,7 @@ struct drm_plane_funcs { uint32_t src_w, uint32_t src_h); int (*disable_plane)(struct drm_plane *plane); void (*destroy)(struct drm_plane *plane); + void (*reset)(struct drm_plane *plane); int (*set_property)(struct drm_plane *plane, struct drm_property *property, uint64_t val); -- cgit v1.2.3-70-g09d2 From e8450f51a4b39cfe0878b4aee339820b2bfff240 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 25 Jul 2014 23:34:03 +0200 Subject: drm/irq: Implement a generic vblank_wait function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As usual in both a crtc index and a struct drm_crtc * version. 
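A hypothetical use of the struct drm_crtc * flavour (foo_write_plane_regs() is invented; the crtc must be running with vblank interrupts available, as the kerneldoc below spells out):

static void foo_update_plane(struct drm_crtc *crtc)
{
        foo_write_plane_regs(crtc);

        /* The new register values latch at the next vblank; wait it out. */
        drm_crtc_wait_one_vblank(crtc);
}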
The function assumes that no one drivers their display below 10Hz, and it will complain if the vblank wait takes longer than that. v2: Also check dev->max_vblank_counter since some drivers register a fake get_vblank_counter function. v3: Use drm_vblank_count instead of calling the low-level ->get_vblank_counter callback. That way we'll get the sw-cooked counter for platforms without proper vblank support and so can ditch the max_vblank_counter check again. v4: Review from Michel Dänzer: - Restore lost notes about v3: - Spelling in kerneldoc. - Inline wait_event condition. - s/vblank_wait/wait_one_vblank/ Cc: Michel Dänzer Cc: Ville Syrjälä Reviewed-by: Michel Dänzer Reviewed-by: Matt Roper Signed-off-by: Daniel Vetter --- drivers/gpu/drm/drm_irq.c | 44 ++++++++++++++++++++++++++++++++++++++++++++ include/drm/drmP.h | 2 ++ 2 files changed, 46 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 6f16a104d6d..e64d24951fc 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -1009,6 +1009,50 @@ void drm_crtc_vblank_put(struct drm_crtc *crtc) } EXPORT_SYMBOL(drm_crtc_vblank_put); +/** + * drm_wait_one_vblank - wait for one vblank + * @dev: DRM device + * @crtc: crtc index + * + * This waits for one vblank to pass on @crtc, using the irq driver interfaces. + * It is a failure to call this when the vblank irq for @crtc is disabled, e.g. + * due to lack of driver support or because the crtc is off. + */ +void drm_wait_one_vblank(struct drm_device *dev, int crtc) +{ + int ret; + u32 last; + + ret = drm_vblank_get(dev, crtc); + if (WARN_ON(ret)) + return; + + last = drm_vblank_count(dev, crtc); + + ret = wait_event_timeout(dev->vblank[crtc].queue, + last != drm_vblank_count(dev, crtc), + msecs_to_jiffies(100)); + + WARN_ON(ret == 0); + + drm_vblank_put(dev, crtc); +} +EXPORT_SYMBOL(drm_wait_one_vblank); + +/** + * drm_crtc_wait_one_vblank - wait for one vblank + * @crtc: DRM crtc + * + * This waits for one vblank to pass on @crtc, using the irq driver interfaces. + * It is a failure to call this when the vblank irq for @crtc is disabled, e.g. + * due to lack of driver support or because the crtc is off. + */ +void drm_crtc_wait_one_vblank(struct drm_crtc *crtc) +{ + drm_wait_one_vblank(crtc->dev, drm_crtc_index(crtc)); +} +EXPORT_SYMBOL(drm_crtc_wait_one_vblank); + /** * drm_vblank_off - disable vblank events on a CRTC * @dev: DRM device diff --git a/include/drm/drmP.h b/include/drm/drmP.h index d3d9be6b83e..c2209178981 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1327,6 +1327,8 @@ extern int drm_vblank_get(struct drm_device *dev, int crtc); extern void drm_vblank_put(struct drm_device *dev, int crtc); extern int drm_crtc_vblank_get(struct drm_crtc *crtc); extern void drm_crtc_vblank_put(struct drm_crtc *crtc); +extern void drm_wait_one_vblank(struct drm_device *dev, int crtc); +extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc); extern void drm_vblank_off(struct drm_device *dev, int crtc); extern void drm_vblank_on(struct drm_device *dev, int crtc); extern void drm_crtc_vblank_off(struct drm_crtc *crtc); -- cgit v1.2.3-70-g09d2 From f72a113a71ab08c4df8a5f80ab2f8a140feb81f6 Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 7 Aug 2014 09:36:00 +0200 Subject: drm/radeon: add userptr support v8 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch adds an IOCTL for turning a pointer supplied by userspace into a buffer object. 
It imposes several restrictions upon the memory being mapped: 1. It must be page aligned (both start/end addresses, i.e ptr and size). 2. It must be normal system memory, not a pointer into another map of IO space (e.g. it must not be a GTT mmapping of another object). 3. The BO is mapped into GTT, so the maximum amount of memory mapped at all times is still the GTT limit. 4. The BO is only mapped readonly for now, so no write support. 5. List of backing pages is only acquired once, so they represent a snapshot of the first use. Exporting and sharing as well as mapping of buffer objects created by this function is forbidden and results in an -EPERM. v2: squash all previous changes into first public version v3: fix tabs, map readonly, don't use MM callback any more v4: set TTM_PAGE_FLAG_SG so that TTM never messes with the pages, pin/unpin pages on bind/unbind instead of populate/unpopulate v5: rebased on 3.17-wip, IOCTL renamed to userptr, reject any unknown flags, better handle READONLY flag, improve permission check v6: fix ptr cast warning, use set_page_dirty/mark_page_accessed on unpin v7: add warning about it's availability in the API definition v8: drop access_ok check, fix VM mapping bits Signed-off-by: Christian König Reviewed-by: Alex Deucher (v4) Reviewed-by: Jérôme Glisse (v4) Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon.h | 6 ++ drivers/gpu/drm/radeon/radeon_cs.c | 25 +++++- drivers/gpu/drm/radeon/radeon_drv.c | 5 +- drivers/gpu/drm/radeon/radeon_gem.c | 68 ++++++++++++++++ drivers/gpu/drm/radeon/radeon_kms.c | 1 + drivers/gpu/drm/radeon/radeon_object.c | 3 + drivers/gpu/drm/radeon/radeon_prime.c | 10 +++ drivers/gpu/drm/radeon/radeon_ttm.c | 145 +++++++++++++++++++++++++++++++++ drivers/gpu/drm/radeon/radeon_vm.c | 3 + include/uapi/drm/radeon_drm.h | 16 ++++ 10 files changed, 279 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 9e1732eb402..6f38a23a581 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -2138,6 +2138,8 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); int radeon_gem_create_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); +int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); int radeon_gem_pin_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data, @@ -2871,6 +2873,10 @@ extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enabl extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain); extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo); +extern int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, + uint32_t flags); +extern bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm); +extern bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm); extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base); extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon); diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index ee712c199b2..1321491cf49 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -78,7 +78,8 
@@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) struct radeon_cs_chunk *chunk; struct radeon_cs_buckets buckets; unsigned i, j; - bool duplicate; + bool duplicate, need_mmap_lock = false; + int r; if (p->chunk_relocs_idx == -1) { return 0; @@ -164,6 +165,19 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) p->relocs[i].allowed_domains = domain; } + if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) { + uint32_t domain = p->relocs[i].prefered_domains; + if (!(domain & RADEON_GEM_DOMAIN_GTT)) { + DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is " + "allowed for userptr BOs\n"); + return -EINVAL; + } + need_mmap_lock = true; + domain = RADEON_GEM_DOMAIN_GTT; + p->relocs[i].prefered_domains = domain; + p->relocs[i].allowed_domains = domain; + } + p->relocs[i].tv.bo = &p->relocs[i].robj->tbo; p->relocs[i].handle = r->handle; @@ -176,8 +190,15 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) if (p->cs_flags & RADEON_CS_USE_VM) p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm, &p->validated); + if (need_mmap_lock) + down_read(¤t->mm->mmap_sem); + + r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring); - return radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring); + if (need_mmap_lock) + up_read(¤t->mm->mmap_sem); + + return r; } static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority) diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index a773830c6c4..5b18af92652 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -114,6 +114,9 @@ int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv); void radeon_gem_object_close(struct drm_gem_object *obj, struct drm_file *file_priv); +struct dma_buf *radeon_gem_prime_export(struct drm_device *dev, + struct drm_gem_object *gobj, + int flags); extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int flags, int *vpos, int *hpos, ktime_t *stime, @@ -568,7 +571,7 @@ static struct drm_driver kms_driver = { .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_export = drm_gem_prime_export, + .gem_prime_export = radeon_gem_prime_export, .gem_prime_import = drm_gem_prime_import, .gem_prime_pin = radeon_gem_prime_pin, .gem_prime_unpin = radeon_gem_prime_unpin, diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index bfd7e1b0ff3..993ab223b50 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -272,6 +272,65 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data, return 0; } +int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp) +{ + struct radeon_device *rdev = dev->dev_private; + struct drm_radeon_gem_userptr *args = data; + struct drm_gem_object *gobj; + struct radeon_bo *bo; + uint32_t handle; + int r; + + if (offset_in_page(args->addr | args->size)) + return -EINVAL; + + /* we only support read only mappings for now */ + if (!(args->flags & RADEON_GEM_USERPTR_READONLY)) + return -EACCES; + + /* reject unknown flag values */ + if (args->flags & ~RADEON_GEM_USERPTR_READONLY) + return -EINVAL; + + /* readonly pages not tested on older hardware */ + if (rdev->family < CHIP_R600) + return -EINVAL; + + down_read(&rdev->exclusive_lock); + + /* create a gem object to contain this object in */ + r = radeon_gem_object_create(rdev, args->size, 0, + 
RADEON_GEM_DOMAIN_CPU, 0, + false, &gobj); + if (r) + goto handle_lockup; + + bo = gem_to_radeon_bo(gobj); + r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags); + if (r) + goto release_object; + + r = drm_gem_handle_create(filp, gobj, &handle); + /* drop reference from allocate - handle holds it now */ + drm_gem_object_unreference_unlocked(gobj); + if (r) + goto handle_lockup; + + args->handle = handle; + up_read(&rdev->exclusive_lock); + return 0; + +release_object: + drm_gem_object_unreference_unlocked(gobj); + +handle_lockup: + up_read(&rdev->exclusive_lock); + r = radeon_gem_handle_lockup(rdev, r); + + return r; +} + int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { @@ -315,6 +374,10 @@ int radeon_mode_dumb_mmap(struct drm_file *filp, return -ENOENT; } robj = gem_to_radeon_bo(gobj); + if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) { + drm_gem_object_unreference_unlocked(gobj); + return -EPERM; + } *offset_p = radeon_bo_mmap_offset(robj); drm_gem_object_unreference_unlocked(gobj); return 0; @@ -532,6 +595,11 @@ int radeon_gem_op_ioctl(struct drm_device *dev, void *data, return -ENOENT; } robj = gem_to_radeon_bo(gobj); + + r = -EPERM; + if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) + goto out; + r = radeon_bo_reserve(robj, false); if (unlikely(r)) goto out; diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index eb7164d0798..8309b11e674 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -885,5 +885,6 @@ const struct drm_ioctl_desc radeon_ioctls_kms[] = { DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_USERPTR, radeon_gem_userptr_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), }; int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms); diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 480c87d8edc..c73c1e32058 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -264,6 +264,9 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset, { int r, i; + if (radeon_ttm_tt_has_userptr(bo->tbo.ttm)) + return -EPERM; + if (bo->pin_count) { bo->pin_count++; if (gpu_addr) diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c index f7e48d329db..bb18bc74b7d 100644 --- a/drivers/gpu/drm/radeon/radeon_prime.c +++ b/drivers/gpu/drm/radeon/radeon_prime.c @@ -103,3 +103,13 @@ void radeon_gem_prime_unpin(struct drm_gem_object *obj) radeon_bo_unpin(bo); radeon_bo_unreserve(bo); } + +struct dma_buf *radeon_gem_prime_export(struct drm_device *dev, + struct drm_gem_object *gobj, + int flags) +{ + struct radeon_bo *bo = gem_to_radeon_bo(gobj); + if (radeon_ttm_tt_has_userptr(bo->tbo.ttm)) + return ERR_PTR(-EPERM); + return drm_gem_prime_export(dev, gobj, flags); +} diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 72afe82a95c..b20933fa35c 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -39,6 +39,8 @@ #include #include #include +#include +#include #include #include "radeon_reg.h" #include "radeon.h" @@ -515,8 +517,92 @@ struct radeon_ttm_tt { struct ttm_dma_tt ttm; 
struct radeon_device *rdev; u64 offset; + + uint64_t userptr; + struct mm_struct *usermm; + uint32_t userflags; }; +/* prepare the sg table with the user pages */ +static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm) +{ + struct radeon_device *rdev = radeon_get_rdev(ttm->bdev); + struct radeon_ttm_tt *gtt = (void *)ttm; + unsigned pinned = 0, nents; + int r; + + int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY); + enum dma_data_direction direction = write ? + DMA_BIDIRECTIONAL : DMA_TO_DEVICE; + + if (current->mm != gtt->usermm) + return -EPERM; + + do { + unsigned num_pages = ttm->num_pages - pinned; + uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE; + struct page **pages = ttm->pages + pinned; + + r = get_user_pages(current, current->mm, userptr, num_pages, + write, 0, pages, NULL); + if (r < 0) + goto release_pages; + + pinned += r; + + } while (pinned < ttm->num_pages); + + r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0, + ttm->num_pages << PAGE_SHIFT, + GFP_KERNEL); + if (r) + goto release_sg; + + r = -ENOMEM; + nents = dma_map_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction); + if (nents != ttm->sg->nents) + goto release_sg; + + drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, + gtt->ttm.dma_address, ttm->num_pages); + + return 0; + +release_sg: + kfree(ttm->sg); + +release_pages: + release_pages(ttm->pages, pinned, 0); + return r; +} + +static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm) +{ + struct radeon_device *rdev = radeon_get_rdev(ttm->bdev); + struct radeon_ttm_tt *gtt = (void *)ttm; + struct scatterlist *sg; + int i; + + int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY); + enum dma_data_direction direction = write ? + DMA_BIDIRECTIONAL : DMA_TO_DEVICE; + + /* free the sg table and pages again */ + dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction); + + for_each_sg(ttm->sg->sgl, sg, ttm->sg->nents, i) { + struct page *page = sg_page(sg); + + if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY)) + set_page_dirty(page); + + mark_page_accessed(page); + page_cache_release(page); + } + + sg_free_table(ttm->sg); +} + static int radeon_ttm_backend_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) { @@ -525,6 +611,11 @@ static int radeon_ttm_backend_bind(struct ttm_tt *ttm, RADEON_GART_PAGE_WRITE; int r; + if (gtt->userptr) { + radeon_ttm_tt_pin_userptr(ttm); + flags &= ~RADEON_GART_PAGE_WRITE; + } + gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT); if (!ttm->num_pages) { WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", @@ -547,6 +638,10 @@ static int radeon_ttm_backend_unbind(struct ttm_tt *ttm) struct radeon_ttm_tt *gtt = (void *)ttm; radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages); + + if (gtt->userptr) + radeon_ttm_tt_unpin_userptr(ttm); + return 0; } @@ -603,6 +698,16 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm) if (ttm->state != tt_unpopulated) return 0; + if (gtt->userptr) { + ttm->sg = kcalloc(1, sizeof(struct sg_table), GFP_KERNEL); + if (!ttm->sg) + return -ENOMEM; + + ttm->page_flags |= TTM_PAGE_FLAG_SG; + ttm->state = tt_unbound; + return 0; + } + if (slave && ttm->sg) { drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, gtt->ttm.dma_address, ttm->num_pages); @@ -652,6 +757,12 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm) unsigned i; bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); + if (gtt->userptr) { + kfree(ttm->sg); + ttm->page_flags &= ~TTM_PAGE_FLAG_SG; + return; + } + if (slave) return; @@ -680,6 +791,40 @@ static 
void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm) ttm_pool_unpopulate(ttm); } +int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, + uint32_t flags) +{ + struct radeon_ttm_tt *gtt = (void *)ttm; + + if (gtt == NULL) + return -EINVAL; + + gtt->userptr = addr; + gtt->usermm = current->mm; + gtt->userflags = flags; + return 0; +} + +bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm) +{ + struct radeon_ttm_tt *gtt = (void *)ttm; + + if (gtt == NULL) + return false; + + return !!gtt->userptr; +} + +bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm) +{ + struct radeon_ttm_tt *gtt = (void *)ttm; + + if (gtt == NULL) + return false; + + return !!(gtt->userflags & RADEON_GEM_USERPTR_READONLY); +} + static struct ttm_bo_driver radeon_bo_driver = { .ttm_tt_create = &radeon_ttm_tt_create, .ttm_tt_populate = &radeon_ttm_tt_populate, diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index ccae4d9dc3d..0e107c5650b 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -888,6 +888,9 @@ int radeon_vm_bo_update(struct radeon_device *rdev, bo_va->flags &= ~RADEON_VM_PAGE_VALID; bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM; bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED; + if (bo_va->bo && radeon_ttm_tt_is_readonly(bo_va->bo->tbo.ttm)) + bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE; + if (mem) { addr = mem->start << PAGE_SHIFT; if (mem->mem_type != TTM_PL_SYSTEM) { diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h index 509b2d7a41b..3a9f2093037 100644 --- a/include/uapi/drm/radeon_drm.h +++ b/include/uapi/drm/radeon_drm.h @@ -511,6 +511,7 @@ typedef struct { #define DRM_RADEON_GEM_BUSY 0x2a #define DRM_RADEON_GEM_VA 0x2b #define DRM_RADEON_GEM_OP 0x2c +#define DRM_RADEON_GEM_USERPTR 0x2d #define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t) #define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START) @@ -554,6 +555,7 @@ typedef struct { #define DRM_IOCTL_RADEON_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy) #define DRM_IOCTL_RADEON_GEM_VA DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_VA, struct drm_radeon_gem_va) #define DRM_IOCTL_RADEON_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_OP, struct drm_radeon_gem_op) +#define DRM_IOCTL_RADEON_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_USERPTR, struct drm_radeon_gem_userptr) typedef struct drm_radeon_init { enum { @@ -808,6 +810,20 @@ struct drm_radeon_gem_create { uint32_t flags; }; +/* + * This is not a reliable API and you should expect it to fail for any + * number of reasons and have fallback path that do not use userptr to + * perform any operation. + */ +#define RADEON_GEM_USERPTR_READONLY (1 << 0) + +struct drm_radeon_gem_userptr { + uint64_t addr; + uint64_t size; + uint32_t flags; + uint32_t handle; +}; + #define RADEON_TILING_MACRO 0x1 #define RADEON_TILING_MICRO 0x2 #define RADEON_TILING_SWAP_16BIT 0x4 -- cgit v1.2.3-70-g09d2 From ddd00e33e17a62c5f44377ab42e7562ccfae7bd1 Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 7 Aug 2014 09:36:01 +0200 Subject: drm/radeon: add userptr flag to limit it to anonymous memory v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Avoid problems with writeback by limiting userptr to anonymous memory. 
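For readers who want to see the uapi in action, here is a minimal userspace sketch of driving the new ioctl with the anonymous-memory restriction. It is not taken from the patch set: the render-node path, buffer size and error handling are illustrative assumptions; only struct drm_radeon_gem_userptr, the flag names and DRM_IOCTL_RADEON_GEM_USERPTR come from the uapi additions above.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/radeon_drm.h>        /* assumes installed kernel uapi headers */

int main(void)
{
	/* Illustrative device node; the actual minor depends on the system. */
	int fd = open("/dev/dri/renderD128", O_RDWR);
	size_t size = 1 << 20;

	/* Anonymous, page-aligned memory: the only kind ANONONLY will accept. */
	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	struct drm_radeon_gem_userptr args;
	memset(&args, 0, sizeof(args));
	args.addr  = (uintptr_t)ptr;
	args.size  = size;
	args.flags = RADEON_GEM_USERPTR_ANONONLY;

	if (fd < 0 || ptr == MAP_FAILED ||
	    ioctl(fd, DRM_IOCTL_RADEON_GEM_USERPTR, &args) != 0) {
		/* As the uapi comment warns: expect failures and keep a
		 * fallback path that does not rely on userptr. */
		perror("radeon userptr");
		return 1;
	}

	printf("GEM handle %u now wraps %zu bytes of user memory\n",
	       args.handle, size);
	return 0;
}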
v2: add commit and code comments Signed-off-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_gem.c | 3 ++- drivers/gpu/drm/radeon/radeon_ttm.c | 10 ++++++++++ include/uapi/drm/radeon_drm.h | 1 + 3 files changed, 13 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 993ab223b50..032736b429b 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -290,7 +290,8 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data, return -EACCES; /* reject unknown flag values */ - if (args->flags & ~RADEON_GEM_USERPTR_READONLY) + if (args->flags & ~(RADEON_GEM_USERPTR_READONLY | + RADEON_GEM_USERPTR_ANONONLY)) return -EINVAL; /* readonly pages not tested on older hardware */ diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index b20933fa35c..12e37b1ddc4 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -538,6 +538,16 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm) if (current->mm != gtt->usermm) return -EPERM; + if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) { + /* check that we only pin down anonymous memory + to prevent problems with writeback */ + unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE; + struct vm_area_struct *vma; + vma = find_vma(gtt->usermm, gtt->userptr); + if (!vma || vma->vm_file || vma->vm_end < end) + return -EPERM; + } + do { unsigned num_pages = ttm->num_pages - pinned; uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE; diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h index 3a9f2093037..9720e1a3684 100644 --- a/include/uapi/drm/radeon_drm.h +++ b/include/uapi/drm/radeon_drm.h @@ -816,6 +816,7 @@ struct drm_radeon_gem_create { * perform any operation. */ #define RADEON_GEM_USERPTR_READONLY (1 << 0) +#define RADEON_GEM_USERPTR_ANONONLY (1 << 1) struct drm_radeon_gem_userptr { uint64_t addr; -- cgit v1.2.3-70-g09d2 From 2a84a4476d6e13de72472f6ca4338aed0a8269b8 Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 7 Aug 2014 09:36:02 +0200 Subject: drm/radeon: add userptr flag to directly validate the BO to GTT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This way we test userptr availability at BO creation time instead of first use. 
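The benefit is easiest to see from the caller's side. A hedged helper sketch follows; the function name and fallback policy are invented for illustration, while the flag names and ioctl come from the patches in this series.

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/radeon_drm.h>

/* Create a userptr BO and ask the kernel to bind it to GTT right away,
 * so an unusable mapping fails here instead of at first command submission.
 * Returns 0 and stores the GEM handle, or a negative errno on failure. */
static int radeon_userptr_probe(int fd, void *ptr, uint64_t size,
				uint32_t *handle)
{
	struct drm_radeon_gem_userptr args;

	memset(&args, 0, sizeof(args));
	args.addr  = (uintptr_t)ptr;
	args.size  = size;
	args.flags = RADEON_GEM_USERPTR_ANONONLY |
		     RADEON_GEM_USERPTR_VALIDATE;

	if (ioctl(fd, DRM_IOCTL_RADEON_GEM_USERPTR, &args) != 0)
		return -errno;	/* caller falls back to a staging copy */

	*handle = args.handle;
	return 0;
}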
Signed-off-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_gem.c | 18 +++++++++++++++++- include/uapi/drm/radeon_drm.h | 1 + 2 files changed, 18 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 032736b429b..450656027ab 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -291,7 +291,7 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data, /* reject unknown flag values */ if (args->flags & ~(RADEON_GEM_USERPTR_READONLY | - RADEON_GEM_USERPTR_ANONONLY)) + RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE)) return -EINVAL; /* readonly pages not tested on older hardware */ @@ -312,6 +312,22 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data, if (r) goto release_object; + if (args->flags & RADEON_GEM_USERPTR_VALIDATE) { + down_read(¤t->mm->mmap_sem); + r = radeon_bo_reserve(bo, true); + if (r) { + up_read(¤t->mm->mmap_sem); + goto release_object; + } + + radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT); + r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); + radeon_bo_unreserve(bo); + up_read(¤t->mm->mmap_sem); + if (r) + goto release_object; + } + r = drm_gem_handle_create(filp, gobj, &handle); /* drop reference from allocate - handle holds it now */ drm_gem_object_unreference_unlocked(gobj); diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h index 9720e1a3684..5dc61c2d4c7 100644 --- a/include/uapi/drm/radeon_drm.h +++ b/include/uapi/drm/radeon_drm.h @@ -817,6 +817,7 @@ struct drm_radeon_gem_create { */ #define RADEON_GEM_USERPTR_READONLY (1 << 0) #define RADEON_GEM_USERPTR_ANONONLY (1 << 1) +#define RADEON_GEM_USERPTR_VALIDATE (1 << 2) struct drm_radeon_gem_userptr { uint64_t addr; -- cgit v1.2.3-70-g09d2 From 341cb9e426fac32523427c80c67543a16be46605 Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 7 Aug 2014 09:36:03 +0200 Subject: drm/radeon: add userptr flag to register MMU notifier v3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Whenever userspace mapping related to our userptr change we wait for it to become idle and unmap it from GTT. v2: rebased, fix mutex unlock in error path v3: improve commit message Signed-off-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/Kconfig | 1 + drivers/gpu/drm/radeon/Makefile | 2 +- drivers/gpu/drm/radeon/radeon.h | 12 ++ drivers/gpu/drm/radeon/radeon_device.c | 2 + drivers/gpu/drm/radeon/radeon_gem.c | 9 +- drivers/gpu/drm/radeon/radeon_mn.c | 272 +++++++++++++++++++++++++++++++++ drivers/gpu/drm/radeon/radeon_object.c | 1 + include/uapi/drm/radeon_drm.h | 1 + 8 files changed, 298 insertions(+), 2 deletions(-) create mode 100644 drivers/gpu/drm/radeon/radeon_mn.c (limited to 'include') diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index b066bb3ca01..358b6e8697e 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -115,6 +115,7 @@ config DRM_RADEON select HWMON select BACKLIGHT_CLASS_DEVICE select INTERVAL_TREE + select MMU_NOTIFIER help Choose this option if you have an ATI Radeon graphics card. There are both PCI and AGP versions. 
You don't need to choose this to diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index 0013ad0db9e..c7fa1aeb8c3 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile @@ -80,7 +80,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \ rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \ trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \ - ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o + ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o radeon_mn.o # add async DMA block radeon-y += \ diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 6f38a23a581..542da820867 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -65,6 +65,7 @@ #include #include #include +#include #include #include @@ -487,6 +488,9 @@ struct radeon_bo { struct ttm_bo_kmap_obj dma_buf_vmap; pid_t pid; + + struct radeon_mn *mn; + struct interval_tree_node mn_it; }; #define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base) @@ -1725,6 +1729,11 @@ void radeon_test_ring_sync(struct radeon_device *rdev, struct radeon_ring *cpB); void radeon_test_syncing(struct radeon_device *rdev); +/* + * MMU Notifier + */ +int radeon_mn_register(struct radeon_bo *bo, unsigned long addr); +void radeon_mn_unregister(struct radeon_bo *bo); /* * Debugfs @@ -2372,6 +2381,9 @@ struct radeon_device { /* tracking pinned memory */ u64 vram_pin_size; u64 gart_pin_size; + + struct mutex mn_lock; + DECLARE_HASHTABLE(mn_hash, 7); }; bool radeon_is_px(struct drm_device *dev); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index c8ea050c8fa..c58f84f3c6a 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1270,6 +1270,8 @@ int radeon_device_init(struct radeon_device *rdev, init_rwsem(&rdev->pm.mclk_lock); init_rwsem(&rdev->exclusive_lock); init_waitqueue_head(&rdev->irq.vblank_queue); + mutex_init(&rdev->mn_lock); + hash_init(rdev->mn_hash); r = radeon_gem_init(rdev); if (r) return r; diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 450656027ab..2a6fbf101cf 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -291,7 +291,8 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data, /* reject unknown flag values */ if (args->flags & ~(RADEON_GEM_USERPTR_READONLY | - RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE)) + RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE | + RADEON_GEM_USERPTR_REGISTER)) return -EINVAL; /* readonly pages not tested on older hardware */ @@ -312,6 +313,12 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data, if (r) goto release_object; + if (args->flags & RADEON_GEM_USERPTR_REGISTER) { + r = radeon_mn_register(bo, args->addr); + if (r) + goto release_object; + } + if (args->flags & RADEON_GEM_USERPTR_VALIDATE) { down_read(¤t->mm->mmap_sem); r = radeon_bo_reserve(bo, true); diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c new file mode 100644 index 00000000000..0157bc2f11f --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_mn.c @@ -0,0 +1,272 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + */ +/* + * Authors: + * Christian König + */ + +#include +#include +#include +#include +#include + +#include "radeon.h" + +struct radeon_mn { + /* constant after initialisation */ + struct radeon_device *rdev; + struct mm_struct *mm; + struct mmu_notifier mn; + + /* only used on destruction */ + struct work_struct work; + + /* protected by rdev->mn_lock */ + struct hlist_node node; + + /* objects protected by lock */ + struct mutex lock; + struct rb_root objects; +}; + +/** + * radeon_mn_destroy - destroy the rmn + * + * @work: previously sheduled work item + * + * Lazy destroys the notifier from a work item + */ +static void radeon_mn_destroy(struct work_struct *work) +{ + struct radeon_mn *rmn = container_of(work, struct radeon_mn, work); + struct radeon_device *rdev = rmn->rdev; + struct radeon_bo *bo, *next; + + mutex_lock(&rdev->mn_lock); + mutex_lock(&rmn->lock); + hash_del(&rmn->node); + rbtree_postorder_for_each_entry_safe(bo, next, &rmn->objects, mn_it.rb) { + interval_tree_remove(&bo->mn_it, &rmn->objects); + bo->mn = NULL; + } + mutex_unlock(&rmn->lock); + mutex_unlock(&rdev->mn_lock); + mmu_notifier_unregister(&rmn->mn, rmn->mm); + kfree(rmn); +} + +/** + * radeon_mn_release - callback to notify about mm destruction + * + * @mn: our notifier + * @mn: the mm this callback is about + * + * Shedule a work item to lazy destroy our notifier. + */ +static void radeon_mn_release(struct mmu_notifier *mn, + struct mm_struct *mm) +{ + struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn); + INIT_WORK(&rmn->work, radeon_mn_destroy); + schedule_work(&rmn->work); +} + +/** + * radeon_mn_invalidate_range_start - callback to notify about mm change + * + * @mn: our notifier + * @mn: the mm this callback is about + * @start: start of updated range + * @end: end of updated range + * + * We block for all BOs between start and end to be idle and + * unmap them by move them into system domain again. 
+ */ +static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, + unsigned long end) +{ + struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn); + struct interval_tree_node *it; + + /* notification is exclusive, but interval is inclusive */ + end -= 1; + + mutex_lock(&rmn->lock); + + it = interval_tree_iter_first(&rmn->objects, start, end); + while (it) { + struct radeon_bo *bo; + int r; + + bo = container_of(it, struct radeon_bo, mn_it); + it = interval_tree_iter_next(it, start, end); + + r = radeon_bo_reserve(bo, true); + if (r) { + DRM_ERROR("(%d) failed to reserve user bo\n", r); + continue; + } + + if (bo->tbo.sync_obj) { + r = radeon_fence_wait(bo->tbo.sync_obj, false); + if (r) + DRM_ERROR("(%d) failed to wait for user bo\n", r); + } + + radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU); + r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); + if (r) + DRM_ERROR("(%d) failed to validate user bo\n", r); + + radeon_bo_unreserve(bo); + } + + mutex_unlock(&rmn->lock); +} + +static const struct mmu_notifier_ops radeon_mn_ops = { + .release = radeon_mn_release, + .invalidate_range_start = radeon_mn_invalidate_range_start, +}; + +/** + * radeon_mn_get - create notifier context + * + * @rdev: radeon device pointer + * + * Creates a notifier context for current->mm. + */ +static struct radeon_mn *radeon_mn_get(struct radeon_device *rdev) +{ + struct mm_struct *mm = current->mm; + struct radeon_mn *rmn; + int r; + + down_write(&mm->mmap_sem); + mutex_lock(&rdev->mn_lock); + + hash_for_each_possible(rdev->mn_hash, rmn, node, (unsigned long)mm) + if (rmn->mm == mm) + goto release_locks; + + rmn = kzalloc(sizeof(*rmn), GFP_KERNEL); + if (!rmn) { + rmn = ERR_PTR(-ENOMEM); + goto release_locks; + } + + rmn->rdev = rdev; + rmn->mm = mm; + rmn->mn.ops = &radeon_mn_ops; + mutex_init(&rmn->lock); + rmn->objects = RB_ROOT; + + r = __mmu_notifier_register(&rmn->mn, mm); + if (r) + goto free_rmn; + + hash_add(rdev->mn_hash, &rmn->node, (unsigned long)mm); + +release_locks: + mutex_unlock(&rdev->mn_lock); + up_write(&mm->mmap_sem); + + return rmn; + +free_rmn: + mutex_unlock(&rdev->mn_lock); + up_write(&mm->mmap_sem); + kfree(rmn); + + return ERR_PTR(r); +} + +/** + * radeon_mn_register - register a BO for notifier updates + * + * @bo: radeon buffer object + * @addr: userptr addr we should monitor + * + * Registers an MMU notifier for the given BO at the specified address. + * Returns 0 on success, -ERRNO if anything goes wrong. + */ +int radeon_mn_register(struct radeon_bo *bo, unsigned long addr) +{ + unsigned long end = addr + radeon_bo_size(bo) - 1; + struct radeon_device *rdev = bo->rdev; + struct radeon_mn *rmn; + struct interval_tree_node *it; + + rmn = radeon_mn_get(rdev); + if (IS_ERR(rmn)) + return PTR_ERR(rmn); + + mutex_lock(&rmn->lock); + + it = interval_tree_iter_first(&rmn->objects, addr, end); + if (it) { + mutex_unlock(&rmn->lock); + return -EEXIST; + } + + bo->mn = rmn; + bo->mn_it.start = addr; + bo->mn_it.last = end; + interval_tree_insert(&bo->mn_it, &rmn->objects); + + mutex_unlock(&rmn->lock); + + return 0; +} + +/** + * radeon_mn_unregister - unregister a BO for notifier updates + * + * @bo: radeon buffer object + * + * Remove any registration of MMU notifier updates from the buffer object. 
+ */ +void radeon_mn_unregister(struct radeon_bo *bo) +{ + struct radeon_device *rdev = bo->rdev; + struct radeon_mn *rmn; + + mutex_lock(&rdev->mn_lock); + rmn = bo->mn; + if (rmn == NULL) { + mutex_unlock(&rdev->mn_lock); + return; + } + + mutex_lock(&rmn->lock); + interval_tree_remove(&bo->mn_it, &rmn->objects); + bo->mn = NULL; + mutex_unlock(&rmn->lock); + mutex_unlock(&rdev->mn_lock); +} diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index c73c1e32058..28752380798 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -75,6 +75,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo) bo = container_of(tbo, struct radeon_bo, tbo); radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1); + radeon_mn_unregister(bo); mutex_lock(&bo->rdev->gem.mutex); list_del_init(&bo->list); diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h index 5dc61c2d4c7..c77495ffc44 100644 --- a/include/uapi/drm/radeon_drm.h +++ b/include/uapi/drm/radeon_drm.h @@ -818,6 +818,7 @@ struct drm_radeon_gem_create { #define RADEON_GEM_USERPTR_READONLY (1 << 0) #define RADEON_GEM_USERPTR_ANONONLY (1 << 1) #define RADEON_GEM_USERPTR_VALIDATE (1 << 2) +#define RADEON_GEM_USERPTR_REGISTER (1 << 3) struct drm_radeon_gem_userptr { uint64_t addr; -- cgit v1.2.3-70-g09d2 From ba07975f0fe5bf95107d71d0df0405c16f5c3266 Mon Sep 17 00:00:00 2001 From: Steve Longerbeam Date: Wed, 25 Jun 2014 18:05:30 -0700 Subject: gpu: ipu-v3: Add functions to set CSI/IC source muxes Adds two new functions, ipu_set_csi_src_mux() and ipu_set_ic_src_mux(), that select the inputs to the CSI and IC respectively. Both muxes are programmed in the IPU_CONF register. Signed-off-by: Steve Longerbeam Signed-off-by: Philipp Zabel --- drivers/gpu/ipu-v3/ipu-common.c | 51 +++++++++++++++++++++++++++++++++++++++++ include/video/imx-ipu-v3.h | 6 +++++ 2 files changed, 57 insertions(+) (limited to 'include') diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index 5978e7aab8e..cae54311585 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c @@ -382,6 +382,57 @@ static int ipu_memory_reset(struct ipu_soc *ipu) return 0; } +/* + * Set the source mux for the given CSI. Selects either parallel or + * MIPI CSI2 sources. + */ +void ipu_set_csi_src_mux(struct ipu_soc *ipu, int csi_id, bool mipi_csi2) +{ + unsigned long flags; + u32 val, mask; + + mask = (csi_id == 1) ? IPU_CONF_CSI1_DATA_SOURCE : + IPU_CONF_CSI0_DATA_SOURCE; + + spin_lock_irqsave(&ipu->lock, flags); + + val = ipu_cm_read(ipu, IPU_CONF); + if (mipi_csi2) + val |= mask; + else + val &= ~mask; + ipu_cm_write(ipu, val, IPU_CONF); + + spin_unlock_irqrestore(&ipu->lock, flags); +} +EXPORT_SYMBOL_GPL(ipu_set_csi_src_mux); + +/* + * Set the source mux for the IC. Selects either CSI[01] or the VDI. 
+ */ +void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi) +{ + unsigned long flags; + u32 val; + + spin_lock_irqsave(&ipu->lock, flags); + + val = ipu_cm_read(ipu, IPU_CONF); + if (vdi) { + val |= IPU_CONF_IC_INPUT; + } else { + val &= ~IPU_CONF_IC_INPUT; + if (csi_id == 1) + val |= IPU_CONF_CSI_SEL; + else + val &= ~IPU_CONF_CSI_SEL; + } + ipu_cm_write(ipu, val, IPU_CONF); + + spin_unlock_irqrestore(&ipu->lock, flags); +} +EXPORT_SYMBOL_GPL(ipu_set_ic_src_mux); + struct ipu_devtype { const char *name; unsigned long cm_ofs; diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h index ef64b66b18d..f80fe13b0d4 100644 --- a/include/video/imx-ipu-v3.h +++ b/include/video/imx-ipu-v3.h @@ -92,6 +92,12 @@ int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel, #define IPU_IRQ_VSYNC_PRE_0 (448 + 14) #define IPU_IRQ_VSYNC_PRE_1 (448 + 15) +/* + * IPU Common functions + */ +void ipu_set_csi_src_mux(struct ipu_soc *ipu, int csi_id, bool mipi_csi2); +void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi); + /* * IPU Image DMA Controller (idmac) functions */ -- cgit v1.2.3-70-g09d2 From 3a5f87c286515c54ff5c52c3e64d0c522b7570c0 Mon Sep 17 00:00:00 2001 From: Thomas Wood Date: Wed, 20 Aug 2014 14:45:00 +0100 Subject: drm: fix plane rotation when restoring fbdev configuration MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make sure plane rotation is reset correctly when restoring the fbdev configuration by using drm_mode_plane_set_obj_prop which calls the driver's set_property callback. The rotation reset feature was introduced in commit 9783de2 (drm: Resetting rotation property) and the callback issue was originally addressed in a previous version of the patch, but the fix was not present in the final version. v2: Fix documentation warning Add some more details to the commit message (Daniel Vetter) Testcase: igt/kms_rotation_crc Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=82236 Cc: Sonika Jindal Cc: Ville Syrjälä Cc: Dave Airlie Cc: Daniel Vetter Signed-off-by: Thomas Wood Signed-off-by: Daniel Vetter --- drivers/gpu/drm/drm_crtc.c | 25 ++++++++++++++++++++----- drivers/gpu/drm/drm_fb_helper.c | 6 +++--- include/drm/drm_crtc.h | 3 +++ 3 files changed, 26 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 3c4a62169f2..d7e4c0e2e79 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -4156,12 +4156,25 @@ static int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj, return ret; } -static int drm_mode_plane_set_obj_prop(struct drm_mode_object *obj, - struct drm_property *property, - uint64_t value) +/** + * drm_mode_plane_set_obj_prop - set the value of a property + * @plane: drm plane object to set property value for + * @property: property to set + * @value: value the property should be set to + * + * This functions sets a given property on a given plane object. This function + * calls the driver's ->set_property callback and changes the software state of + * the property if the callback succeeds. + * + * Returns: + * Zero on success, error code on failure. 
+ */ +int drm_mode_plane_set_obj_prop(struct drm_plane *plane, + struct drm_property *property, + uint64_t value) { int ret = -EINVAL; - struct drm_plane *plane = obj_to_plane(obj); + struct drm_mode_object *obj = &plane->base; if (plane->funcs->set_property) ret = plane->funcs->set_property(plane, property, value); @@ -4170,6 +4183,7 @@ static int drm_mode_plane_set_obj_prop(struct drm_mode_object *obj, return ret; } +EXPORT_SYMBOL(drm_mode_plane_set_obj_prop); /** * drm_mode_getproperty_ioctl - get the current value of a object's property @@ -4308,7 +4322,8 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data, ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value); break; case DRM_MODE_OBJECT_PLANE: - ret = drm_mode_plane_set_obj_prop(arg_obj, property, arg->value); + ret = drm_mode_plane_set_obj_prop(obj_to_plane(arg_obj), + property, arg->value); break; } diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index d139eddb3d6..99569ee5ade 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -350,9 +350,9 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper) drm_plane_force_disable(plane); if (dev->mode_config.rotation_property) { - drm_object_property_set_value(&plane->base, - dev->mode_config.rotation_property, - BIT(DRM_ROTATE_0)); + drm_mode_plane_set_obj_prop(plane, + dev->mode_config.rotation_property, + BIT(DRM_ROTATE_0)); } } diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 62f73bdbcc4..38fae5d9ad7 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -1121,6 +1121,9 @@ extern int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_mode_plane_set_obj_prop(struct drm_plane *plane, + struct drm_property *property, + uint64_t value); extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, int *bpp); -- cgit v1.2.3-70-g09d2 From f1217ed09f827e42a49ffa6a5aab672aa6f57a65 Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 27 Aug 2014 13:16:04 +0200 Subject: drm/ttm: move fpfn and lpfn into each placement v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This allows us to more fine grained specify where to place the buffer object. 
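Before the driver-by-driver churn below, a hedged sketch of what the new interface looks like from a driver's point of view. The struct ttm_place fields, the meaning of lpfn == 0 (no upper bound) and the ttm_bo_validate() call match the hunks in this patch; the function name, the visible_vram_pages parameter and the exact set of headers are illustrative assumptions.

#include <linux/kernel.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

/* Each placement entry now carries its own pfn window and flags, so a
 * range restriction can apply to one entry without constraining the rest. */
static int pin_into_visible_vram(struct ttm_buffer_object *bo,
				 unsigned long visible_vram_pages)
{
	struct ttm_place places[] = {
		{
			/* First choice: the CPU-visible part of VRAM only. */
			.fpfn  = 0,
			.lpfn  = visible_vram_pages,
			.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
				 TTM_PL_FLAG_VRAM | TTM_PL_FLAG_NO_EVICT,
		},
		{
			/* Fallback: system memory, lpfn == 0 means no limit. */
			.fpfn  = 0,
			.lpfn  = 0,
			.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM |
				 TTM_PL_FLAG_NO_EVICT,
		},
	};
	struct ttm_placement placement = {
		.num_placement      = ARRAY_SIZE(places),
		.placement          = places,
		.num_busy_placement = ARRAY_SIZE(places),
		.busy_placement     = places,
	};

	return ttm_bo_validate(bo, &placement, false, false);
}

This is essentially what the radeon_bo_pin_restricted() hunk further down now does per placement entry, instead of clamping a single placement-wide lpfn.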
v2: rebased on drm-next, add bochs changes as well Signed-off-by: Christian König Reviewed-by: Alex Deucher --- drivers/gpu/drm/ast/ast_drv.h | 2 +- drivers/gpu/drm/ast/ast_ttm.c | 20 ++-- drivers/gpu/drm/bochs/bochs.h | 2 +- drivers/gpu/drm/bochs/bochs_mm.c | 20 ++-- drivers/gpu/drm/cirrus/cirrus_drv.h | 2 +- drivers/gpu/drm/cirrus/cirrus_ttm.c | 17 ++-- drivers/gpu/drm/mgag200/mgag200_drv.h | 2 +- drivers/gpu/drm/mgag200/mgag200_ttm.c | 20 ++-- drivers/gpu/drm/nouveau/nouveau_bo.c | 52 +++++++--- drivers/gpu/drm/nouveau/nouveau_bo.h | 4 +- drivers/gpu/drm/nouveau/nouveau_ttm.c | 9 +- drivers/gpu/drm/qxl/qxl_drv.h | 2 +- drivers/gpu/drm/qxl/qxl_object.c | 17 ++-- drivers/gpu/drm/qxl/qxl_ttm.c | 8 +- drivers/gpu/drm/radeon/radeon.h | 2 +- drivers/gpu/drm/radeon/radeon_object.c | 71 +++++++++----- drivers/gpu/drm/radeon/radeon_ttm.c | 25 ++--- drivers/gpu/drm/radeon/radeon_uvd.c | 8 +- drivers/gpu/drm/ttm/ttm_bo.c | 93 ++++++++---------- drivers/gpu/drm/ttm/ttm_bo_manager.c | 9 +- drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 136 ++++++++++++++++---------- drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c | 22 +++-- drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 10 +- drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 3 +- include/drm/ttm/ttm_bo_api.h | 40 ++++---- include/drm/ttm/ttm_bo_driver.h | 3 +- 26 files changed, 346 insertions(+), 253 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 957d4fabf1e..cb91c2acc3c 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -316,7 +316,7 @@ struct ast_bo { struct ttm_placement placement; struct ttm_bo_kmap_obj kmap; struct drm_gem_object gem; - u32 placements[3]; + struct ttm_place placements[3]; int pin_count; }; #define gem_to_ast_bo(gobj) container_of((gobj), struct ast_bo, gem) diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c index b8246227bab..8008ea0bc76 100644 --- a/drivers/gpu/drm/ast/ast_ttm.c +++ b/drivers/gpu/drm/ast/ast_ttm.c @@ -293,18 +293,22 @@ void ast_mm_fini(struct ast_private *ast) void ast_ttm_placement(struct ast_bo *bo, int domain) { u32 c = 0; - bo->placement.fpfn = 0; - bo->placement.lpfn = 0; + unsigned i; + bo->placement.placement = bo->placements; bo->placement.busy_placement = bo->placements; if (domain & TTM_PL_FLAG_VRAM) - bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM; + bo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM; if (domain & TTM_PL_FLAG_SYSTEM) - bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; if (!c) - bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; bo->placement.num_placement = c; bo->placement.num_busy_placement = c; + for (i = 0; i < c; ++i) { + bo->placements[i].fpfn = 0; + bo->placements[i].lpfn = 0; + } } int ast_bo_create(struct drm_device *dev, int size, int align, @@ -360,7 +364,7 @@ int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr) ast_ttm_placement(bo, pl_flag); for (i = 0; i < bo->placement.num_placement; i++) - bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; + bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); if (ret) return ret; @@ -383,7 +387,7 @@ int ast_bo_unpin(struct ast_bo *bo) return 0; for (i = 0; i < bo->placement.num_placement ; i++) - bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; + 
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); if (ret) return ret; @@ -407,7 +411,7 @@ int ast_bo_push_sysram(struct ast_bo *bo) ast_ttm_placement(bo, TTM_PL_FLAG_SYSTEM); for (i = 0; i < bo->placement.num_placement ; i++) - bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; + bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); if (ret) { diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h index 7eb52dd44b0..4f6e7b3a363 100644 --- a/drivers/gpu/drm/bochs/bochs.h +++ b/drivers/gpu/drm/bochs/bochs.h @@ -99,7 +99,7 @@ struct bochs_bo { struct ttm_placement placement; struct ttm_bo_kmap_obj kmap; struct drm_gem_object gem; - u32 placements[3]; + struct ttm_place placements[3]; int pin_count; }; diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c index 1728a1b0b81..2af30e7607d 100644 --- a/drivers/gpu/drm/bochs/bochs_mm.c +++ b/drivers/gpu/drm/bochs/bochs_mm.c @@ -257,20 +257,26 @@ void bochs_mm_fini(struct bochs_device *bochs) static void bochs_ttm_placement(struct bochs_bo *bo, int domain) { + unsigned i; u32 c = 0; - bo->placement.fpfn = 0; - bo->placement.lpfn = 0; bo->placement.placement = bo->placements; bo->placement.busy_placement = bo->placements; if (domain & TTM_PL_FLAG_VRAM) { - bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED + bo->placements[c++].flags = TTM_PL_FLAG_WC + | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM; } if (domain & TTM_PL_FLAG_SYSTEM) { - bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + bo->placements[c++].flags = TTM_PL_MASK_CACHING + | TTM_PL_FLAG_SYSTEM; } if (!c) { - bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + bo->placements[c++].flags = TTM_PL_MASK_CACHING + | TTM_PL_FLAG_SYSTEM; + } + for (i = 0; i < c; ++i) { + bo->placements[i].fpfn = 0; + bo->placements[i].lpfn = 0; } bo->placement.num_placement = c; bo->placement.num_busy_placement = c; @@ -294,7 +300,7 @@ int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr) bochs_ttm_placement(bo, pl_flag); for (i = 0; i < bo->placement.num_placement; i++) - bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; + bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); if (ret) return ret; @@ -319,7 +325,7 @@ int bochs_bo_unpin(struct bochs_bo *bo) return 0; for (i = 0; i < bo->placement.num_placement; i++) - bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; + bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); if (ret) return ret; diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h index 401c890b6c6..dd2cfc9024a 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.h +++ b/drivers/gpu/drm/cirrus/cirrus_drv.h @@ -163,7 +163,7 @@ struct cirrus_bo { struct ttm_placement placement; struct ttm_bo_kmap_obj kmap; struct drm_gem_object gem; - u32 placements[3]; + struct ttm_place placements[3]; int pin_count; }; #define gem_to_cirrus_bo(gobj) container_of((gobj), struct cirrus_bo, gem) diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c index 92e6b778609..3e7d758330a 100644 --- a/drivers/gpu/drm/cirrus/cirrus_ttm.c +++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c @@ -298,18 +298,21 @@ void cirrus_mm_fini(struct cirrus_device *cirrus) void cirrus_ttm_placement(struct cirrus_bo *bo, int domain) { u32 c = 0; - bo->placement.fpfn = 0; - bo->placement.lpfn = 
0; + unsigned i; bo->placement.placement = bo->placements; bo->placement.busy_placement = bo->placements; if (domain & TTM_PL_FLAG_VRAM) - bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM; + bo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM; if (domain & TTM_PL_FLAG_SYSTEM) - bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; if (!c) - bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; bo->placement.num_placement = c; bo->placement.num_busy_placement = c; + for (i = 0; i < c; ++i) { + bo->placements[i].fpfn = 0; + bo->placements[i].lpfn = 0; + } } int cirrus_bo_create(struct drm_device *dev, int size, int align, @@ -365,7 +368,7 @@ int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr) cirrus_ttm_placement(bo, pl_flag); for (i = 0; i < bo->placement.num_placement; i++) - bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; + bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); if (ret) return ret; @@ -392,7 +395,7 @@ int cirrus_bo_push_sysram(struct cirrus_bo *bo) cirrus_ttm_placement(bo, TTM_PL_FLAG_SYSTEM); for (i = 0; i < bo->placement.num_placement ; i++) - bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; + bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); if (ret) { diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h index 80de23d9b9c..2e2b76aa4e1 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.h +++ b/drivers/gpu/drm/mgag200/mgag200_drv.h @@ -224,7 +224,7 @@ struct mgag200_bo { struct ttm_placement placement; struct ttm_bo_kmap_obj kmap; struct drm_gem_object gem; - u32 placements[3]; + struct ttm_place placements[3]; int pin_count; }; #define gem_to_mga_bo(gobj) container_of((gobj), struct mgag200_bo, gem) diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c index 5a00e90696d..be883ef5a1d 100644 --- a/drivers/gpu/drm/mgag200/mgag200_ttm.c +++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c @@ -293,18 +293,22 @@ void mgag200_mm_fini(struct mga_device *mdev) void mgag200_ttm_placement(struct mgag200_bo *bo, int domain) { u32 c = 0; - bo->placement.fpfn = 0; - bo->placement.lpfn = 0; + unsigned i; + bo->placement.placement = bo->placements; bo->placement.busy_placement = bo->placements; if (domain & TTM_PL_FLAG_VRAM) - bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM; + bo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM; if (domain & TTM_PL_FLAG_SYSTEM) - bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; if (!c) - bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; bo->placement.num_placement = c; bo->placement.num_busy_placement = c; + for (i = 0; i < c; ++i) { + bo->placements[i].fpfn = 0; + bo->placements[i].lpfn = 0; + } } int mgag200_bo_create(struct drm_device *dev, int size, int align, @@ -361,7 +365,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr) mgag200_ttm_placement(bo, pl_flag); for (i = 0; i < bo->placement.num_placement; i++) - bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; + bo->placements[i].flags |= 
TTM_PL_FLAG_NO_EVICT; ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); if (ret) return ret; @@ -384,7 +388,7 @@ int mgag200_bo_unpin(struct mgag200_bo *bo) return 0; for (i = 0; i < bo->placement.num_placement ; i++) - bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; + bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); if (ret) return ret; @@ -408,7 +412,7 @@ int mgag200_bo_push_sysram(struct mgag200_bo *bo) mgag200_ttm_placement(bo, TTM_PL_FLAG_SYSTEM); for (i = 0; i < bo->placement.num_placement ; i++) - bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; + bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); if (ret) { diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 01da508625f..0591ca0734e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -241,16 +241,16 @@ nouveau_bo_new(struct drm_device *dev, int size, int align, } static void -set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags) +set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags) { *n = 0; if (type & TTM_PL_FLAG_VRAM) - pl[(*n)++] = TTM_PL_FLAG_VRAM | flags; + pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags; if (type & TTM_PL_FLAG_TT) - pl[(*n)++] = TTM_PL_FLAG_TT | flags; + pl[(*n)++].flags = TTM_PL_FLAG_TT | flags; if (type & TTM_PL_FLAG_SYSTEM) - pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags; + pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags; } static void @@ -258,6 +258,7 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type) { struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); u32 vram_pages = drm->device.info.ram_size >> PAGE_SHIFT; + unsigned i, fpfn, lpfn; if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS && nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && @@ -269,11 +270,19 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type) * at the same time. 
*/ if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) { - nvbo->placement.fpfn = vram_pages / 2; - nvbo->placement.lpfn = ~0; + fpfn = vram_pages / 2; + lpfn = ~0; } else { - nvbo->placement.fpfn = 0; - nvbo->placement.lpfn = vram_pages / 2; + fpfn = 0; + lpfn = vram_pages / 2; + } + for (i = 0; i < nvbo->placement.num_placement; ++i) { + nvbo->placements[i].fpfn = fpfn; + nvbo->placements[i].lpfn = lpfn; + } + for (i = 0; i < nvbo->placement.num_busy_placement; ++i) { + nvbo->busy_placements[i].fpfn = fpfn; + nvbo->busy_placements[i].lpfn = lpfn; } } } @@ -1041,12 +1050,15 @@ static int nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { - u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; + struct ttm_place placement_memtype = { + .fpfn = 0, + .lpfn = 0, + .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING + }; struct ttm_placement placement; struct ttm_mem_reg tmp_mem; int ret; - placement.fpfn = placement.lpfn = 0; placement.num_placement = placement.num_busy_placement = 1; placement.placement = placement.busy_placement = &placement_memtype; @@ -1074,12 +1086,15 @@ static int nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { - u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; + struct ttm_place placement_memtype = { + .fpfn = 0, + .lpfn = 0, + .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING + }; struct ttm_placement placement; struct ttm_mem_reg tmp_mem; int ret; - placement.fpfn = placement.lpfn = 0; placement.num_placement = placement.num_busy_placement = 1; placement.placement = placement.busy_placement = &placement_memtype; @@ -1294,7 +1309,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) struct nouveau_bo *nvbo = nouveau_bo(bo); struct nvif_device *device = &drm->device; u32 mappable = nv_device_resource_len(nvkm_device(device), 1) >> PAGE_SHIFT; - int ret; + int i, ret; /* as long as the bo isn't in vram, and isn't tiled, we've got * nothing to do here. 
@@ -1319,9 +1334,16 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) bo->mem.start + bo->mem.num_pages < mappable) return 0; + for (i = 0; i < nvbo->placement.num_placement; ++i) { + nvbo->placements[i].fpfn = 0; + nvbo->placements[i].lpfn = mappable; + } + + for (i = 0; i < nvbo->placement.num_busy_placement; ++i) { + nvbo->busy_placements[i].fpfn = 0; + nvbo->busy_placements[i].lpfn = mappable; + } - nvbo->placement.fpfn = 0; - nvbo->placement.lpfn = mappable; nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0); return nouveau_bo_validate(nvbo, false, false); } diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h index ff17c1f432f..4ef88e84a69 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.h +++ b/drivers/gpu/drm/nouveau/nouveau_bo.h @@ -9,8 +9,8 @@ struct nouveau_bo { struct ttm_buffer_object bo; struct ttm_placement placement; u32 valid_domains; - u32 placements[3]; - u32 busy_placements[3]; + struct ttm_place placements[3]; + struct ttm_place busy_placements[3]; struct ttm_bo_kmap_obj kmap; struct list_head head; diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index 53874b76b03..e81d086577c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -71,8 +71,7 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man, static int nouveau_vram_manager_new(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, - struct ttm_placement *placement, - uint32_t flags, + const struct ttm_place *place, struct ttm_mem_reg *mem) { struct nouveau_drm *drm = nouveau_bdev(man->bdev); @@ -158,8 +157,7 @@ nouveau_gart_manager_del(struct ttm_mem_type_manager *man, static int nouveau_gart_manager_new(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, - struct ttm_placement *placement, - uint32_t flags, + const struct ttm_place *place, struct ttm_mem_reg *mem) { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); @@ -239,8 +237,7 @@ nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem) static int nv04_gart_manager_new(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, - struct ttm_placement *placement, - uint32_t flags, + const struct ttm_place *place, struct ttm_mem_reg *mem) { struct nouveau_mem *node; diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 36ed40ba773..f6022b70364 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -106,7 +106,7 @@ struct qxl_bo { /* Protected by gem.mutex */ struct list_head list; /* Protected by tbo.reserved */ - u32 placements[3]; + struct ttm_place placements[3]; struct ttm_placement placement; struct ttm_buffer_object tbo; struct ttm_bo_kmap_obj kmap; diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c index b95f144f0b4..adad12d3037 100644 --- a/drivers/gpu/drm/qxl/qxl_object.c +++ b/drivers/gpu/drm/qxl/qxl_object.c @@ -55,21 +55,24 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned) { u32 c = 0; u32 pflag = pinned ? 
TTM_PL_FLAG_NO_EVICT : 0; + unsigned i; - qbo->placement.fpfn = 0; - qbo->placement.lpfn = 0; qbo->placement.placement = qbo->placements; qbo->placement.busy_placement = qbo->placements; if (domain == QXL_GEM_DOMAIN_VRAM) - qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag; + qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag; if (domain == QXL_GEM_DOMAIN_SURFACE) - qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag; + qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag; if (domain == QXL_GEM_DOMAIN_CPU) - qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag; + qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag; if (!c) - qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; qbo->placement.num_placement = c; qbo->placement.num_busy_placement = c; + for (i = 0; i < c; ++i) { + qbo->placements[i].fpfn = 0; + qbo->placements[i].lpfn = 0; + } } @@ -259,7 +262,7 @@ int qxl_bo_unpin(struct qxl_bo *bo) if (bo->pin_count) return 0; for (i = 0; i < bo->placement.num_placement; i++) - bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; + bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); if (unlikely(r != 0)) dev_err(qdev->dev, "%p validate failed for unpin\n", bo); diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 71a1baeac14..f66c59b222f 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -188,11 +188,13 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *placement) { struct qxl_bo *qbo; - static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + static struct ttm_place placements = { + .fpfn = 0, + .lpfn = 0, + .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM + }; if (!qxl_ttm_bo_is_qxl_bo(bo)) { - placement->fpfn = 0; - placement->lpfn = 0; placement->placement = &placements; placement->busy_placement = &placements; placement->num_placement = 1; diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index b321ad4dcaf..bb01dab513d 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -469,7 +469,7 @@ struct radeon_bo { struct list_head list; /* Protected by tbo.reserved */ u32 initial_domain; - u32 placements[3]; + struct ttm_place placements[3]; struct ttm_placement placement; struct ttm_buffer_object tbo; struct ttm_bo_kmap_obj kmap; diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 28752380798..0129c7efae3 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -97,40 +97,56 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) { u32 c = 0, i; - rbo->placement.fpfn = 0; - rbo->placement.lpfn = 0; rbo->placement.placement = rbo->placements; rbo->placement.busy_placement = rbo->placements; if (domain & RADEON_GEM_DOMAIN_VRAM) - rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | - TTM_PL_FLAG_VRAM; + rbo->placements[c++].flags = TTM_PL_FLAG_WC | + TTM_PL_FLAG_UNCACHED | + TTM_PL_FLAG_VRAM; + if (domain & RADEON_GEM_DOMAIN_GTT) { if (rbo->flags & RADEON_GEM_GTT_UC) { - rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_TT; + rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED | + TTM_PL_FLAG_TT; + } else if ((rbo->flags & RADEON_GEM_GTT_WC) || 
(rbo->rdev->flags & RADEON_IS_AGP)) { - rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | + rbo->placements[c++].flags = TTM_PL_FLAG_WC | + TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_TT; } else { - rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT; + rbo->placements[c++].flags = TTM_PL_FLAG_CACHED | + TTM_PL_FLAG_TT; } } + if (domain & RADEON_GEM_DOMAIN_CPU) { if (rbo->flags & RADEON_GEM_GTT_UC) { - rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_SYSTEM; + rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED | + TTM_PL_FLAG_SYSTEM; + } else if ((rbo->flags & RADEON_GEM_GTT_WC) || rbo->rdev->flags & RADEON_IS_AGP) { - rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | + rbo->placements[c++].flags = TTM_PL_FLAG_WC | + TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_SYSTEM; } else { - rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM; + rbo->placements[c++].flags = TTM_PL_FLAG_CACHED | + TTM_PL_FLAG_SYSTEM; } } if (!c) - rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + rbo->placements[c++].flags = TTM_PL_MASK_CACHING | + TTM_PL_FLAG_SYSTEM; + rbo->placement.num_placement = c; rbo->placement.num_busy_placement = c; + for (i = 0; i < c; ++i) { + rbo->placements[i].fpfn = 0; + rbo->placements[i].lpfn = 0; + } + /* * Use two-ended allocation depending on the buffer size to * improve fragmentation quality. @@ -138,7 +154,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) */ if (rbo->tbo.mem.size > 512 * 1024) { for (i = 0; i < c; i++) { - rbo->placements[i] |= TTM_PL_FLAG_TOPDOWN; + rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN; } } } @@ -287,21 +303,22 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset, return 0; } radeon_ttm_placement_from_domain(bo, domain); - if (domain == RADEON_GEM_DOMAIN_VRAM) { + for (i = 0; i < bo->placement.num_placement; i++) { + unsigned lpfn = 0; + /* force to pin into visible video ram */ - bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT; - } - if (max_offset) { - u64 lpfn = max_offset >> PAGE_SHIFT; + if (bo->placements[i].flags & TTM_PL_FLAG_VRAM) + lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT; + else + lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT; /* ??? */ - if (!bo->placement.lpfn) - bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT; + if (max_offset) + lpfn = min (lpfn, (unsigned)(max_offset >> PAGE_SHIFT)); - if (lpfn < bo->placement.lpfn) - bo->placement.lpfn = lpfn; + bo->placements[i].lpfn = lpfn; + bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; } - for (i = 0; i < bo->placement.num_placement; i++) - bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; + r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); if (likely(r == 0)) { bo->pin_count = 1; @@ -333,8 +350,10 @@ int radeon_bo_unpin(struct radeon_bo *bo) bo->pin_count--; if (bo->pin_count) return 0; - for (i = 0; i < bo->placement.num_placement; i++) - bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; + for (i = 0; i < bo->placement.num_placement; i++) { + bo->placements[i].lpfn = 0; + bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; + } r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); if (likely(r == 0)) { if (bo->tbo.mem.mem_type == TTM_PL_VRAM) @@ -735,7 +754,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) /* hurrah the memory is not visible ! 
*/ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); - rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; + rbo->placements[0].lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; r = ttm_bo_validate(bo, &rbo->placement, false, false); if (unlikely(r == -ENOMEM)) { radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 12e37b1ddc4..822eb363004 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -178,12 +178,15 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, static void radeon_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *placement) { + static struct ttm_place placements = { + .fpfn = 0, + .lpfn = 0, + .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM + }; + struct radeon_bo *rbo; - static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; if (!radeon_ttm_bo_is_radeon_bo(bo)) { - placement->fpfn = 0; - placement->lpfn = 0; placement->placement = &placements; placement->busy_placement = &placements; placement->num_placement = 1; @@ -286,20 +289,20 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, struct radeon_device *rdev; struct ttm_mem_reg *old_mem = &bo->mem; struct ttm_mem_reg tmp_mem; - u32 placements; + struct ttm_place placements; struct ttm_placement placement; int r; rdev = radeon_get_rdev(bo->bdev); tmp_mem = *new_mem; tmp_mem.mm_node = NULL; - placement.fpfn = 0; - placement.lpfn = 0; placement.num_placement = 1; placement.placement = &placements; placement.num_busy_placement = 1; placement.busy_placement = &placements; - placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; + placements.fpfn = 0; + placements.lpfn = 0; + placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_gpu); if (unlikely(r)) { @@ -334,19 +337,19 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo, struct ttm_mem_reg *old_mem = &bo->mem; struct ttm_mem_reg tmp_mem; struct ttm_placement placement; - u32 placements; + struct ttm_place placements; int r; rdev = radeon_get_rdev(bo->bdev); tmp_mem = *new_mem; tmp_mem.mm_node = NULL; - placement.fpfn = 0; - placement.lpfn = 0; placement.num_placement = 1; placement.placement = &placements; placement.num_busy_placement = 1; placement.busy_placement = &placements; - placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; + placements.fpfn = 0; + placements.lpfn = 0; + placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_gpu); if (unlikely(r)) { diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 341848a1437..25c8a1fd152 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -233,8 +233,12 @@ int radeon_uvd_resume(struct radeon_device *rdev) void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo) { - rbo->placement.fpfn = 0 >> PAGE_SHIFT; - rbo->placement.lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT; + int i; + + for (i = 0; i < rbo->placement.num_placement; ++i) { + rbo->placements[i].fpfn = 0 >> PAGE_SHIFT; + rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT; + } } void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 3da89d5dab6..b992ec3c318 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ 
b/drivers/gpu/drm/ttm/ttm_bo.c @@ -53,12 +53,13 @@ static struct attribute ttm_bo_count = { .mode = S_IRUGO }; -static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type) +static inline int ttm_mem_type_from_place(const struct ttm_place *place, + uint32_t *mem_type) { int i; for (i = 0; i <= TTM_PL_PRIV5; i++) - if (flags & (1 << i)) { + if (place->flags & (1 << i)) { *mem_type = i; return 0; } @@ -89,12 +90,12 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, bo, bo->mem.num_pages, bo->mem.size >> 10, bo->mem.size >> 20); for (i = 0; i < placement->num_placement; i++) { - ret = ttm_mem_type_from_flags(placement->placement[i], + ret = ttm_mem_type_from_place(&placement->placement[i], &mem_type); if (ret) return; pr_err(" placement[%d]=0x%08X (%d)\n", - i, placement->placement[i], mem_type); + i, placement->placement[i].flags, mem_type); ttm_mem_type_debug(bo->bdev, mem_type); } } @@ -685,8 +686,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, evict_mem.bus.io_reserved_vm = false; evict_mem.bus.io_reserved_count = 0; - placement.fpfn = 0; - placement.lpfn = 0; placement.num_placement = 0; placement.num_busy_placement = 0; bdev->driver->evict_flags(bo, &placement); @@ -774,7 +773,7 @@ EXPORT_SYMBOL(ttm_bo_mem_put); */ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, uint32_t mem_type, - struct ttm_placement *placement, + const struct ttm_place *place, struct ttm_mem_reg *mem, bool interruptible, bool no_wait_gpu) @@ -784,7 +783,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, int ret; do { - ret = (*man->func->get_node)(man, bo, placement, 0, mem); + ret = (*man->func->get_node)(man, bo, place, mem); if (unlikely(ret != 0)) return ret; if (mem->mm_node) @@ -827,18 +826,18 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man, static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, uint32_t mem_type, - uint32_t proposed_placement, + const struct ttm_place *place, uint32_t *masked_placement) { uint32_t cur_flags = ttm_bo_type_flags(mem_type); - if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0) + if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0) return false; - if ((proposed_placement & man->available_caching) == 0) + if ((place->flags & man->available_caching) == 0) return false; - cur_flags |= (proposed_placement & man->available_caching); + cur_flags |= (place->flags & man->available_caching); *masked_placement = cur_flags; return true; @@ -869,15 +868,14 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, mem->mm_node = NULL; for (i = 0; i < placement->num_placement; ++i) { - ret = ttm_mem_type_from_flags(placement->placement[i], - &mem_type); + const struct ttm_place *place = &placement->placement[i]; + + ret = ttm_mem_type_from_place(place, &mem_type); if (ret) return ret; man = &bdev->man[mem_type]; - type_ok = ttm_bo_mt_compatible(man, - mem_type, - placement->placement[i], + type_ok = ttm_bo_mt_compatible(man, mem_type, place, &cur_flags); if (!type_ok) @@ -889,7 +887,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, * Use the access and other non-mapping-related flag bits from * the memory placement flags to the current flags */ - ttm_flag_masked(&cur_flags, placement->placement[i], + ttm_flag_masked(&cur_flags, place->flags, ~TTM_PL_MASK_MEMTYPE); if (mem_type == TTM_PL_SYSTEM) @@ -897,8 +895,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, if (man->has_type && man->use_type) { type_found = true; - ret = 
(*man->func->get_node)(man, bo, placement, - cur_flags, mem); + ret = (*man->func->get_node)(man, bo, place, mem); if (unlikely(ret)) return ret; } @@ -916,17 +913,15 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, return -EINVAL; for (i = 0; i < placement->num_busy_placement; ++i) { - ret = ttm_mem_type_from_flags(placement->busy_placement[i], - &mem_type); + const struct ttm_place *place = &placement->busy_placement[i]; + + ret = ttm_mem_type_from_place(place, &mem_type); if (ret) return ret; man = &bdev->man[mem_type]; if (!man->has_type) continue; - if (!ttm_bo_mt_compatible(man, - mem_type, - placement->busy_placement[i], - &cur_flags)) + if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags)) continue; cur_flags = ttm_bo_select_caching(man, bo->mem.placement, @@ -935,7 +930,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, * Use the access and other non-mapping-related flag bits from * the memory placement flags to the current flags */ - ttm_flag_masked(&cur_flags, placement->busy_placement[i], + ttm_flag_masked(&cur_flags, place->flags, ~TTM_PL_MASK_MEMTYPE); if (mem_type == TTM_PL_SYSTEM) { @@ -945,7 +940,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, return 0; } - ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem, + ret = ttm_bo_mem_force_space(bo, mem_type, place, mem, interruptible, no_wait_gpu); if (ret == 0 && mem->mm_node) { mem->placement = cur_flags; @@ -1006,20 +1001,27 @@ static bool ttm_bo_mem_compat(struct ttm_placement *placement, { int i; - if (mem->mm_node && placement->lpfn != 0 && - (mem->start < placement->fpfn || - mem->start + mem->num_pages > placement->lpfn)) - return false; - for (i = 0; i < placement->num_placement; i++) { - *new_flags = placement->placement[i]; + const struct ttm_place *heap = &placement->placement[i]; + if (mem->mm_node && heap->lpfn != 0 && + (mem->start < heap->fpfn || + mem->start + mem->num_pages > heap->lpfn)) + continue; + + *new_flags = heap->flags; if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) && (*new_flags & mem->placement & TTM_PL_MASK_MEM)) return true; } for (i = 0; i < placement->num_busy_placement; i++) { - *new_flags = placement->busy_placement[i]; + const struct ttm_place *heap = &placement->busy_placement[i]; + if (mem->mm_node && heap->lpfn != 0 && + (mem->start < heap->fpfn || + mem->start + mem->num_pages > heap->lpfn)) + continue; + + *new_flags = heap->flags; if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) && (*new_flags & mem->placement & TTM_PL_MASK_MEM)) return true; @@ -1037,11 +1039,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo, uint32_t new_flags; lockdep_assert_held(&bo->resv->lock.base); - /* Check that range is valid */ - if (placement->lpfn || placement->fpfn) - if (placement->fpfn > placement->lpfn || - (placement->lpfn - placement->fpfn) < bo->num_pages) - return -EINVAL; /* * Check whether we need to move buffer. 
*/ @@ -1070,15 +1067,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_bo_validate); -int ttm_bo_check_placement(struct ttm_buffer_object *bo, - struct ttm_placement *placement) -{ - BUG_ON((placement->fpfn || placement->lpfn) && - (bo->mem.num_pages > (placement->lpfn - placement->fpfn))); - - return 0; -} - int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo, unsigned long size, @@ -1147,15 +1135,12 @@ int ttm_bo_init(struct ttm_bo_device *bdev, atomic_inc(&bo->glob->bo_count); drm_vma_node_reset(&bo->vma_node); - ret = ttm_bo_check_placement(bo, placement); - /* * For ttm_bo_type_device buffers, allocate * address space from the device. */ - if (likely(!ret) && - (bo->type == ttm_bo_type_device || - bo->type == ttm_bo_type_sg)) + if (bo->type == ttm_bo_type_device || + bo->type == ttm_bo_type_sg) ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node, bo->mem.num_pages); diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c index 9e103a4875c..964387fc5c8 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_manager.c +++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c @@ -49,8 +49,7 @@ struct ttm_range_manager { static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, - struct ttm_placement *placement, - uint32_t flags, + const struct ttm_place *place, struct ttm_mem_reg *mem) { struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; @@ -60,7 +59,7 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, unsigned long lpfn; int ret; - lpfn = placement->lpfn; + lpfn = place->lpfn; if (!lpfn) lpfn = man->size; @@ -68,13 +67,13 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, if (!node) return -ENOMEM; - if (flags & TTM_PL_FLAG_TOPDOWN) + if (place->flags & TTM_PL_FLAG_TOPDOWN) aflags = DRM_MM_CREATE_TOP; spin_lock(&rman->lock); ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages, mem->page_alignment, 0, - placement->fpfn, lpfn, + place->fpfn, lpfn, DRM_MM_SEARCH_BEST, aflags); spin_unlock(&rman->lock); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c index 6327cfc3680..37c093c0c7b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c @@ -30,66 +30,101 @@ #include #include -static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM | - TTM_PL_FLAG_CACHED; - -static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM | - TTM_PL_FLAG_CACHED | - TTM_PL_FLAG_NO_EVICT; +static struct ttm_place vram_placement_flags = { + .fpfn = 0, + .lpfn = 0, + .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED +}; -static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM | - TTM_PL_FLAG_CACHED; +static struct ttm_place vram_ne_placement_flags = { + .fpfn = 0, + .lpfn = 0, + .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT +}; -static uint32_t sys_ne_placement_flags = TTM_PL_FLAG_SYSTEM | - TTM_PL_FLAG_CACHED | - TTM_PL_FLAG_NO_EVICT; +static struct ttm_place sys_placement_flags = { + .fpfn = 0, + .lpfn = 0, + .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED +}; -static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR | - TTM_PL_FLAG_CACHED; +static struct ttm_place sys_ne_placement_flags = { + .fpfn = 0, + .lpfn = 0, + .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT +}; -static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR | - TTM_PL_FLAG_CACHED | - TTM_PL_FLAG_NO_EVICT; +static struct ttm_place gmr_placement_flags = { + 
.fpfn = 0, + .lpfn = 0, + .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED +}; -static uint32_t mob_placement_flags = VMW_PL_FLAG_MOB | - TTM_PL_FLAG_CACHED; +static struct ttm_place gmr_ne_placement_flags = { + .fpfn = 0, + .lpfn = 0, + .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT +}; -struct ttm_placement vmw_vram_placement = { +static struct ttm_place mob_placement_flags = { .fpfn = 0, .lpfn = 0, + .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED +}; + +struct ttm_placement vmw_vram_placement = { .num_placement = 1, .placement = &vram_placement_flags, .num_busy_placement = 1, .busy_placement = &vram_placement_flags }; -static uint32_t vram_gmr_placement_flags[] = { - TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED, - VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED +static struct ttm_place vram_gmr_placement_flags[] = { + { + .fpfn = 0, + .lpfn = 0, + .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED + }, { + .fpfn = 0, + .lpfn = 0, + .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED + } }; -static uint32_t gmr_vram_placement_flags[] = { - VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED, - TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED +static struct ttm_place gmr_vram_placement_flags[] = { + { + .fpfn = 0, + .lpfn = 0, + .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED + }, { + .fpfn = 0, + .lpfn = 0, + .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED + } }; struct ttm_placement vmw_vram_gmr_placement = { - .fpfn = 0, - .lpfn = 0, .num_placement = 2, .placement = vram_gmr_placement_flags, .num_busy_placement = 1, .busy_placement = &gmr_placement_flags }; -static uint32_t vram_gmr_ne_placement_flags[] = { - TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT, - VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT +static struct ttm_place vram_gmr_ne_placement_flags[] = { + { + .fpfn = 0, + .lpfn = 0, + .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | + TTM_PL_FLAG_NO_EVICT + }, { + .fpfn = 0, + .lpfn = 0, + .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | + TTM_PL_FLAG_NO_EVICT + } }; struct ttm_placement vmw_vram_gmr_ne_placement = { - .fpfn = 0, - .lpfn = 0, .num_placement = 2, .placement = vram_gmr_ne_placement_flags, .num_busy_placement = 1, @@ -97,8 +132,6 @@ struct ttm_placement vmw_vram_gmr_ne_placement = { }; struct ttm_placement vmw_vram_sys_placement = { - .fpfn = 0, - .lpfn = 0, .num_placement = 1, .placement = &vram_placement_flags, .num_busy_placement = 1, @@ -106,8 +139,6 @@ struct ttm_placement vmw_vram_sys_placement = { }; struct ttm_placement vmw_vram_ne_placement = { - .fpfn = 0, - .lpfn = 0, .num_placement = 1, .placement = &vram_ne_placement_flags, .num_busy_placement = 1, @@ -115,8 +146,6 @@ struct ttm_placement vmw_vram_ne_placement = { }; struct ttm_placement vmw_sys_placement = { - .fpfn = 0, - .lpfn = 0, .num_placement = 1, .placement = &sys_placement_flags, .num_busy_placement = 1, @@ -124,24 +153,33 @@ struct ttm_placement vmw_sys_placement = { }; struct ttm_placement vmw_sys_ne_placement = { - .fpfn = 0, - .lpfn = 0, .num_placement = 1, .placement = &sys_ne_placement_flags, .num_busy_placement = 1, .busy_placement = &sys_ne_placement_flags }; -static uint32_t evictable_placement_flags[] = { - TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED, - TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED, - VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED, - VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED +static struct ttm_place evictable_placement_flags[] = { + { + .fpfn = 0, + .lpfn = 0, + .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED + }, { + .fpfn = 0, + .lpfn = 0, + .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED + }, { + 
.fpfn = 0, + .lpfn = 0, + .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED + }, { + .fpfn = 0, + .lpfn = 0, + .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED + } }; struct ttm_placement vmw_evictable_placement = { - .fpfn = 0, - .lpfn = 0, .num_placement = 4, .placement = evictable_placement_flags, .num_busy_placement = 1, @@ -149,8 +187,6 @@ struct ttm_placement vmw_evictable_placement = { }; struct ttm_placement vmw_srf_placement = { - .fpfn = 0, - .lpfn = 0, .num_placement = 1, .num_busy_placement = 2, .placement = &gmr_placement_flags, @@ -158,8 +194,6 @@ struct ttm_placement vmw_srf_placement = { }; struct ttm_placement vmw_mob_placement = { - .fpfn = 0, - .lpfn = 0, .num_placement = 1, .num_busy_placement = 1, .placement = &mob_placement_flags, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c index ed1d51006ab..914b375763d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c @@ -198,13 +198,19 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv, { struct ttm_buffer_object *bo = &buf->base; struct ttm_placement placement; + struct ttm_place place; int ret = 0; if (pin) - placement = vmw_vram_ne_placement; + place = vmw_vram_ne_placement.placement[0]; else - placement = vmw_vram_placement; - placement.lpfn = bo->num_pages; + place = vmw_vram_placement.placement[0]; + place.lpfn = bo->num_pages; + + placement.num_placement = 1; + placement.placement = &place; + placement.num_busy_placement = 1; + placement.busy_placement = &place; ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible); if (unlikely(ret != 0)) @@ -293,21 +299,23 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo, */ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin) { - uint32_t pl_flags; + struct ttm_place pl; struct ttm_placement placement; uint32_t old_mem_type = bo->mem.mem_type; int ret; lockdep_assert_held(&bo->resv->lock.base); - pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB + pl.fpfn = 0; + pl.lpfn = 0; + pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED; if (pin) - pl_flags |= TTM_PL_FLAG_NO_EVICT; + pl.flags |= TTM_PL_FLAG_NO_EVICT; memset(&placement, 0, sizeof(placement)); placement.num_placement = 1; - placement.placement = &pl_flags; + placement.placement = &pl; ret = ttm_bo_validate(bo, &placement, false, true); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index b031b48dbb3..0a474f391fa 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -374,10 +374,16 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv, size_t size, struct vmw_dma_buffer **out) { struct vmw_dma_buffer *vmw_bo; - struct ttm_placement ne_placement = vmw_vram_ne_placement; + struct ttm_place ne_place = vmw_vram_ne_placement.placement[0]; + struct ttm_placement ne_placement; int ret; - ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; + ne_placement.num_placement = 1; + ne_placement.placement = &ne_place; + ne_placement.num_busy_placement = 1; + ne_placement.busy_placement = &ne_place; + + ne_place.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; (void) ttm_write_lock(&vmw_priv->reservation_sem, false); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index 26f8bdde352..170b61be1e4 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -46,8 
+46,7 @@ struct vmwgfx_gmrid_man { static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, - struct ttm_placement *placement, - uint32_t flags, + const struct ttm_place *place, struct ttm_mem_reg *mem) { struct vmwgfx_gmrid_man *gman = diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 7526c5bf561..e3d39c80a09 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -45,12 +45,24 @@ struct ttm_bo_device; struct drm_mm_node; +/** + * struct ttm_place + * + * @fpfn: first valid page frame number to put the object + * @lpfn: last valid page frame number to put the object + * @flags: memory domain and caching flags for the object + * + * Structure indicating a possible place to put an object. + */ +struct ttm_place { + unsigned fpfn; + unsigned lpfn; + uint32_t flags; +}; /** * struct ttm_placement * - * @fpfn: first valid page frame number to put the object - * @lpfn: last valid page frame number to put the object * @num_placement: number of preferred placements * @placement: preferred placements * @num_busy_placement: number of preferred placements when need to evict buffer @@ -59,12 +71,10 @@ struct drm_mm_node; * Structure indicating the placement you request for an object. */ struct ttm_placement { - unsigned fpfn; - unsigned lpfn; - unsigned num_placement; - const uint32_t *placement; - unsigned num_busy_placement; - const uint32_t *busy_placement; + unsigned num_placement; + const struct ttm_place *placement; + unsigned num_busy_placement; + const struct ttm_place *busy_placement; }; /** @@ -518,20 +528,6 @@ extern int ttm_bo_create(struct ttm_bo_device *bdev, struct file *persistent_swap_storage, struct ttm_buffer_object **p_bo); -/** - * ttm_bo_check_placement - * - * @bo: the buffer object. - * @placement: placements - * - * Performs minimal validity checking on an intended change of - * placement flags. - * Returns - * -EINVAL: Intended change is invalid or not allowed. - */ -extern int ttm_bo_check_placement(struct ttm_buffer_object *bo, - struct ttm_placement *placement); - /** * ttm_bo_init_mm * diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 1d9f0f1ff52..5c8bb5699a6 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -208,8 +208,7 @@ struct ttm_mem_type_manager_func { */ int (*get_node)(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, - struct ttm_placement *placement, - uint32_t flags, + const struct ttm_place *place, struct ttm_mem_reg *mem); /** -- cgit v1.2.3-70-g09d2 From dd7cfd641228abb2669d8d047d5ec377b1835900 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Tue, 21 Jan 2014 13:07:31 +0100 Subject: drm/ttm: kill fence_lock No users are left, kill it off! :D Conversion to the reservation api is next on the list, after that the functionality can be restored with rcu. 
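To make the new locking rule concrete: bo->sync_obj is now protected by the buffer's reservation rather than by a bdev-wide spinlock, so a driver that wants a reference to the current fence does roughly the following. This is only an illustrative sketch of the pattern; example_bo_grab_fence is a made-up helper name, not an API added here.

#include <drm/ttm/ttm_bo_driver.h>

static void *example_bo_grab_fence(struct ttm_buffer_object *bo)
{
	struct ttm_bo_driver *driver = bo->bdev->driver;

	/* The reservation now serialises sync_obj access (was fence_lock). */
	lockdep_assert_held(&bo->resv->lock.base);

	return bo->sync_obj ? driver->sync_obj_ref(bo->sync_obj) : NULL;
}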
Signed-off-by: Maarten Lankhorst --- drivers/gpu/drm/nouveau/nouveau_bo.c | 25 ++++------- drivers/gpu/drm/nouveau/nouveau_display.c | 6 +-- drivers/gpu/drm/nouveau/nouveau_gem.c | 16 +------ drivers/gpu/drm/qxl/qxl_cmd.c | 2 - drivers/gpu/drm/qxl/qxl_fence.c | 4 -- drivers/gpu/drm/qxl/qxl_object.h | 2 - drivers/gpu/drm/qxl/qxl_release.c | 2 - drivers/gpu/drm/radeon/radeon_display.c | 7 +-- drivers/gpu/drm/radeon/radeon_object.c | 2 - drivers/gpu/drm/ttm/ttm_bo.c | 75 ++++++++----------------------- drivers/gpu/drm/ttm/ttm_bo_util.c | 5 --- drivers/gpu/drm/ttm/ttm_bo_vm.c | 3 -- drivers/gpu/drm/ttm/ttm_execbuf_util.c | 2 - drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 4 -- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 16 +++---- include/drm/ttm/ttm_bo_api.h | 5 +-- include/drm/ttm/ttm_bo_driver.h | 3 -- 17 files changed, 37 insertions(+), 142 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index ed966f51e29..8d8e5f6340d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1212,9 +1212,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, } /* Fallback to software copy. */ - spin_lock(&bo->bdev->fence_lock); ret = ttm_bo_wait(bo, true, intr, no_wait_gpu); - spin_unlock(&bo->bdev->fence_lock); if (ret == 0) ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); @@ -1457,26 +1455,19 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) ttm_pool_unpopulate(ttm); } +static void +nouveau_bo_fence_unref(void **sync_obj) +{ + nouveau_fence_unref((struct nouveau_fence **)sync_obj); +} + void nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence) { - struct nouveau_fence *new_fence = nouveau_fence_ref(fence); - struct nouveau_fence *old_fence = NULL; - lockdep_assert_held(&nvbo->bo.resv->lock.base); - spin_lock(&nvbo->bo.bdev->fence_lock); - old_fence = nvbo->bo.sync_obj; - nvbo->bo.sync_obj = new_fence; - spin_unlock(&nvbo->bo.bdev->fence_lock); - - nouveau_fence_unref(&old_fence); -} - -static void -nouveau_bo_fence_unref(void **sync_obj) -{ - nouveau_fence_unref((struct nouveau_fence **)sync_obj); + nouveau_bo_fence_unref(&nvbo->bo.sync_obj); + nvbo->bo.sync_obj = nouveau_fence_ref(fence); } static void * diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 54b1f3d8fc7..e6867b9ebb4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -722,11 +722,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, goto fail_unpin; /* synchronise rendering channel with the kernel's channel */ - spin_lock(&new_bo->bo.bdev->fence_lock); - fence = nouveau_fence_ref(new_bo->bo.sync_obj); - spin_unlock(&new_bo->bo.bdev->fence_lock); - ret = nouveau_fence_sync(fence, chan); - nouveau_fence_unref(&fence); + ret = nouveau_fence_sync(new_bo->bo.sync_obj, chan); if (ret) { ttm_bo_unreserve(&new_bo->bo); goto fail_unpin; diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 0054315eb87..1650c0bdb0f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -103,9 +103,7 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma) list_del(&vma->head); if (mapped) { - spin_lock(&nvbo->bo.bdev->fence_lock); fence = nouveau_fence_ref(nvbo->bo.sync_obj); - spin_unlock(&nvbo->bo.bdev->fence_lock); } if (fence) { @@ -430,17 +428,11 @@ retry: static int 
validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo) { - struct nouveau_fence *fence = NULL; + struct nouveau_fence *fence = nvbo->bo.sync_obj; int ret = 0; - spin_lock(&nvbo->bo.bdev->fence_lock); - fence = nouveau_fence_ref(nvbo->bo.sync_obj); - spin_unlock(&nvbo->bo.bdev->fence_lock); - - if (fence) { + if (fence) ret = nouveau_fence_sync(fence, chan); - nouveau_fence_unref(&fence); - } return ret; } @@ -659,9 +651,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli, data |= r->vor; } - spin_lock(&nvbo->bo.bdev->fence_lock); ret = ttm_bo_wait(&nvbo->bo, false, false, false); - spin_unlock(&nvbo->bo.bdev->fence_lock); if (ret) { NV_PRINTK(error, cli, "reloc wait_idle failed: %d\n", ret); break; @@ -894,11 +884,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data, ret = ttm_bo_reserve(&nvbo->bo, true, false, false, NULL); if (!ret) { - spin_lock(&nvbo->bo.bdev->fence_lock); ret = ttm_bo_wait(&nvbo->bo, true, true, true); if (!no_wait && ret) fence = nouveau_fence_ref(nvbo->bo.sync_obj); - spin_unlock(&nvbo->bo.bdev->fence_lock); ttm_bo_unreserve(&nvbo->bo); } diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c index eb89653a7a1..45fad7b4548 100644 --- a/drivers/gpu/drm/qxl/qxl_cmd.c +++ b/drivers/gpu/drm/qxl/qxl_cmd.c @@ -628,9 +628,7 @@ static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stal if (stall) mutex_unlock(&qdev->surf_evict_mutex); - spin_lock(&surf->tbo.bdev->fence_lock); ret = ttm_bo_wait(&surf->tbo, true, true, !stall); - spin_unlock(&surf->tbo.bdev->fence_lock); if (stall) mutex_lock(&qdev->surf_evict_mutex); diff --git a/drivers/gpu/drm/qxl/qxl_fence.c b/drivers/gpu/drm/qxl/qxl_fence.c index ae59e91cfb9..c7248418117 100644 --- a/drivers/gpu/drm/qxl/qxl_fence.c +++ b/drivers/gpu/drm/qxl/qxl_fence.c @@ -60,9 +60,6 @@ int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id) { void *ret; int retval = 0; - struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence); - - spin_lock(&bo->tbo.bdev->fence_lock); ret = radix_tree_delete(&qfence->tree, rel_id); if (ret == qfence) @@ -71,7 +68,6 @@ int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id) DRM_DEBUG("didn't find fence in radix tree for %d\n", rel_id); retval = -ENOENT; } - spin_unlock(&bo->tbo.bdev->fence_lock); return retval; } diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h index 83a423293af..1edaf576808 100644 --- a/drivers/gpu/drm/qxl/qxl_object.h +++ b/drivers/gpu/drm/qxl/qxl_object.h @@ -76,12 +76,10 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type, } return r; } - spin_lock(&bo->tbo.bdev->fence_lock); if (mem_type) *mem_type = bo->tbo.mem.mem_type; if (bo->tbo.sync_obj) r = ttm_bo_wait(&bo->tbo, true, true, no_wait); - spin_unlock(&bo->tbo.bdev->fence_lock); ttm_bo_unreserve(&bo->tbo); return r; } diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index 14e776f1d14..2e5e38fee9b 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -337,7 +337,6 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release) glob = bo->glob; spin_lock(&glob->lru_lock); - spin_lock(&bdev->fence_lock); list_for_each_entry(entry, &release->bos, head) { bo = entry->bo; @@ -352,7 +351,6 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release) __ttm_bo_unreserve(bo); entry->reserved = false; } - spin_unlock(&bdev->fence_lock); spin_unlock(&glob->lru_lock); 
ww_acquire_fini(&release->ticket); } diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index bd0d687379e..7d0a7abdab2 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -476,11 +476,6 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc, obj = new_radeon_fb->obj; new_rbo = gem_to_radeon_bo(obj); - spin_lock(&new_rbo->tbo.bdev->fence_lock); - if (new_rbo->tbo.sync_obj) - work->fence = radeon_fence_ref(new_rbo->tbo.sync_obj); - spin_unlock(&new_rbo->tbo.bdev->fence_lock); - /* pin the new buffer */ DRM_DEBUG_DRIVER("flip-ioctl() cur_rbo = %p, new_rbo = %p\n", work->old_rbo, new_rbo); @@ -499,6 +494,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc, DRM_ERROR("failed to pin new rbo buffer before flip\n"); goto cleanup; } + work->fence = radeon_fence_ref(new_rbo->tbo.sync_obj); radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL); radeon_bo_unreserve(new_rbo); @@ -582,7 +578,6 @@ cleanup: drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); radeon_fence_unref(&work->fence); kfree(work); - return r; } diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index c97a42432e2..cbac963571c 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -779,12 +779,10 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait) r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL); if (unlikely(r != 0)) return r; - spin_lock(&bo->tbo.bdev->fence_lock); if (mem_type) *mem_type = bo->tbo.mem.mem_type; if (bo->tbo.sync_obj) r = ttm_bo_wait(&bo->tbo, true, true, no_wait); - spin_unlock(&bo->tbo.bdev->fence_lock); ttm_bo_unreserve(&bo->tbo); return r; } diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 4f1bc948bda..195386f16ca 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -415,24 +415,20 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) spin_lock(&glob->lru_lock); ret = __ttm_bo_reserve(bo, false, true, false, NULL); - spin_lock(&bdev->fence_lock); - (void) ttm_bo_wait(bo, false, false, true); - if (!ret && !bo->sync_obj) { - spin_unlock(&bdev->fence_lock); - put_count = ttm_bo_del_from_lru(bo); + if (!ret) { + (void) ttm_bo_wait(bo, false, false, true); - spin_unlock(&glob->lru_lock); - ttm_bo_cleanup_memtype_use(bo); + if (!bo->sync_obj) { + put_count = ttm_bo_del_from_lru(bo); - ttm_bo_list_ref_sub(bo, put_count, true); + spin_unlock(&glob->lru_lock); + ttm_bo_cleanup_memtype_use(bo); - return; - } - if (bo->sync_obj) - sync_obj = driver->sync_obj_ref(bo->sync_obj); - spin_unlock(&bdev->fence_lock); + ttm_bo_list_ref_sub(bo, put_count, true); - if (!ret) { + return; + } + sync_obj = driver->sync_obj_ref(bo->sync_obj); /* * Make NO_EVICT bos immediately available to @@ -481,7 +477,6 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, int put_count; int ret; - spin_lock(&bdev->fence_lock); ret = ttm_bo_wait(bo, false, false, true); if (ret && !no_wait_gpu) { @@ -493,7 +488,6 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, * no new sync objects can be attached. 
*/ sync_obj = driver->sync_obj_ref(bo->sync_obj); - spin_unlock(&bdev->fence_lock); __ttm_bo_unreserve(bo); spin_unlock(&glob->lru_lock); @@ -523,11 +517,9 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, * remove sync_obj with ttm_bo_wait, the wait should be * finished, and no new wait object should have been added. */ - spin_lock(&bdev->fence_lock); ret = ttm_bo_wait(bo, false, false, true); WARN_ON(ret); } - spin_unlock(&bdev->fence_lock); if (ret || unlikely(list_empty(&bo->ddestroy))) { __ttm_bo_unreserve(bo); @@ -665,9 +657,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, struct ttm_placement placement; int ret = 0; - spin_lock(&bdev->fence_lock); ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); - spin_unlock(&bdev->fence_lock); if (unlikely(ret != 0)) { if (ret != -ERESTARTSYS) { @@ -958,7 +948,6 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo, { int ret = 0; struct ttm_mem_reg mem; - struct ttm_bo_device *bdev = bo->bdev; lockdep_assert_held(&bo->resv->lock.base); @@ -967,9 +956,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo, * Have the driver move function wait for idle when necessary, * instead of doing it here. */ - spin_lock(&bdev->fence_lock); ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); - spin_unlock(&bdev->fence_lock); if (ret) return ret; mem.num_pages = bo->num_pages; @@ -1459,7 +1446,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, bdev->glob = glob; bdev->need_dma32 = need_dma32; bdev->val_seq = 0; - spin_lock_init(&bdev->fence_lock); mutex_lock(&glob->device_list_mutex); list_add_tail(&bdev->device_list, &glob->device_list); mutex_unlock(&glob->device_list_mutex); @@ -1517,7 +1503,6 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, bool interruptible, bool no_wait) { struct ttm_bo_driver *driver = bo->bdev->driver; - struct ttm_bo_device *bdev = bo->bdev; void *sync_obj; int ret = 0; @@ -1526,53 +1511,33 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, if (likely(bo->sync_obj == NULL)) return 0; - while (bo->sync_obj) { - + if (bo->sync_obj) { if (driver->sync_obj_signaled(bo->sync_obj)) { - void *tmp_obj = bo->sync_obj; - bo->sync_obj = NULL; + driver->sync_obj_unref(&bo->sync_obj); clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); - spin_unlock(&bdev->fence_lock); - driver->sync_obj_unref(&tmp_obj); - spin_lock(&bdev->fence_lock); - continue; + return 0; } if (no_wait) return -EBUSY; sync_obj = driver->sync_obj_ref(bo->sync_obj); - spin_unlock(&bdev->fence_lock); ret = driver->sync_obj_wait(sync_obj, lazy, interruptible); - if (unlikely(ret != 0)) { - driver->sync_obj_unref(&sync_obj); - spin_lock(&bdev->fence_lock); - return ret; - } - spin_lock(&bdev->fence_lock); - if (likely(bo->sync_obj == sync_obj)) { - void *tmp_obj = bo->sync_obj; - bo->sync_obj = NULL; + + if (likely(ret == 0)) { clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); - spin_unlock(&bdev->fence_lock); - driver->sync_obj_unref(&sync_obj); - driver->sync_obj_unref(&tmp_obj); - spin_lock(&bdev->fence_lock); - } else { - spin_unlock(&bdev->fence_lock); - driver->sync_obj_unref(&sync_obj); - spin_lock(&bdev->fence_lock); + driver->sync_obj_unref(&bo->sync_obj); } + driver->sync_obj_unref(&sync_obj); } - return 0; + return ret; } EXPORT_SYMBOL(ttm_bo_wait); int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) { - struct ttm_bo_device *bdev = bo->bdev; int ret = 0; /* @@ -1582,9 +1547,7 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool 
no_wait) ret = ttm_bo_reserve(bo, true, no_wait, false, NULL); if (unlikely(ret != 0)) return ret; - spin_lock(&bdev->fence_lock); ret = ttm_bo_wait(bo, false, true, no_wait); - spin_unlock(&bdev->fence_lock); if (likely(ret == 0)) atomic_inc(&bo->cpu_writers); ttm_bo_unreserve(bo); @@ -1641,9 +1604,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) * Wait for GPU, then move to system cached. */ - spin_lock(&bo->bdev->fence_lock); ret = ttm_bo_wait(bo, false, false, false); - spin_unlock(&bo->bdev->fence_lock); if (unlikely(ret != 0)) goto out; diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 30e5d90cb7b..495aebf0f9c 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -466,12 +466,10 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, drm_vma_node_reset(&fbo->vma_node); atomic_set(&fbo->cpu_writers, 0); - spin_lock(&bdev->fence_lock); if (bo->sync_obj) fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj); else fbo->sync_obj = NULL; - spin_unlock(&bdev->fence_lock); kref_init(&fbo->list_kref); kref_init(&fbo->kref); fbo->destroy = &ttm_transfered_destroy; @@ -657,7 +655,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, struct ttm_buffer_object *ghost_obj; void *tmp_obj = NULL; - spin_lock(&bdev->fence_lock); if (bo->sync_obj) { tmp_obj = bo->sync_obj; bo->sync_obj = NULL; @@ -665,7 +662,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, bo->sync_obj = driver->sync_obj_ref(sync_obj); if (evict) { ret = ttm_bo_wait(bo, false, false, false); - spin_unlock(&bdev->fence_lock); if (tmp_obj) driver->sync_obj_unref(&tmp_obj); if (ret) @@ -688,7 +684,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, */ set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); - spin_unlock(&bdev->fence_lock); if (tmp_obj) driver->sync_obj_unref(&tmp_obj); diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 0ce48e5a9cb..d05437f219e 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -45,10 +45,8 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, struct vm_area_struct *vma, struct vm_fault *vmf) { - struct ttm_bo_device *bdev = bo->bdev; int ret = 0; - spin_lock(&bdev->fence_lock); if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags))) goto out_unlock; @@ -82,7 +80,6 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, VM_FAULT_NOPAGE; out_unlock: - spin_unlock(&bdev->fence_lock); return ret; } diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index e8dac875852..0fbbbbd67af 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c @@ -217,7 +217,6 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, glob = bo->glob; spin_lock(&glob->lru_lock); - spin_lock(&bdev->fence_lock); list_for_each_entry(entry, list, head) { bo = entry->bo; @@ -227,7 +226,6 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, __ttm_bo_unreserve(bo); entry->reserved = false; } - spin_unlock(&bdev->fence_lock); spin_unlock(&glob->lru_lock); if (ticket) ww_acquire_fini(ticket); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c index 37c093c0c7b..c133b3d10de 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c @@ -863,11 +863,7 @@ static void vmw_move_notify(struct ttm_buffer_object *bo, */ static void 
vmw_swap_notify(struct ttm_buffer_object *bo) { - struct ttm_bo_device *bdev = bo->bdev; - - spin_lock(&bdev->fence_lock); ttm_bo_wait(bo, false, false, false); - spin_unlock(&bdev->fence_lock); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index a432c0db257..1ee86bf8275 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -567,12 +567,13 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo, int ret; if (flags & drm_vmw_synccpu_allow_cs) { - struct ttm_bo_device *bdev = bo->bdev; + bool nonblock = !!(flags & drm_vmw_synccpu_dontblock); - spin_lock(&bdev->fence_lock); - ret = ttm_bo_wait(bo, false, true, - !!(flags & drm_vmw_synccpu_dontblock)); - spin_unlock(&bdev->fence_lock); + ret = ttm_bo_reserve(bo, true, nonblock, false, NULL); + if (!ret) { + ret = ttm_bo_wait(bo, false, true, nonblock); + ttm_bo_unreserve(bo); + } return ret; } @@ -1429,12 +1430,10 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo, else driver->sync_obj_ref(fence); - spin_lock(&bdev->fence_lock); old_fence_obj = bo->sync_obj; bo->sync_obj = fence; - spin_unlock(&bdev->fence_lock); if (old_fence_obj) vmw_fence_obj_unreference(&old_fence_obj); @@ -1475,7 +1474,6 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo, if (mem->mem_type != VMW_PL_MOB) { struct vmw_resource *res, *n; - struct ttm_bo_device *bdev = bo->bdev; struct ttm_validate_buffer val_buf; val_buf.bo = bo; @@ -1491,9 +1489,7 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo, list_del_init(&res->mob_head); } - spin_lock(&bdev->fence_lock); (void) ttm_bo_wait(bo, false, false, false); - spin_unlock(&bdev->fence_lock); } } diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index e3d39c80a09..5805f4a4947 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -237,10 +237,7 @@ struct ttm_buffer_object { struct list_head io_reserve_lru; /** - * Members protected by struct buffer_object_device::fence_lock - * In addition, setting sync_obj to anything else - * than NULL requires bo::reserved to be held. This allows for - * checking NULL while reserved but not holding the mentioned lock. + * Members protected by a bo reservation. */ void *sync_obj; diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 5c8bb5699a6..e1ee141e26c 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -521,8 +521,6 @@ struct ttm_bo_global { * * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver. * @man: An array of mem_type_managers. - * @fence_lock: Protects the synchronizing members on *all* bos belonging - * to this device. * @vma_manager: Address space manager * lru_lock: Spinlock that protects the buffer+device lru lists and * ddestroy lists. @@ -542,7 +540,6 @@ struct ttm_bo_device { struct ttm_bo_global *glob; struct ttm_bo_driver *driver; struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES]; - spinlock_t fence_lock; /* * Protected by internal locks. -- cgit v1.2.3-70-g09d2 From 58b4d720c1620bbf09e42b4f218dcb2d0d8cdf3e Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Thu, 9 Jan 2014 11:03:08 +0100 Subject: drm/ttm: add interruptible parameter to ttm_eu_reserve_buffers It seems some drivers really want this as a parameter, like vmwgfx. 
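A minimal caller sketch of the new signature (illustrative only; example_reserve_list and the shape of the validate list are hypothetical, not part of this patch). Callers acting on behalf of userspace generally pass intr = true and must then handle -ERESTARTSYS, while internal paths such as vmwgfx's pinned-bo release can pass false to wait uninterruptibly:

#include <drm/ttm/ttm_execbuf_util.h>

static int example_reserve_list(struct ww_acquire_ctx *ticket,
				struct list_head *validate_list, bool intr)
{
	int ret;

	/*
	 * With intr == true this may return -ERESTARTSYS if a signal is
	 * pending; either way, no buffer on the list stays reserved on
	 * failure.
	 */
	ret = ttm_eu_reserve_buffers(ticket, validate_list, intr);
	if (ret)
		return ret;

	/* ... validate buffers and queue work here ... */

	/* Drop the reservations again if the submission is abandoned. */
	ttm_eu_backoff_reservation(ticket, validate_list);
	return 0;
}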
Signed-off-by: Maarten Lankhorst --- drivers/gpu/drm/qxl/qxl_release.c | 2 +- drivers/gpu/drm/radeon/radeon_object.c | 2 +- drivers/gpu/drm/radeon/radeon_vm.c | 2 +- drivers/gpu/drm/ttm/ttm_execbuf_util.c | 22 +++++++++++++--------- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 7 ++----- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 2 +- include/drm/ttm/ttm_execbuf_util.h | 9 +++++---- 7 files changed, 24 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index 2e5e38fee9b..656f9d3a946 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -159,7 +159,7 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr) if (list_is_singular(&release->bos)) return 0; - ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos); + ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos, !no_intr); if (ret) return ret; diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index cbac963571c..378fe9ea4d4 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -482,7 +482,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev, u64 bytes_moved = 0, initial_bytes_moved; u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev); - r = ttm_eu_reserve_buffers(ticket, head); + r = ttm_eu_reserve_buffers(ticket, head, true); if (unlikely(r != 0)) { return r; } diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index 4751c6728fe..3d9a6a036f8 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -399,7 +399,7 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev, INIT_LIST_HEAD(&head); list_add(&tv.head, &head); - r = ttm_eu_reserve_buffers(&ticket, &head); + r = ttm_eu_reserve_buffers(&ticket, &head, true); if (r) return r; diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index 0fbbbbd67af..87d7deefc80 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c @@ -112,7 +112,7 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation); */ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, - struct list_head *list) + struct list_head *list, bool intr) { struct ttm_bo_global *glob; struct ttm_validate_buffer *entry; @@ -140,7 +140,7 @@ retry: if (entry->reserved) continue; - ret = __ttm_bo_reserve(bo, true, (ticket == NULL), true, + ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), true, ticket); if (ret == -EDEADLK) { @@ -153,13 +153,17 @@ retry: ttm_eu_backoff_reservation_locked(list); spin_unlock(&glob->lru_lock); ttm_eu_list_ref_sub(list); - ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, - ticket); - if (unlikely(ret != 0)) { - if (ret == -EINTR) - ret = -ERESTARTSYS; - goto err_fini; - } + + if (intr) { + ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, + ticket); + if (unlikely(ret != 0)) { + if (ret == -EINTR) + ret = -ERESTARTSYS; + goto err_fini; + } + } else + ww_mutex_lock_slow(&bo->resv->lock, ticket); entry->reserved = true; if (unlikely(atomic_read(&bo->cpu_writers) > 0)) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 7bfdaa163a3..24f067bf438 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -2496,7 +2496,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, if (unlikely(ret != 0)) goto 
out_err_nores; - ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes); + ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, true); if (unlikely(ret != 0)) goto out_err; @@ -2684,10 +2684,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo); list_add_tail(&query_val.head, &validate_list); - do { - ret = ttm_eu_reserve_buffers(&ticket, &validate_list); - } while (ret == -ERESTARTSYS); - + ret = ttm_eu_reserve_buffers(&ticket, &validate_list, false); if (unlikely(ret != 0)) { vmw_execbuf_unpin_panic(dev_priv); goto out_no_reserve; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 1ee86bf8275..23169362bca 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -1216,7 +1216,7 @@ vmw_resource_check_buffer(struct vmw_resource *res, INIT_LIST_HEAD(&val_list); val_buf->bo = ttm_bo_reference(&res->backup->base); list_add_tail(&val_buf->head, &val_list); - ret = ttm_eu_reserve_buffers(NULL, &val_list); + ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible); if (unlikely(ret != 0)) goto out_no_reserve; diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h index 16db7d01a33..fd95fd569ca 100644 --- a/include/drm/ttm/ttm_execbuf_util.h +++ b/include/drm/ttm/ttm_execbuf_util.h @@ -73,6 +73,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, * @ticket: [out] ww_acquire_ctx filled in by call, or NULL if only * non-blocking reserves should be tried. * @list: thread private list of ttm_validate_buffer structs. + * @intr: should the wait be interruptible * * Tries to reserve bos pointed to by the list entries for validation. * If the function returns 0, all buffers are marked as "unfenced", @@ -84,9 +85,9 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, * CPU write reservations to be cleared, and for other threads to * unreserve their buffers. * - * This function may return -ERESTART or -EAGAIN if the calling process - * receives a signal while waiting. In that case, no buffers on the list - * will be reserved upon return. + * If intr is set to true, this function may return -ERESTARTSYS if the + * calling process receives a signal while waiting. In that case, no + * buffers on the list will be reserved upon return. * * Buffers reserved by this function should be unreserved by * a call to either ttm_eu_backoff_reservation() or @@ -95,7 +96,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, */ extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, - struct list_head *list); + struct list_head *list, bool intr); /** * function ttm_eu_fence_buffer_objects. -- cgit v1.2.3-70-g09d2 From 1f0dc9a59afeccb96a35ebec36661266260f5eee Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Thu, 9 Jan 2014 11:03:08 +0100 Subject: drm/ttm: kill off some members to ttm_validate_buffer This reorders the list to keep track of what buffers are reserved, so previous members are always unreserved. This gets rid of some bookkeeping that's no longer needed, while simplifying the code some. 
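The invariant is positional: while ttm_eu_reserve_buffers() walks the list, every entry before the current one is reserved, so the contention path only has to unwind in reverse from the failing entry, and a slow-locked buffer is moved to the front of the list to preserve that ordering. For drivers this also means a validate entry now carries little more than the bo and the list head; a minimal setup sketch (example_add_validate_bo is a hypothetical helper name):

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_execbuf_util.h>

static void example_add_validate_bo(struct list_head *validate_list,
				    struct ttm_validate_buffer *entry,
				    struct ttm_buffer_object *bo)
{
	/* No .reserved/.removed/.put_count left to initialise. */
	entry->bo = ttm_bo_reference(bo);
	list_add_tail(&entry->head, validate_list);
}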
Signed-off-by: Maarten Lankhorst --- drivers/gpu/drm/qxl/qxl_release.c | 1 - drivers/gpu/drm/ttm/ttm_execbuf_util.c | 142 +++++++++++--------------------- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 1 - include/drm/ttm/ttm_execbuf_util.h | 3 - 4 files changed, 50 insertions(+), 97 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index 656f9d3a946..4045ba873ab 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -349,7 +349,6 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release) ttm_bo_add_to_lru(bo); __ttm_bo_unreserve(bo); - entry->reserved = false; } spin_unlock(&glob->lru_lock); ww_acquire_fini(&release->ticket); diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index 87d7deefc80..108730e9147 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c @@ -32,20 +32,12 @@ #include #include -static void ttm_eu_backoff_reservation_locked(struct list_head *list) +static void ttm_eu_backoff_reservation_reverse(struct list_head *list, + struct ttm_validate_buffer *entry) { - struct ttm_validate_buffer *entry; - - list_for_each_entry(entry, list, head) { + list_for_each_entry_continue_reverse(entry, list, head) { struct ttm_buffer_object *bo = entry->bo; - if (!entry->reserved) - continue; - entry->reserved = false; - if (entry->removed) { - ttm_bo_add_to_lru(bo); - entry->removed = false; - } __ttm_bo_unreserve(bo); } } @@ -56,27 +48,9 @@ static void ttm_eu_del_from_lru_locked(struct list_head *list) list_for_each_entry(entry, list, head) { struct ttm_buffer_object *bo = entry->bo; - if (!entry->reserved) - continue; + unsigned put_count = ttm_bo_del_from_lru(bo); - if (!entry->removed) { - entry->put_count = ttm_bo_del_from_lru(bo); - entry->removed = true; - } - } -} - -static void ttm_eu_list_ref_sub(struct list_head *list) -{ - struct ttm_validate_buffer *entry; - - list_for_each_entry(entry, list, head) { - struct ttm_buffer_object *bo = entry->bo; - - if (entry->put_count) { - ttm_bo_list_ref_sub(bo, entry->put_count, true); - entry->put_count = 0; - } + ttm_bo_list_ref_sub(bo, put_count, true); } } @@ -91,11 +65,18 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, entry = list_first_entry(list, struct ttm_validate_buffer, head); glob = entry->bo->glob; + spin_lock(&glob->lru_lock); - ttm_eu_backoff_reservation_locked(list); + list_for_each_entry(entry, list, head) { + struct ttm_buffer_object *bo = entry->bo; + + ttm_bo_add_to_lru(bo); + __ttm_bo_unreserve(bo); + } + spin_unlock(&glob->lru_lock); + if (ticket) ww_acquire_fini(ticket); - spin_unlock(&glob->lru_lock); } EXPORT_SYMBOL(ttm_eu_backoff_reservation); @@ -121,64 +102,55 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, if (list_empty(list)) return 0; - list_for_each_entry(entry, list, head) { - entry->reserved = false; - entry->put_count = 0; - entry->removed = false; - } - entry = list_first_entry(list, struct ttm_validate_buffer, head); glob = entry->bo->glob; if (ticket) ww_acquire_init(ticket, &reservation_ww_class); -retry: + list_for_each_entry(entry, list, head) { struct ttm_buffer_object *bo = entry->bo; - /* already slowpath reserved? 
*/ - if (entry->reserved) - continue; - ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), true, ticket); + if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) { + __ttm_bo_unreserve(bo); - if (ret == -EDEADLK) { - /* uh oh, we lost out, drop every reservation and try - * to only reserve this buffer, then start over if - * this succeeds. - */ - BUG_ON(ticket == NULL); - spin_lock(&glob->lru_lock); - ttm_eu_backoff_reservation_locked(list); - spin_unlock(&glob->lru_lock); - ttm_eu_list_ref_sub(list); - - if (intr) { - ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, - ticket); - if (unlikely(ret != 0)) { - if (ret == -EINTR) - ret = -ERESTARTSYS; - goto err_fini; - } - } else - ww_mutex_lock_slow(&bo->resv->lock, ticket); - - entry->reserved = true; - if (unlikely(atomic_read(&bo->cpu_writers) > 0)) { - ret = -EBUSY; - goto err; - } - goto retry; - } else if (ret) - goto err; - - entry->reserved = true; - if (unlikely(atomic_read(&bo->cpu_writers) > 0)) { ret = -EBUSY; - goto err; } + + if (!ret) + continue; + + /* uh oh, we lost out, drop every reservation and try + * to only reserve this buffer, then start over if + * this succeeds. + */ + ttm_eu_backoff_reservation_reverse(list, entry); + + if (ret == -EDEADLK && intr) { + ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, + ticket); + } else if (ret == -EDEADLK) { + ww_mutex_lock_slow(&bo->resv->lock, ticket); + ret = 0; + } + + if (unlikely(ret != 0)) { + if (ret == -EINTR) + ret = -ERESTARTSYS; + if (ticket) { + ww_acquire_done(ticket); + ww_acquire_fini(ticket); + } + return ret; + } + + /* move this item to the front of the list, + * forces correct iteration of the loop without keeping track + */ + list_del(&entry->head); + list_add(&entry->head, list); } if (ticket) @@ -186,20 +158,7 @@ retry: spin_lock(&glob->lru_lock); ttm_eu_del_from_lru_locked(list); spin_unlock(&glob->lru_lock); - ttm_eu_list_ref_sub(list); return 0; - -err: - spin_lock(&glob->lru_lock); - ttm_eu_backoff_reservation_locked(list); - spin_unlock(&glob->lru_lock); - ttm_eu_list_ref_sub(list); -err_fini: - if (ticket) { - ww_acquire_done(ticket); - ww_acquire_fini(ticket); - } - return ret; } EXPORT_SYMBOL(ttm_eu_reserve_buffers); @@ -228,7 +187,6 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, bo->sync_obj = driver->sync_obj_ref(sync_obj); ttm_bo_add_to_lru(bo); __ttm_bo_unreserve(bo); - entry->reserved = false; } spin_unlock(&glob->lru_lock); if (ticket) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 24f067bf438..b19b2b980cb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -346,7 +346,6 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, ++sw_context->cur_val_buf; val_buf = &vval_buf->base; val_buf->bo = ttm_bo_reference(bo); - val_buf->reserved = false; list_add_tail(&val_buf->head, &sw_context->validate_nodes); vval_buf->validate_as_mob = validate_as_mob; } diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h index fd95fd569ca..8490cb8ee0d 100644 --- a/include/drm/ttm/ttm_execbuf_util.h +++ b/include/drm/ttm/ttm_execbuf_util.h @@ -48,9 +48,6 @@ struct ttm_validate_buffer { struct list_head head; struct ttm_buffer_object *bo; - bool reserved; - bool removed; - int put_count; void *old_sync_obj; }; -- cgit v1.2.3-70-g09d2 From 2ffd48f2e7ae06c3d7b2bcde9a0cb211d1a32468 Mon Sep 17 00:00:00 2001 From: Steve Longerbeam Date: Tue, 19 Aug 2014 10:52:40 -0700 Subject: gpu: 
ipu-v3: Add Camera Sensor Interface unit Adds the Camera Sensor Interface (CSI) unit required for video capture. Signed-off-by: Steve Longerbeam Removed the unused clk_get_rate in ipu_csi_init_interface and the ipu_csi_ccir_err_detection_enable/disable functions. Checkpatch cleanup. Signed-off-by: Philipp Zabel --- drivers/gpu/ipu-v3/Makefile | 2 +- drivers/gpu/ipu-v3/ipu-common.c | 44 ++- drivers/gpu/ipu-v3/ipu-csi.c | 741 ++++++++++++++++++++++++++++++++++++++++ drivers/gpu/ipu-v3/ipu-prv.h | 6 + include/video/imx-ipu-v3.h | 32 +- 5 files changed, 810 insertions(+), 15 deletions(-) create mode 100644 drivers/gpu/ipu-v3/ipu-csi.c (limited to 'include') diff --git a/drivers/gpu/ipu-v3/Makefile b/drivers/gpu/ipu-v3/Makefile index 0b42836caae..d22bd06caa6 100644 --- a/drivers/gpu/ipu-v3/Makefile +++ b/drivers/gpu/ipu-v3/Makefile @@ -1,4 +1,4 @@ obj-$(CONFIG_IMX_IPUV3_CORE) += imx-ipu-v3.o -imx-ipu-v3-objs := ipu-common.o ipu-cpmem.o ipu-dc.o ipu-di.o \ +imx-ipu-v3-objs := ipu-common.o ipu-cpmem.o ipu-csi.o ipu-dc.o ipu-di.o \ ipu-dp.o ipu-dmfc.o ipu-smfc.o diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index cae54311585..511c364231a 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c @@ -217,18 +217,6 @@ int ipu_module_disable(struct ipu_soc *ipu, u32 mask) } EXPORT_SYMBOL_GPL(ipu_module_disable); -int ipu_csi_enable(struct ipu_soc *ipu, int csi) -{ - return ipu_module_enable(ipu, csi ? IPU_CONF_CSI1_EN : IPU_CONF_CSI0_EN); -} -EXPORT_SYMBOL_GPL(ipu_csi_enable); - -int ipu_csi_disable(struct ipu_soc *ipu, int csi) -{ - return ipu_module_disable(ipu, csi ? IPU_CONF_CSI1_EN : IPU_CONF_CSI0_EN); -} -EXPORT_SYMBOL_GPL(ipu_csi_disable); - int ipu_smfc_enable(struct ipu_soc *ipu) { return ipu_module_enable(ipu, IPU_CONF_SMFC_EN); @@ -439,6 +427,8 @@ struct ipu_devtype { unsigned long cpmem_ofs; unsigned long srm_ofs; unsigned long tpm_ofs; + unsigned long csi0_ofs; + unsigned long csi1_ofs; unsigned long disp0_ofs; unsigned long disp1_ofs; unsigned long dc_tmpl_ofs; @@ -452,6 +442,8 @@ static struct ipu_devtype ipu_type_imx51 = { .cpmem_ofs = 0x1f000000, .srm_ofs = 0x1f040000, .tpm_ofs = 0x1f060000, + .csi0_ofs = 0x1f030000, + .csi1_ofs = 0x1f038000, .disp0_ofs = 0x1e040000, .disp1_ofs = 0x1e048000, .dc_tmpl_ofs = 0x1f080000, @@ -465,6 +457,8 @@ static struct ipu_devtype ipu_type_imx53 = { .cpmem_ofs = 0x07000000, .srm_ofs = 0x07040000, .tpm_ofs = 0x07060000, + .csi0_ofs = 0x07030000, + .csi1_ofs = 0x07038000, .disp0_ofs = 0x06040000, .disp1_ofs = 0x06048000, .dc_tmpl_ofs = 0x07080000, @@ -478,6 +472,8 @@ static struct ipu_devtype ipu_type_imx6q = { .cpmem_ofs = 0x00300000, .srm_ofs = 0x00340000, .tpm_ofs = 0x00360000, + .csi0_ofs = 0x00230000, + .csi1_ofs = 0x00238000, .disp0_ofs = 0x00240000, .disp1_ofs = 0x00248000, .dc_tmpl_ofs = 0x00380000, @@ -508,6 +504,20 @@ static int ipu_submodules_init(struct ipu_soc *ipu, goto err_cpmem; } + ret = ipu_csi_init(ipu, dev, 0, ipu_base + devtype->csi0_ofs, + IPU_CONF_CSI0_EN, ipu_clk); + if (ret) { + unit = "csi0"; + goto err_csi_0; + } + + ret = ipu_csi_init(ipu, dev, 1, ipu_base + devtype->csi1_ofs, + IPU_CONF_CSI1_EN, ipu_clk); + if (ret) { + unit = "csi1"; + goto err_csi_1; + } + ret = ipu_di_init(ipu, dev, 0, ipu_base + devtype->disp0_ofs, IPU_CONF_DI0_EN, ipu_clk); if (ret) { @@ -562,6 +572,10 @@ err_dc: err_di_1: ipu_di_exit(ipu, 0); err_di_0: + ipu_csi_exit(ipu, 1); +err_csi_1: + ipu_csi_exit(ipu, 0); +err_csi_0: ipu_cpmem_exit(ipu); err_cpmem: dev_err(&pdev->dev, "init %s failed with 
%d\n", unit, ret); @@ -640,6 +654,8 @@ static void ipu_submodules_exit(struct ipu_soc *ipu) ipu_dc_exit(ipu); ipu_di_exit(ipu, 1); ipu_di_exit(ipu, 0); + ipu_csi_exit(ipu, 1); + ipu_csi_exit(ipu, 0); ipu_cpmem_exit(ipu); } @@ -859,6 +875,10 @@ static int ipu_probe(struct platform_device *pdev) ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS); dev_dbg(&pdev->dev, "cpmem: 0x%08lx\n", ipu_base + devtype->cpmem_ofs); + dev_dbg(&pdev->dev, "csi0: 0x%08lx\n", + ipu_base + devtype->csi0_ofs); + dev_dbg(&pdev->dev, "csi1: 0x%08lx\n", + ipu_base + devtype->csi1_ofs); dev_dbg(&pdev->dev, "disp0: 0x%08lx\n", ipu_base + devtype->disp0_ofs); dev_dbg(&pdev->dev, "disp1: 0x%08lx\n", diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c new file mode 100644 index 00000000000..d6f56471bd2 --- /dev/null +++ b/drivers/gpu/ipu-v3/ipu-csi.c @@ -0,0 +1,741 @@ +/* + * Copyright (C) 2012-2014 Mentor Graphics Inc. + * Copyright (C) 2005-2009 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ipu-prv.h" + +struct ipu_csi { + void __iomem *base; + int id; + u32 module; + struct clk *clk_ipu; /* IPU bus clock */ + spinlock_t lock; + bool inuse; + struct ipu_soc *ipu; +}; + +/* CSI Register Offsets */ +#define CSI_SENS_CONF 0x0000 +#define CSI_SENS_FRM_SIZE 0x0004 +#define CSI_ACT_FRM_SIZE 0x0008 +#define CSI_OUT_FRM_CTRL 0x000c +#define CSI_TST_CTRL 0x0010 +#define CSI_CCIR_CODE_1 0x0014 +#define CSI_CCIR_CODE_2 0x0018 +#define CSI_CCIR_CODE_3 0x001c +#define CSI_MIPI_DI 0x0020 +#define CSI_SKIP 0x0024 +#define CSI_CPD_CTRL 0x0028 +#define CSI_CPD_RC(n) (0x002c + ((n)*4)) +#define CSI_CPD_RS(n) (0x004c + ((n)*4)) +#define CSI_CPD_GRC(n) (0x005c + ((n)*4)) +#define CSI_CPD_GRS(n) (0x007c + ((n)*4)) +#define CSI_CPD_GBC(n) (0x008c + ((n)*4)) +#define CSI_CPD_GBS(n) (0x00Ac + ((n)*4)) +#define CSI_CPD_BC(n) (0x00Bc + ((n)*4)) +#define CSI_CPD_BS(n) (0x00Dc + ((n)*4)) +#define CSI_CPD_OFFSET1 0x00ec +#define CSI_CPD_OFFSET2 0x00f0 + +/* CSI Register Fields */ +#define CSI_SENS_CONF_DATA_FMT_SHIFT 8 +#define CSI_SENS_CONF_DATA_FMT_MASK 0x00000700 +#define CSI_SENS_CONF_DATA_FMT_RGB_YUV444 0L +#define CSI_SENS_CONF_DATA_FMT_YUV422_YUYV 1L +#define CSI_SENS_CONF_DATA_FMT_YUV422_UYVY 2L +#define CSI_SENS_CONF_DATA_FMT_BAYER 3L +#define CSI_SENS_CONF_DATA_FMT_RGB565 4L +#define CSI_SENS_CONF_DATA_FMT_RGB555 5L +#define CSI_SENS_CONF_DATA_FMT_RGB444 6L +#define CSI_SENS_CONF_DATA_FMT_JPEG 7L + +#define CSI_SENS_CONF_VSYNC_POL_SHIFT 0 +#define CSI_SENS_CONF_HSYNC_POL_SHIFT 1 +#define CSI_SENS_CONF_DATA_POL_SHIFT 2 +#define CSI_SENS_CONF_PIX_CLK_POL_SHIFT 3 +#define CSI_SENS_CONF_SENS_PRTCL_MASK 0x00000070 +#define CSI_SENS_CONF_SENS_PRTCL_SHIFT 4 +#define CSI_SENS_CONF_PACK_TIGHT_SHIFT 7 +#define CSI_SENS_CONF_DATA_WIDTH_SHIFT 11 +#define CSI_SENS_CONF_EXT_VSYNC_SHIFT 15 +#define CSI_SENS_CONF_DIVRATIO_SHIFT 16 + +#define CSI_SENS_CONF_DIVRATIO_MASK 0x00ff0000 +#define 
CSI_SENS_CONF_DATA_DEST_SHIFT 24 +#define CSI_SENS_CONF_DATA_DEST_MASK 0x07000000 +#define CSI_SENS_CONF_JPEG8_EN_SHIFT 27 +#define CSI_SENS_CONF_JPEG_EN_SHIFT 28 +#define CSI_SENS_CONF_FORCE_EOF_SHIFT 29 +#define CSI_SENS_CONF_DATA_EN_POL_SHIFT 31 + +#define CSI_DATA_DEST_IC 2 +#define CSI_DATA_DEST_IDMAC 4 + +#define CSI_CCIR_ERR_DET_EN 0x01000000 +#define CSI_HORI_DOWNSIZE_EN 0x80000000 +#define CSI_VERT_DOWNSIZE_EN 0x40000000 +#define CSI_TEST_GEN_MODE_EN 0x01000000 + +#define CSI_HSC_MASK 0x1fff0000 +#define CSI_HSC_SHIFT 16 +#define CSI_VSC_MASK 0x00000fff +#define CSI_VSC_SHIFT 0 + +#define CSI_TEST_GEN_R_MASK 0x000000ff +#define CSI_TEST_GEN_R_SHIFT 0 +#define CSI_TEST_GEN_G_MASK 0x0000ff00 +#define CSI_TEST_GEN_G_SHIFT 8 +#define CSI_TEST_GEN_B_MASK 0x00ff0000 +#define CSI_TEST_GEN_B_SHIFT 16 + +#define CSI_MAX_RATIO_SKIP_SMFC_MASK 0x00000007 +#define CSI_MAX_RATIO_SKIP_SMFC_SHIFT 0 +#define CSI_SKIP_SMFC_MASK 0x000000f8 +#define CSI_SKIP_SMFC_SHIFT 3 +#define CSI_ID_2_SKIP_MASK 0x00000300 +#define CSI_ID_2_SKIP_SHIFT 8 + +#define CSI_COLOR_FIRST_ROW_MASK 0x00000002 +#define CSI_COLOR_FIRST_COMP_MASK 0x00000001 + +/* MIPI CSI-2 data types */ +#define MIPI_DT_YUV420 0x18 /* YYY.../UYVY.... */ +#define MIPI_DT_YUV420_LEGACY 0x1a /* UYY.../VYY... */ +#define MIPI_DT_YUV422 0x1e /* UYVY... */ +#define MIPI_DT_RGB444 0x20 +#define MIPI_DT_RGB555 0x21 +#define MIPI_DT_RGB565 0x22 +#define MIPI_DT_RGB666 0x23 +#define MIPI_DT_RGB888 0x24 +#define MIPI_DT_RAW6 0x28 +#define MIPI_DT_RAW7 0x29 +#define MIPI_DT_RAW8 0x2a +#define MIPI_DT_RAW10 0x2b +#define MIPI_DT_RAW12 0x2c +#define MIPI_DT_RAW14 0x2d + +/* + * Bitfield of CSI bus signal polarities and modes. + */ +struct ipu_csi_bus_config { + unsigned data_width:4; + unsigned clk_mode:3; + unsigned ext_vsync:1; + unsigned vsync_pol:1; + unsigned hsync_pol:1; + unsigned pixclk_pol:1; + unsigned data_pol:1; + unsigned sens_clksrc:1; + unsigned pack_tight:1; + unsigned force_eof:1; + unsigned data_en_pol:1; + + unsigned data_fmt; + unsigned mipi_dt; +}; + +/* + * Enumeration of CSI data bus widths. + */ +enum ipu_csi_data_width { + IPU_CSI_DATA_WIDTH_4 = 0, + IPU_CSI_DATA_WIDTH_8 = 1, + IPU_CSI_DATA_WIDTH_10 = 3, + IPU_CSI_DATA_WIDTH_12 = 5, + IPU_CSI_DATA_WIDTH_16 = 9, +}; + +/* + * Enumeration of CSI clock modes. + */ +enum ipu_csi_clk_mode { + IPU_CSI_CLK_MODE_GATED_CLK, + IPU_CSI_CLK_MODE_NONGATED_CLK, + IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE, + IPU_CSI_CLK_MODE_CCIR656_INTERLACED, + IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR, + IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR, + IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR, + IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR, +}; + +static inline u32 ipu_csi_read(struct ipu_csi *csi, unsigned offset) +{ + return readl(csi->base + offset); +} + +static inline void ipu_csi_write(struct ipu_csi *csi, u32 value, + unsigned offset) +{ + writel(value, csi->base + offset); +} + +/* + * Set mclk division ratio for generating test mode mclk. Only used + * for test generator. 
+ */ +static int ipu_csi_set_testgen_mclk(struct ipu_csi *csi, u32 pixel_clk, + u32 ipu_clk) +{ + u32 temp; + u32 div_ratio; + + div_ratio = (ipu_clk / pixel_clk) - 1; + + if (div_ratio > 0xFF || div_ratio < 0) { + dev_err(csi->ipu->dev, + "value of pixel_clk extends normal range\n"); + return -EINVAL; + } + + temp = ipu_csi_read(csi, CSI_SENS_CONF); + temp &= ~CSI_SENS_CONF_DIVRATIO_MASK; + ipu_csi_write(csi, temp | (div_ratio << CSI_SENS_CONF_DIVRATIO_SHIFT), + CSI_SENS_CONF); + + return 0; +} + +/* + * Find the CSI data format and data width for the given V4L2 media + * bus pixel format code. + */ +static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code) +{ + switch (mbus_code) { + case V4L2_MBUS_FMT_BGR565_2X8_BE: + case V4L2_MBUS_FMT_BGR565_2X8_LE: + case V4L2_MBUS_FMT_RGB565_2X8_BE: + case V4L2_MBUS_FMT_RGB565_2X8_LE: + cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB565; + cfg->mipi_dt = MIPI_DT_RGB565; + cfg->data_width = IPU_CSI_DATA_WIDTH_8; + break; + case V4L2_MBUS_FMT_RGB444_2X8_PADHI_BE: + case V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE: + cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB444; + cfg->mipi_dt = MIPI_DT_RGB444; + cfg->data_width = IPU_CSI_DATA_WIDTH_8; + break; + case V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE: + case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE: + cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB555; + cfg->mipi_dt = MIPI_DT_RGB555; + cfg->data_width = IPU_CSI_DATA_WIDTH_8; + break; + case V4L2_MBUS_FMT_UYVY8_2X8: + cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY; + cfg->mipi_dt = MIPI_DT_YUV422; + cfg->data_width = IPU_CSI_DATA_WIDTH_8; + break; + case V4L2_MBUS_FMT_YUYV8_2X8: + cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_YUYV; + cfg->mipi_dt = MIPI_DT_YUV422; + cfg->data_width = IPU_CSI_DATA_WIDTH_8; + break; + case V4L2_MBUS_FMT_UYVY8_1X16: + cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY; + cfg->mipi_dt = MIPI_DT_YUV422; + cfg->data_width = IPU_CSI_DATA_WIDTH_16; + break; + case V4L2_MBUS_FMT_YUYV8_1X16: + cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_YUYV; + cfg->mipi_dt = MIPI_DT_YUV422; + cfg->data_width = IPU_CSI_DATA_WIDTH_16; + break; + case V4L2_MBUS_FMT_SBGGR8_1X8: + case V4L2_MBUS_FMT_SGBRG8_1X8: + case V4L2_MBUS_FMT_SGRBG8_1X8: + case V4L2_MBUS_FMT_SRGGB8_1X8: + cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER; + cfg->mipi_dt = MIPI_DT_RAW8; + cfg->data_width = IPU_CSI_DATA_WIDTH_8; + break; + case V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8: + case V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8: + case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8: + case V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8: + case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE: + case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE: + case V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE: + case V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE: + cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER; + cfg->mipi_dt = MIPI_DT_RAW10; + cfg->data_width = IPU_CSI_DATA_WIDTH_8; + break; + case V4L2_MBUS_FMT_SBGGR10_1X10: + case V4L2_MBUS_FMT_SGBRG10_1X10: + case V4L2_MBUS_FMT_SGRBG10_1X10: + case V4L2_MBUS_FMT_SRGGB10_1X10: + cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER; + cfg->mipi_dt = MIPI_DT_RAW10; + cfg->data_width = IPU_CSI_DATA_WIDTH_10; + break; + case V4L2_MBUS_FMT_SBGGR12_1X12: + case V4L2_MBUS_FMT_SGBRG12_1X12: + case V4L2_MBUS_FMT_SGRBG12_1X12: + case V4L2_MBUS_FMT_SRGGB12_1X12: + cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER; + cfg->mipi_dt = MIPI_DT_RAW12; + cfg->data_width = IPU_CSI_DATA_WIDTH_12; + break; + case V4L2_MBUS_FMT_JPEG_1X8: + /* TODO */ + cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_JPEG; + cfg->mipi_dt = MIPI_DT_RAW8; + cfg->data_width = IPU_CSI_DATA_WIDTH_8; + break; + 
default: + return -EINVAL; + } + + return 0; +} + +/* + * Fill a CSI bus config struct from mbus_config and mbus_framefmt. + */ +static void fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg, + struct v4l2_mbus_config *mbus_cfg, + struct v4l2_mbus_framefmt *mbus_fmt) +{ + memset(csicfg, 0, sizeof(*csicfg)); + + mbus_code_to_bus_cfg(csicfg, mbus_fmt->code); + + switch (mbus_cfg->type) { + case V4L2_MBUS_PARALLEL: + csicfg->ext_vsync = 1; + csicfg->vsync_pol = (mbus_cfg->flags & + V4L2_MBUS_VSYNC_ACTIVE_LOW) ? 1 : 0; + csicfg->hsync_pol = (mbus_cfg->flags & + V4L2_MBUS_HSYNC_ACTIVE_LOW) ? 1 : 0; + csicfg->pixclk_pol = (mbus_cfg->flags & + V4L2_MBUS_PCLK_SAMPLE_FALLING) ? 1 : 0; + csicfg->clk_mode = IPU_CSI_CLK_MODE_GATED_CLK; + break; + case V4L2_MBUS_BT656: + csicfg->ext_vsync = 0; + if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field)) + csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_INTERLACED; + else + csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE; + break; + case V4L2_MBUS_CSI2: + /* + * MIPI CSI-2 requires non gated clock mode, all other + * parameters are not applicable for MIPI CSI-2 bus. + */ + csicfg->clk_mode = IPU_CSI_CLK_MODE_NONGATED_CLK; + break; + default: + /* will never get here, keep compiler quiet */ + break; + } +} + +int ipu_csi_init_interface(struct ipu_csi *csi, + struct v4l2_mbus_config *mbus_cfg, + struct v4l2_mbus_framefmt *mbus_fmt) +{ + struct ipu_csi_bus_config cfg; + unsigned long flags; + u32 data = 0; + + fill_csi_bus_cfg(&cfg, mbus_cfg, mbus_fmt); + + /* Set the CSI_SENS_CONF register remaining fields */ + data |= cfg.data_width << CSI_SENS_CONF_DATA_WIDTH_SHIFT | + cfg.data_fmt << CSI_SENS_CONF_DATA_FMT_SHIFT | + cfg.data_pol << CSI_SENS_CONF_DATA_POL_SHIFT | + cfg.vsync_pol << CSI_SENS_CONF_VSYNC_POL_SHIFT | + cfg.hsync_pol << CSI_SENS_CONF_HSYNC_POL_SHIFT | + cfg.pixclk_pol << CSI_SENS_CONF_PIX_CLK_POL_SHIFT | + cfg.ext_vsync << CSI_SENS_CONF_EXT_VSYNC_SHIFT | + cfg.clk_mode << CSI_SENS_CONF_SENS_PRTCL_SHIFT | + cfg.pack_tight << CSI_SENS_CONF_PACK_TIGHT_SHIFT | + cfg.force_eof << CSI_SENS_CONF_FORCE_EOF_SHIFT | + cfg.data_en_pol << CSI_SENS_CONF_DATA_EN_POL_SHIFT; + + spin_lock_irqsave(&csi->lock, flags); + + ipu_csi_write(csi, data, CSI_SENS_CONF); + + /* Setup sensor frame size */ + ipu_csi_write(csi, + (mbus_fmt->width - 1) | ((mbus_fmt->height - 1) << 16), + CSI_SENS_FRM_SIZE); + + /* Set CCIR registers */ + + switch (cfg.clk_mode) { + case IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE: + ipu_csi_write(csi, 0x40030, CSI_CCIR_CODE_1); + ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3); + break; + case IPU_CSI_CLK_MODE_CCIR656_INTERLACED: + if (mbus_fmt->width == 720 && mbus_fmt->height == 576) { + /* + * PAL case + * + * Field0BlankEnd = 0x6, Field0BlankStart = 0x2, + * Field0ActiveEnd = 0x4, Field0ActiveStart = 0 + * Field1BlankEnd = 0x7, Field1BlankStart = 0x3, + * Field1ActiveEnd = 0x5, Field1ActiveStart = 0x1 + */ + ipu_csi_write(csi, 0x40596 | CSI_CCIR_ERR_DET_EN, + CSI_CCIR_CODE_1); + ipu_csi_write(csi, 0xD07DF, CSI_CCIR_CODE_2); + ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3); + + } else if (mbus_fmt->width == 720 && mbus_fmt->height == 480) { + /* + * NTSC case + * + * Field0BlankEnd = 0x7, Field0BlankStart = 0x3, + * Field0ActiveEnd = 0x5, Field0ActiveStart = 0x1 + * Field1BlankEnd = 0x6, Field1BlankStart = 0x2, + * Field1ActiveEnd = 0x4, Field1ActiveStart = 0 + */ + ipu_csi_write(csi, 0xD07DF | CSI_CCIR_ERR_DET_EN, + CSI_CCIR_CODE_1); + ipu_csi_write(csi, 0x40596, CSI_CCIR_CODE_2); + ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3); + } else { + 
dev_err(csi->ipu->dev, + "Unsupported CCIR656 interlaced video mode\n"); + spin_unlock_irqrestore(&csi->lock, flags); + return -EINVAL; + } + break; + case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR: + case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR: + case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR: + case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR: + ipu_csi_write(csi, 0x40030 | CSI_CCIR_ERR_DET_EN, + CSI_CCIR_CODE_1); + ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3); + break; + case IPU_CSI_CLK_MODE_GATED_CLK: + case IPU_CSI_CLK_MODE_NONGATED_CLK: + ipu_csi_write(csi, 0, CSI_CCIR_CODE_1); + break; + } + + dev_dbg(csi->ipu->dev, "CSI_SENS_CONF = 0x%08X\n", + ipu_csi_read(csi, CSI_SENS_CONF)); + dev_dbg(csi->ipu->dev, "CSI_ACT_FRM_SIZE = 0x%08X\n", + ipu_csi_read(csi, CSI_ACT_FRM_SIZE)); + + spin_unlock_irqrestore(&csi->lock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(ipu_csi_init_interface); + +bool ipu_csi_is_interlaced(struct ipu_csi *csi) +{ + unsigned long flags; + u32 sensor_protocol; + + spin_lock_irqsave(&csi->lock, flags); + sensor_protocol = + (ipu_csi_read(csi, CSI_SENS_CONF) & + CSI_SENS_CONF_SENS_PRTCL_MASK) >> + CSI_SENS_CONF_SENS_PRTCL_SHIFT; + spin_unlock_irqrestore(&csi->lock, flags); + + switch (sensor_protocol) { + case IPU_CSI_CLK_MODE_GATED_CLK: + case IPU_CSI_CLK_MODE_NONGATED_CLK: + case IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE: + case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR: + case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR: + return false; + case IPU_CSI_CLK_MODE_CCIR656_INTERLACED: + case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR: + case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR: + return true; + default: + dev_err(csi->ipu->dev, + "CSI %d sensor protocol unsupported\n", csi->id); + return false; + } +} +EXPORT_SYMBOL_GPL(ipu_csi_is_interlaced); + +void ipu_csi_get_window(struct ipu_csi *csi, struct v4l2_rect *w) +{ + unsigned long flags; + u32 reg; + + spin_lock_irqsave(&csi->lock, flags); + + reg = ipu_csi_read(csi, CSI_ACT_FRM_SIZE); + w->width = (reg & 0xFFFF) + 1; + w->height = (reg >> 16 & 0xFFFF) + 1; + + reg = ipu_csi_read(csi, CSI_OUT_FRM_CTRL); + w->left = (reg & CSI_HSC_MASK) >> CSI_HSC_SHIFT; + w->top = (reg & CSI_VSC_MASK) >> CSI_VSC_SHIFT; + + spin_unlock_irqrestore(&csi->lock, flags); +} +EXPORT_SYMBOL_GPL(ipu_csi_get_window); + +void ipu_csi_set_window(struct ipu_csi *csi, struct v4l2_rect *w) +{ + unsigned long flags; + u32 reg; + + spin_lock_irqsave(&csi->lock, flags); + + ipu_csi_write(csi, (w->width - 1) | ((w->height - 1) << 16), + CSI_ACT_FRM_SIZE); + + reg = ipu_csi_read(csi, CSI_OUT_FRM_CTRL); + reg &= ~(CSI_HSC_MASK | CSI_VSC_MASK); + reg |= ((w->top << CSI_VSC_SHIFT) | (w->left << CSI_HSC_SHIFT)); + ipu_csi_write(csi, reg, CSI_OUT_FRM_CTRL); + + spin_unlock_irqrestore(&csi->lock, flags); +} +EXPORT_SYMBOL_GPL(ipu_csi_set_window); + +void ipu_csi_set_test_generator(struct ipu_csi *csi, bool active, + u32 r_value, u32 g_value, u32 b_value, + u32 pix_clk) +{ + unsigned long flags; + u32 ipu_clk = clk_get_rate(csi->clk_ipu); + u32 temp; + + spin_lock_irqsave(&csi->lock, flags); + + temp = ipu_csi_read(csi, CSI_TST_CTRL); + + if (active == false) { + temp &= ~CSI_TEST_GEN_MODE_EN; + ipu_csi_write(csi, temp, CSI_TST_CTRL); + } else { + /* Set sensb_mclk div_ratio */ + ipu_csi_set_testgen_mclk(csi, pix_clk, ipu_clk); + + temp &= ~(CSI_TEST_GEN_R_MASK | CSI_TEST_GEN_G_MASK | + CSI_TEST_GEN_B_MASK); + temp |= CSI_TEST_GEN_MODE_EN; + temp |= (r_value << CSI_TEST_GEN_R_SHIFT) | + (g_value << CSI_TEST_GEN_G_SHIFT) | + (b_value << CSI_TEST_GEN_B_SHIFT); + 
ipu_csi_write(csi, temp, CSI_TST_CTRL); + } + + spin_unlock_irqrestore(&csi->lock, flags); +} +EXPORT_SYMBOL_GPL(ipu_csi_set_test_generator); + +int ipu_csi_set_mipi_datatype(struct ipu_csi *csi, u32 vc, + struct v4l2_mbus_framefmt *mbus_fmt) +{ + struct ipu_csi_bus_config cfg; + unsigned long flags; + u32 temp; + + if (vc > 3) + return -EINVAL; + + mbus_code_to_bus_cfg(&cfg, mbus_fmt->code); + + spin_lock_irqsave(&csi->lock, flags); + + temp = ipu_csi_read(csi, CSI_MIPI_DI); + temp &= ~(0xff << (vc * 8)); + temp |= (cfg.mipi_dt << (vc * 8)); + ipu_csi_write(csi, temp, CSI_MIPI_DI); + + spin_unlock_irqrestore(&csi->lock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(ipu_csi_set_mipi_datatype); + +int ipu_csi_set_skip_smfc(struct ipu_csi *csi, u32 skip, + u32 max_ratio, u32 id) +{ + unsigned long flags; + u32 temp; + + if (max_ratio > 5 || id > 3) + return -EINVAL; + + spin_lock_irqsave(&csi->lock, flags); + + temp = ipu_csi_read(csi, CSI_SKIP); + temp &= ~(CSI_MAX_RATIO_SKIP_SMFC_MASK | CSI_ID_2_SKIP_MASK | + CSI_SKIP_SMFC_MASK); + temp |= (max_ratio << CSI_MAX_RATIO_SKIP_SMFC_SHIFT) | + (id << CSI_ID_2_SKIP_SHIFT) | + (skip << CSI_SKIP_SMFC_SHIFT); + ipu_csi_write(csi, temp, CSI_SKIP); + + spin_unlock_irqrestore(&csi->lock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(ipu_csi_set_skip_smfc); + +int ipu_csi_set_dest(struct ipu_csi *csi, enum ipu_csi_dest csi_dest) +{ + unsigned long flags; + u32 csi_sens_conf, dest; + + if (csi_dest == IPU_CSI_DEST_IDMAC) + dest = CSI_DATA_DEST_IDMAC; + else + dest = CSI_DATA_DEST_IC; /* IC or VDIC */ + + spin_lock_irqsave(&csi->lock, flags); + + csi_sens_conf = ipu_csi_read(csi, CSI_SENS_CONF); + csi_sens_conf &= ~CSI_SENS_CONF_DATA_DEST_MASK; + csi_sens_conf |= (dest << CSI_SENS_CONF_DATA_DEST_SHIFT); + ipu_csi_write(csi, csi_sens_conf, CSI_SENS_CONF); + + spin_unlock_irqrestore(&csi->lock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(ipu_csi_set_dest); + +int ipu_csi_enable(struct ipu_csi *csi) +{ + ipu_module_enable(csi->ipu, csi->module); + + return 0; +} +EXPORT_SYMBOL_GPL(ipu_csi_enable); + +int ipu_csi_disable(struct ipu_csi *csi) +{ + ipu_module_disable(csi->ipu, csi->module); + + return 0; +} +EXPORT_SYMBOL_GPL(ipu_csi_disable); + +struct ipu_csi *ipu_csi_get(struct ipu_soc *ipu, int id) +{ + unsigned long flags; + struct ipu_csi *csi, *ret; + + if (id > 1) + return ERR_PTR(-EINVAL); + + csi = ipu->csi_priv[id]; + ret = csi; + + spin_lock_irqsave(&csi->lock, flags); + + if (csi->inuse) { + ret = ERR_PTR(-EBUSY); + goto unlock; + } + + csi->inuse = true; +unlock: + spin_unlock_irqrestore(&csi->lock, flags); + return ret; +} +EXPORT_SYMBOL_GPL(ipu_csi_get); + +void ipu_csi_put(struct ipu_csi *csi) +{ + unsigned long flags; + + spin_lock_irqsave(&csi->lock, flags); + csi->inuse = false; + spin_unlock_irqrestore(&csi->lock, flags); +} +EXPORT_SYMBOL_GPL(ipu_csi_put); + +int ipu_csi_init(struct ipu_soc *ipu, struct device *dev, int id, + unsigned long base, u32 module, struct clk *clk_ipu) +{ + struct ipu_csi *csi; + + if (id > 1) + return -ENODEV; + + csi = devm_kzalloc(dev, sizeof(*csi), GFP_KERNEL); + if (!csi) + return -ENOMEM; + + ipu->csi_priv[id] = csi; + + spin_lock_init(&csi->lock); + csi->module = module; + csi->id = id; + csi->clk_ipu = clk_ipu; + csi->base = devm_ioremap(dev, base, PAGE_SIZE); + if (!csi->base) + return -ENOMEM; + + dev_dbg(dev, "CSI%d base: 0x%08lx remapped to %p\n", + id, base, csi->base); + csi->ipu = ipu; + + return 0; +} + +void ipu_csi_exit(struct ipu_soc *ipu, int id) +{ +} + +void ipu_csi_dump(struct ipu_csi *csi) +{ + 
dev_dbg(csi->ipu->dev, "CSI_SENS_CONF: %08x\n", + ipu_csi_read(csi, CSI_SENS_CONF)); + dev_dbg(csi->ipu->dev, "CSI_SENS_FRM_SIZE: %08x\n", + ipu_csi_read(csi, CSI_SENS_FRM_SIZE)); + dev_dbg(csi->ipu->dev, "CSI_ACT_FRM_SIZE: %08x\n", + ipu_csi_read(csi, CSI_ACT_FRM_SIZE)); + dev_dbg(csi->ipu->dev, "CSI_OUT_FRM_CTRL: %08x\n", + ipu_csi_read(csi, CSI_OUT_FRM_CTRL)); + dev_dbg(csi->ipu->dev, "CSI_TST_CTRL: %08x\n", + ipu_csi_read(csi, CSI_TST_CTRL)); + dev_dbg(csi->ipu->dev, "CSI_CCIR_CODE_1: %08x\n", + ipu_csi_read(csi, CSI_CCIR_CODE_1)); + dev_dbg(csi->ipu->dev, "CSI_CCIR_CODE_2: %08x\n", + ipu_csi_read(csi, CSI_CCIR_CODE_2)); + dev_dbg(csi->ipu->dev, "CSI_CCIR_CODE_3: %08x\n", + ipu_csi_read(csi, CSI_CCIR_CODE_3)); + dev_dbg(csi->ipu->dev, "CSI_MIPI_DI: %08x\n", + ipu_csi_read(csi, CSI_MIPI_DI)); + dev_dbg(csi->ipu->dev, "CSI_SKIP: %08x\n", + ipu_csi_read(csi, CSI_SKIP)); +} +EXPORT_SYMBOL_GPL(ipu_csi_dump); diff --git a/drivers/gpu/ipu-v3/ipu-prv.h b/drivers/gpu/ipu-v3/ipu-prv.h index 1a5c55c05fe..9b274f1259e 100644 --- a/drivers/gpu/ipu-v3/ipu-prv.h +++ b/drivers/gpu/ipu-v3/ipu-prv.h @@ -157,6 +157,7 @@ struct ipuv3_channel { }; struct ipu_cpmem; +struct ipu_csi; struct ipu_dc_priv; struct ipu_dmfc_priv; struct ipu_di; @@ -189,6 +190,7 @@ struct ipu_soc { struct ipu_dp_priv *dp_priv; struct ipu_dmfc_priv *dmfc_priv; struct ipu_di *di_priv[2]; + struct ipu_csi *csi_priv[2]; struct ipu_smfc_priv *smfc_priv; }; @@ -211,6 +213,10 @@ int ipu_module_disable(struct ipu_soc *ipu, u32 mask); bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno); int ipu_wait_interrupt(struct ipu_soc *ipu, int irq, int ms); +int ipu_csi_init(struct ipu_soc *ipu, struct device *dev, int id, + unsigned long base, u32 module, struct clk *clk_ipu); +void ipu_csi_exit(struct ipu_soc *ipu, int id); + int ipu_di_init(struct ipu_soc *ipu, struct device *dev, int id, unsigned long base, u32 module, struct clk *ipu_clk); void ipu_di_exit(struct ipu_soc *ipu, int id); diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h index f80fe13b0d4..6d254275192 100644 --- a/include/video/imx-ipu-v3.h +++ b/include/video/imx-ipu-v3.h @@ -16,6 +16,7 @@ #include #include #include +#include struct ipu_soc; @@ -61,6 +62,15 @@ struct ipu_di_signal_cfg { u8 vsync_pin; }; +/* + * Enumeration of CSI destinations + */ +enum ipu_csi_dest { + IPU_CSI_DEST_IDMAC, /* to memory via SMFC */ + IPU_CSI_DEST_IC, /* to Image Converter */ + IPU_CSI_DEST_VDIC, /* to VDIC */ +}; + enum ipu_color_space { IPUV3_COLORSPACE_RGB, IPUV3_COLORSPACE_YUV, @@ -211,8 +221,26 @@ int ipu_dp_set_global_alpha(struct ipu_dp *dp, bool enable, u8 alpha, /* * IPU CMOS Sensor Interface (csi) functions */ -int ipu_csi_enable(struct ipu_soc *ipu, int csi); -int ipu_csi_disable(struct ipu_soc *ipu, int csi); +struct ipu_csi; +int ipu_csi_init_interface(struct ipu_csi *csi, + struct v4l2_mbus_config *mbus_cfg, + struct v4l2_mbus_framefmt *mbus_fmt); +bool ipu_csi_is_interlaced(struct ipu_csi *csi); +void ipu_csi_get_window(struct ipu_csi *csi, struct v4l2_rect *w); +void ipu_csi_set_window(struct ipu_csi *csi, struct v4l2_rect *w); +void ipu_csi_set_test_generator(struct ipu_csi *csi, bool active, + u32 r_value, u32 g_value, u32 b_value, + u32 pix_clk); +int ipu_csi_set_mipi_datatype(struct ipu_csi *csi, u32 vc, + struct v4l2_mbus_framefmt *mbus_fmt); +int ipu_csi_set_skip_smfc(struct ipu_csi *csi, u32 skip, + u32 max_ratio, u32 id); +int ipu_csi_set_dest(struct ipu_csi *csi, enum ipu_csi_dest csi_dest); +int ipu_csi_enable(struct ipu_csi *csi); +int 
ipu_csi_disable(struct ipu_csi *csi); +struct ipu_csi *ipu_csi_get(struct ipu_soc *ipu, int id); +void ipu_csi_put(struct ipu_csi *csi); +void ipu_csi_dump(struct ipu_csi *csi); /* * IPU Sensor Multiple FIFO Controller (SMFC) functions -- cgit v1.2.3-70-g09d2 From 1aa8ea0d2bd5d4ba7b5d2b132a02157bc1fb9793 Mon Sep 17 00:00:00 2001 From: Steve Longerbeam Date: Mon, 11 Aug 2014 13:04:50 +0200 Subject: gpu: ipu-v3: Add Image Converter unit Adds the Image Converter (IC) unit. Signed-off-by: Steve Longerbeam Condensed the three CSC setup functions into a single one that uses static tables to set up the CSC task parameters. Signed-off-by: Philipp Zabel --- drivers/gpu/ipu-v3/Makefile | 2 +- drivers/gpu/ipu-v3/ipu-common.c | 19 +- drivers/gpu/ipu-v3/ipu-ic.c | 778 ++++++++++++++++++++++++++++++++++++++++ drivers/gpu/ipu-v3/ipu-prv.h | 6 + include/video/imx-ipu-v3.h | 45 +++ 5 files changed, 848 insertions(+), 2 deletions(-) create mode 100644 drivers/gpu/ipu-v3/ipu-ic.c (limited to 'include') diff --git a/drivers/gpu/ipu-v3/Makefile b/drivers/gpu/ipu-v3/Makefile index d22bd06caa6..107ec236a4a 100644 --- a/drivers/gpu/ipu-v3/Makefile +++ b/drivers/gpu/ipu-v3/Makefile @@ -1,4 +1,4 @@ obj-$(CONFIG_IMX_IPUV3_CORE) += imx-ipu-v3.o imx-ipu-v3-objs := ipu-common.o ipu-cpmem.o ipu-csi.o ipu-dc.o ipu-di.o \ - ipu-dp.o ipu-dmfc.o ipu-smfc.o + ipu-dp.o ipu-dmfc.o ipu-ic.o ipu-smfc.o diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index 511c364231a..312eef6ffca 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c @@ -429,6 +429,7 @@ struct ipu_devtype { unsigned long tpm_ofs; unsigned long csi0_ofs; unsigned long csi1_ofs; + unsigned long ic_ofs; unsigned long disp0_ofs; unsigned long disp1_ofs; unsigned long dc_tmpl_ofs; @@ -444,6 +445,7 @@ static struct ipu_devtype ipu_type_imx51 = { .tpm_ofs = 0x1f060000, .csi0_ofs = 0x1f030000, .csi1_ofs = 0x1f038000, + .ic_ofs = 0x1f020000, .disp0_ofs = 0x1e040000, .disp1_ofs = 0x1e048000, .dc_tmpl_ofs = 0x1f080000, @@ -459,6 +461,7 @@ static struct ipu_devtype ipu_type_imx53 = { .tpm_ofs = 0x07060000, .csi0_ofs = 0x07030000, .csi1_ofs = 0x07038000, + .ic_ofs = 0x07020000, .disp0_ofs = 0x06040000, .disp1_ofs = 0x06048000, .dc_tmpl_ofs = 0x07080000, @@ -474,6 +477,7 @@ static struct ipu_devtype ipu_type_imx6q = { .tpm_ofs = 0x00360000, .csi0_ofs = 0x00230000, .csi1_ofs = 0x00238000, + .ic_ofs = 0x00220000, .disp0_ofs = 0x00240000, .disp1_ofs = 0x00248000, .dc_tmpl_ofs = 0x00380000, @@ -518,8 +522,16 @@ static int ipu_submodules_init(struct ipu_soc *ipu, goto err_csi_1; } + ret = ipu_ic_init(ipu, dev, + ipu_base + devtype->ic_ofs, + ipu_base + devtype->tpm_ofs); + if (ret) { + unit = "ic"; + goto err_ic; + } + ret = ipu_di_init(ipu, dev, 0, ipu_base + devtype->disp0_ofs, - IPU_CONF_DI0_EN, ipu_clk); + IPU_CONF_DI0_EN, ipu_clk); if (ret) { unit = "di0"; goto err_di_0; @@ -572,6 +584,8 @@ err_dc: err_di_1: ipu_di_exit(ipu, 0); err_di_0: + ipu_ic_exit(ipu); +err_ic: ipu_csi_exit(ipu, 1); err_csi_1: ipu_csi_exit(ipu, 0); @@ -654,6 +668,7 @@ static void ipu_submodules_exit(struct ipu_soc *ipu) ipu_dc_exit(ipu); ipu_di_exit(ipu, 1); ipu_di_exit(ipu, 0); + ipu_ic_exit(ipu); ipu_csi_exit(ipu, 1); ipu_csi_exit(ipu, 0); ipu_cpmem_exit(ipu); @@ -879,6 +894,8 @@ static int ipu_probe(struct platform_device *pdev) ipu_base + devtype->csi0_ofs); dev_dbg(&pdev->dev, "csi1: 0x%08lx\n", ipu_base + devtype->csi1_ofs); + dev_dbg(&pdev->dev, "ic: 0x%08lx\n", + ipu_base + devtype->ic_ofs); dev_dbg(&pdev->dev, "disp0: 0x%08lx\n", 
ipu_base + devtype->disp0_ofs); dev_dbg(&pdev->dev, "disp1: 0x%08lx\n", diff --git a/drivers/gpu/ipu-v3/ipu-ic.c b/drivers/gpu/ipu-v3/ipu-ic.c new file mode 100644 index 00000000000..ad75588e162 --- /dev/null +++ b/drivers/gpu/ipu-v3/ipu-ic.c @@ -0,0 +1,778 @@ +/* + * Copyright (C) 2012-2014 Mentor Graphics Inc. + * Copyright 2005-2012 Freescale Semiconductor, Inc. All Rights Reserved. + * + * The code contained herein is licensed under the GNU General Public + * License. You may obtain a copy of the GNU General Public License + * Version 2 or later at the following locations: + * + * http://www.opensource.org/licenses/gpl-license.html + * http://www.gnu.org/copyleft/gpl.html + */ + +#include +#include +#include +#include +#include +#include +#include +#include "ipu-prv.h" + +/* IC Register Offsets */ +#define IC_CONF 0x0000 +#define IC_PRP_ENC_RSC 0x0004 +#define IC_PRP_VF_RSC 0x0008 +#define IC_PP_RSC 0x000C +#define IC_CMBP_1 0x0010 +#define IC_CMBP_2 0x0014 +#define IC_IDMAC_1 0x0018 +#define IC_IDMAC_2 0x001C +#define IC_IDMAC_3 0x0020 +#define IC_IDMAC_4 0x0024 + +/* IC Register Fields */ +#define IC_CONF_PRPENC_EN (1 << 0) +#define IC_CONF_PRPENC_CSC1 (1 << 1) +#define IC_CONF_PRPENC_ROT_EN (1 << 2) +#define IC_CONF_PRPVF_EN (1 << 8) +#define IC_CONF_PRPVF_CSC1 (1 << 9) +#define IC_CONF_PRPVF_CSC2 (1 << 10) +#define IC_CONF_PRPVF_CMB (1 << 11) +#define IC_CONF_PRPVF_ROT_EN (1 << 12) +#define IC_CONF_PP_EN (1 << 16) +#define IC_CONF_PP_CSC1 (1 << 17) +#define IC_CONF_PP_CSC2 (1 << 18) +#define IC_CONF_PP_CMB (1 << 19) +#define IC_CONF_PP_ROT_EN (1 << 20) +#define IC_CONF_IC_GLB_LOC_A (1 << 28) +#define IC_CONF_KEY_COLOR_EN (1 << 29) +#define IC_CONF_RWS_EN (1 << 30) +#define IC_CONF_CSI_MEM_WR_EN (1 << 31) + +#define IC_IDMAC_1_CB0_BURST_16 (1 << 0) +#define IC_IDMAC_1_CB1_BURST_16 (1 << 1) +#define IC_IDMAC_1_CB2_BURST_16 (1 << 2) +#define IC_IDMAC_1_CB3_BURST_16 (1 << 3) +#define IC_IDMAC_1_CB4_BURST_16 (1 << 4) +#define IC_IDMAC_1_CB5_BURST_16 (1 << 5) +#define IC_IDMAC_1_CB6_BURST_16 (1 << 6) +#define IC_IDMAC_1_CB7_BURST_16 (1 << 7) +#define IC_IDMAC_1_PRPENC_ROT_MASK (0x7 << 11) +#define IC_IDMAC_1_PRPENC_ROT_OFFSET 11 +#define IC_IDMAC_1_PRPVF_ROT_MASK (0x7 << 14) +#define IC_IDMAC_1_PRPVF_ROT_OFFSET 14 +#define IC_IDMAC_1_PP_ROT_MASK (0x7 << 17) +#define IC_IDMAC_1_PP_ROT_OFFSET 17 +#define IC_IDMAC_1_PP_FLIP_RS (1 << 22) +#define IC_IDMAC_1_PRPVF_FLIP_RS (1 << 21) +#define IC_IDMAC_1_PRPENC_FLIP_RS (1 << 20) + +#define IC_IDMAC_2_PRPENC_HEIGHT_MASK (0x3ff << 0) +#define IC_IDMAC_2_PRPENC_HEIGHT_OFFSET 0 +#define IC_IDMAC_2_PRPVF_HEIGHT_MASK (0x3ff << 10) +#define IC_IDMAC_2_PRPVF_HEIGHT_OFFSET 10 +#define IC_IDMAC_2_PP_HEIGHT_MASK (0x3ff << 20) +#define IC_IDMAC_2_PP_HEIGHT_OFFSET 20 + +#define IC_IDMAC_3_PRPENC_WIDTH_MASK (0x3ff << 0) +#define IC_IDMAC_3_PRPENC_WIDTH_OFFSET 0 +#define IC_IDMAC_3_PRPVF_WIDTH_MASK (0x3ff << 10) +#define IC_IDMAC_3_PRPVF_WIDTH_OFFSET 10 +#define IC_IDMAC_3_PP_WIDTH_MASK (0x3ff << 20) +#define IC_IDMAC_3_PP_WIDTH_OFFSET 20 + +struct ic_task_regoffs { + u32 rsc; + u32 tpmem_csc[2]; +}; + +struct ic_task_bitfields { + u32 ic_conf_en; + u32 ic_conf_rot_en; + u32 ic_conf_cmb_en; + u32 ic_conf_csc1_en; + u32 ic_conf_csc2_en; + u32 ic_cmb_galpha_bit; +}; + +static const struct ic_task_regoffs ic_task_reg[IC_NUM_TASKS] = { + [IC_TASK_ENCODER] = { + .rsc = IC_PRP_ENC_RSC, + .tpmem_csc = {0x2008, 0}, + }, + [IC_TASK_VIEWFINDER] = { + .rsc = IC_PRP_VF_RSC, + .tpmem_csc = {0x4028, 0x4040}, + }, + [IC_TASK_POST_PROCESSOR] = { + .rsc = IC_PP_RSC, + 
.tpmem_csc = {0x6060, 0x6078}, + }, +}; + +static const struct ic_task_bitfields ic_task_bit[IC_NUM_TASKS] = { + [IC_TASK_ENCODER] = { + .ic_conf_en = IC_CONF_PRPENC_EN, + .ic_conf_rot_en = IC_CONF_PRPENC_ROT_EN, + .ic_conf_cmb_en = 0, /* NA */ + .ic_conf_csc1_en = IC_CONF_PRPENC_CSC1, + .ic_conf_csc2_en = 0, /* NA */ + .ic_cmb_galpha_bit = 0, /* NA */ + }, + [IC_TASK_VIEWFINDER] = { + .ic_conf_en = IC_CONF_PRPVF_EN, + .ic_conf_rot_en = IC_CONF_PRPVF_ROT_EN, + .ic_conf_cmb_en = IC_CONF_PRPVF_CMB, + .ic_conf_csc1_en = IC_CONF_PRPVF_CSC1, + .ic_conf_csc2_en = IC_CONF_PRPVF_CSC2, + .ic_cmb_galpha_bit = 0, + }, + [IC_TASK_POST_PROCESSOR] = { + .ic_conf_en = IC_CONF_PP_EN, + .ic_conf_rot_en = IC_CONF_PP_ROT_EN, + .ic_conf_cmb_en = IC_CONF_PP_CMB, + .ic_conf_csc1_en = IC_CONF_PP_CSC1, + .ic_conf_csc2_en = IC_CONF_PP_CSC2, + .ic_cmb_galpha_bit = 8, + }, +}; + +struct ipu_ic_priv; + +struct ipu_ic { + enum ipu_ic_task task; + const struct ic_task_regoffs *reg; + const struct ic_task_bitfields *bit; + + enum ipu_color_space in_cs, g_in_cs; + enum ipu_color_space out_cs; + bool graphics; + bool rotation; + bool in_use; + + struct ipu_ic_priv *priv; +}; + +struct ipu_ic_priv { + void __iomem *base; + void __iomem *tpmem_base; + spinlock_t lock; + struct ipu_soc *ipu; + int use_count; + struct ipu_ic task[IC_NUM_TASKS]; +}; + +static inline u32 ipu_ic_read(struct ipu_ic *ic, unsigned offset) +{ + return readl(ic->priv->base + offset); +} + +static inline void ipu_ic_write(struct ipu_ic *ic, u32 value, unsigned offset) +{ + writel(value, ic->priv->base + offset); +} + +struct ic_csc_params { + s16 coeff[3][3]; /* signed 9-bit integer coefficients */ + s16 offset[3]; /* signed 11+2-bit fixed point offset */ + u8 scale:2; /* scale coefficients * 2^(scale-1) */ + bool sat:1; /* saturate to (16, 235(Y) / 240(U, V)) */ +}; + +/* + * Y = R * .299 + G * .587 + B * .114; + * U = R * -.169 + G * -.332 + B * .500 + 128.; + * V = R * .500 + G * -.419 + B * -.0813 + 128.; + */ +static const struct ic_csc_params ic_csc_rgb2ycbcr = { + .coeff = { + { 77, 150, 29 }, + { 469, 427, 128 }, + { 128, 405, 491 }, + }, + .offset = { 0, 512, 512 }, + .scale = 1, +}; + +/* transparent RGB->RGB matrix for graphics combining */ +static const struct ic_csc_params ic_csc_rgb2rgb = { + .coeff = { + { 128, 0, 0 }, + { 0, 128, 0 }, + { 0, 0, 128 }, + }, + .scale = 2, +}; + +/* + * R = (1.164 * (Y - 16)) + (1.596 * (Cr - 128)); + * G = (1.164 * (Y - 16)) - (0.392 * (Cb - 128)) - (0.813 * (Cr - 128)); + * B = (1.164 * (Y - 16)) + (2.017 * (Cb - 128); + */ +static const struct ic_csc_params ic_csc_ycbcr2rgb = { + .coeff = { + { 149, 0, 204 }, + { 149, 462, 408 }, + { 149, 255, 0 }, + }, + .offset = { -446, 266, -554 }, + .scale = 2, +}; + +static int init_csc(struct ipu_ic *ic, + enum ipu_color_space inf, + enum ipu_color_space outf, + int csc_index) +{ + struct ipu_ic_priv *priv = ic->priv; + const struct ic_csc_params *params; + u32 __iomem *base; + const u16 (*c)[3]; + const u16 *a; + u32 param; + + base = (u32 __iomem *) + (priv->tpmem_base + ic->reg->tpmem_csc[csc_index]); + + if (inf == IPUV3_COLORSPACE_YUV && outf == IPUV3_COLORSPACE_RGB) + params = &ic_csc_ycbcr2rgb; + else if (inf == IPUV3_COLORSPACE_RGB && outf == IPUV3_COLORSPACE_YUV) + params = &ic_csc_rgb2ycbcr; + else if (inf == IPUV3_COLORSPACE_RGB && outf == IPUV3_COLORSPACE_RGB) + params = &ic_csc_rgb2rgb; + else { + dev_err(priv->ipu->dev, "Unsupported color space conversion\n"); + return -EINVAL; + } + + /* Cast to unsigned */ + c = (const u16 (*)[3])params->coeff; + 
a = (const u16 *)params->offset; + + param = ((a[0] & 0x1f) << 27) | ((c[0][0] & 0x1ff) << 18) | + ((c[1][1] & 0x1ff) << 9) | (c[2][2] & 0x1ff); + writel(param, base++); + + param = ((a[0] & 0x1fe0) >> 5) | (params->scale << 8) | + (params->sat << 9); + writel(param, base++); + + param = ((a[1] & 0x1f) << 27) | ((c[0][1] & 0x1ff) << 18) | + ((c[1][0] & 0x1ff) << 9) | (c[2][0] & 0x1ff); + writel(param, base++); + + param = ((a[1] & 0x1fe0) >> 5); + writel(param, base++); + + param = ((a[2] & 0x1f) << 27) | ((c[0][2] & 0x1ff) << 18) | + ((c[1][2] & 0x1ff) << 9) | (c[2][1] & 0x1ff); + writel(param, base++); + + param = ((a[2] & 0x1fe0) >> 5); + writel(param, base++); + + return 0; +} + +static int calc_resize_coeffs(struct ipu_ic *ic, + u32 in_size, u32 out_size, + u32 *resize_coeff, + u32 *downsize_coeff) +{ + struct ipu_ic_priv *priv = ic->priv; + struct ipu_soc *ipu = priv->ipu; + u32 temp_size, temp_downsize; + + /* + * Input size cannot be more than 4096, and output size cannot + * be more than 1024 + */ + if (in_size > 4096) { + dev_err(ipu->dev, "Unsupported resize (in_size > 4096)\n"); + return -EINVAL; + } + if (out_size > 1024) { + dev_err(ipu->dev, "Unsupported resize (out_size > 1024)\n"); + return -EINVAL; + } + + /* Cannot downsize more than 8:1 */ + if ((out_size << 3) < in_size) { + dev_err(ipu->dev, "Unsupported downsize\n"); + return -EINVAL; + } + + /* Compute downsizing coefficient */ + temp_downsize = 0; + temp_size = in_size; + while (((temp_size > 1024) || (temp_size >= out_size * 2)) && + (temp_downsize < 2)) { + temp_size >>= 1; + temp_downsize++; + } + *downsize_coeff = temp_downsize; + + /* + * compute resizing coefficient using the following equation: + * resize_coeff = M * (SI - 1) / (SO - 1) + * where M = 2^13, SI = input size, SO = output size + */ + *resize_coeff = (8192L * (temp_size - 1)) / (out_size - 1); + if (*resize_coeff >= 16384L) { + dev_err(ipu->dev, "Warning! 
Overflow on resize coeff.\n"); + *resize_coeff = 0x3FFF; + } + + return 0; +} + +void ipu_ic_task_enable(struct ipu_ic *ic) +{ + struct ipu_ic_priv *priv = ic->priv; + unsigned long flags; + u32 ic_conf; + + spin_lock_irqsave(&priv->lock, flags); + + ic_conf = ipu_ic_read(ic, IC_CONF); + + ic_conf |= ic->bit->ic_conf_en; + + if (ic->rotation) + ic_conf |= ic->bit->ic_conf_rot_en; + + if (ic->in_cs != ic->out_cs) + ic_conf |= ic->bit->ic_conf_csc1_en; + + if (ic->graphics) { + ic_conf |= ic->bit->ic_conf_cmb_en; + ic_conf |= ic->bit->ic_conf_csc1_en; + + if (ic->g_in_cs != ic->out_cs) + ic_conf |= ic->bit->ic_conf_csc2_en; + } + + ipu_ic_write(ic, ic_conf, IC_CONF); + + spin_unlock_irqrestore(&priv->lock, flags); +} +EXPORT_SYMBOL_GPL(ipu_ic_task_enable); + +void ipu_ic_task_disable(struct ipu_ic *ic) +{ + struct ipu_ic_priv *priv = ic->priv; + unsigned long flags; + u32 ic_conf; + + spin_lock_irqsave(&priv->lock, flags); + + ic_conf = ipu_ic_read(ic, IC_CONF); + + ic_conf &= ~(ic->bit->ic_conf_en | + ic->bit->ic_conf_csc1_en | + ic->bit->ic_conf_rot_en); + if (ic->bit->ic_conf_csc2_en) + ic_conf &= ~ic->bit->ic_conf_csc2_en; + if (ic->bit->ic_conf_cmb_en) + ic_conf &= ~ic->bit->ic_conf_cmb_en; + + ipu_ic_write(ic, ic_conf, IC_CONF); + + ic->rotation = ic->graphics = false; + + spin_unlock_irqrestore(&priv->lock, flags); +} +EXPORT_SYMBOL_GPL(ipu_ic_task_disable); + +int ipu_ic_task_graphics_init(struct ipu_ic *ic, + enum ipu_color_space in_g_cs, + bool galpha_en, u32 galpha, + bool colorkey_en, u32 colorkey) +{ + struct ipu_ic_priv *priv = ic->priv; + unsigned long flags; + u32 reg, ic_conf; + int ret = 0; + + if (ic->task == IC_TASK_ENCODER) + return -EINVAL; + + spin_lock_irqsave(&priv->lock, flags); + + ic_conf = ipu_ic_read(ic, IC_CONF); + + if (!(ic_conf & ic->bit->ic_conf_csc1_en)) { + /* need transparent CSC1 conversion */ + ret = init_csc(ic, IPUV3_COLORSPACE_RGB, + IPUV3_COLORSPACE_RGB, 0); + if (ret) + goto unlock; + } + + ic->g_in_cs = in_g_cs; + + if (ic->g_in_cs != ic->out_cs) { + ret = init_csc(ic, ic->g_in_cs, ic->out_cs, 1); + if (ret) + goto unlock; + } + + if (galpha_en) { + ic_conf |= IC_CONF_IC_GLB_LOC_A; + reg = ipu_ic_read(ic, IC_CMBP_1); + reg &= ~(0xff << ic->bit->ic_cmb_galpha_bit); + reg |= (galpha << ic->bit->ic_cmb_galpha_bit); + ipu_ic_write(ic, reg, IC_CMBP_1); + } else + ic_conf &= ~IC_CONF_IC_GLB_LOC_A; + + if (colorkey_en) { + ic_conf |= IC_CONF_KEY_COLOR_EN; + ipu_ic_write(ic, colorkey, IC_CMBP_2); + } else + ic_conf &= ~IC_CONF_KEY_COLOR_EN; + + ipu_ic_write(ic, ic_conf, IC_CONF); + + ic->graphics = true; +unlock: + spin_unlock_irqrestore(&priv->lock, flags); + return ret; +} +EXPORT_SYMBOL_GPL(ipu_ic_task_graphics_init); + +int ipu_ic_task_init(struct ipu_ic *ic, + int in_width, int in_height, + int out_width, int out_height, + enum ipu_color_space in_cs, + enum ipu_color_space out_cs) +{ + struct ipu_ic_priv *priv = ic->priv; + u32 reg, downsize_coeff, resize_coeff; + unsigned long flags; + int ret = 0; + + /* Setup vertical resizing */ + ret = calc_resize_coeffs(ic, in_height, out_height, + &resize_coeff, &downsize_coeff); + if (ret) + return ret; + + reg = (downsize_coeff << 30) | (resize_coeff << 16); + + /* Setup horizontal resizing */ + ret = calc_resize_coeffs(ic, in_width, out_width, + &resize_coeff, &downsize_coeff); + if (ret) + return ret; + + reg |= (downsize_coeff << 14) | resize_coeff; + + spin_lock_irqsave(&priv->lock, flags); + + ipu_ic_write(ic, reg, ic->reg->rsc); + + /* Setup color space conversion */ + ic->in_cs = in_cs; + ic->out_cs 
= out_cs; + + if (ic->in_cs != ic->out_cs) { + ret = init_csc(ic, ic->in_cs, ic->out_cs, 0); + if (ret) + goto unlock; + } + +unlock: + spin_unlock_irqrestore(&priv->lock, flags); + return ret; +} +EXPORT_SYMBOL_GPL(ipu_ic_task_init); + +int ipu_ic_task_idma_init(struct ipu_ic *ic, struct ipuv3_channel *channel, + u32 width, u32 height, int burst_size, + enum ipu_rotate_mode rot) +{ + struct ipu_ic_priv *priv = ic->priv; + struct ipu_soc *ipu = priv->ipu; + u32 ic_idmac_1, ic_idmac_2, ic_idmac_3; + u32 temp_rot = bitrev8(rot) >> 5; + bool need_hor_flip = false; + unsigned long flags; + int ret = 0; + + if ((burst_size != 8) && (burst_size != 16)) { + dev_err(ipu->dev, "Illegal burst length for IC\n"); + return -EINVAL; + } + + width--; + height--; + + if (temp_rot & 0x2) /* Need horizontal flip */ + need_hor_flip = true; + + spin_lock_irqsave(&priv->lock, flags); + + ic_idmac_1 = ipu_ic_read(ic, IC_IDMAC_1); + ic_idmac_2 = ipu_ic_read(ic, IC_IDMAC_2); + ic_idmac_3 = ipu_ic_read(ic, IC_IDMAC_3); + + switch (channel->num) { + case IPUV3_CHANNEL_IC_PP_MEM: + if (burst_size == 16) + ic_idmac_1 |= IC_IDMAC_1_CB2_BURST_16; + else + ic_idmac_1 &= ~IC_IDMAC_1_CB2_BURST_16; + + if (need_hor_flip) + ic_idmac_1 |= IC_IDMAC_1_PP_FLIP_RS; + else + ic_idmac_1 &= ~IC_IDMAC_1_PP_FLIP_RS; + + ic_idmac_2 &= ~IC_IDMAC_2_PP_HEIGHT_MASK; + ic_idmac_2 |= height << IC_IDMAC_2_PP_HEIGHT_OFFSET; + + ic_idmac_3 &= ~IC_IDMAC_3_PP_WIDTH_MASK; + ic_idmac_3 |= width << IC_IDMAC_3_PP_WIDTH_OFFSET; + break; + case IPUV3_CHANNEL_MEM_IC_PP: + if (burst_size == 16) + ic_idmac_1 |= IC_IDMAC_1_CB5_BURST_16; + else + ic_idmac_1 &= ~IC_IDMAC_1_CB5_BURST_16; + break; + case IPUV3_CHANNEL_MEM_ROT_PP: + ic_idmac_1 &= ~IC_IDMAC_1_PP_ROT_MASK; + ic_idmac_1 |= temp_rot << IC_IDMAC_1_PP_ROT_OFFSET; + break; + case IPUV3_CHANNEL_MEM_IC_PRP_VF: + if (burst_size == 16) + ic_idmac_1 |= IC_IDMAC_1_CB6_BURST_16; + else + ic_idmac_1 &= ~IC_IDMAC_1_CB6_BURST_16; + break; + case IPUV3_CHANNEL_IC_PRP_ENC_MEM: + if (burst_size == 16) + ic_idmac_1 |= IC_IDMAC_1_CB0_BURST_16; + else + ic_idmac_1 &= ~IC_IDMAC_1_CB0_BURST_16; + + if (need_hor_flip) + ic_idmac_1 |= IC_IDMAC_1_PRPENC_FLIP_RS; + else + ic_idmac_1 &= ~IC_IDMAC_1_PRPENC_FLIP_RS; + + ic_idmac_2 &= ~IC_IDMAC_2_PRPENC_HEIGHT_MASK; + ic_idmac_2 |= height << IC_IDMAC_2_PRPENC_HEIGHT_OFFSET; + + ic_idmac_3 &= ~IC_IDMAC_3_PRPENC_WIDTH_MASK; + ic_idmac_3 |= width << IC_IDMAC_3_PRPENC_WIDTH_OFFSET; + break; + case IPUV3_CHANNEL_MEM_ROT_ENC: + ic_idmac_1 &= ~IC_IDMAC_1_PRPENC_ROT_MASK; + ic_idmac_1 |= temp_rot << IC_IDMAC_1_PRPENC_ROT_OFFSET; + break; + case IPUV3_CHANNEL_IC_PRP_VF_MEM: + if (burst_size == 16) + ic_idmac_1 |= IC_IDMAC_1_CB1_BURST_16; + else + ic_idmac_1 &= ~IC_IDMAC_1_CB1_BURST_16; + + if (need_hor_flip) + ic_idmac_1 |= IC_IDMAC_1_PRPVF_FLIP_RS; + else + ic_idmac_1 &= ~IC_IDMAC_1_PRPVF_FLIP_RS; + + ic_idmac_2 &= ~IC_IDMAC_2_PRPVF_HEIGHT_MASK; + ic_idmac_2 |= height << IC_IDMAC_2_PRPVF_HEIGHT_OFFSET; + + ic_idmac_3 &= ~IC_IDMAC_3_PRPVF_WIDTH_MASK; + ic_idmac_3 |= width << IC_IDMAC_3_PRPVF_WIDTH_OFFSET; + break; + case IPUV3_CHANNEL_MEM_ROT_VF: + ic_idmac_1 &= ~IC_IDMAC_1_PRPVF_ROT_MASK; + ic_idmac_1 |= temp_rot << IC_IDMAC_1_PRPVF_ROT_OFFSET; + break; + case IPUV3_CHANNEL_G_MEM_IC_PRP_VF: + if (burst_size == 16) + ic_idmac_1 |= IC_IDMAC_1_CB3_BURST_16; + else + ic_idmac_1 &= ~IC_IDMAC_1_CB3_BURST_16; + break; + case IPUV3_CHANNEL_G_MEM_IC_PP: + if (burst_size == 16) + ic_idmac_1 |= IC_IDMAC_1_CB4_BURST_16; + else + ic_idmac_1 &= ~IC_IDMAC_1_CB4_BURST_16; + break; + case 
IPUV3_CHANNEL_VDI_MEM_IC_VF: + if (burst_size == 16) + ic_idmac_1 |= IC_IDMAC_1_CB7_BURST_16; + else + ic_idmac_1 &= ~IC_IDMAC_1_CB7_BURST_16; + break; + default: + goto unlock; + } + + ipu_ic_write(ic, ic_idmac_1, IC_IDMAC_1); + ipu_ic_write(ic, ic_idmac_2, IC_IDMAC_2); + ipu_ic_write(ic, ic_idmac_3, IC_IDMAC_3); + + if (rot >= IPU_ROTATE_90_RIGHT) + ic->rotation = true; + +unlock: + spin_unlock_irqrestore(&priv->lock, flags); + return ret; +} +EXPORT_SYMBOL_GPL(ipu_ic_task_idma_init); + +int ipu_ic_enable(struct ipu_ic *ic) +{ + struct ipu_ic_priv *priv = ic->priv; + unsigned long flags; + u32 module = IPU_CONF_IC_EN; + + spin_lock_irqsave(&priv->lock, flags); + + if (ic->rotation) + module |= IPU_CONF_ROT_EN; + + if (!priv->use_count) + ipu_module_enable(priv->ipu, module); + + priv->use_count++; + + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(ipu_ic_enable); + +int ipu_ic_disable(struct ipu_ic *ic) +{ + struct ipu_ic_priv *priv = ic->priv; + unsigned long flags; + u32 module = IPU_CONF_IC_EN | IPU_CONF_ROT_EN; + + spin_lock_irqsave(&priv->lock, flags); + + priv->use_count--; + + if (!priv->use_count) + ipu_module_disable(priv->ipu, module); + + if (priv->use_count < 0) + priv->use_count = 0; + + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(ipu_ic_disable); + +struct ipu_ic *ipu_ic_get(struct ipu_soc *ipu, enum ipu_ic_task task) +{ + struct ipu_ic_priv *priv = ipu->ic_priv; + unsigned long flags; + struct ipu_ic *ic, *ret; + + if (task >= IC_NUM_TASKS) + return ERR_PTR(-EINVAL); + + ic = &priv->task[task]; + + spin_lock_irqsave(&priv->lock, flags); + + if (ic->in_use) { + ret = ERR_PTR(-EBUSY); + goto unlock; + } + + ic->in_use = true; + ret = ic; + +unlock: + spin_unlock_irqrestore(&priv->lock, flags); + return ret; +} +EXPORT_SYMBOL_GPL(ipu_ic_get); + +void ipu_ic_put(struct ipu_ic *ic) +{ + struct ipu_ic_priv *priv = ic->priv; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + ic->in_use = false; + spin_unlock_irqrestore(&priv->lock, flags); +} +EXPORT_SYMBOL_GPL(ipu_ic_put); + +int ipu_ic_init(struct ipu_soc *ipu, struct device *dev, + unsigned long base, unsigned long tpmem_base) +{ + struct ipu_ic_priv *priv; + int i; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + ipu->ic_priv = priv; + + spin_lock_init(&priv->lock); + priv->base = devm_ioremap(dev, base, PAGE_SIZE); + if (!priv->base) + return -ENOMEM; + priv->tpmem_base = devm_ioremap(dev, tpmem_base, SZ_64K); + if (!priv->tpmem_base) + return -ENOMEM; + + dev_dbg(dev, "IC base: 0x%08lx remapped to %p\n", base, priv->base); + + priv->ipu = ipu; + + for (i = 0; i < IC_NUM_TASKS; i++) { + priv->task[i].task = i; + priv->task[i].priv = priv; + priv->task[i].reg = &ic_task_reg[i]; + priv->task[i].bit = &ic_task_bit[i]; + } + + return 0; +} + +void ipu_ic_exit(struct ipu_soc *ipu) +{ +} + +void ipu_ic_dump(struct ipu_ic *ic) +{ + struct ipu_ic_priv *priv = ic->priv; + struct ipu_soc *ipu = priv->ipu; + + dev_dbg(ipu->dev, "IC_CONF = \t0x%08X\n", + ipu_ic_read(ic, IC_CONF)); + dev_dbg(ipu->dev, "IC_PRP_ENC_RSC = \t0x%08X\n", + ipu_ic_read(ic, IC_PRP_ENC_RSC)); + dev_dbg(ipu->dev, "IC_PRP_VF_RSC = \t0x%08X\n", + ipu_ic_read(ic, IC_PRP_VF_RSC)); + dev_dbg(ipu->dev, "IC_PP_RSC = \t0x%08X\n", + ipu_ic_read(ic, IC_PP_RSC)); + dev_dbg(ipu->dev, "IC_CMBP_1 = \t0x%08X\n", + ipu_ic_read(ic, IC_CMBP_1)); + dev_dbg(ipu->dev, "IC_CMBP_2 = \t0x%08X\n", + ipu_ic_read(ic, IC_CMBP_2)); + dev_dbg(ipu->dev, "IC_IDMAC_1 = 
\t0x%08X\n", + ipu_ic_read(ic, IC_IDMAC_1)); + dev_dbg(ipu->dev, "IC_IDMAC_2 = \t0x%08X\n", + ipu_ic_read(ic, IC_IDMAC_2)); + dev_dbg(ipu->dev, "IC_IDMAC_3 = \t0x%08X\n", + ipu_ic_read(ic, IC_IDMAC_3)); + dev_dbg(ipu->dev, "IC_IDMAC_4 = \t0x%08X\n", + ipu_ic_read(ic, IC_IDMAC_4)); +} +EXPORT_SYMBOL_GPL(ipu_ic_dump); diff --git a/drivers/gpu/ipu-v3/ipu-prv.h b/drivers/gpu/ipu-v3/ipu-prv.h index 9b274f1259e..1596a4f52fa 100644 --- a/drivers/gpu/ipu-v3/ipu-prv.h +++ b/drivers/gpu/ipu-v3/ipu-prv.h @@ -161,6 +161,7 @@ struct ipu_csi; struct ipu_dc_priv; struct ipu_dmfc_priv; struct ipu_di; +struct ipu_ic_priv; struct ipu_smfc_priv; struct ipu_devtype; @@ -191,6 +192,7 @@ struct ipu_soc { struct ipu_dmfc_priv *dmfc_priv; struct ipu_di *di_priv[2]; struct ipu_csi *csi_priv[2]; + struct ipu_ic_priv *ic_priv; struct ipu_smfc_priv *smfc_priv; }; @@ -217,6 +219,10 @@ int ipu_csi_init(struct ipu_soc *ipu, struct device *dev, int id, unsigned long base, u32 module, struct clk *clk_ipu); void ipu_csi_exit(struct ipu_soc *ipu, int id); +int ipu_ic_init(struct ipu_soc *ipu, struct device *dev, + unsigned long base, unsigned long tpmem_base); +void ipu_ic_exit(struct ipu_soc *ipu); + int ipu_di_init(struct ipu_soc *ipu, struct device *dev, int id, unsigned long base, u32 module, struct clk *ipu_clk); void ipu_di_exit(struct ipu_soc *ipu, int id); diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h index 6d254275192..a477814a03a 100644 --- a/include/video/imx-ipu-v3.h +++ b/include/video/imx-ipu-v3.h @@ -71,6 +71,20 @@ enum ipu_csi_dest { IPU_CSI_DEST_VDIC, /* to VDIC */ }; +/* + * Enumeration of IPU rotation modes + */ +enum ipu_rotate_mode { + IPU_ROTATE_NONE = 0, + IPU_ROTATE_VERT_FLIP, + IPU_ROTATE_HORIZ_FLIP, + IPU_ROTATE_180, + IPU_ROTATE_90_RIGHT, + IPU_ROTATE_90_RIGHT_VFLIP, + IPU_ROTATE_90_RIGHT_HFLIP, + IPU_ROTATE_90_LEFT, +}; + enum ipu_color_space { IPUV3_COLORSPACE_RGB, IPUV3_COLORSPACE_YUV, @@ -242,6 +256,37 @@ struct ipu_csi *ipu_csi_get(struct ipu_soc *ipu, int id); void ipu_csi_put(struct ipu_csi *csi); void ipu_csi_dump(struct ipu_csi *csi); +/* + * IPU Image Converter (ic) functions + */ +enum ipu_ic_task { + IC_TASK_ENCODER, + IC_TASK_VIEWFINDER, + IC_TASK_POST_PROCESSOR, + IC_NUM_TASKS, +}; + +struct ipu_ic; +int ipu_ic_task_init(struct ipu_ic *ic, + int in_width, int in_height, + int out_width, int out_height, + enum ipu_color_space in_cs, + enum ipu_color_space out_cs); +int ipu_ic_task_graphics_init(struct ipu_ic *ic, + enum ipu_color_space in_g_cs, + bool galpha_en, u32 galpha, + bool colorkey_en, u32 colorkey); +void ipu_ic_task_enable(struct ipu_ic *ic); +void ipu_ic_task_disable(struct ipu_ic *ic); +int ipu_ic_task_idma_init(struct ipu_ic *ic, struct ipuv3_channel *channel, + u32 width, u32 height, int burst_size, + enum ipu_rotate_mode rot); +int ipu_ic_enable(struct ipu_ic *ic); +int ipu_ic_disable(struct ipu_ic *ic); +struct ipu_ic *ipu_ic_get(struct ipu_soc *ipu, enum ipu_ic_task task); +void ipu_ic_put(struct ipu_ic *ic); +void ipu_ic_dump(struct ipu_ic *ic); + /* * IPU Sensor Multiple FIFO Controller (SMFC) functions */ -- cgit v1.2.3-70-g09d2 From 7fafa8f06f9bdf32b806b4612bfe387de8e34125 Mon Sep 17 00:00:00 2001 From: Steve Longerbeam Date: Wed, 25 Jun 2014 18:05:34 -0700 Subject: gpu: ipu-v3: smfc: Convert to per-channel Convert the smfc object to be specific to a single smfc channel. Add ipu_smfc_{get|put} to retrieve and release a single smfc channel for exclusive use, and add use counter to ipu_smfc_{enable|disable}. 
Signed-off-by: Steve Longerbeam Signed-off-by: Philipp Zabel --- drivers/gpu/ipu-v3/ipu-smfc.c | 132 +++++++++++++++++++++++++++++++++--------- include/video/imx-ipu-v3.h | 10 ++-- 2 files changed, 112 insertions(+), 30 deletions(-) (limited to 'include') diff --git a/drivers/gpu/ipu-v3/ipu-smfc.c b/drivers/gpu/ipu-v3/ipu-smfc.c index 87ac624dd7c..a6429ca913c 100644 --- a/drivers/gpu/ipu-v3/ipu-smfc.c +++ b/drivers/gpu/ipu-v3/ipu-smfc.c @@ -21,9 +21,18 @@ #include "ipu-prv.h" +struct ipu_smfc { + struct ipu_smfc_priv *priv; + int chno; + bool inuse; +}; + struct ipu_smfc_priv { void __iomem *base; spinlock_t lock; + struct ipu_soc *ipu; + struct ipu_smfc channel[4]; + int use_count; }; /*SMFC Registers */ @@ -31,75 +40,146 @@ struct ipu_smfc_priv { #define SMFC_WMC 0x0004 #define SMFC_BS 0x0008 -int ipu_smfc_set_burstsize(struct ipu_soc *ipu, int channel, int burstsize) +int ipu_smfc_set_burstsize(struct ipu_smfc *smfc, int burstsize) { - struct ipu_smfc_priv *smfc = ipu->smfc_priv; + struct ipu_smfc_priv *priv = smfc->priv; unsigned long flags; u32 val, shift; - spin_lock_irqsave(&smfc->lock, flags); + spin_lock_irqsave(&priv->lock, flags); - shift = channel * 4; - val = readl(smfc->base + SMFC_BS); + shift = smfc->chno * 4; + val = readl(priv->base + SMFC_BS); val &= ~(0xf << shift); val |= burstsize << shift; - writel(val, smfc->base + SMFC_BS); + writel(val, priv->base + SMFC_BS); - spin_unlock_irqrestore(&smfc->lock, flags); + spin_unlock_irqrestore(&priv->lock, flags); return 0; } EXPORT_SYMBOL_GPL(ipu_smfc_set_burstsize); -int ipu_smfc_map_channel(struct ipu_soc *ipu, int channel, int csi_id, int mipi_id) +int ipu_smfc_map_channel(struct ipu_smfc *smfc, int csi_id, int mipi_id) { - struct ipu_smfc_priv *smfc = ipu->smfc_priv; + struct ipu_smfc_priv *priv = smfc->priv; unsigned long flags; u32 val, shift; - spin_lock_irqsave(&smfc->lock, flags); + spin_lock_irqsave(&priv->lock, flags); - shift = channel * 3; - val = readl(smfc->base + SMFC_MAP); + shift = smfc->chno * 3; + val = readl(priv->base + SMFC_MAP); val &= ~(0x7 << shift); val |= ((csi_id << 2) | mipi_id) << shift; - writel(val, smfc->base + SMFC_MAP); + writel(val, priv->base + SMFC_MAP); - spin_unlock_irqrestore(&smfc->lock, flags); + spin_unlock_irqrestore(&priv->lock, flags); return 0; } EXPORT_SYMBOL_GPL(ipu_smfc_map_channel); -int ipu_smfc_enable(struct ipu_soc *ipu) +int ipu_smfc_enable(struct ipu_smfc *smfc) { - return ipu_module_enable(ipu, IPU_CONF_SMFC_EN); + struct ipu_smfc_priv *priv = smfc->priv; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + + if (!priv->use_count) + ipu_module_enable(priv->ipu, IPU_CONF_SMFC_EN); + + priv->use_count++; + + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; } EXPORT_SYMBOL_GPL(ipu_smfc_enable); -int ipu_smfc_disable(struct ipu_soc *ipu) +int ipu_smfc_disable(struct ipu_smfc *smfc) { - return ipu_module_disable(ipu, IPU_CONF_SMFC_EN); + struct ipu_smfc_priv *priv = smfc->priv; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + + priv->use_count--; + + if (!priv->use_count) + ipu_module_disable(priv->ipu, IPU_CONF_SMFC_EN); + + if (priv->use_count < 0) + priv->use_count = 0; + + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; } EXPORT_SYMBOL_GPL(ipu_smfc_disable); +struct ipu_smfc *ipu_smfc_get(struct ipu_soc *ipu, unsigned int chno) +{ + struct ipu_smfc_priv *priv = ipu->smfc_priv; + struct ipu_smfc *smfc, *ret; + unsigned long flags; + + if (chno >= 4) + return ERR_PTR(-EINVAL); + + smfc = &priv->channel[chno]; + ret = 
smfc; + + spin_lock_irqsave(&priv->lock, flags); + + if (smfc->inuse) { + ret = ERR_PTR(-EBUSY); + goto unlock; + } + + smfc->inuse = true; +unlock: + spin_unlock_irqrestore(&priv->lock, flags); + return ret; +} +EXPORT_SYMBOL_GPL(ipu_smfc_get); + +void ipu_smfc_put(struct ipu_smfc *smfc) +{ + struct ipu_smfc_priv *priv = smfc->priv; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + smfc->inuse = false; + spin_unlock_irqrestore(&priv->lock, flags); +} +EXPORT_SYMBOL_GPL(ipu_smfc_put); + int ipu_smfc_init(struct ipu_soc *ipu, struct device *dev, unsigned long base) { - struct ipu_smfc_priv *smfc; + struct ipu_smfc_priv *priv; + int i; - smfc = devm_kzalloc(dev, sizeof(*smfc), GFP_KERNEL); - if (!smfc) + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) return -ENOMEM; - ipu->smfc_priv = smfc; - spin_lock_init(&smfc->lock); + ipu->smfc_priv = priv; + spin_lock_init(&priv->lock); + priv->ipu = ipu; - smfc->base = devm_ioremap(dev, base, PAGE_SIZE); - if (!smfc->base) + priv->base = devm_ioremap(dev, base, PAGE_SIZE); + if (!priv->base) return -ENOMEM; - pr_debug("%s: ioremap 0x%08lx -> %p\n", __func__, base, smfc->base); + for (i = 0; i < 4; i++) { + priv->channel[i].priv = priv; + priv->channel[i].chno = i; + } + + pr_debug("%s: ioremap 0x%08lx -> %p\n", __func__, base, priv->base); return 0; } diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h index a477814a03a..a695ee83e4e 100644 --- a/include/video/imx-ipu-v3.h +++ b/include/video/imx-ipu-v3.h @@ -290,10 +290,12 @@ void ipu_ic_dump(struct ipu_ic *ic); /* * IPU Sensor Multiple FIFO Controller (SMFC) functions */ -int ipu_smfc_enable(struct ipu_soc *ipu); -int ipu_smfc_disable(struct ipu_soc *ipu); -int ipu_smfc_map_channel(struct ipu_soc *ipu, int channel, int csi_id, int mipi_id); -int ipu_smfc_set_burstsize(struct ipu_soc *ipu, int channel, int burstsize); +struct ipu_smfc *ipu_smfc_get(struct ipu_soc *ipu, unsigned int chno); +void ipu_smfc_put(struct ipu_smfc *smfc); +int ipu_smfc_enable(struct ipu_smfc *smfc); +int ipu_smfc_disable(struct ipu_smfc *smfc); +int ipu_smfc_map_channel(struct ipu_smfc *smfc, int csi_id, int mipi_id); +int ipu_smfc_set_burstsize(struct ipu_smfc *smfc, int burstsize); enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc); enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat); -- cgit v1.2.3-70-g09d2 From a2be35e3320b27c84488729e9fb56a62e74d65fa Mon Sep 17 00:00:00 2001 From: Steve Longerbeam Date: Wed, 25 Jun 2014 18:05:35 -0700 Subject: gpu: ipu-v3: smfc: Add ipu_smfc_set_watermark() Adds ipu_smfc_set_watermark() which programs a channel's SMFC FIFO levels at which the watermark signal is set and cleared. Signed-off-by: Steve Longerbeam Signed-off-by: Philipp Zabel --- drivers/gpu/ipu-v3/ipu-smfc.c | 20 ++++++++++++++++++++ include/video/imx-ipu-v3.h | 1 + 2 files changed, 21 insertions(+) (limited to 'include') diff --git a/drivers/gpu/ipu-v3/ipu-smfc.c b/drivers/gpu/ipu-v3/ipu-smfc.c index a6429ca913c..6ca9b43ce25 100644 --- a/drivers/gpu/ipu-v3/ipu-smfc.c +++ b/drivers/gpu/ipu-v3/ipu-smfc.c @@ -80,6 +80,26 @@ int ipu_smfc_map_channel(struct ipu_smfc *smfc, int csi_id, int mipi_id) } EXPORT_SYMBOL_GPL(ipu_smfc_map_channel); +int ipu_smfc_set_watermark(struct ipu_smfc *smfc, u32 set_level, u32 clr_level) +{ + struct ipu_smfc_priv *priv = smfc->priv; + unsigned long flags; + u32 val, shift; + + spin_lock_irqsave(&priv->lock, flags); + + shift = smfc->chno * 6 + (smfc->chno > 1 ? 
4 : 0); + val = readl(priv->base + SMFC_WMC); + val &= ~(0x3f << shift); + val |= ((clr_level << 3) | set_level) << shift; + writel(val, priv->base + SMFC_WMC); + + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(ipu_smfc_set_watermark); + int ipu_smfc_enable(struct ipu_smfc *smfc) { struct ipu_smfc_priv *priv = smfc->priv; diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h index a695ee83e4e..49e5954ac03 100644 --- a/include/video/imx-ipu-v3.h +++ b/include/video/imx-ipu-v3.h @@ -296,6 +296,7 @@ int ipu_smfc_enable(struct ipu_smfc *smfc); int ipu_smfc_disable(struct ipu_smfc *smfc); int ipu_smfc_map_channel(struct ipu_smfc *smfc, int csi_id, int mipi_id); int ipu_smfc_set_burstsize(struct ipu_smfc *smfc, int burstsize); +int ipu_smfc_set_watermark(struct ipu_smfc *smfc, u32 set_level, u32 clr_level); enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc); enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat); -- cgit v1.2.3-70-g09d2 From ae0e9708b30b3eebe5a58e4d055eb49a73d641dd Mon Sep 17 00:00:00 2001 From: Steve Longerbeam Date: Wed, 25 Jun 2014 18:05:36 -0700 Subject: gpu: ipu-v3: Add ipu_mbus_code_to_colorspace() Add ipu_mbus_code_to_colorspace() to find ipu_color_space from a media bus pixel format code. Signed-off-by: Steve Longerbeam Signed-off-by: Philipp Zabel --- drivers/gpu/ipu-v3/ipu-common.c | 13 +++++++++++++ include/video/imx-ipu-v3.h | 1 + 2 files changed, 14 insertions(+) (limited to 'include') diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index f5a4e1ac2b5..49ee990b4d1 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c @@ -101,6 +101,19 @@ enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat) } EXPORT_SYMBOL_GPL(ipu_pixelformat_to_colorspace); +enum ipu_color_space ipu_mbus_code_to_colorspace(u32 mbus_code) +{ + switch (mbus_code & 0xf000) { + case 0x1000: + return IPUV3_COLORSPACE_RGB; + case 0x2000: + return IPUV3_COLORSPACE_YUV; + default: + return IPUV3_COLORSPACE_UNKNOWN; + } +} +EXPORT_SYMBOL_GPL(ipu_mbus_code_to_colorspace); + struct ipuv3_channel *ipu_idmac_get(struct ipu_soc *ipu, unsigned num) { struct ipuv3_channel *channel; diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h index 49e5954ac03..7c97ccaf39f 100644 --- a/include/video/imx-ipu-v3.h +++ b/include/video/imx-ipu-v3.h @@ -300,6 +300,7 @@ int ipu_smfc_set_watermark(struct ipu_smfc *smfc, u32 set_level, u32 clr_level); enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc); enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat); +enum ipu_color_space ipu_mbus_code_to_colorspace(u32 mbus_code); struct ipu_client_platformdata { int csi; -- cgit v1.2.3-70-g09d2 From f835f386a119c3f78f5acb93e86a4f025211739a Mon Sep 17 00:00:00 2001 From: Steve Longerbeam Date: Wed, 25 Jun 2014 18:05:37 -0700 Subject: gpu: ipu-v3: Add rotation mode conversion utilities Add two functions: - ipu_degrees_to_rot_mode(): converts a degrees, hflip, and vflip setting to an IPU rotation mode. - ipu_rot_mode_to_degrees(): converts an IPU rotation mode with given hflip and vflip settings to degrees. 
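For illustration only (not part of this patch), a short sketch of the intended round trip; the 90-degree/hflip values are placeholders:

#include <linux/types.h>
#include <video/imx-ipu-v3.h>

static int example_rotation(void)
{
        enum ipu_rotate_mode mode;
        int degrees;
        int ret;

        /* 90 degrees clockwise, with the sensor already applying hflip */
        ret = ipu_degrees_to_rot_mode(&mode, 90, true, false);
        if (ret)
                return ret;     /* only 0/90/180/270 are accepted */

        /* ... program the rotator with 'mode' here ... */

        /* Convert back to degrees, using the same flip settings */
        return ipu_rot_mode_to_degrees(&degrees, mode, true, false);
}

Both helpers fold the requested hflip/vflip into the rotation by XOR, so, for example, a horizontal flip on top of a 180-degree rotation reduces to a plain vertical flip.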
Signed-off-by: Steve Longerbeam Signed-off-by: Philipp Zabel --- drivers/gpu/ipu-v3/ipu-common.c | 64 +++++++++++++++++++++++++++++++++++++++++ include/video/imx-ipu-v3.h | 4 +++ 2 files changed, 68 insertions(+) (limited to 'include') diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index 49ee990b4d1..a1d42eed5d0 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c @@ -114,6 +114,70 @@ enum ipu_color_space ipu_mbus_code_to_colorspace(u32 mbus_code) } EXPORT_SYMBOL_GPL(ipu_mbus_code_to_colorspace); +int ipu_degrees_to_rot_mode(enum ipu_rotate_mode *mode, int degrees, + bool hflip, bool vflip) +{ + u32 r90, vf, hf; + + switch (degrees) { + case 0: + vf = hf = r90 = 0; + break; + case 90: + vf = hf = 0; + r90 = 1; + break; + case 180: + vf = hf = 1; + r90 = 0; + break; + case 270: + vf = hf = r90 = 1; + break; + default: + return -EINVAL; + } + + hf ^= (u32)hflip; + vf ^= (u32)vflip; + + *mode = (enum ipu_rotate_mode)((r90 << 2) | (hf << 1) | vf); + return 0; +} +EXPORT_SYMBOL_GPL(ipu_degrees_to_rot_mode); + +int ipu_rot_mode_to_degrees(int *degrees, enum ipu_rotate_mode mode, + bool hflip, bool vflip) +{ + u32 r90, vf, hf; + + r90 = ((u32)mode >> 2) & 0x1; + hf = ((u32)mode >> 1) & 0x1; + vf = ((u32)mode >> 0) & 0x1; + hf ^= (u32)hflip; + vf ^= (u32)vflip; + + switch ((enum ipu_rotate_mode)((r90 << 2) | (hf << 1) | vf)) { + case IPU_ROTATE_NONE: + *degrees = 0; + break; + case IPU_ROTATE_90_RIGHT: + *degrees = 90; + break; + case IPU_ROTATE_180: + *degrees = 180; + break; + case IPU_ROTATE_90_LEFT: + *degrees = 270; + break; + default: + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL_GPL(ipu_rot_mode_to_degrees); + struct ipuv3_channel *ipu_idmac_get(struct ipu_soc *ipu, unsigned num) { struct ipuv3_channel *channel; diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h index 7c97ccaf39f..3562698528b 100644 --- a/include/video/imx-ipu-v3.h +++ b/include/video/imx-ipu-v3.h @@ -301,6 +301,10 @@ int ipu_smfc_set_watermark(struct ipu_smfc *smfc, u32 set_level, u32 clr_level); enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc); enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat); enum ipu_color_space ipu_mbus_code_to_colorspace(u32 mbus_code); +int ipu_degrees_to_rot_mode(enum ipu_rotate_mode *mode, int degrees, + bool hflip, bool vflip); +int ipu_rot_mode_to_degrees(int *degrees, enum ipu_rotate_mode mode, + bool hflip, bool vflip); struct ipu_client_platformdata { int csi; -- cgit v1.2.3-70-g09d2 From 4cea940d34319fb5d5e2f4d554e23f766c228e90 Mon Sep 17 00:00:00 2001 From: Steve Longerbeam Date: Wed, 25 Jun 2014 18:05:38 -0700 Subject: gpu: ipu-v3: Add helper function checking if pixfmt is planar Add simple helper function returning true if passed pixel format is one of supported planar ones. 
Signed-off-by: Dmitry Eremin-Solenikov Signed-off-by: Philipp Zabel --- drivers/gpu/ipu-v3/ipu-common.c | 12 ++++++++++++ include/video/imx-ipu-v3.h | 1 + 2 files changed, 13 insertions(+) (limited to 'include') diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index a1d42eed5d0..18563c240f1 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c @@ -101,6 +101,18 @@ enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat) } EXPORT_SYMBOL_GPL(ipu_pixelformat_to_colorspace); +bool ipu_pixelformat_is_planar(u32 pixelformat) +{ + switch (pixelformat) { + case V4L2_PIX_FMT_YUV420: + case V4L2_PIX_FMT_YVU420: + return true; + } + + return false; +} +EXPORT_SYMBOL_GPL(ipu_pixelformat_is_planar); + enum ipu_color_space ipu_mbus_code_to_colorspace(u32 mbus_code) { switch (mbus_code & 0xf000) { diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h index 3562698528b..ecb01f843ae 100644 --- a/include/video/imx-ipu-v3.h +++ b/include/video/imx-ipu-v3.h @@ -301,6 +301,7 @@ int ipu_smfc_set_watermark(struct ipu_smfc *smfc, u32 set_level, u32 clr_level); enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc); enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat); enum ipu_color_space ipu_mbus_code_to_colorspace(u32 mbus_code); +bool ipu_pixelformat_is_planar(u32 pixelformat); int ipu_degrees_to_rot_mode(enum ipu_rotate_mode *mode, int degrees, bool hflip, bool vflip); int ipu_rot_mode_to_degrees(int *degrees, enum ipu_rotate_mode mode, -- cgit v1.2.3-70-g09d2 From a4cd8f229ff71db0c95c0d96381d4fb9239fdb19 Mon Sep 17 00:00:00 2001 From: Steve Longerbeam Date: Wed, 25 Jun 2014 18:05:39 -0700 Subject: gpu: ipu-v3: Move IDMAC channel names to imx-ipu-v3.h Move the IDMAC channel names to imx-ipu-v3.h, to make the names available outside IPU. Add a couple new channels in the process (async display BG/FG, channels 24 and 29). Signed-off-by: Steve Longerbeam Signed-off-by: Philipp Zabel --- drivers/gpu/ipu-v3/ipu-prv.h | 25 ------------------------- include/video/imx-ipu-v3.h | 30 ++++++++++++++++++++++++++++++ 2 files changed, 30 insertions(+), 25 deletions(-) (limited to 'include') diff --git a/drivers/gpu/ipu-v3/ipu-prv.h b/drivers/gpu/ipu-v3/ipu-prv.h index 1596a4f52fa..7f08a461c92 100644 --- a/drivers/gpu/ipu-v3/ipu-prv.h +++ b/drivers/gpu/ipu-v3/ipu-prv.h @@ -24,31 +24,6 @@ struct ipu_soc; #include