author     Daniel Vetter <daniel.vetter@ffwll.ch>  2014-07-27 21:09:33 +0400
committer  Daniel Vetter <daniel.vetter@ffwll.ch>  2014-08-08 19:47:01 +0400
commit     cb597bb3a2fbfc871cc1c703fb330d247bd21394 (patch)
tree       4bedf2841031c958813a71d8358ec1aa6178a76d /drivers/gpu/drm/drm_modeset_lock.c
parent     3d30a59bfcb7c96d4aacdb053c2ccc49394b2311 (diff)
drm: trylock modeset locking for fbdev panics
In the fbdev code we want to do trylocks only to avoid deadlocks and other ugly issues. Thus far we've only grabbed the overall modeset lock, but that already failed to exclude a pile of potential concurrent operations. With proper atomic support this will be worse.

So add a trylock mode to the modeset locking code which attempts all locks only with trylocks, if possible. We need to track this in the locking functions themselves and can't restrict this to drivers since driver-private w/w mutexes must be treated the same way.

There's still the issue that other driver-private locks aren't handled here at all, but, well, we can't have everything. With this we will at least not regress, even once atomic allows lots of concurrent kms activity.

Aside: We should move the acquire context to stack-based allocation in the callers to get rid of that awful WARN_ON(kmalloc_failed) control flow which just blows up when memory is short. But that's material for separate patches.

v2:
- Fix logic inversion fumble in the fb helper.
- Add proper kerneldoc.

Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
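For illustration only, a minimal caller-side sketch of the new trylock mode; this is not taken from the patch. The helper name my_driver_force_restore() and the use of oops_in_progress as the trylock trigger are assumptions.

/*
 * Hypothetical sketch: restore the fbdev configuration from a context
 * that may be atomic (e.g. an oops/panic path).  Names marked
 * "assumed" are not from this commit.
 */
#include <linux/kernel.h>		/* oops_in_progress */
#include <drm/drmP.h>
#include <drm/drm_modeset_lock.h>

static int my_driver_force_restore(struct drm_device *dev)	/* assumed name */
{
	int ret;

	/* In an oops we must not sleep on modeset locks: use trylock mode. */
	ret = __drm_modeset_lock_all(dev, !!oops_in_progress);
	if (ret)
		return ret;	/* -EBUSY (lock held elsewhere) or -ENOMEM */

	/* ... restore the fbdev mode on each CRTC here ... */

	drm_modeset_unlock_all(dev);
	return 0;
}

The point is that the caller only decides whether sleeping is allowed; the locking code then attempts every lock with a trylock and reports -EBUSY instead of risking a deadlock.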
Diffstat (limited to 'drivers/gpu/drm/drm_modeset_lock.c')
-rw-r--r--  drivers/gpu/drm/drm_modeset_lock.c  |  56
1 file changed, 44 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 4753c8bd5ab5..5280b64a0230 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -57,26 +57,37 @@
 /**
- * drm_modeset_lock_all - take all modeset locks
- * @dev: drm device
+ * __drm_modeset_lock_all - internal helper to grab all modeset locks
+ * @dev: DRM device
+ * @trylock: trylock mode for atomic contexts
  *
- * This function takes all modeset locks, suitable where a more fine-grained
- * scheme isn't (yet) implemented. Locks must be dropped with
- * drm_modeset_unlock_all.
+ * This is a special version of drm_modeset_lock_all() which can also be used in
+ * atomic contexts. Then @trylock must be set to true.
+ *
+ * Returns:
+ * 0 on success or negative error code on failure.
  */
-void drm_modeset_lock_all(struct drm_device *dev)
+int __drm_modeset_lock_all(struct drm_device *dev,
+			   bool trylock)
 {
 	struct drm_mode_config *config = &dev->mode_config;
 	struct drm_modeset_acquire_ctx *ctx;
 	int ret;
 
-	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
-	if (WARN_ON(!ctx))
-		return;
+	ctx = kzalloc(sizeof(*ctx),
+		      trylock ? GFP_ATOMIC : GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
 
-	mutex_lock(&config->mutex);
+	if (trylock) {
+		if (!mutex_trylock(&config->mutex))
+			return -EBUSY;
+	} else {
+		mutex_lock(&config->mutex);
+	}
 
 	drm_modeset_acquire_init(ctx, 0);
+	ctx->trylock_only = trylock;
 
 retry:
 	ret = drm_modeset_lock(&config->connection_mutex, ctx);
@@ -95,13 +106,29 @@ retry:
 
 	drm_warn_on_modeset_not_all_locked(dev);
 
-	return;
+	return 0;
 
 fail:
 	if (ret == -EDEADLK) {
 		drm_modeset_backoff(ctx);
 		goto retry;
 	}
+
+	return ret;
+}
+EXPORT_SYMBOL(__drm_modeset_lock_all);
+
+/**
+ * drm_modeset_lock_all - take all modeset locks
+ * @dev: drm device
+ *
+ * This function takes all modeset locks, suitable where a more fine-grained
+ * scheme isn't (yet) implemented. Locks must be dropped with
+ * drm_modeset_unlock_all.
+ */
+void drm_modeset_lock_all(struct drm_device *dev)
+{
+	WARN_ON(__drm_modeset_lock_all(dev, false) != 0);
 }
 EXPORT_SYMBOL(drm_modeset_lock_all);
@@ -287,7 +314,12 @@ static inline int modeset_lock(struct drm_modeset_lock *lock,
 
 	WARN_ON(ctx->contended);
 
-	if (interruptible && slow) {
+	if (ctx->trylock_only) {
+		if (!ww_mutex_trylock(&lock->mutex))
+			return -EBUSY;
+		else
+			return 0;
+	} else if (interruptible && slow) {
 		ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx);
 	} else if (interruptible) {
 		ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx);
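As a closing aside, for readers unfamiliar with the acquire-context API used above, here is a minimal, hypothetical sketch of the lock/backoff/retry dance that __drm_modeset_lock_all() implements in the first hunk. The function name and the choice of connection_mutex as the lock to take are illustrative assumptions, not part of this patch.

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_lock.h>

static int my_take_connection_mutex(struct drm_device *dev)	/* assumed name */
{
	struct drm_modeset_acquire_ctx ctx;	/* stack-based, as the commit message suggests */
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
	if (ret == -EDEADLK) {
		/* Drop everything we hold, wait for the contended lock, retry. */
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	if (ret)
		goto out;

	/* ... inspect or update connector state under the lock ... */

out:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return ret;
}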