drm: Nerf drm_global_mutex BKL for good drivers
This change catches the majority of drivers (though unfortunately not the
majority of users, because all the big drivers have at least a lastclose
hook).
With the prep patches out of the way all drm state is fully protected
and either prevents or can deal with the races from dropping the BKL
around open/close. The only things left to audit are the various driver
hooks - by keeping the BKL around if any of them are set we have a
very simple cop-out!
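In code terms the cop-out is just a predicate over the driver hooks plus a conditional lock around open/close. A minimal sketch of the pattern, assuming the helpers added by this patch (the example_open() wrapper itself is hypothetical; the real hunks are below):

/*
 * Sketch only (hypothetical wrapper; the real code is in the drm_open()
 * and drm_release() hunks below): the global mutex is taken only for
 * drivers that still rely on BKL semantics, everyone else runs the
 * open/close path without it.
 */
static int example_open(struct drm_device *dev)
{
	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	/* ... first-open setup, protected by per-object locking ... */

	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);

	return 0;
}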
Note that one of the biggest prep pieces to get here was making
dev->open_count atomic, which was done in
commit 7e13ad8964
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date: Fri Jan 24 13:01:07 2020 +0000
drm: Avoid drm_global_mutex for simple inc/dec of dev->open_count
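For context on why that prep piece matters: with an atomic counter, atomic_fetch_inc() returns the previous value, so the very first opener can be detected without serializing on drm_global_mutex. A minimal illustration (the helper name is made up):

#include <linux/atomic.h>

/*
 * Illustration only (helper name made up): atomic_fetch_inc() returns
 * the value the counter held before the increment, so exactly one
 * caller observes 0 and knows it is the first opener -- no mutex is
 * needed to protect the counter itself.
 */
static bool example_is_first_open(atomic_t *open_count)
{
	return atomic_fetch_inc(open_count) == 0;
}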
v2:
- Rebase and fix locking in drm_open() (Chris)
- Indentation fix in drm_release
- Typo fix in the commit message (Sam)
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Sam Ravnborg <sam@ravnborg.org>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Tested-by: Thomas Zimmermann <tzimmermann@suse.de>
Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200204150146.2006481-6-daniel.vetter@ffwll.ch
commit 4017ad7bbc
parent 591a2abf14
drivers/gpu/drm/drm_drv.c

@@ -946,7 +946,8 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
 	struct drm_driver *driver = dev->driver;
 	int ret;
 
-	mutex_lock(&drm_global_mutex);
+	if (drm_dev_needs_global_mutex(dev))
+		mutex_lock(&drm_global_mutex);
 
 	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
 	if (ret)

@@ -986,7 +987,8 @@ err_minors:
 	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
 out_unlock:
-	mutex_unlock(&drm_global_mutex);
+	if (drm_dev_needs_global_mutex(dev))
+		mutex_unlock(&drm_global_mutex);
 	return ret;
 }
 EXPORT_SYMBOL(drm_dev_register);
drivers/gpu/drm/drm_file.c

@@ -51,6 +51,37 @@
 /* from BKL pushdown */
 DEFINE_MUTEX(drm_global_mutex);
 
+bool drm_dev_needs_global_mutex(struct drm_device *dev)
+{
+	/*
+	 * Legacy drivers rely on all kinds of BKL locking semantics, don't
+	 * bother. They also still need BKL locking for their ioctls, so better
+	 * safe than sorry.
+	 */
+	if (drm_core_check_feature(dev, DRIVER_LEGACY))
+		return true;
+
+	/*
+	 * The deprecated ->load callback must be called after the driver is
+	 * already registered. This means such drivers rely on the BKL to make
+	 * sure an open can't proceed until the driver is actually fully set up.
+	 * Similar hilarity holds for the unload callback.
+	 */
+	if (dev->driver->load || dev->driver->unload)
+		return true;
+
+	/*
+	 * Drivers with the lastclose callback assume that it's synchronized
+	 * against concurrent opens, which again needs the BKL. The proper fix
+	 * is to use the drm_client infrastructure with proper locking for each
+	 * client.
+	 */
+	if (dev->driver->lastclose)
+		return true;
+
+	return false;
+}
+
 /**
  * DOC: file operations
  *

@@ -378,9 +409,10 @@ int drm_open(struct inode *inode, struct file *filp)
 	if (IS_ERR(minor))
 		return PTR_ERR(minor);
 
-	mutex_lock(&drm_global_mutex);
-
 	dev = minor->dev;
+	if (drm_dev_needs_global_mutex(dev))
+		mutex_lock(&drm_global_mutex);
+
 	if (!atomic_fetch_inc(&dev->open_count))
 		need_setup = 1;
 

@@ -398,13 +430,15 @@ int drm_open(struct inode *inode, struct file *filp)
 		}
 	}
 
-	mutex_unlock(&drm_global_mutex);
+	if (drm_dev_needs_global_mutex(dev))
+		mutex_unlock(&drm_global_mutex);
 
 	return 0;
 
 err_undo:
 	atomic_dec(&dev->open_count);
-	mutex_unlock(&drm_global_mutex);
+	if (drm_dev_needs_global_mutex(dev))
+		mutex_unlock(&drm_global_mutex);
 	drm_minor_release(minor);
 	return retcode;
 }

@@ -444,7 +478,8 @@ int drm_release(struct inode *inode, struct file *filp)
 	struct drm_minor *minor = file_priv->minor;
 	struct drm_device *dev = minor->dev;
 
-	mutex_lock(&drm_global_mutex);
+	if (drm_dev_needs_global_mutex(dev))
+		mutex_lock(&drm_global_mutex);
 
 	DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));
 

@@ -453,7 +488,8 @@ int drm_release(struct inode *inode, struct file *filp)
 	if (atomic_dec_and_test(&dev->open_count))
 		drm_lastclose(dev);
 
-	mutex_unlock(&drm_global_mutex);
+	if (drm_dev_needs_global_mutex(dev))
+		mutex_unlock(&drm_global_mutex);
 
 	drm_minor_release(minor);
 
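The practical upshot for driver authors: any driver that has already dropped the deprecated ->load, ->unload and ->lastclose hooks and does not set DRIVER_LEGACY opts out of the BKL automatically. A hypothetical modern driver that qualifies (all names here are made up for illustration):

#include <drm/drm_drv.h>
#include <drm/drm_gem.h>

/* Hypothetical fops and driver declaration: no DRIVER_LEGACY flag and
 * no ->load, ->unload or ->lastclose hooks, so
 * drm_dev_needs_global_mutex() returns false and the open/close paths
 * never touch drm_global_mutex.
 */
DEFINE_DRM_GEM_FOPS(example_fops);

static struct drm_driver example_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET,
	.fops = &example_fops,
	.name = "example",
	.desc = "example device",
	.date = "20200204",
	.major = 1,
	/* no .load, .unload or .lastclose */
};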
drivers/gpu/drm/drm_internal.h

@@ -41,6 +41,7 @@ struct drm_printer;
 
 /* drm_file.c */
 extern struct mutex drm_global_mutex;
+bool drm_dev_needs_global_mutex(struct drm_device *dev);
 struct drm_file *drm_file_alloc(struct drm_minor *minor);
 void drm_file_free(struct drm_file *file);
 void drm_lastclose(struct drm_device *dev);