From ec79d6056de58511d8e46d9ae59d3878f958dc3e Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Tue, 1 Jun 2010 22:53:01 +0200
Subject: tty: replace BKL with a new tty_lock

As a preparation for replacing the big kernel lock in the TTY layer,
wrap all the callers in new macros tty_lock, tty_lock_nested and
tty_unlock.

Signed-off-by: Arnd Bergmann
Cc: Alan Cox
Signed-off-by: Greg Kroah-Hartman
---
 include/linux/tty.h | 31 +++++++++++++++++++++++++++++++
 1 file changed, 31 insertions(+)

diff --git a/include/linux/tty.h b/include/linux/tty.h
index 2df60e4ff40..6ead6b60c74 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -13,6 +13,7 @@
 #include <linux/tty_driver.h>
 #include <linux/tty_ldisc.h>
 #include <linux/mutex.h>
+#include <linux/smp_lock.h>
 
 #include <asm/system.h>
 
@@ -576,5 +577,35 @@ extern int vt_ioctl(struct tty_struct *tty, struct file *file,
 extern long vt_compat_ioctl(struct tty_struct *tty, struct file * file,
 		     unsigned int cmd, unsigned long arg);
 
+/* functions for preparation of BKL removal */
+
+/*
+ * tty_lock_nested gets the tty_lock while potentially holding it
+ *
+ * The Big TTY Mutex is a recursive lock, meaning you can take it
+ * from a thread that is already holding it.
+ * This is bad for a number of reasons, so tty_lock_nested should
+ * really be used as rarely as possible. If a code location can
+ * be shown to never get called with this held already, it should
+ * use tty_lock() instead.
+ */
+static inline void __lockfunc tty_lock_nested(void) __acquires(kernel_lock)
+{
+	lock_kernel();
+}
+static inline void tty_lock(void) __acquires(kernel_lock)
+{
+#ifdef CONFIG_LOCK_KERNEL
+	/* kernel_locked is 1 for !CONFIG_LOCK_KERNEL */
+	WARN_ON(kernel_locked());
+#endif
+	lock_kernel();
+}
+static inline void tty_unlock(void) __releases(kernel_lock)
+{
+	unlock_kernel();
+}
+#define tty_locked()		(kernel_locked())
+
 #endif /* __KERNEL__ */
 #endif
--
cgit v1.2.3-70-g09d2
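
Editor's note: to illustrate the caller-side conversion this commit prepares
for, here is a minimal, hypothetical sketch (not part of the commit) of a tty
driver entry point switching from direct lock_kernel()/unlock_kernel() calls
to the new wrappers, so the underlying lock can later be swapped out in one
place. The function example_tty_open and its body are assumptions for
illustration only; tty_lock() and tty_unlock() are the helpers added above.

	#include <linux/tty.h>

	/* hypothetical driver open routine, for illustration only */
	static int example_tty_open(struct tty_struct *tty, struct file *filp)
	{
		int ret = 0;

		tty_lock();		/* was: lock_kernel(); */
		/* ... driver-specific open logic would go here ... */
		tty_unlock();		/* was: unlock_kernel(); */

		return ret;
	}

Per the comment in the patch itself, tty_lock_nested() exists only for call
paths that may already hold the lock recursively; code that can be shown never
to be entered with the lock held should use tty_lock() instead.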