296 files changed, 8484 insertions, 5138 deletions
diff --git a/Documentation/ABI/testing/sysfs-devices-platform-sh_mobile_lcdc_fb b/Documentation/ABI/testing/sysfs-devices-platform-sh_mobile_lcdc_fb new file mode 100644 index 00000000000..2107082426d --- /dev/null +++ b/Documentation/ABI/testing/sysfs-devices-platform-sh_mobile_lcdc_fb @@ -0,0 +1,44 @@ +What: /sys/devices/platform/sh_mobile_lcdc_fb.[0-3]/graphics/fb[0-9]/ovl_alpha +Date: May 2012 +Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com> +Description: + This file is only available on fb[0-9] devices corresponding + to overlay planes. + + Stores the alpha blending value for the overlay. Values range + from 0 (transparent) to 255 (opaque). The value is ignored if + the mode is not set to Alpha Blending. + +What: /sys/devices/platform/sh_mobile_lcdc_fb.[0-3]/graphics/fb[0-9]/ovl_mode +Date: May 2012 +Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com> +Description: + This file is only available on fb[0-9] devices corresponding + to overlay planes. + + Selects the composition mode for the overlay. Possible values + are + + 0 - Alpha Blending + 1 - ROP3 + +What: /sys/devices/platform/sh_mobile_lcdc_fb.[0-3]/graphics/fb[0-9]/ovl_position +Date: May 2012 +Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com> +Description: + This file is only available on fb[0-9] devices corresponding + to overlay planes. + + Stores the x,y overlay position on the display in pixels. The + position format is `[0-9]+,[0-9]+'. + +What: /sys/devices/platform/sh_mobile_lcdc_fb.[0-3]/graphics/fb[0-9]/ovl_rop3 +Date: May 2012 +Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com> +Description: + This file is only available on fb[0-9] devices corresponding + to overlay planes. + + Stores the raster operation (ROP3) for the overlay. Values + range from 0 to 255. The value is ignored if the mode is not + set to ROP3. diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt index d8147b336c3..6518a55273e 100644 --- a/Documentation/block/queue-sysfs.txt +++ b/Documentation/block/queue-sysfs.txt @@ -38,6 +38,13 @@ read or write requests. Note that the total allocated number may be twice this amount, since it applies only to reads or writes (not the accumulated sum). +To avoid priority inversion through request starvation, a request +queue maintains a separate request pool for each cgroup when +CONFIG_BLK_CGROUP is enabled, and this parameter applies to each such +per-block-cgroup request pool. In other words, if there are N block cgroups, +each request queue may have up to N request pools, each independently +regulated by nr_requests.
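A minimal sketch of the per-cgroup allocation rule the nr_requests paragraph above describes; the types and field names here are hypothetical illustrations, not the kernel's actual request-list structures:

#include <stdbool.h>

/* With CONFIG_BLK_CGROUP, each (request queue, block cgroup) pair gets its
 * own pool of this shape, so one cgroup exhausting its budget cannot starve
 * allocations made on behalf of another cgroup. */
struct request_pool {
	int allocated;    /* requests currently outstanding for this cgroup */
	int nr_requests;  /* per-pool limit, mirrors queue/nr_requests */
};

static bool pool_get_request(struct request_pool *pool)
{
	if (pool->allocated >= pool->nr_requests)
		return false;  /* this caller waits on its own pool only */
	pool->allocated++;
	return true;
}

static void pool_put_request(struct request_pool *pool)
{
	pool->allocated--;
}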
+ read_ahead_kb (RW) ------------------ Maximum number of kilobytes to read-ahead for filesystems on this block diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt index 946c73342cd..1c184495716 100644 --- a/Documentation/device-mapper/dm-raid.txt +++ b/Documentation/device-mapper/dm-raid.txt @@ -27,6 +27,10 @@ The target is named "raid" and it accepts the following parameters: - rotating parity N (right-to-left) with data restart raid6_nc RAID6 N continue - rotating parity N (right-to-left) with data continuation + raid10 Various RAID10 inspired algorithms chosen by additional params + - RAID10: Striped Mirrors (aka 'Striping on top of mirrors') + - RAID1E: Integrated Adjacent Stripe Mirroring + - and other similar RAID10 variants Reference: Chapter 4 of http://www.snia.org/sites/default/files/SNIA_DDF_Technical_Position_v2.0.pdf @@ -59,6 +63,28 @@ The target is named "raid" and it accepts the following parameters: logical size of the array. The bitmap records the device synchronisation state for each region. + [raid10_copies <# copies>] + [raid10_format near] + These two options are used to alter the default layout of + a RAID10 configuration. The number of copies can be + specified, but the default is 2. There are other variations + to how the copies are laid down - the default and only current + option is "near". Near copies are what most people think of + with respect to mirroring. If these options are left + unspecified, or 'raid10_copies 2' and/or 'raid10_format near' + are given, then the layouts for 2, 3 and 4 devices are: + 2 drives 3 drives 4 drives + -------- ---------- -------------- + A1 A1 A1 A1 A2 A1 A1 A2 A2 + A2 A2 A2 A3 A3 A3 A3 A4 A4 + A3 A3 A4 A4 A5 A5 A5 A6 A6 + A4 A4 A5 A6 A6 A7 A7 A8 A8 + .. .. .. .. .. .. .. .. .. + The 2-device layout is equivalent to 2-way RAID1. The 4-device + layout is what a traditional RAID10 would look like. The + 3-device layout is what might be called a 'RAID1E - Integrated + Adjacent Stripe Mirroring'. + <#raid_devs>: The number of devices composing the array. Each device consists of two entries. The first is the device containing the metadata (if any); the second is the one containing the diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index 72ed15075f7..afaff312bf4 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt @@ -626,3 +626,14 @@ Why: New drivers should use new V4L2_CAP_VIDEO_M2M capability flag Who: Sylwester Nawrocki <s.nawrocki@samsung.com> ---------------------------- + +What: OMAP private DMA implementation +When: 2013 +Why: We have a DMA engine implementation; all users should be updated + to use this rather than persisting with the old APIs. The old APIs + block merging the old DMA engine implementation into the DMA + engine driver.
+Who: Russell King <linux@arm.linux.org.uk>, + Santosh Shilimkar <santosh.shilimkar@ti.com> + +---------------------------- diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking index 2db1900d753..0f103e39b4f 100644 --- a/Documentation/filesystems/Locking +++ b/Documentation/filesystems/Locking @@ -138,8 +138,8 @@ evict_inode: put_super: write write_super: read sync_fs: read -freeze_fs: read -unfreeze_fs: read +freeze_fs: write +unfreeze_fs: write statfs: maybe(read) (see below) remount_fs: write umount_begin: no @@ -359,7 +359,6 @@ prototypes: int (*lm_compare_owner)(struct file_lock *, struct file_lock *); void (*lm_notify)(struct file_lock *); /* unblock callback */ int (*lm_grant)(struct file_lock *, struct file_lock *, int); - void (*lm_release_private)(struct file_lock *); void (*lm_break)(struct file_lock *); /* break_lease callback */ int (*lm_change)(struct file_lock **, int); @@ -368,7 +367,6 @@ locking rules: lm_compare_owner: yes no lm_notify: yes no lm_grant: no no -lm_release_private: maybe no lm_break: yes no lm_change yes no diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt index 7456360e161..a92bba81684 100644 --- a/Documentation/sound/alsa/HD-Audio-Models.txt +++ b/Documentation/sound/alsa/HD-Audio-Models.txt @@ -53,6 +53,7 @@ ALC882/883/885/888/889 acer-aspire-8930g Acer Aspire 8330G/6935G acer-aspire Acer Aspire others inv-dmic Inverted internal mic workaround + no-primary-hp VAIO Z workaround (for fixed speaker DAC) ALC861/660 ========== @@ -273,6 +274,10 @@ STAC92HD83* dell-s14 Dell laptop dell-vostro-3500 Dell Vostro 3500 laptop hp-dv7-4000 HP dv-7 4000 + hp_cNB11_intquad HP CNB models with 4 speakers + hp-zephyr HP Zephyr + hp-led HP with broken BIOS for mute LED + hp-inv-led HP with broken BIOS for inverted mute LED auto BIOS setup (default) STAC9872 diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt index 8c235b6e424..88152f214f4 100644 --- a/Documentation/sysctl/fs.txt +++ b/Documentation/sysctl/fs.txt @@ -32,6 +32,8 @@ Currently, these files are in /proc/sys/fs: - nr_open - overflowuid - overflowgid +- protected_hardlinks +- protected_symlinks - suid_dumpable - super-max - super-nr @@ -157,6 +159,46 @@ The default is 65534. ============================================================== +protected_hardlinks: + +A long-standing class of security issues is the hardlink-based +time-of-check-time-of-use race, most commonly seen in world-writable +directories like /tmp. The common method of exploitation of this flaw +is to cross privilege boundaries when following a given hardlink (i.e. a +root process follows a hardlink created by another user). Additionally, +on systems without separated partitions, this stops unauthorized users +from "pinning" vulnerable setuid/setgid files against being upgraded by +the administrator, or linking to special files. + +When set to "0", hardlink creation behavior is unrestricted. + +When set to "1" hardlinks cannot be created by users if they do not +already own the source file, or do not have read/write access to it. + +This protection is based on the restrictions in Openwall and grsecurity. + +============================================================== + +protected_symlinks: + +A long-standing class of security issues is the symlink-based +time-of-check-time-of-use race, most commonly seen in world-writable +directories like /tmp. 
The common method of exploitation of this flaw +is to cross privilege boundaries when following a given symlink (i.e. a +root process follows a symlink belonging to another user). For a likely +incomplete list of hundreds of examples across the years, please see: +http://cve.mitre.org/cgi-bin/cvekey.cgi?keyword=/tmp + +When set to "0", symlink following behavior is unrestricted. + +When set to "1" symlinks are permitted to be followed only when outside +a sticky world-writable directory, or when the uid of the symlink and +follower match, or when the directory owner matches the symlink's owner. + +This protection is based on the restrictions in Openwall and grsecurity. + +============================================================== + suid_dumpable: This value can be used to query and set the core dump mode for setuid diff --git a/MAINTAINERS b/MAINTAINERS index 6720018bc67..94b823f71e9 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7382,6 +7382,7 @@ W: http://user-mode-linux.sourceforge.net S: Maintained F: Documentation/virtual/uml/ F: arch/um/ +F: arch/x86/um/ F: fs/hostfs/ F: fs/hppfs/ diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index b152de79fd9..e58edc36b40 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -193,6 +193,8 @@ CONFIG_MMC_OMAP_HS=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_TWL92330=y CONFIG_RTC_DRV_TWL4030=y +CONFIG_DMADEVICES=y +CONFIG_DMA_OMAP=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_FS_XATTR is not set diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 004c1bc95d2..e4448e16046 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -215,7 +215,9 @@ static inline void vivt_flush_cache_mm(struct mm_struct *mm) static inline void vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) + struct mm_struct *mm = vma->vm_mm; + + if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end), vma->vm_flags); } @@ -223,7 +225,9 @@ vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned static inline void vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn) { - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { + struct mm_struct *mm = vma->vm_mm; + + if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) { unsigned long addr = user_addr & PAGE_MASK; __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags); } diff --git a/arch/arm/include/asm/mutex.h b/arch/arm/include/asm/mutex.h index 93226cf23ae..b1479fd04a9 100644 --- a/arch/arm/include/asm/mutex.h +++ b/arch/arm/include/asm/mutex.h @@ -7,121 +7,10 @@ */ #ifndef _ASM_MUTEX_H #define _ASM_MUTEX_H - -#if __LINUX_ARM_ARCH__ < 6 -/* On pre-ARMv6 hardware the swp based implementation is the most efficient. */ -# include <asm-generic/mutex-xchg.h> -#else - /* - * Attempting to lock a mutex on ARMv6+ can be done with a bastardized - * atomic decrement (it is not a reliable atomic decrement but it satisfies - * the defined semantics for our purpose, while being smaller and faster - * than a real atomic decrement or atomic swap. The idea is to attempt - * decrementing the lock value only once. 
If once decremented it isn't zero, - * or if its store-back fails due to a dispute on the exclusive store, we - * simply bail out immediately through the slow path where the lock will be - * reattempted until it succeeds. + * On pre-ARMv6 hardware this results in a swp-based implementation, + * which is the most efficient. For ARMv6+, we emit a pair of exclusive + * accesses instead. */ -static inline void -__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) -{ - int __ex_flag, __res; - - __asm__ ( - - "ldrex %0, [%2] \n\t" - "sub %0, %0, #1 \n\t" - "strex %1, %0, [%2] " - - : "=&r" (__res), "=&r" (__ex_flag) - : "r" (&(count)->counter) - : "cc","memory" ); - - __res |= __ex_flag; - if (unlikely(__res != 0)) - fail_fn(count); -} - -static inline int -__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) -{ - int __ex_flag, __res; - - __asm__ ( - - "ldrex %0, [%2] \n\t" - "sub %0, %0, #1 \n\t" - "strex %1, %0, [%2] " - - : "=&r" (__res), "=&r" (__ex_flag) - : "r" (&(count)->counter) - : "cc","memory" ); - - __res |= __ex_flag; - if (unlikely(__res != 0)) - __res = fail_fn(count); - return __res; -} - -/* - * Same trick is used for the unlock fast path. However the original value, - * rather than the result, is used to test for success in order to have - * better generated assembly. - */ -static inline void -__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) -{ - int __ex_flag, __res, __orig; - - __asm__ ( - - "ldrex %0, [%3] \n\t" - "add %1, %0, #1 \n\t" - "strex %2, %1, [%3] " - - : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag) - : "r" (&(count)->counter) - : "cc","memory" ); - - __orig |= __ex_flag; - if (unlikely(__orig != 0)) - fail_fn(count); -} - -/* - * If the unlock was done on a contended lock, or if the unlock simply fails - * then the mutex remains locked. - */ -#define __mutex_slowpath_needs_to_unlock() 1 - -/* - * For __mutex_fastpath_trylock we use another construct which could be - * described as a "single value cmpxchg". - * - * This provides the needed trylock semantics like cmpxchg would, but it is - * lighter and less generic than a true cmpxchg implementation. 
- */ -static inline int -__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) -{ - int __ex_flag, __res, __orig; - - __asm__ ( - - "1: ldrex %0, [%3] \n\t" - "subs %1, %0, #1 \n\t" - "strexeq %2, %1, [%3] \n\t" - "movlt %0, #0 \n\t" - "cmpeq %2, #0 \n\t" - "bgt 1b " - - : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag) - : "r" (&count->counter) - : "cc", "memory" ); - - return __orig; -} - -#endif +#include <asm-generic/mutex-xchg.h> #endif diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h index 23ebc0c82a3..24d284a1bfc 100644 --- a/arch/arm/include/asm/setup.h +++ b/arch/arm/include/asm/setup.h @@ -196,7 +196,7 @@ static const struct tagtable __tagtable_##fn __tag = { tag, fn } struct membank { phys_addr_t start; - unsigned long size; + phys_addr_t size; unsigned int highmem; }; @@ -217,7 +217,7 @@ extern struct meminfo meminfo; #define bank_phys_end(bank) ((bank)->start + (bank)->size) #define bank_phys_size(bank) (bank)->size -extern int arm_add_memory(phys_addr_t start, unsigned long size); +extern int arm_add_memory(phys_addr_t start, phys_addr_t size); extern void early_print(const char *str, ...); extern void dump_machine_table(void); diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 0d1851ca6eb..0f82098c9bf 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -244,6 +244,19 @@ svc_preempt: b 1b #endif +__und_fault: + @ Correct the PC such that it is pointing at the instruction + @ which caused the fault. If the faulting instruction was ARM + @ the PC will be pointing at the next instruction, and have to + @ subtract 4. Otherwise, it is Thumb, and the PC will be + @ pointing at the second half of the Thumb instruction. We + @ have to subtract 2. + ldr r2, [r0, #S_PC] + sub r2, r2, r1 + str r2, [r0, #S_PC] + b do_undefinstr +ENDPROC(__und_fault) + .align 5 __und_svc: #ifdef CONFIG_KPROBES @@ -261,25 +274,32 @@ __und_svc: @ @ r0 - instruction @ -#ifndef CONFIG_THUMB2_KERNEL +#ifndef CONFIG_THUMB2_KERNEL ldr r0, [r4, #-4] #else + mov r1, #2 ldrh r0, [r4, #-2] @ Thumb instruction at LR - 2 cmp r0, #0xe800 @ 32-bit instruction if xx >= 0 - ldrhhs r9, [r4] @ bottom 16 bits - orrhs r0, r9, r0, lsl #16 + blo __und_svc_fault + ldrh r9, [r4] @ bottom 16 bits + add r4, r4, #2 + str r4, [sp, #S_PC] + orr r0, r9, r0, lsl #16 #endif - adr r9, BSYM(1f) + adr r9, BSYM(__und_svc_finish) mov r2, r4 bl call_fpe + mov r1, #4 @ PC correction to apply +__und_svc_fault: mov r0, sp @ struct pt_regs *regs - bl do_undefinstr + bl __und_fault @ @ IRQs off again before pulling preserved data off the stack @ -1: disable_irq_notrace +__und_svc_finish: + disable_irq_notrace @ @ restore SPSR and restart the instruction @@ -423,25 +443,33 @@ __und_usr: mov r2, r4 mov r3, r5 + @ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the + @ faulting instruction depending on Thumb mode. + @ r3 = regs->ARM_cpsr @ - @ fall through to the emulation code, which returns using r9 if - @ it has emulated the instruction, or the more conventional lr - @ if we are to treat this as a real undefined instruction - @ - @ r0 - instruction + @ The emulation code returns using r9 if it has emulated the + @ instruction, or the more conventional lr if we are to treat + @ this as a real undefined instruction @ adr r9, BSYM(ret_from_exception) - adr lr, BSYM(__und_usr_unknown) + tst r3, #PSR_T_BIT @ Thumb mode? 
- itet eq @ explicit IT needed for the 1f label - subeq r4, r2, #4 @ ARM instr at LR - 4 - subne r4, r2, #2 @ Thumb instr at LR - 2 -1: ldreqt r0, [r4] + bne __und_usr_thumb + sub r4, r2, #4 @ ARM instr at LR - 4 +1: ldrt r0, [r4] #ifdef CONFIG_CPU_ENDIAN_BE8 - reveq r0, r0 @ little endian instruction + rev r0, r0 @ little endian instruction #endif - beq call_fpe + @ r0 = 32-bit ARM instruction which caused the exception + @ r2 = PC value for the following instruction (:= regs->ARM_pc) + @ r4 = PC value for the faulting instruction + @ lr = 32-bit undefined instruction function + adr lr, BSYM(__und_usr_fault_32) + b call_fpe + +__und_usr_thumb: @ Thumb instruction + sub r4, r2, #2 @ First half of thumb instr at LR - 2 #if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7 /* * Thumb-2 instruction handling. Note that because pre-v6 and >= v6 platforms @@ -455,7 +483,7 @@ __und_usr: ldr r5, .LCcpu_architecture ldr r5, [r5] cmp r5, #CPU_ARCH_ARMv7 - blo __und_usr_unknown + blo __und_usr_fault_16 @ 16bit undefined instruction /* * The following code won't get run unless the running CPU really is v7, so * coding round the lack of ldrht on older arches is pointless. Temporarily @@ -463,15 +491,18 @@ __und_usr: */ .arch armv6t2 #endif -2: - ARM( ldrht r5, [r4], #2 ) - THUMB( ldrht r5, [r4] ) - THUMB( add r4, r4, #2 ) +2: ldrht r5, [r4] cmp r5, #0xe800 @ 32bit instruction if xx != 0 - blo __und_usr_unknown -3: ldrht r0, [r4] + blo __und_usr_fault_16 @ 16bit undefined instruction +3: ldrht r0, [r2] add r2, r2, #2 @ r2 is PC + 2, make it PC + 4 + str r2, [sp, #S_PC] @ it's a 2x16bit instr, update orr r0, r0, r5, lsl #16 + adr lr, BSYM(__und_usr_fault_32) + @ r0 = the two 16-bit Thumb instructions which caused the exception + @ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc) + @ r4 = PC value for the first 16-bit Thumb instruction + @ lr = 32bit undefined instruction function #if __LINUX_ARM_ARCH__ < 7 /* If the target arch was overridden, change it back: */ @@ -482,17 +513,13 @@ __und_usr: #endif #endif /* __LINUX_ARM_ARCH__ < 7 */ #else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */ - b __und_usr_unknown + b __und_usr_fault_16 #endif - UNWIND(.fnend ) + UNWIND(.fnend) ENDPROC(__und_usr) - @ - @ fallthrough to call_fpe - @ - /* - * The out of line fixup for the ldrt above. + * The out of line fixup for the ldrt instructions above. */ .pushsection .fixup, "ax" .align 2 @@ -524,11 +551,12 @@ ENDPROC(__und_usr) * NEON handler code. * * Emulators may wish to make use of the following registers: - * r0 = instruction opcode. - * r2 = PC+4 + * r0 = instruction opcode (32-bit ARM or two 16-bit Thumb) + * r2 = PC value to resume execution after successful emulation * r9 = normal "successful" return address - * r10 = this threads thread_info structure. + * r10 = this threads thread_info structure * lr = unrecognised instruction return address + * IRQs disabled, FIQs enabled. 
*/ @ @ Fall-through from Thumb-2 __und_usr @@ -659,12 +687,17 @@ ENTRY(no_fp) mov pc, lr ENDPROC(no_fp) -__und_usr_unknown: - enable_irq +__und_usr_fault_32: + mov r1, #4 + b 1f +__und_usr_fault_16: + mov r1, #2 +1: enable_irq mov r0, sp adr lr, BSYM(ret_from_exception) - b do_undefinstr -ENDPROC(__und_usr_unknown) + b __und_fault +ENDPROC(__und_usr_fault_32) +ENDPROC(__und_usr_fault_16) .align 5 __pabt_usr: diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 49d9f930524..978eac57e04 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -51,23 +51,15 @@ ret_fast_syscall: fast_work_pending: str r0, [sp, #S_R0+S_OFF]! @ returned r0 work_pending: - tst r1, #_TIF_NEED_RESCHED - bne work_resched - /* - * TIF_SIGPENDING or TIF_NOTIFY_RESUME must've been set if we got here - */ - ldr r2, [sp, #S_PSR] mov r0, sp @ 'regs' - tst r2, #15 @ are we returning to user mode? - bne no_work_pending @ no? just leave, then... mov r2, why @ 'syscall' - tst r1, #_TIF_SIGPENDING @ delivering a signal? - movne why, #0 @ prevent further restarts - bl do_notify_resume - b ret_slow_syscall @ Check work again + bl do_work_pending + cmp r0, #0 + beq no_work_pending + movlt scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE) + ldmia sp, {r0 - r6} @ have to reload r0 - r6 + b local_restart @ ... and off we go -work_resched: - bl schedule /* * "slow" syscall return path. "why" tells us if this was a real syscall. */ @@ -409,6 +401,7 @@ ENTRY(vector_swi) eor scno, scno, #__NR_SYSCALL_BASE @ check OS number #endif +local_restart: ldr r10, [tsk, #TI_FLAGS] @ check for syscall tracing stmdb sp!, {r4, r5} @ push fifth and sixth args @@ -450,7 +443,8 @@ __sys_trace: mov scno, r0 @ syscall number (possibly new) add r1, sp, #S_R0 + S_OFF @ pointer to regs cmp scno, #NR_syscalls @ check upper syscall limit - ldmccia r1, {r0 - r3} @ have to reload r0 - r3 + ldmccia r1, {r0 - r6} @ have to reload r0 - r6 + stmccia sp, {r4, r5} @ and update the stack args ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine b 2b diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c index df0bf0c8cb7..34e56647dce 100644 --- a/arch/arm/kernel/ftrace.c +++ b/arch/arm/kernel/ftrace.c @@ -179,19 +179,20 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, old = *parent; *parent = return_hooker; - err = ftrace_push_return_trace(old, self_addr, &trace.depth, - frame_pointer); - if (err == -EBUSY) { - *parent = old; - return; - } - trace.func = self_addr; + trace.depth = current->curr_ret_stack + 1; /* Only trace if the calling function expects to */ if (!ftrace_graph_entry(&trace)) { - current->curr_ret_stack--; *parent = old; + return; + } + + err = ftrace_push_return_trace(old, self_addr, &trace.depth, + frame_pointer); + if (err == -EBUSY) { + *parent = old; + return; } } diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 19c95ea65b2..693b744fd57 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -247,6 +247,7 @@ void machine_shutdown(void) void machine_halt(void) { machine_shutdown(); + local_irq_disable(); while (1); } @@ -268,6 +269,7 @@ void machine_restart(char *cmd) /* Whoops - the platform was unable to reboot. Tell the user! 
*/ printk("Reboot failed -- System halted\n"); + local_irq_disable(); while (1); } diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index dab711e6e1c..3e0fc5f7ed4 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -25,6 +25,7 @@ #include <linux/regset.h> #include <linux/audit.h> #include <linux/tracehook.h> +#include <linux/unistd.h> #include <asm/pgtable.h> #include <asm/traps.h> diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index e15d83bb4ea..a81dcecc734 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -508,7 +508,7 @@ void __init dump_machine_table(void) /* can't use cpu_relax() here as it may require MMU setup */; } -int __init arm_add_memory(phys_addr_t start, unsigned long size) +int __init arm_add_memory(phys_addr_t start, phys_addr_t size) { struct membank *bank = &meminfo.bank[meminfo.nr_banks]; @@ -538,7 +538,7 @@ int __init arm_add_memory(phys_addr_t start, unsigned long size) } #endif - bank->size = size & PAGE_MASK; + bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1); /* * Check whether this memory region has non-zero size or @@ -558,7 +558,7 @@ int __init arm_add_memory(phys_addr_t start, unsigned long size) static int __init early_mem(char *p) { static int usermem __initdata = 0; - unsigned long size; + phys_addr_t size; phys_addr_t start; char *endp; diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 536c5d6b340..f27789e4e38 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -27,7 +27,6 @@ */ #define SWI_SYS_SIGRETURN (0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE)) #define SWI_SYS_RT_SIGRETURN (0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE)) -#define SWI_SYS_RESTART (0xef000000|__NR_restart_syscall|__NR_OABI_SYSCALL_BASE) /* * With EABI, the syscall number has to be loaded into r7. @@ -48,18 +47,6 @@ const unsigned long sigreturn_codes[7] = { }; /* - * Either we support OABI only, or we have EABI with the OABI - * compat layer enabled. In the later case we don't know if - * user space is EABI or not, and if not we must not clobber r7. - * Always using the OABI syscall solves that issue and works for - * all those cases. - */ -const unsigned long syscall_restart_code[2] = { - SWI_SYS_RESTART, /* swi __NR_restart_syscall */ - 0xe49df004, /* ldr pc, [sp], #4 */ -}; - -/* * atomically swap in the new signal mask, and wait for a signal. */ asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask) @@ -582,12 +569,13 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, * the kernel can handle, and then we build all the user-level signal handling * stack-frames in one go after that. */ -static void do_signal(struct pt_regs *regs, int syscall) +static int do_signal(struct pt_regs *regs, int syscall) { unsigned int retval = 0, continue_addr = 0, restart_addr = 0; struct k_sigaction ka; siginfo_t info; int signr; + int restart = 0; /* * If we were from a system call, check for system call restarting... @@ -602,15 +590,15 @@ static void do_signal(struct pt_regs *regs, int syscall) * debugger will see the already changed PSW. 
*/ switch (retval) { + case -ERESTART_RESTARTBLOCK: + restart -= 2; case -ERESTARTNOHAND: case -ERESTARTSYS: case -ERESTARTNOINTR: + restart++; regs->ARM_r0 = regs->ARM_ORIG_r0; regs->ARM_pc = restart_addr; break; - case -ERESTART_RESTARTBLOCK: - regs->ARM_r0 = -EINTR; - break; } } @@ -619,14 +607,17 @@ static void do_signal(struct pt_regs *regs, int syscall) * point the debugger may change all our registers ... */ signr = get_signal_to_deliver(&info, &ka, regs, NULL); + /* + * Depending on the signal settings we may need to revert the + * decision to restart the system call. But skip this if a + * debugger has chosen to restart at a different PC. + */ + if (regs->ARM_pc != restart_addr) + restart = 0; if (signr > 0) { - /* - * Depending on the signal settings we may need to revert the - * decision to restart the system call. But skip this if a - * debugger has chosen to restart at a different PC. - */ - if (regs->ARM_pc == restart_addr) { - if (retval == -ERESTARTNOHAND + if (unlikely(restart)) { + if (retval == -ERESTARTNOHAND || + retval == -ERESTART_RESTARTBLOCK || (retval == -ERESTARTSYS && !(ka.sa.sa_flags & SA_RESTART))) { regs->ARM_r0 = -EINTR; @@ -635,52 +626,43 @@ static void do_signal(struct pt_regs *regs, int syscall) } handle_signal(signr, &ka, &info, regs); - return; - } - - if (syscall) { - /* - * Handle restarting a different system call. As above, - * if a debugger has chosen to restart at a different PC, - * ignore the restart. - */ - if (retval == -ERESTART_RESTARTBLOCK - && regs->ARM_pc == continue_addr) { - if (thumb_mode(regs)) { - regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE; - regs->ARM_pc -= 2; - } else { -#if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT) - regs->ARM_r7 = __NR_restart_syscall; - regs->ARM_pc -= 4; -#else - u32 __user *usp; - - regs->ARM_sp -= 4; - usp = (u32 __user *)regs->ARM_sp; - - if (put_user(regs->ARM_pc, usp) == 0) { - regs->ARM_pc = KERN_RESTART_CODE; - } else { - regs->ARM_sp += 4; - force_sigsegv(0, current); - } -#endif - } - } + return 0; } restore_saved_sigmask(); + if (unlikely(restart)) + regs->ARM_pc = continue_addr; + return restart; } -asmlinkage void -do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall) +asmlinkage int +do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) { - if (thread_flags & _TIF_SIGPENDING) - do_signal(regs, syscall); - - if (thread_flags & _TIF_NOTIFY_RESUME) { - clear_thread_flag(TIF_NOTIFY_RESUME); - tracehook_notify_resume(regs); - } + do { + if (likely(thread_flags & _TIF_NEED_RESCHED)) { + schedule(); + } else { + if (unlikely(!user_mode(regs))) + return 0; + local_irq_enable(); + if (thread_flags & _TIF_SIGPENDING) { + int restart = do_signal(regs, syscall); + if (unlikely(restart)) { + /* + * Restart without handlers. + * Deal with it without leaving + * the kernel space. + */ + return restart; + } + syscall = 0; + } else { + clear_thread_flag(TIF_NOTIFY_RESUME); + tracehook_notify_resume(regs); + } + } + local_irq_disable(); + thread_flags = current_thread_info()->flags; + } while (thread_flags & _TIF_WORK_MASK); + return 0; } diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h index 6fcfe8398aa..5ff067b7c75 100644 --- a/arch/arm/kernel/signal.h +++ b/arch/arm/kernel/signal.h @@ -8,7 +8,5 @@ * published by the Free Software Foundation. 
*/ #define KERN_SIGRETURN_CODE (CONFIG_VECTORS_BASE + 0x00000500) -#define KERN_RESTART_CODE (KERN_SIGRETURN_CODE + sizeof(sigreturn_codes)) extern const unsigned long sigreturn_codes[7]; -extern const unsigned long syscall_restart_code[2]; diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index aea74f5bc34..ebd8ad274d7 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -563,7 +563,8 @@ void smp_send_stop(void) cpumask_copy(&mask, cpu_online_mask); cpumask_clear_cpu(smp_processor_id(), &mask); - smp_cross_call(&mask, IPI_CPU_STOP); + if (!cpumask_empty(&mask)) + smp_cross_call(&mask, IPI_CPU_STOP); /* Wait up to one second for other CPUs to stop */ timeout = USEC_PER_SEC; diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 8b97d739b17..f7945218b8c 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -402,18 +402,10 @@ static int call_undef_hook(struct pt_regs *regs, unsigned int instr) asmlinkage void __exception do_undefinstr(struct pt_regs *regs) { - unsigned int correction = thumb_mode(regs) ? 2 : 4; unsigned int instr; siginfo_t info; void __user *pc; - /* - * According to the ARM ARM, PC is 2 or 4 bytes ahead, - * depending whether we're in Thumb mode or not. - * Correct this offset. - */ - regs->ARM_pc -= correction; - pc = (void __user *)instruction_pointer(regs); if (processor_mode(regs) == SVC_MODE) { @@ -852,8 +844,6 @@ void __init early_trap_init(void *vectors_base) */ memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE), sigreturn_codes, sizeof(sigreturn_codes)); - memcpy((void *)(vectors + KERN_RESTART_CODE - CONFIG_VECTORS_BASE), - syscall_restart_code, sizeof(syscall_restart_code)); flush_icache_range(vectors, vectors + PAGE_SIZE); modify_domain(DOMAIN_USER, DOMAIN_CLIENT); diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c index d1624a315c9..783eab6845c 100644 --- a/arch/arm/mach-davinci/devices-da8xx.c +++ b/arch/arm/mach-davinci/devices-da8xx.c @@ -546,6 +546,7 @@ static struct lcd_ctrl_config lcd_cfg = { .sync_edge = 0, .sync_ctrl = 1, .raster_order = 0, + .fifo_th = 6, }; struct da8xx_lcdc_platform_data sharp_lcd035q3dg01_pdata = { diff --git a/arch/arm/mach-omap1/board-h2-mmc.c b/arch/arm/mach-omap1/board-h2-mmc.c index da0e37d4082..e1362ce4849 100644 --- a/arch/arm/mach-omap1/board-h2-mmc.c +++ b/arch/arm/mach-omap1/board-h2-mmc.c @@ -54,7 +54,6 @@ static struct omap_mmc_platform_data mmc1_data = { .nr_slots = 1, .init = mmc_late_init, .cleanup = mmc_cleanup, - .dma_mask = 0xffffffff, .slots[0] = { .set_power = mmc_set_power, .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, diff --git a/arch/arm/mach-omap1/board-h3-mmc.c b/arch/arm/mach-omap1/board-h3-mmc.c index f8242aa9b76..c74daace8cd 100644 --- a/arch/arm/mach-omap1/board-h3-mmc.c +++ b/arch/arm/mach-omap1/board-h3-mmc.c @@ -36,7 +36,6 @@ static int mmc_set_power(struct device *dev, int slot, int power_on, */ static struct omap_mmc_platform_data mmc1_data = { .nr_slots = 1, - .dma_mask = 0xffffffff, .slots[0] = { .set_power = mmc_set_power, .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c index 4007a372481..2c0ca8fc338 100644 --- a/arch/arm/mach-omap1/board-nokia770.c +++ b/arch/arm/mach-omap1/board-nokia770.c @@ -185,7 +185,6 @@ static int nokia770_mmc_get_cover_state(struct device *dev, int slot) static struct omap_mmc_platform_data nokia770_mmc2_data = { .nr_slots = 1, - .dma_mask = 0xffffffff, .max_freq = 12000000, 
.slots[0] = { .set_power = nokia770_mmc_set_power, diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c index 2c5d0ed7528..677357ff61a 100644 --- a/arch/arm/mach-omap2/board-n8x0.c +++ b/arch/arm/mach-omap2/board-n8x0.c @@ -468,7 +468,6 @@ static struct omap_mmc_platform_data mmc1_data = { .cleanup = n8x0_mmc_cleanup, .shutdown = n8x0_mmc_shutdown, .max_freq = 24000000, - .dma_mask = 0xffffffff, .slots[0] = { .wires = 4, .set_power = n8x0_mmc_set_power, diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index 5fb47a14f4b..af1ed7d24a1 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c @@ -37,6 +37,7 @@ #define DISPC_CONTROL 0x0040 #define DISPC_CONTROL2 0x0238 +#define DISPC_CONTROL3 0x0848 #define DISPC_IRQSTATUS 0x0018 #define DSS_SYSCONFIG 0x10 @@ -52,6 +53,7 @@ #define EVSYNC_EVEN_IRQ_SHIFT 2 #define EVSYNC_ODD_IRQ_SHIFT 3 #define FRAMEDONE2_IRQ_SHIFT 22 +#define FRAMEDONE3_IRQ_SHIFT 30 #define FRAMEDONETV_IRQ_SHIFT 24 /* @@ -376,7 +378,7 @@ int __init omap_display_init(struct omap_dss_board_info *board_data) static void dispc_disable_outputs(void) { u32 v, irq_mask = 0; - bool lcd_en, digit_en, lcd2_en = false; + bool lcd_en, digit_en, lcd2_en = false, lcd3_en = false; int i; struct omap_dss_dispc_dev_attr *da; struct omap_hwmod *oh; @@ -405,7 +407,13 @@ static void dispc_disable_outputs(void) lcd2_en = v & LCD_EN_MASK; } - if (!(lcd_en | digit_en | lcd2_en)) + /* store value of LCDENABLE for LCD3 */ + if (da->manager_count > 3) { + v = omap_hwmod_read(oh, DISPC_CONTROL3); + lcd3_en = v & LCD_EN_MASK; + } + + if (!(lcd_en | digit_en | lcd2_en | lcd3_en)) return; /* no managers currently enabled */ /* @@ -426,10 +434,12 @@ static void dispc_disable_outputs(void) if (lcd2_en) irq_mask |= 1 << FRAMEDONE2_IRQ_SHIFT; + if (lcd3_en) + irq_mask |= 1 << FRAMEDONE3_IRQ_SHIFT; /* * clear any previous FRAMEDONE, FRAMEDONETV, - * EVSYNC_EVEN/ODD or FRAMEDONE2 interrupts + * EVSYNC_EVEN/ODD, FRAMEDONE2 or FRAMEDONE3 interrupts */ omap_hwmod_write(irq_mask, oh, DISPC_IRQSTATUS); @@ -445,12 +455,19 @@ static void dispc_disable_outputs(void) omap_hwmod_write(v, oh, DISPC_CONTROL2); } + /* disable LCD3 manager */ + if (da->manager_count > 3) { + v = omap_hwmod_read(oh, DISPC_CONTROL3); + v &= ~LCD_EN_MASK; + omap_hwmod_write(v, oh, DISPC_CONTROL3); + } + i = 0; while ((omap_hwmod_read(oh, DISPC_IRQSTATUS) & irq_mask) != irq_mask) { i++; if (i > FRAMEDONE_IRQ_TIMEOUT) { - pr_err("didn't get FRAMEDONE1/2 or TV interrupt\n"); + pr_err("didn't get FRAMEDONE1/2/3 or TV interrupt\n"); break; } mdelay(1); diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c index be697d4e084..a9675d8d182 100644 --- a/arch/arm/mach-omap2/hsmmc.c +++ b/arch/arm/mach-omap2/hsmmc.c @@ -315,7 +315,6 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c, mmc->slots[0].caps = c->caps; mmc->slots[0].pm_caps = c->pm_caps; mmc->slots[0].internal_clock = !c->ext_clock; - mmc->dma_mask = 0xffffffff; mmc->max_freq = c->max_freq; if (cpu_is_omap44xx()) mmc->reg_offset = OMAP4_MMC_REG_OFFSET; diff --git a/arch/arm/mach-spear3xx/spear300.c b/arch/arm/mach-spear3xx/spear300.c index 0f882ecb7d8..6ec30054996 100644 --- a/arch/arm/mach-spear3xx/spear300.c +++ b/arch/arm/mach-spear3xx/spear300.c @@ -120,182 +120,156 @@ struct pl08x_channel_data spear300_dma_info[] = { .min_signal = 2, .max_signal = 2, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart0_tx", .min_signal = 3, .max_signal = 3, .muxval = 0, - .cctl 
= 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp0_rx", .min_signal = 8, .max_signal = 8, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp0_tx", .min_signal = 9, .max_signal = 9, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "i2c_rx", .min_signal = 10, .max_signal = 10, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "i2c_tx", .min_signal = 11, .max_signal = 11, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "irda", .min_signal = 12, .max_signal = 12, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "adc", .min_signal = 13, .max_signal = 13, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "to_jpeg", .min_signal = 14, .max_signal = 14, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "from_jpeg", .min_signal = 15, .max_signal = 15, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras0_rx", .min_signal = 0, .max_signal = 0, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras0_tx", .min_signal = 1, .max_signal = 1, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras1_rx", .min_signal = 2, .max_signal = 2, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras1_tx", .min_signal = 3, .max_signal = 3, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras2_rx", .min_signal = 4, .max_signal = 4, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras2_tx", .min_signal = 5, .max_signal = 5, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras3_rx", .min_signal = 6, .max_signal = 6, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras3_tx", .min_signal = 7, .max_signal = 7, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras4_rx", .min_signal = 8, .max_signal = 8, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras4_tx", .min_signal = 9, .max_signal = 9, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras5_rx", .min_signal = 10, .max_signal = 10, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras5_tx", .min_signal = 11, .max_signal = 11, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras6_rx", .min_signal = 12, .max_signal = 12, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras6_tx", .min_signal = 13, .max_signal = 13, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras7_rx", .min_signal = 14, .max_signal = 14, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras7_tx", .min_signal = 15, .max_signal = 15, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, }; diff --git a/arch/arm/mach-spear3xx/spear310.c b/arch/arm/mach-spear3xx/spear310.c index bbcf4571d36..1d0e435b904 100644 --- a/arch/arm/mach-spear3xx/spear310.c +++ b/arch/arm/mach-spear3xx/spear310.c @@ -205,182 +205,156 @@ struct pl08x_channel_data spear310_dma_info[] = { .min_signal = 2, .max_signal = 2, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart0_tx", .min_signal = 3, .max_signal = 3, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp0_rx", .min_signal = 8, .max_signal = 8, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp0_tx", .min_signal = 9, .max_signal = 9, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "i2c_rx", .min_signal = 10, .max_signal = 
10, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "i2c_tx", .min_signal = 11, .max_signal = 11, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "irda", .min_signal = 12, .max_signal = 12, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "adc", .min_signal = 13, .max_signal = 13, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "to_jpeg", .min_signal = 14, .max_signal = 14, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "from_jpeg", .min_signal = 15, .max_signal = 15, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart1_rx", .min_signal = 0, .max_signal = 0, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart1_tx", .min_signal = 1, .max_signal = 1, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart2_rx", .min_signal = 2, .max_signal = 2, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart2_tx", .min_signal = 3, .max_signal = 3, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart3_rx", .min_signal = 4, .max_signal = 4, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart3_tx", .min_signal = 5, .max_signal = 5, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart4_rx", .min_signal = 6, .max_signal = 6, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart4_tx", .min_signal = 7, .max_signal = 7, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart5_rx", .min_signal = 8, .max_signal = 8, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart5_tx", .min_signal = 9, .max_signal = 9, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras5_rx", .min_signal = 10, .max_signal = 10, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras5_tx", .min_signal = 11, .max_signal = 11, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras6_rx", .min_signal = 12, .max_signal = 12, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras6_tx", .min_signal = 13, .max_signal = 13, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras7_rx", .min_signal = 14, .max_signal = 14, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras7_tx", .min_signal = 15, .max_signal = 15, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, }; diff --git a/arch/arm/mach-spear3xx/spear320.c b/arch/arm/mach-spear3xx/spear320.c index 88d483bcd66..fd823c62457 100644 --- a/arch/arm/mach-spear3xx/spear320.c +++ b/arch/arm/mach-spear3xx/spear320.c @@ -213,182 +213,156 @@ struct pl08x_channel_data spear320_dma_info[] = { .min_signal = 2, .max_signal = 2, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart0_tx", .min_signal = 3, .max_signal = 3, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp0_rx", .min_signal = 8, .max_signal = 8, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp0_tx", .min_signal = 9, .max_signal = 9, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "i2c0_rx", .min_signal = 10, .max_signal = 10, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "i2c0_tx", .min_signal = 11, .max_signal = 11, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "irda", .min_signal = 12, .max_signal = 12, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = 
"adc", .min_signal = 13, .max_signal = 13, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "to_jpeg", .min_signal = 14, .max_signal = 14, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "from_jpeg", .min_signal = 15, .max_signal = 15, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp1_rx", .min_signal = 0, .max_signal = 0, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ssp1_tx", .min_signal = 1, .max_signal = 1, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ssp2_rx", .min_signal = 2, .max_signal = 2, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ssp2_tx", .min_signal = 3, .max_signal = 3, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "uart1_rx", .min_signal = 4, .max_signal = 4, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "uart1_tx", .min_signal = 5, .max_signal = 5, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "uart2_rx", .min_signal = 6, .max_signal = 6, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "uart2_tx", .min_signal = 7, .max_signal = 7, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "i2c1_rx", .min_signal = 8, .max_signal = 8, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "i2c1_tx", .min_signal = 9, .max_signal = 9, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "i2c2_rx", .min_signal = 10, .max_signal = 10, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "i2c2_tx", .min_signal = 11, .max_signal = 11, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "i2s_rx", .min_signal = 12, .max_signal = 12, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "i2s_tx", .min_signal = 13, .max_signal = 13, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "rs485_rx", .min_signal = 14, .max_signal = 14, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "rs485_tx", .min_signal = 15, .max_signal = 15, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, }; diff --git a/arch/arm/mach-spear3xx/spear3xx.c b/arch/arm/mach-spear3xx/spear3xx.c index 66db5f13af8..98144baf888 100644 --- a/arch/arm/mach-spear3xx/spear3xx.c +++ b/arch/arm/mach-spear3xx/spear3xx.c @@ -46,7 +46,8 @@ struct pl022_ssp_controller pl022_plat_data = { struct pl08x_platform_data pl080_plat_data = { .memcpy_channel = { .bus_id = "memcpy", - .cctl = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \ + .cctl_memcpy = + (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \ PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT | \ PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT | \ PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT | \ diff --git a/arch/arm/mach-spear6xx/spear6xx.c b/arch/arm/mach-spear6xx/spear6xx.c index 9af67d003c6..5a5a52db252 100644 --- a/arch/arm/mach-spear6xx/spear6xx.c +++ b/arch/arm/mach-spear6xx/spear6xx.c @@ -36,336 +36,288 @@ static struct pl08x_channel_data spear600_dma_info[] = { .min_signal = 0, .max_signal = 0, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp1_tx", .min_signal = 1, .max_signal = 1, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart0_rx", .min_signal = 2, .max_signal = 2, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart0_tx", .min_signal = 3, .max_signal = 3, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart1_rx", 
.min_signal = 4, .max_signal = 4, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart1_tx", .min_signal = 5, .max_signal = 5, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp2_rx", .min_signal = 6, .max_signal = 6, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ssp2_tx", .min_signal = 7, .max_signal = 7, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ssp0_rx", .min_signal = 8, .max_signal = 8, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp0_tx", .min_signal = 9, .max_signal = 9, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "i2c_rx", .min_signal = 10, .max_signal = 10, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "i2c_tx", .min_signal = 11, .max_signal = 11, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "irda", .min_signal = 12, .max_signal = 12, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "adc", .min_signal = 13, .max_signal = 13, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "to_jpeg", .min_signal = 14, .max_signal = 14, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "from_jpeg", .min_signal = 15, .max_signal = 15, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras0_rx", .min_signal = 0, .max_signal = 0, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras0_tx", .min_signal = 1, .max_signal = 1, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras1_rx", .min_signal = 2, .max_signal = 2, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras1_tx", .min_signal = 3, .max_signal = 3, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras2_rx", .min_signal = 4, .max_signal = 4, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras2_tx", .min_signal = 5, .max_signal = 5, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras3_rx", .min_signal = 6, .max_signal = 6, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras3_tx", .min_signal = 7, .max_signal = 7, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras4_rx", .min_signal = 8, .max_signal = 8, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras4_tx", .min_signal = 9, .max_signal = 9, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras5_rx", .min_signal = 10, .max_signal = 10, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras5_tx", .min_signal = 11, .max_signal = 11, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras6_rx", .min_signal = 12, .max_signal = 12, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras6_tx", .min_signal = 13, .max_signal = 13, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras7_rx", .min_signal = 14, .max_signal = 14, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras7_tx", .min_signal = 15, .max_signal = 15, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ext0_rx", .min_signal = 0, .max_signal = 0, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext0_tx", .min_signal = 1, .max_signal = 1, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext1_rx", .min_signal = 2, .max_signal = 2, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext1_tx", 
.min_signal = 3, .max_signal = 3, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext2_rx", .min_signal = 4, .max_signal = 4, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext2_tx", .min_signal = 5, .max_signal = 5, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext3_rx", .min_signal = 6, .max_signal = 6, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext3_tx", .min_signal = 7, .max_signal = 7, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext4_rx", .min_signal = 8, .max_signal = 8, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext4_tx", .min_signal = 9, .max_signal = 9, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext5_rx", .min_signal = 10, .max_signal = 10, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext5_tx", .min_signal = 11, .max_signal = 11, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext6_rx", .min_signal = 12, .max_signal = 12, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext6_tx", .min_signal = 13, .max_signal = 13, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext7_rx", .min_signal = 14, .max_signal = 14, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext7_tx", .min_signal = 15, .max_signal = 15, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, }; @@ -373,7 +325,8 @@ static struct pl08x_channel_data spear600_dma_info[] = { struct pl08x_platform_data pl080_plat_data = { .memcpy_channel = { .bus_id = "memcpy", - .cctl = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \ + .cctl_memcpy = + (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \ PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT | \ PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT | \ PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT | \ diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S index 845f461f8ec..c2021139cb5 100644 --- a/arch/arm/mm/tlb-v7.S +++ b/arch/arm/mm/tlb-v7.S @@ -38,11 +38,19 @@ ENTRY(v7wbi_flush_user_tlb_range) dsb mov r0, r0, lsr #PAGE_SHIFT @ align address mov r1, r1, lsr #PAGE_SHIFT +#ifdef CONFIG_ARM_ERRATA_720789 + mov r3, #0 +#else asid r3, r3 @ mask ASID +#endif orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA mov r1, r1, lsl #PAGE_SHIFT 1: +#ifdef CONFIG_ARM_ERRATA_720789 + ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate U MVA all ASID (shareable) +#else ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable) +#endif ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA add r0, r0, #PAGE_SZ @@ -67,7 +75,11 @@ ENTRY(v7wbi_flush_kern_tlb_range) mov r0, r0, lsl #PAGE_SHIFT mov r1, r1, lsl #PAGE_SHIFT 1: +#ifdef CONFIG_ARM_ERRATA_720789 + ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate U MVA all ASID (shareable) +#else ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable) +#endif ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA add r0, r0, #PAGE_SZ cmp r0, r1 diff --git a/arch/arm/plat-omap/include/plat/mmc.h b/arch/arm/plat-omap/include/plat/mmc.h index 5493bd95da5..eb3e4d55534 100644 --- a/arch/arm/plat-omap/include/plat/mmc.h +++ b/arch/arm/plat-omap/include/plat/mmc.h @@ -81,8 +81,6 @@ struct omap_mmc_platform_data { /* Return context loss count due to PM states changing */ int (*get_context_loss_count)(struct device *dev); - u64 dma_mask; - /* Integrating attributes from the omap_hwmod layer */ u8 controller_flags; diff --git 
a/arch/arm/plat-spear/include/plat/pl080.h b/arch/arm/plat-spear/include/plat/pl080.h index 2bc6b54460a..eb6590ded40 100644 --- a/arch/arm/plat-spear/include/plat/pl080.h +++ b/arch/arm/plat-spear/include/plat/pl080.h @@ -14,8 +14,8 @@ #ifndef __PLAT_PL080_H #define __PLAT_PL080_H -struct pl08x_dma_chan; -int pl080_get_signal(struct pl08x_dma_chan *ch); -void pl080_put_signal(struct pl08x_dma_chan *ch); +struct pl08x_channel_data; +int pl080_get_signal(const struct pl08x_channel_data *cd); +void pl080_put_signal(const struct pl08x_channel_data *cd, int signal); #endif /* __PLAT_PL080_H */ diff --git a/arch/arm/plat-spear/pl080.c b/arch/arm/plat-spear/pl080.c index 12cf27f935f..cfa1199d0f4 100644 --- a/arch/arm/plat-spear/pl080.c +++ b/arch/arm/plat-spear/pl080.c @@ -27,9 +27,8 @@ struct { unsigned char val; } signals[16] = {{0, 0}, }; -int pl080_get_signal(struct pl08x_dma_chan *ch) +int pl080_get_signal(const struct pl08x_channel_data *cd) { - const struct pl08x_channel_data *cd = ch->cd; unsigned int signal = cd->min_signal, val; unsigned long flags; @@ -63,18 +62,17 @@ int pl080_get_signal(struct pl08x_dma_chan *ch) return signal; } -void pl080_put_signal(struct pl08x_dma_chan *ch) +void pl080_put_signal(const struct pl08x_channel_data *cd, int signal) { - const struct pl08x_channel_data *cd = ch->cd; unsigned long flags; spin_lock_irqsave(&lock, flags); /* if signal is not used */ - if (!signals[cd->min_signal].busy) + if (!signals[signal].busy) BUG(); - signals[cd->min_signal].busy--; + signals[signal].busy--; spin_unlock_irqrestore(&lock, flags); } diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S index 4fa9903b83c..cc926c98598 100644 --- a/arch/arm/vfp/entry.S +++ b/arch/arm/vfp/entry.S @@ -7,18 +7,20 @@ * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. - * - * Basic entry code, called from the kernel's undefined instruction trap. - * r0 = faulted instruction - * r5 = faulted PC+4 - * r9 = successful return - * r10 = thread_info structure - * lr = failure return */ #include <asm/thread_info.h> #include <asm/vfpmacros.h> #include "../kernel/entry-header.S" +@ VFP entry point. +@ +@ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb) +@ r2 = PC value to resume execution after successful emulation +@ r9 = normal "successful" return address +@ r10 = this threads thread_info structure +@ lr = unrecognised instruction return address +@ IRQs disabled. +@ ENTRY(do_vfp) #ifdef CONFIG_PREEMPT ldr r4, [r10, #TI_PREEMPT] @ get preempt count diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S index d50f0e486cf..ea0349f6358 100644 --- a/arch/arm/vfp/vfphw.S +++ b/arch/arm/vfp/vfphw.S @@ -62,13 +62,13 @@ @ VFP hardware support entry point. @ -@ r0 = faulted instruction -@ r2 = faulted PC+4 -@ r9 = successful return +@ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb) +@ r2 = PC value to resume execution after successful emulation +@ r9 = normal "successful" return address @ r10 = vfp_state union @ r11 = CPU number -@ lr = failure return - +@ lr = unrecognised instruction return address +@ IRQs enabled. 
ENTRY(vfp_support_entry) DBGSTR3 "instr %08x pc %08x state %p", r0, r2, r10 @@ -162,9 +162,12 @@ vfp_hw_state_valid: @ exception before retrying branch @ out before setting an FPEXC that @ stops us reading stuff - VFPFMXR FPEXC, r1 @ restore FPEXC last - sub r2, r2, #4 - str r2, [sp, #S_PC] @ retry the instruction + VFPFMXR FPEXC, r1 @ Restore FPEXC last + sub r2, r2, #4 @ Retry current instruction - if Thumb + str r2, [sp, #S_PC] @ mode it's two 16-bit instructions, + @ else it's one 32-bit instruction, so + @ always subtract 4 from the following + @ instruction address. #ifdef CONFIG_PREEMPT get_thread_info r10 ldr r4, [r10, #TI_PREEMPT] @ get preempt count diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index 586961929e9..fb849d044bd 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -457,10 +457,16 @@ static int vfp_pm_suspend(void) /* disable, just in case */ fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); + } else if (vfp_current_hw_state[ti->cpu]) { +#ifndef CONFIG_SMP + fmxr(FPEXC, fpexc | FPEXC_EN); + vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc); + fmxr(FPEXC, fpexc); +#endif } /* clear any information we had about last context state */ - memset(vfp_current_hw_state, 0, sizeof(vfp_current_hw_state)); + vfp_current_hw_state[ti->cpu] = NULL; return 0; } diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index e3efc06e640..331d574df99 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -77,6 +77,7 @@ config AR7 select SYS_SUPPORTS_ZBOOT_UART16550 select ARCH_REQUIRE_GPIOLIB select VLYNQ + select HAVE_CLK help Support for the Texas Instruments AR7 System-on-a-Chip family: TNETD7100, 7200 and 7300. @@ -124,6 +125,7 @@ config BCM63XX select SYS_HAS_EARLY_PRINTK select SWAP_IO_SPACE select ARCH_REQUIRE_GPIOLIB + select HAVE_CLK help Support for BCM63XX based boards diff --git a/arch/mips/include/asm/clock.h b/arch/mips/include/asm/clock.h index 83894aa7932..c9456e7a728 100644 --- a/arch/mips/include/asm/clock.h +++ b/arch/mips/include/asm/clock.h @@ -50,15 +50,4 @@ void clk_recalc_rate(struct clk *); int clk_register(struct clk *); void clk_unregister(struct clk *); -/* the exported API, in addition to clk_set_rate */ -/** - * clk_set_rate_ex - set the clock rate for a clock source, with additional parameter - * @clk: clock source - * @rate: desired clock rate in Hz - * @algo_id: algorithm id to be passed down to ops->set_rate - * - * Returns success (0) or negative errno. - */ -int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id); - #endif /* __ASM_MIPS_CLOCK_H */ diff --git a/arch/mips/include/asm/mach-loongson/loongson.h b/arch/mips/include/asm/mach-loongson/loongson.h index 06367c37e1b..5222a007bc2 100644 --- a/arch/mips/include/asm/mach-loongson/loongson.h +++ b/arch/mips/include/asm/mach-loongson/loongson.h @@ -245,7 +245,6 @@ static inline void do_perfcnt_IRQ(void) #ifdef CONFIG_CPU_SUPPORTS_CPUFREQ #include <linux/cpufreq.h> -extern void loongson2_cpu_wait(void); extern struct cpufreq_frequency_table loongson2_clockmod_table[]; /* Chip Config */ diff --git a/arch/mips/kernel/cpufreq/Makefile b/arch/mips/kernel/cpufreq/Makefile index c3479a432ef..05a5715ee38 100644 --- a/arch/mips/kernel/cpufreq/Makefile +++ b/arch/mips/kernel/cpufreq/Makefile @@ -2,4 +2,4 @@ # Makefile for the Linux/MIPS cpufreq. 
# -obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o loongson2_clock.o +obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o diff --git a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c index ae5db206347..e7c98e2b78b 100644 --- a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c +++ b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c @@ -19,7 +19,7 @@ #include <asm/clock.h> -#include <loongson.h> +#include <asm/mach-loongson/loongson.h> static uint nowait; @@ -181,6 +181,25 @@ static struct platform_driver platform_driver = { .id_table = platform_device_ids, }; +/* + * This is the simple version of Loongson-2 wait. Maybe we need to do this + * in an interrupt-disabled context. + */ + +static DEFINE_SPINLOCK(loongson2_wait_lock); + +static void loongson2_cpu_wait(void) +{ + unsigned long flags; + u32 cpu_freq; + + spin_lock_irqsave(&loongson2_wait_lock, flags); + cpu_freq = LOONGSON_CHIPCFG0; + LOONGSON_CHIPCFG0 &= ~0x7; /* Put CPU into wait mode */ + LOONGSON_CHIPCFG0 = cpu_freq; /* Restore CPU state */ + spin_unlock_irqrestore(&loongson2_wait_lock, flags); +} + static int __init cpufreq_init(void) { int ret; diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c index d3bcc33f469..ce2f129b081 100644 --- a/arch/mips/lantiq/clk.c +++ b/arch/mips/lantiq/clk.c @@ -135,6 +135,11 @@ void clk_deactivate(struct clk *clk) } EXPORT_SYMBOL(clk_deactivate); +struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec) +{ + return NULL; +} + static inline u32 get_counter_resolution(void) { u32 res; diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c index d185e8477fd..6cfd6117fbf 100644 --- a/arch/mips/lantiq/prom.c +++ b/arch/mips/lantiq/prom.c @@ -8,7 +8,10 @@ #include <linux/export.h> #include <linux/clk.h> +#include <linux/bootmem.h> #include <linux/of_platform.h> +#include <linux/of_fdt.h> + #include <asm/bootinfo.h> #include <asm/time.h> @@ -70,6 +73,25 @@ void __init plat_mem_setup(void) __dt_setup_arch(&__dtb_start); } +void __init device_tree_init(void) +{ + unsigned long base, size; + + if (!initial_boot_params) + return; + + base = virt_to_phys((void *)initial_boot_params); + size = be32_to_cpu(initial_boot_params->totalsize); + + /* Before we do anything, let's reserve the dt blob */ + reserve_bootmem(base, size, BOOTMEM_DEFAULT); + + unflatten_device_tree(); + + /* free the space reserved for the dt blob */ + free_bootmem(base, size); +} + void __init prom_init(void) { /* call the soc specific detection code and get it to fill soc_info */ diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c index 83780f7c842..befbb760ab7 100644 --- a/arch/mips/lantiq/xway/sysctrl.c +++ b/arch/mips/lantiq/xway/sysctrl.c @@ -20,10 +20,12 @@ /* clock control register */ #define CGU_IFCCR 0x0018 +#define CGU_IFCCR_VR9 0x0024 /* system clock register */ #define CGU_SYS 0x0010 /* pci control register */ #define CGU_PCICR 0x0034 +#define CGU_PCICR_VR9 0x0038 /* ephy configuration register */ #define CGU_EPHY 0x10 /* power control register */ @@ -80,6 +82,9 @@ static void __iomem *pmu_membase; void __iomem *ltq_cgu_membase; void __iomem *ltq_ebu_membase; +static u32 ifccr = CGU_IFCCR; +static u32 pcicr = CGU_PCICR; + /* legacy function kept alive to ease clkdev transition */ void ltq_pmu_enable(unsigned int module) { @@ -103,14 +108,14 @@ EXPORT_SYMBOL(ltq_pmu_disable); /* enable a hw clock */ static int cgu_enable(struct clk *clk) { - ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) | 
clk->bits, ifccr); return 0; } /* disable a hw clock */ static void cgu_disable(struct clk *clk) { - ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) & ~clk->bits, CGU_IFCCR); + ltq_cgu_w32(ltq_cgu_r32(ifccr) & ~clk->bits, ifccr); } /* enable a clock gate */ @@ -138,22 +143,22 @@ static void pmu_disable(struct clk *clk) /* the pci enable helper */ static int pci_enable(struct clk *clk) { - unsigned int ifccr = ltq_cgu_r32(CGU_IFCCR); + unsigned int val = ltq_cgu_r32(ifccr); /* set bus clock speed */ if (of_machine_is_compatible("lantiq,ar9")) { - ifccr &= ~0x1f00000; + val &= ~0x1f00000; if (clk->rate == CLOCK_33M) - ifccr |= 0xe00000; + val |= 0xe00000; else - ifccr |= 0x700000; /* 62.5M */ + val |= 0x700000; /* 62.5M */ } else { - ifccr &= ~0xf00000; + val &= ~0xf00000; if (clk->rate == CLOCK_33M) - ifccr |= 0x800000; + val |= 0x800000; else - ifccr |= 0x400000; /* 62.5M */ + val |= 0x400000; /* 62.5M */ } - ltq_cgu_w32(ifccr, CGU_IFCCR); + ltq_cgu_w32(val, ifccr); pmu_enable(clk); return 0; } @@ -161,18 +166,16 @@ static int pci_enable(struct clk *clk) /* enable the external clock as a source */ static int pci_ext_enable(struct clk *clk) { - ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) & ~(1 << 16), - CGU_IFCCR); - ltq_cgu_w32((1 << 30), CGU_PCICR); + ltq_cgu_w32(ltq_cgu_r32(ifccr) & ~(1 << 16), ifccr); + ltq_cgu_w32((1 << 30), pcicr); return 0; } /* disable the external clock as a source */ static void pci_ext_disable(struct clk *clk) { - ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) | (1 << 16), - CGU_IFCCR); - ltq_cgu_w32((1 << 31) | (1 << 30), CGU_PCICR); + ltq_cgu_w32(ltq_cgu_r32(ifccr) | (1 << 16), ifccr); + ltq_cgu_w32((1 << 31) | (1 << 30), pcicr); } /* enable a clockout source */ @@ -184,11 +187,11 @@ static int clkout_enable(struct clk *clk) for (i = 0; i < 4; i++) { if (clk->rates[i] == clk->rate) { int shift = 14 - (2 * clk->module); - unsigned int ifccr = ltq_cgu_r32(CGU_IFCCR); + unsigned int val = ltq_cgu_r32(ifccr); - ifccr &= ~(3 << shift); - ifccr |= i << shift; - ltq_cgu_w32(ifccr, CGU_IFCCR); + val &= ~(3 << shift); + val |= i << shift; + ltq_cgu_w32(val, ifccr); return 0; } } @@ -336,8 +339,12 @@ void __init ltq_soc_init(void) clkdev_add_clkout(); /* add the soc dependent clocks */ - if (!of_machine_is_compatible("lantiq,vr9")) + if (of_machine_is_compatible("lantiq,vr9")) { + ifccr = CGU_IFCCR_VR9; + pcicr = CGU_PCICR_VR9; + } else { clkdev_add_pmu("1e180000.etop", NULL, 0, PMU_PPE); + } if (!of_machine_is_compatible("lantiq,ase")) { clkdev_add_pmu("1e100c00.serial", NULL, 0, PMU_ASC1); diff --git a/arch/mips/loongson/Kconfig b/arch/mips/loongson/Kconfig index aca93eed877..263beb9322a 100644 --- a/arch/mips/loongson/Kconfig +++ b/arch/mips/loongson/Kconfig @@ -41,6 +41,7 @@ config LEMOTE_MACH2F select CSRC_R4K if ! 
MIPS_EXTERNAL_TIMER select DMA_NONCOHERENT select GENERIC_ISA_DMA_SUPPORT_BROKEN + select HAVE_CLK select HW_HAS_PCI select I8259 select IRQ_CPU diff --git a/arch/mips/loongson/lemote-2f/Makefile b/arch/mips/loongson/lemote-2f/Makefile index 8699a53f047..4f9eaa328a1 100644 --- a/arch/mips/loongson/lemote-2f/Makefile +++ b/arch/mips/loongson/lemote-2f/Makefile @@ -2,7 +2,7 @@ # Makefile for lemote loongson2f family machines # -obj-y += machtype.o irq.o reset.o ec_kb3310b.o +obj-y += clock.o machtype.o irq.o reset.o ec_kb3310b.o # # Suspend Support diff --git a/arch/mips/kernel/cpufreq/loongson2_clock.c b/arch/mips/loongson/lemote-2f/clock.c index 5426779d9fd..bc739d4bab2 100644 --- a/arch/mips/kernel/cpufreq/loongson2_clock.c +++ b/arch/mips/loongson/lemote-2f/clock.c @@ -6,14 +6,17 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. */ - -#include <linux/module.h> +#include <linux/clk.h> #include <linux/cpufreq.h> -#include <linux/platform_device.h> +#include <linux/errno.h> +#include <linux/export.h> +#include <linux/init.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/spinlock.h> #include <asm/clock.h> - -#include <loongson.h> +#include <asm/mach-loongson/loongson.h> static LIST_HEAD(clock_list); static DEFINE_SPINLOCK(clock_lock); @@ -89,12 +92,6 @@ EXPORT_SYMBOL(clk_put); int clk_set_rate(struct clk *clk, unsigned long rate) { - return clk_set_rate_ex(clk, rate, 0); -} -EXPORT_SYMBOL_GPL(clk_set_rate); - -int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id) -{ int ret = 0; int regval; int i; @@ -103,7 +100,7 @@ int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id) unsigned long flags; spin_lock_irqsave(&clock_lock, flags); - ret = clk->ops->set_rate(clk, rate, algo_id); + ret = clk->ops->set_rate(clk, rate, 0); spin_unlock_irqrestore(&clock_lock, flags); } @@ -129,7 +126,7 @@ int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id) return ret; } -EXPORT_SYMBOL_GPL(clk_set_rate_ex); +EXPORT_SYMBOL_GPL(clk_set_rate); long clk_round_rate(struct clk *clk, unsigned long rate) { @@ -146,26 +143,3 @@ long clk_round_rate(struct clk *clk, unsigned long rate) return rate; } EXPORT_SYMBOL_GPL(clk_round_rate); - -/* - * This is the simple version of Loongson-2 wait, Maybe we need do this in - * interrupt disabled content - */ - -DEFINE_SPINLOCK(loongson2_wait_lock); -void loongson2_cpu_wait(void) -{ - u32 cpu_freq; - unsigned long flags; - - spin_lock_irqsave(&loongson2_wait_lock, flags); - cpu_freq = LOONGSON_CHIPCFG0; - LOONGSON_CHIPCFG0 &= ~0x7; /* Put CPU into wait mode */ - LOONGSON_CHIPCFG0 = cpu_freq; /* Restore CPU state */ - spin_unlock_irqrestore(&loongson2_wait_lock, flags); -} -EXPORT_SYMBOL_GPL(loongson2_cpu_wait); - -MODULE_AUTHOR("Yanhua <yanh@lemote.com>"); -MODULE_DESCRIPTION("cpufreq driver for Loongson 2F"); -MODULE_LICENSE("GPL"); diff --git a/arch/mips/loongson1/Kconfig b/arch/mips/loongson1/Kconfig index 237fa214de9..a9a14d6e81a 100644 --- a/arch/mips/loongson1/Kconfig +++ b/arch/mips/loongson1/Kconfig @@ -15,6 +15,7 @@ config LOONGSON1_LS1B select SYS_SUPPORTS_LITTLE_ENDIAN select SYS_SUPPORTS_HIGHMEM select SYS_HAS_EARLY_PRINTK + select HAVE_CLK endchoice diff --git a/arch/mips/loongson1/common/clock.c b/arch/mips/loongson1/common/clock.c index 2d98fb03059..1bbbbec1208 100644 --- a/arch/mips/loongson1/common/clock.c +++ b/arch/mips/loongson1/common/clock.c @@ -38,12 +38,28 @@ struct clk *clk_get(struct device *dev, const char *name) } EXPORT_SYMBOL(clk_get); +int 
clk_enable(struct clk *clk) +{ + return 0; +} +EXPORT_SYMBOL(clk_enable); + +void clk_disable(struct clk *clk) +{ +} +EXPORT_SYMBOL(clk_disable); + unsigned long clk_get_rate(struct clk *clk) { return clk->rate; } EXPORT_SYMBOL(clk_get_rate); +void clk_put(struct clk *clk) +{ +} +EXPORT_SYMBOL(clk_put); + static void pll_clk_init(struct clk *clk) { u32 pll; diff --git a/arch/mips/txx9/Kconfig b/arch/mips/txx9/Kconfig index 852ae4bb7a8..6d40bc78345 100644 --- a/arch/mips/txx9/Kconfig +++ b/arch/mips/txx9/Kconfig @@ -20,6 +20,7 @@ config MACH_TXX9 select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_LITTLE_ENDIAN select SYS_SUPPORTS_BIG_ENDIAN + select HAVE_CLK config TOSHIBA_JMR3927 bool "Toshiba JMR-TX3927 board" diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index d544d7816df..dba1ce235da 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -186,10 +186,13 @@ static void spufs_prune_dir(struct dentry *dir) static int spufs_rmdir(struct inode *parent, struct dentry *dir) { /* remove all entries */ + int res; spufs_prune_dir(dir); d_drop(dir); - - return simple_rmdir(parent, dir); + res = simple_rmdir(parent, dir); + /* We have to give up the mm_struct */ + spu_forget(SPUFS_I(dir->d_inode)->i_ctx); + return res; } static int spufs_fill_dir(struct dentry *dir, @@ -245,9 +248,6 @@ static int spufs_dir_close(struct inode *inode, struct file *file) mutex_unlock(&parent->i_mutex); WARN_ON(ret); - /* We have to give up the mm_struct */ - spu_forget(ctx); - return dcache_dir_close(inode, file); } @@ -450,28 +450,24 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, struct spu_context *neighbor; struct path path = {.mnt = mnt, .dentry = dentry}; - ret = -EPERM; if ((flags & SPU_CREATE_NOSCHED) && !capable(CAP_SYS_NICE)) - goto out_unlock; + return -EPERM; - ret = -EINVAL; if ((flags & (SPU_CREATE_NOSCHED | SPU_CREATE_ISOLATE)) == SPU_CREATE_ISOLATE) - goto out_unlock; + return -EINVAL; - ret = -ENODEV; if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader) - goto out_unlock; + return -ENODEV; gang = NULL; neighbor = NULL; affinity = flags & (SPU_CREATE_AFFINITY_MEM | SPU_CREATE_AFFINITY_SPU); if (affinity) { gang = SPUFS_I(inode)->i_gang; - ret = -EINVAL; if (!gang) - goto out_unlock; + return -EINVAL; mutex_lock(&gang->aff_mutex); neighbor = spufs_assert_affinity(flags, gang, aff_filp); if (IS_ERR(neighbor)) { @@ -492,22 +488,12 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, } ret = spufs_context_open(&path); - if (ret < 0) { + if (ret < 0) WARN_ON(spufs_rmdir(inode, dentry)); - if (affinity) - mutex_unlock(&gang->aff_mutex); - mutex_unlock(&inode->i_mutex); - spu_forget(SPUFS_I(dentry->d_inode)->i_ctx); - goto out; - } out_aff_unlock: if (affinity) mutex_unlock(&gang->aff_mutex); -out_unlock: - mutex_unlock(&inode->i_mutex); -out: - dput(dentry); return ret; } @@ -580,18 +566,13 @@ static int spufs_create_gang(struct inode *inode, int ret; ret = spufs_mkgang(inode, dentry, mode & S_IRWXUGO); - if (ret) - goto out; - - ret = spufs_gang_open(&path); - if (ret < 0) { - int err = simple_rmdir(inode, dentry); - WARN_ON(err); + if (!ret) { + ret = spufs_gang_open(&path); + if (ret < 0) { + int err = simple_rmdir(inode, dentry); + WARN_ON(err); + } } - -out: - mutex_unlock(&inode->i_mutex); - dput(dentry); return ret; } @@ -601,40 +582,32 @@ static struct file_system_type spufs_type; long spufs_create(struct path *path, struct dentry *dentry, unsigned int flags, 
umode_t mode, struct file *filp) { + struct inode *dir = path->dentry->d_inode; int ret; - ret = -EINVAL; /* check if we are on spufs */ if (path->dentry->d_sb->s_type != &spufs_type) - goto out; + return -EINVAL; /* don't accept undefined flags */ if (flags & (~SPU_CREATE_FLAG_ALL)) - goto out; + return -EINVAL; /* only threads can be underneath a gang */ - if (path->dentry != path->dentry->d_sb->s_root) { - if ((flags & SPU_CREATE_GANG) || - !SPUFS_I(path->dentry->d_inode)->i_gang) - goto out; - } + if (path->dentry != path->dentry->d_sb->s_root) + if ((flags & SPU_CREATE_GANG) || !SPUFS_I(dir)->i_gang) + return -EINVAL; mode &= ~current_umask(); if (flags & SPU_CREATE_GANG) - ret = spufs_create_gang(path->dentry->d_inode, - dentry, path->mnt, mode); + ret = spufs_create_gang(dir, dentry, path->mnt, mode); else - ret = spufs_create_context(path->dentry->d_inode, - dentry, path->mnt, flags, mode, + ret = spufs_create_context(dir, dentry, path->mnt, flags, mode, filp); if (ret >= 0) - fsnotify_mkdir(path->dentry->d_inode, dentry); - return ret; + fsnotify_mkdir(dir, dentry); -out: - mutex_unlock(&path->dentry->d_inode->i_mutex); - dput(dentry); return ret; } diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c index 5665dcc382c..5b7d8ffbf89 100644 --- a/arch/powerpc/platforms/cell/spufs/syscalls.c +++ b/arch/powerpc/platforms/cell/spufs/syscalls.c @@ -70,7 +70,7 @@ static long do_spu_create(const char __user *pathname, unsigned int flags, ret = PTR_ERR(dentry); if (!IS_ERR(dentry)) { ret = spufs_create(&path, dentry, flags, mode, neighbor); - path_put(&path); + done_path_create(&path, dentry); } return ret; diff --git a/arch/um/defconfig b/arch/um/defconfig index fec0d5d2746..08107a79506 100644 --- a/arch/um/defconfig +++ b/arch/um/defconfig @@ -163,7 +163,7 @@ CONFIG_CGROUP_SCHED=y CONFIG_FAIR_GROUP_SCHED=y # CONFIG_CFS_BANDWIDTH is not set # CONFIG_RT_GROUP_SCHED is not set -CONFIG_BLK_CGROUP=m +CONFIG_BLK_CGROUP=y # CONFIG_DEBUG_BLK_CGROUP is not set # CONFIG_CHECKPOINT_RESTORE is not set CONFIG_NAMESPACES=y diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c index 45e248c2f43..87eebfe03c6 100644 --- a/arch/um/drivers/chan_kern.c +++ b/arch/um/drivers/chan_kern.c @@ -150,9 +150,11 @@ void chan_enable_winch(struct chan *chan, struct tty_struct *tty) static void line_timer_cb(struct work_struct *work) { struct line *line = container_of(work, struct line, task.work); + struct tty_struct *tty = tty_port_tty_get(&line->port); if (!line->throttled) - chan_interrupt(line, line->tty, line->driver->read_irq); + chan_interrupt(line, tty, line->driver->read_irq); + tty_kref_put(tty); } int enable_chan(struct line *line) diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c index ac9d25c8dc0..bbaf2c59830 100644 --- a/arch/um/drivers/line.c +++ b/arch/um/drivers/line.c @@ -19,9 +19,11 @@ static irqreturn_t line_interrupt(int irq, void *data) { struct chan *chan = data; struct line *line = chan->line; + struct tty_struct *tty = tty_port_tty_get(&line->port); if (line) - chan_interrupt(line, line->tty, irq); + chan_interrupt(line, tty, irq); + tty_kref_put(tty); return IRQ_HANDLED; } @@ -219,92 +221,6 @@ void line_set_termios(struct tty_struct *tty, struct ktermios * old) /* nothing */ } -static const struct { - int cmd; - char *level; - char *name; -} tty_ioctls[] = { - /* don't print these, they flood the log ... 
*/ - { TCGETS, NULL, "TCGETS" }, - { TCSETS, NULL, "TCSETS" }, - { TCSETSW, NULL, "TCSETSW" }, - { TCFLSH, NULL, "TCFLSH" }, - { TCSBRK, NULL, "TCSBRK" }, - - /* general tty stuff */ - { TCSETSF, KERN_DEBUG, "TCSETSF" }, - { TCGETA, KERN_DEBUG, "TCGETA" }, - { TIOCMGET, KERN_DEBUG, "TIOCMGET" }, - { TCSBRKP, KERN_DEBUG, "TCSBRKP" }, - { TIOCMSET, KERN_DEBUG, "TIOCMSET" }, - - /* linux-specific ones */ - { TIOCLINUX, KERN_INFO, "TIOCLINUX" }, - { KDGKBMODE, KERN_INFO, "KDGKBMODE" }, - { KDGKBTYPE, KERN_INFO, "KDGKBTYPE" }, - { KDSIGACCEPT, KERN_INFO, "KDSIGACCEPT" }, -}; - -int line_ioctl(struct tty_struct *tty, unsigned int cmd, - unsigned long arg) -{ - int ret; - int i; - - ret = 0; - switch(cmd) { -#ifdef TIOCGETP - case TIOCGETP: - case TIOCSETP: - case TIOCSETN: -#endif -#ifdef TIOCGETC - case TIOCGETC: - case TIOCSETC: -#endif -#ifdef TIOCGLTC - case TIOCGLTC: - case TIOCSLTC: -#endif - /* Note: these are out of date as we now have TCGETS2 etc but this - whole lot should probably go away */ - case TCGETS: - case TCSETSF: - case TCSETSW: - case TCSETS: - case TCGETA: - case TCSETAF: - case TCSETAW: - case TCSETA: - case TCXONC: - case TCFLSH: - case TIOCOUTQ: - case TIOCINQ: - case TIOCGLCKTRMIOS: - case TIOCSLCKTRMIOS: - case TIOCPKT: - case TIOCGSOFTCAR: - case TIOCSSOFTCAR: - return -ENOIOCTLCMD; -#if 0 - case TCwhatever: - /* do something */ - break; -#endif - default: - for (i = 0; i < ARRAY_SIZE(tty_ioctls); i++) - if (cmd == tty_ioctls[i].cmd) - break; - if (i == ARRAY_SIZE(tty_ioctls)) { - printk(KERN_ERR "%s: %s: unknown ioctl: 0x%x\n", - __func__, tty->name, cmd); - } - ret = -ENOIOCTLCMD; - break; - } - return ret; -} - void line_throttle(struct tty_struct *tty) { struct line *line = tty->driver_data; @@ -333,7 +249,7 @@ static irqreturn_t line_write_interrupt(int irq, void *data) { struct chan *chan = data; struct line *line = chan->line; - struct tty_struct *tty = line->tty; + struct tty_struct *tty; int err; /* @@ -352,10 +268,13 @@ static irqreturn_t line_write_interrupt(int irq, void *data) } spin_unlock(&line->lock); + tty = tty_port_tty_get(&line->port); if (tty == NULL) return IRQ_NONE; tty_wakeup(tty); + tty_kref_put(tty); + return IRQ_HANDLED; } @@ -377,43 +296,14 @@ int line_setup_irq(int fd, int input, int output, struct line *line, void *data) return err; } -/* - * Normally, a driver like this can rely mostly on the tty layer - * locking, particularly when it comes to the driver structure. - * However, in this case, mconsole requests can come in "from the - * side", and race with opens and closes. - * - * mconsole config requests will want to be sure the device isn't in - * use, and get_config, open, and close will want a stable - * configuration. The checking and modification of the configuration - * is done under a spinlock. Checking whether the device is in use is - * line->tty->count > 1, also under the spinlock. - * - * line->count serves to decide whether the device should be enabled or - * disabled on the host. If it's equal to 0, then we are doing the - * first open or last close. Otherwise, open and close just return. 
- */ - -int line_open(struct line *lines, struct tty_struct *tty) +static int line_activate(struct tty_port *port, struct tty_struct *tty) { - struct line *line = &lines[tty->index]; - int err = -ENODEV; - - mutex_lock(&line->count_lock); - if (!line->valid) - goto out_unlock; - - err = 0; - if (line->count++) - goto out_unlock; - - BUG_ON(tty->driver_data); - tty->driver_data = line; - line->tty = tty; + int ret; + struct line *line = tty->driver_data; - err = enable_chan(line); - if (err) /* line_close() will be called by our caller */ - goto out_unlock; + ret = enable_chan(line); + if (ret) + return ret; if (!line->sigio) { chan_enable_winch(line->chan_out, tty); @@ -421,44 +311,60 @@ int line_open(struct line *lines, struct tty_struct *tty) } chan_window_size(line, &tty->winsize.ws_row, - &tty->winsize.ws_col); -out_unlock: - mutex_unlock(&line->count_lock); - return err; + &tty->winsize.ws_col); + + return 0; } -static void unregister_winch(struct tty_struct *tty); +static const struct tty_port_operations line_port_ops = { + .activate = line_activate, +}; -void line_close(struct tty_struct *tty, struct file * filp) +int line_open(struct tty_struct *tty, struct file *filp) { struct line *line = tty->driver_data; - /* - * If line_open fails (and tty->driver_data is never set), - * tty_open will call line_close. So just return in this case. - */ - if (line == NULL) - return; + return tty_port_open(&line->port, tty, filp); +} - /* We ignore the error anyway! */ - flush_buffer(line); +int line_install(struct tty_driver *driver, struct tty_struct *tty, + struct line *line) +{ + int ret; - mutex_lock(&line->count_lock); - BUG_ON(!line->valid); + ret = tty_standard_install(driver, tty); + if (ret) + return ret; - if (--line->count) - goto out_unlock; + tty->driver_data = line; - line->tty = NULL; - tty->driver_data = NULL; + return 0; +} + +static void unregister_winch(struct tty_struct *tty); + +void line_cleanup(struct tty_struct *tty) +{ + struct line *line = tty->driver_data; if (line->sigio) { unregister_winch(tty); line->sigio = 0; } +} + +void line_close(struct tty_struct *tty, struct file * filp) +{ + struct line *line = tty->driver_data; -out_unlock: - mutex_unlock(&line->count_lock); + tty_port_close(&line->port, tty, filp); +} + +void line_hangup(struct tty_struct *tty) +{ + struct line *line = tty->driver_data; + + tty_port_hangup(&line->port); } void close_lines(struct line *lines, int nlines) @@ -476,9 +382,7 @@ int setup_one_line(struct line *lines, int n, char *init, struct tty_driver *driver = line->driver->driver; int err = -EINVAL; - mutex_lock(&line->count_lock); - - if (line->count) { + if (line->port.count) { *error_out = "Device is already open"; goto out; } @@ -519,7 +423,6 @@ int setup_one_line(struct line *lines, int n, char *init, } } out: - mutex_unlock(&line->count_lock); return err; } @@ -607,13 +510,17 @@ int line_get_config(char *name, struct line *lines, unsigned int num, char *str, line = &lines[dev]; - mutex_lock(&line->count_lock); if (!line->valid) CONFIG_CHUNK(str, size, n, "none", 1); - else if (line->tty == NULL) - CONFIG_CHUNK(str, size, n, line->init_str, 1); - else n = chan_config_string(line, str, size, error_out); - mutex_unlock(&line->count_lock); + else { + struct tty_struct *tty = tty_port_tty_get(&line->port); + if (tty == NULL) { + CONFIG_CHUNK(str, size, n, line->init_str, 1); + } else { + n = chan_config_string(line, str, size, error_out); + tty_kref_put(tty); + } + } return n; } @@ -663,8 +570,9 @@ int register_lines(struct line_driver 
*line_driver, driver->init_termios = tty_std_termios; for (i = 0; i < nlines; i++) { + tty_port_init(&lines[i].port); + lines[i].port.ops = &line_port_ops; spin_lock_init(&lines[i].lock); - mutex_init(&lines[i].count_lock); lines[i].driver = line_driver; INIT_LIST_HEAD(&lines[i].chan_list); } diff --git a/arch/um/drivers/line.h b/arch/um/drivers/line.h index 0a1834719db..bae95611e7a 100644 --- a/arch/um/drivers/line.h +++ b/arch/um/drivers/line.h @@ -32,9 +32,7 @@ struct line_driver { }; struct line { - struct tty_struct *tty; - struct mutex count_lock; - unsigned long count; + struct tty_port port; int valid; char *init_str; @@ -59,7 +57,11 @@ struct line { }; extern void line_close(struct tty_struct *tty, struct file * filp); -extern int line_open(struct line *lines, struct tty_struct *tty); +extern int line_open(struct tty_struct *tty, struct file *filp); +extern int line_install(struct tty_driver *driver, struct tty_struct *tty, + struct line *line); +extern void line_cleanup(struct tty_struct *tty); +extern void line_hangup(struct tty_struct *tty); extern int line_setup(char **conf, unsigned nlines, char **def, char *init, char *name); extern int line_write(struct tty_struct *tty, const unsigned char *buf, @@ -70,8 +72,6 @@ extern int line_chars_in_buffer(struct tty_struct *tty); extern void line_flush_buffer(struct tty_struct *tty); extern void line_flush_chars(struct tty_struct *tty); extern int line_write_room(struct tty_struct *tty); -extern int line_ioctl(struct tty_struct *tty, unsigned int cmd, - unsigned long arg); extern void line_throttle(struct tty_struct *tty); extern void line_unthrottle(struct tty_struct *tty); diff --git a/arch/um/drivers/ssl.c b/arch/um/drivers/ssl.c index e09801a1327..7e86f007012 100644 --- a/arch/um/drivers/ssl.c +++ b/arch/um/drivers/ssl.c @@ -87,40 +87,13 @@ static int ssl_remove(int n, char **error_out) error_out); } -static int ssl_open(struct tty_struct *tty, struct file *filp) -{ - int err = line_open(serial_lines, tty); - - if (err) - printk(KERN_ERR "Failed to open serial line %d, err = %d\n", - tty->index, err); - - return err; -} - -#if 0 -static void ssl_flush_buffer(struct tty_struct *tty) -{ - return; -} - -static void ssl_stop(struct tty_struct *tty) -{ - printk(KERN_ERR "Someone should implement ssl_stop\n"); -} - -static void ssl_start(struct tty_struct *tty) -{ - printk(KERN_ERR "Someone should implement ssl_start\n"); -} - -void ssl_hangup(struct tty_struct *tty) +static int ssl_install(struct tty_driver *driver, struct tty_struct *tty) { + return line_install(driver, tty, &serial_lines[tty->index]); } -#endif static const struct tty_operations ssl_ops = { - .open = ssl_open, + .open = line_open, .close = line_close, .write = line_write, .put_char = line_put_char, @@ -129,14 +102,11 @@ static const struct tty_operations ssl_ops = { .flush_buffer = line_flush_buffer, .flush_chars = line_flush_chars, .set_termios = line_set_termios, - .ioctl = line_ioctl, .throttle = line_throttle, .unthrottle = line_unthrottle, -#if 0 - .stop = ssl_stop, - .start = ssl_start, - .hangup = ssl_hangup, -#endif + .install = ssl_install, + .cleanup = line_cleanup, + .hangup = line_hangup, }; /* Changed by ssl_init and referenced by ssl_exit, which are both serialized diff --git a/arch/um/drivers/stdio_console.c b/arch/um/drivers/stdio_console.c index 7663541c372..929b99a261f 100644 --- a/arch/um/drivers/stdio_console.c +++ b/arch/um/drivers/stdio_console.c @@ -89,21 +89,17 @@ static int con_remove(int n, char **error_out) return line_remove(vts, 
ARRAY_SIZE(vts), n, error_out); } -static int con_open(struct tty_struct *tty, struct file *filp) -{ - int err = line_open(vts, tty); - if (err) - printk(KERN_ERR "Failed to open console %d, err = %d\n", - tty->index, err); - - return err; -} - /* Set in an initcall, checked in an exitcall */ static int con_init_done = 0; +static int con_install(struct tty_driver *driver, struct tty_struct *tty) +{ + return line_install(driver, tty, &vts[tty->index]); +} + static const struct tty_operations console_ops = { - .open = con_open, + .open = line_open, + .install = con_install, .close = line_close, .write = line_write, .put_char = line_put_char, @@ -112,9 +108,10 @@ static const struct tty_operations console_ops = { .flush_buffer = line_flush_buffer, .flush_chars = line_flush_chars, .set_termios = line_set_termios, - .ioctl = line_ioctl, .throttle = line_throttle, .unthrottle = line_unthrottle, + .cleanup = line_cleanup, + .hangup = line_hangup, }; static void uml_console_write(struct console *console, const char *string, diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index 20505cafa29..0643e5bc9f4 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c @@ -514,7 +514,7 @@ static inline int ubd_file_size(struct ubd *ubd_dev, __u64 *size_out) goto out; } - fd = os_open_file(ubd_dev->file, global_openflags, 0); + fd = os_open_file(ubd_dev->file, of_read(OPENFLAGS()), 0); if (fd < 0) return fd; diff --git a/arch/um/include/asm/ptrace-generic.h b/arch/um/include/asm/ptrace-generic.h index e786a6a3ec5..442f1d025dc 100644 --- a/arch/um/include/asm/ptrace-generic.h +++ b/arch/um/include/asm/ptrace-generic.h @@ -37,6 +37,8 @@ extern int putreg(struct task_struct *child, int regno, unsigned long value); extern int arch_copy_tls(struct task_struct *new); extern void clear_flushed_tls(struct task_struct *task); +extern void syscall_trace_enter(struct pt_regs *regs); +extern void syscall_trace_leave(struct pt_regs *regs); #endif diff --git a/arch/um/include/shared/as-layout.h b/arch/um/include/shared/as-layout.h index 896e1660217..86daa546181 100644 --- a/arch/um/include/shared/as-layout.h +++ b/arch/um/include/shared/as-layout.h @@ -60,7 +60,8 @@ extern unsigned long host_task_size; extern int linux_main(int argc, char **argv); -extern void (*sig_info[])(int, struct uml_pt_regs *); +struct siginfo; +extern void (*sig_info[])(int, struct siginfo *si, struct uml_pt_regs *); #endif diff --git a/arch/um/include/shared/irq_user.h b/arch/um/include/shared/irq_user.h index c6c784df267..2b6d703925b 100644 --- a/arch/um/include/shared/irq_user.h +++ b/arch/um/include/shared/irq_user.h @@ -20,7 +20,8 @@ struct irq_fd { enum { IRQ_READ, IRQ_WRITE }; -extern void sigio_handler(int sig, struct uml_pt_regs *regs); +struct siginfo; +extern void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs); extern void free_irq_by_fd(int fd); extern void reactivate_fd(int fd, int irqnum); extern void deactivate_fd(int fd, int irqnum); diff --git a/arch/um/include/shared/kern_util.h b/arch/um/include/shared/kern_util.h index 00965d06d2c..af6b6dc868b 100644 --- a/arch/um/include/shared/kern_util.h +++ b/arch/um/include/shared/kern_util.h @@ -9,6 +9,8 @@ #include "sysdep/ptrace.h" #include "sysdep/faultinfo.h" +struct siginfo; + extern int uml_exitcode; extern int ncpus; @@ -22,7 +24,7 @@ extern void free_stack(unsigned long stack, int order); extern int do_signal(void); extern void interrupt_end(void); -extern void relay_signal(int sig, struct uml_pt_regs *regs); +extern 
void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs); extern unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, struct uml_pt_regs *regs); @@ -33,9 +35,8 @@ extern unsigned int do_IRQ(int irq, struct uml_pt_regs *regs); extern int smp_sigio_handler(void); extern void initial_thread_cb(void (*proc)(void *), void *arg); extern int is_syscall(unsigned long addr); -extern void timer_handler(int sig, struct uml_pt_regs *regs); -extern void timer_handler(int sig, struct uml_pt_regs *regs); +extern void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs); extern int start_uml(void); extern void paging_init(void); @@ -59,9 +60,9 @@ extern unsigned long from_irq_stack(int nested); extern void syscall_trace(struct uml_pt_regs *regs, int entryexit); extern int singlestepping(void *t); -extern void segv_handler(int sig, struct uml_pt_regs *regs); -extern void bus_handler(int sig, struct uml_pt_regs *regs); -extern void winch(int sig, struct uml_pt_regs *regs); +extern void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs); +extern void bus_handler(int sig, struct siginfo *si, struct uml_pt_regs *regs); +extern void winch(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs); extern void fatal_sigsegv(void) __attribute__ ((noreturn)); diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c index 00506c3d5d6..9883026f073 100644 --- a/arch/um/kernel/irq.c +++ b/arch/um/kernel/irq.c @@ -30,7 +30,7 @@ static struct irq_fd **last_irq_ptr = &active_fds; extern void free_irqs(void); -void sigio_handler(int sig, struct uml_pt_regs *regs) +void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs) { struct irq_fd *irq_fd; int n; diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index ccb9a9d283f..57fc7028714 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c @@ -151,12 +151,10 @@ void new_thread_handler(void) * 0 if it just exits */ n = run_kernel_thread(fn, arg, &current->thread.exec_buf); - if (n == 1) { - /* Handle any immediate reschedules or signals */ - interrupt_end(); + if (n == 1) userspace(&current->thread.regs.regs); - } - else do_exit(0); + else + do_exit(0); } /* Called magically, see new_thread_handler above */ @@ -175,9 +173,6 @@ void fork_handler(void) current->thread.prev_sched = NULL; - /* Handle any immediate reschedules or signals */ - interrupt_end(); - userspace(&current->thread.regs.regs); } @@ -193,7 +188,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, if (current->thread.forking) { memcpy(&p->thread.regs.regs, &regs->regs, sizeof(p->thread.regs.regs)); - UPT_SET_SYSCALL_RETURN(&p->thread.regs.regs, 0); + PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0); if (sp != 0) REGS_SP(p->thread.regs.regs.gp) = sp; diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c index 06b19039050..694d551c889 100644 --- a/arch/um/kernel/ptrace.c +++ b/arch/um/kernel/ptrace.c @@ -3,11 +3,12 @@ * Licensed under the GPL */ -#include "linux/audit.h" -#include "linux/ptrace.h" -#include "linux/sched.h" -#include "asm/uaccess.h" -#include "skas_ptrace.h" +#include <linux/audit.h> +#include <linux/ptrace.h> +#include <linux/sched.h> +#include <linux/tracehook.h> +#include <asm/uaccess.h> +#include <skas_ptrace.h> @@ -162,48 +163,36 @@ static void send_sigtrap(struct task_struct *tsk, struct uml_pt_regs *regs, * XXX Check PT_DTRACE vs TIF_SINGLESTEP for singlestepping check and * PT_PTRACED vs TIF_SYSCALL_TRACE for syscall tracing check */ -void 
syscall_trace(struct uml_pt_regs *regs, int entryexit) +void syscall_trace_enter(struct pt_regs *regs) { - int is_singlestep = (current->ptrace & PT_DTRACE) && entryexit; - int tracesysgood; - - if (!entryexit) - audit_syscall_entry(HOST_AUDIT_ARCH, - UPT_SYSCALL_NR(regs), - UPT_SYSCALL_ARG1(regs), - UPT_SYSCALL_ARG2(regs), - UPT_SYSCALL_ARG3(regs), - UPT_SYSCALL_ARG4(regs)); - else - audit_syscall_exit(regs); - - /* Fake a debug trap */ - if (is_singlestep) - send_sigtrap(current, regs, 0); + audit_syscall_entry(HOST_AUDIT_ARCH, + UPT_SYSCALL_NR(&regs->regs), + UPT_SYSCALL_ARG1(&regs->regs), + UPT_SYSCALL_ARG2(&regs->regs), + UPT_SYSCALL_ARG3(&regs->regs), + UPT_SYSCALL_ARG4(&regs->regs)); if (!test_thread_flag(TIF_SYSCALL_TRACE)) return; - if (!(current->ptrace & PT_PTRACED)) - return; + tracehook_report_syscall_entry(regs); +} - /* - * the 0x80 provides a way for the tracing parent to distinguish - * between a syscall stop and SIGTRAP delivery - */ - tracesysgood = (current->ptrace & PT_TRACESYSGOOD); - ptrace_notify(SIGTRAP | (tracesysgood ? 0x80 : 0)); +void syscall_trace_leave(struct pt_regs *regs) +{ + int ptraced = current->ptrace; - if (entryexit) /* force do_signal() --> is_syscall() */ - set_thread_flag(TIF_SIGPENDING); + audit_syscall_exit(regs); - /* - * this isn't the same as continuing with a signal, but it will do - * for normal use. strace only continues with a signal if the - * stopping signal is not SIGTRAP. -brl - */ - if (current->exit_code) { - send_sig(current->exit_code, current, 1); - current->exit_code = 0; - } + /* Fake a debug trap */ + if (ptraced & PT_DTRACE) + send_sigtrap(current, &regs->regs, 0); + + if (!test_thread_flag(TIF_SYSCALL_TRACE)) + return; + + tracehook_report_syscall_exit(regs, 0); + /* force do_signal() --> is_syscall() */ + if (ptraced & PT_PTRACED) + set_thread_flag(TIF_SIGPENDING); } diff --git a/arch/um/kernel/skas/syscall.c b/arch/um/kernel/skas/syscall.c index 05fbeb480e0..86368a025a9 100644 --- a/arch/um/kernel/skas/syscall.c +++ b/arch/um/kernel/skas/syscall.c @@ -18,7 +18,7 @@ void handle_syscall(struct uml_pt_regs *r) long result; int syscall; - syscall_trace(r, 0); + syscall_trace_enter(regs); /* * This should go in the declaration of syscall, but when I do that, @@ -34,7 +34,7 @@ void handle_syscall(struct uml_pt_regs *r) result = -ENOSYS; else result = EXECUTE_SYSCALL(syscall, regs); - UPT_SET_SYSCALL_RETURN(r, result); + PT_REGS_SET_SYSCALL_RETURN(regs, result); - syscall_trace(r, 1); + syscall_trace_leave(regs); } diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c index d1a23fb3190..5f76d4ba151 100644 --- a/arch/um/kernel/time.c +++ b/arch/um/kernel/time.c @@ -13,7 +13,7 @@ #include "kern_util.h" #include "os.h" -void timer_handler(int sig, struct uml_pt_regs *regs) +void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs) { unsigned long flags; diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c index 3be60765c0e..0353b98ae35 100644 --- a/arch/um/kernel/trap.c +++ b/arch/um/kernel/trap.c @@ -172,7 +172,7 @@ void fatal_sigsegv(void) os_dump_core(); } -void segv_handler(int sig, struct uml_pt_regs *regs) +void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs) { struct faultinfo * fi = UPT_FAULTINFO(regs); @@ -258,8 +258,11 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, return 0; } -void relay_signal(int sig, struct uml_pt_regs *regs) +void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs) { + struct faultinfo *fi; + struct siginfo clean_si; 
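	/*
	 * Illustrative note, not part of the original patch: relay_signal()
	 * below deliberately rebuilds the siginfo rather than forwarding the
	 * host's structure wholesale - clean_si is zeroed and only si_signo,
	 * si_errno, si_code and, for the fault signals, si_addr (plus
	 * si_trapno where the architecture defines it) are copied in, so no
	 * unrelated host-side siginfo state leaks into the UML process.
	 */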
+ if (!UPT_IS_USER(regs)) { if (sig == SIGBUS) printk(KERN_ERR "Bus error - the host /dev/shm or /tmp " @@ -269,18 +272,40 @@ void relay_signal(int sig, struct uml_pt_regs *regs) arch_examine_signal(sig, regs); - current->thread.arch.faultinfo = *UPT_FAULTINFO(regs); - force_sig(sig, current); + memset(&clean_si, 0, sizeof(clean_si)); + clean_si.si_signo = si->si_signo; + clean_si.si_errno = si->si_errno; + clean_si.si_code = si->si_code; + switch (sig) { + case SIGILL: + case SIGFPE: + case SIGSEGV: + case SIGBUS: + case SIGTRAP: + fi = UPT_FAULTINFO(regs); + clean_si.si_addr = (void __user *) FAULT_ADDRESS(*fi); + current->thread.arch.faultinfo = *fi; +#ifdef __ARCH_SI_TRAPNO + clean_si.si_trapno = si->si_trapno; +#endif + break; + default: + printk(KERN_ERR "Attempted to relay unknown signal %d (si_code = %d)\n", + sig, si->si_code); + } + + force_sig_info(sig, &clean_si, current); } -void bus_handler(int sig, struct uml_pt_regs *regs) +void bus_handler(int sig, struct siginfo *si, struct uml_pt_regs *regs) { if (current->thread.fault_catcher != NULL) UML_LONGJMP(current->thread.fault_catcher, 1); - else relay_signal(sig, regs); + else + relay_signal(sig, si, regs); } -void winch(int sig, struct uml_pt_regs *regs) +void winch(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs) { do_IRQ(WINCH_IRQ, regs); } diff --git a/arch/um/os-Linux/internal.h b/arch/um/os-Linux/internal.h index 2c3c3ecd8c0..0dc2c9f135f 100644 --- a/arch/um/os-Linux/internal.h +++ b/arch/um/os-Linux/internal.h @@ -1 +1 @@ -void alarm_handler(int, mcontext_t *); +void alarm_handler(int sig, struct siginfo *unused_si, mcontext_t *mc); diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c index 2d22f1fcd8e..6366ce904b9 100644 --- a/arch/um/os-Linux/signal.c +++ b/arch/um/os-Linux/signal.c @@ -13,8 +13,9 @@ #include "kern_util.h" #include "os.h" #include "sysdep/mcontext.h" +#include "internal.h" -void (*sig_info[NSIG])(int, struct uml_pt_regs *) = { +void (*sig_info[NSIG])(int, siginfo_t *, struct uml_pt_regs *) = { [SIGTRAP] = relay_signal, [SIGFPE] = relay_signal, [SIGILL] = relay_signal, @@ -24,7 +25,7 @@ void (*sig_info[NSIG])(int, struct uml_pt_regs *) = { [SIGIO] = sigio_handler, [SIGVTALRM] = timer_handler }; -static void sig_handler_common(int sig, mcontext_t *mc) +static void sig_handler_common(int sig, siginfo_t *si, mcontext_t *mc) { struct uml_pt_regs r; int save_errno = errno; @@ -40,7 +41,7 @@ static void sig_handler_common(int sig, mcontext_t *mc) if ((sig != SIGIO) && (sig != SIGWINCH) && (sig != SIGVTALRM)) unblock_signals(); - (*sig_info[sig])(sig, &r); + (*sig_info[sig])(sig, si, &r); errno = save_errno; } @@ -60,7 +61,7 @@ static void sig_handler_common(int sig, mcontext_t *mc) static int signals_enabled; static unsigned int signals_pending; -void sig_handler(int sig, mcontext_t *mc) +void sig_handler(int sig, siginfo_t *si, mcontext_t *mc) { int enabled; @@ -72,7 +73,7 @@ void sig_handler(int sig, mcontext_t *mc) block_signals(); - sig_handler_common(sig, mc); + sig_handler_common(sig, si, mc); set_signals(enabled); } @@ -85,10 +86,10 @@ static void real_alarm_handler(mcontext_t *mc) get_regs_from_mc(&regs, mc); regs.is_user = 0; unblock_signals(); - timer_handler(SIGVTALRM, &regs); + timer_handler(SIGVTALRM, NULL, &regs); } -void alarm_handler(int sig, mcontext_t *mc) +void alarm_handler(int sig, struct siginfo *unused_si, mcontext_t *mc) { int enabled; @@ -119,7 +120,7 @@ void set_sigstack(void *sig_stack, int size) panic("enabling signal stack failed, errno = %d\n", errno); } -static 
void (*handlers[_NSIG])(int sig, mcontext_t *mc) = { +static void (*handlers[_NSIG])(int sig, siginfo_t *si, mcontext_t *mc) = { [SIGSEGV] = sig_handler, [SIGBUS] = sig_handler, [SIGILL] = sig_handler, @@ -132,7 +133,7 @@ static void (*handlers[_NSIG])(int sig, mcontext_t *mc) = { }; -static void hard_handler(int sig, siginfo_t *info, void *p) +static void hard_handler(int sig, siginfo_t *si, void *p) { struct ucontext *uc = p; mcontext_t *mc = &uc->uc_mcontext; @@ -161,7 +162,7 @@ static void hard_handler(int sig, siginfo_t *info, void *p) while ((sig = ffs(pending)) != 0){ sig--; pending &= ~(1 << sig); - (*handlers[sig])(sig, mc); + (*handlers[sig])(sig, si, mc); } /* @@ -273,9 +274,12 @@ void unblock_signals(void) * Deal with SIGIO first because the alarm handler might * schedule, leaving the pending SIGIO stranded until we come * back here. + * + * SIGIO's handler doesn't use siginfo or mcontext, + * so they can be NULL. */ if (save_pending & SIGIO_MASK) - sig_handler_common(SIGIO, NULL); + sig_handler_common(SIGIO, NULL, NULL); if (save_pending & SIGVTALRM_MASK) real_alarm_handler(NULL); diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c index cd65727854e..d93bb40499f 100644 --- a/arch/um/os-Linux/skas/process.c +++ b/arch/um/os-Linux/skas/process.c @@ -346,6 +346,10 @@ void userspace(struct uml_pt_regs *regs) int err, status, op, pid = userspace_pid[0]; /* To prevent races if using_sysemu changes under us.*/ int local_using_sysemu; + siginfo_t si; + + /* Handle any immediate reschedules or signals */ + interrupt_end(); if (getitimer(ITIMER_VIRTUAL, &timer)) printk(UM_KERN_ERR "Failed to get itimer, errno = %d\n", errno); @@ -404,13 +408,17 @@ void userspace(struct uml_pt_regs *regs) if (WIFSTOPPED(status)) { int sig = WSTOPSIG(status); + + ptrace(PTRACE_GETSIGINFO, pid, 0, &si); + switch (sig) { case SIGSEGV: if (PTRACE_FULL_FAULTINFO || !ptrace_faultinfo) { get_skas_faultinfo(pid, &regs->faultinfo); - (*sig_info[SIGSEGV])(SIGSEGV, regs); + (*sig_info[SIGSEGV])(SIGSEGV, &si, + regs); } else handle_segv(pid, regs); break; @@ -418,14 +426,14 @@ void userspace(struct uml_pt_regs *regs) handle_trap(pid, regs, local_using_sysemu); break; case SIGTRAP: - relay_signal(SIGTRAP, regs); + relay_signal(SIGTRAP, &si, regs); break; case SIGVTALRM: now = os_nsecs(); if (now < nsecs) break; block_signals(); - (*sig_info[sig])(sig, regs); + (*sig_info[sig])(sig, &si, regs); unblock_signals(); nsecs = timer.it_value.tv_sec * UM_NSEC_PER_SEC + @@ -439,7 +447,7 @@ void userspace(struct uml_pt_regs *regs) case SIGFPE: case SIGWINCH: block_signals(); - (*sig_info[sig])(sig, regs); + (*sig_info[sig])(sig, &si, regs); unblock_signals(); break; default: diff --git a/arch/um/os-Linux/time.c b/arch/um/os-Linux/time.c index 910499d76a6..f60238559af 100644 --- a/arch/um/os-Linux/time.c +++ b/arch/um/os-Linux/time.c @@ -87,7 +87,7 @@ static int after_sleep_interval(struct timespec *ts) static void deliver_alarm(void) { - alarm_handler(SIGVTALRM, NULL); + alarm_handler(SIGVTALRM, NULL, NULL); } static unsigned long long sleep_time(unsigned long long nsecs) diff --git a/arch/x86/um/asm/ptrace.h b/arch/x86/um/asm/ptrace.h index 950dfb7b841..e72cd0df5ba 100644 --- a/arch/x86/um/asm/ptrace.h +++ b/arch/x86/um/asm/ptrace.h @@ -30,10 +30,10 @@ #define profile_pc(regs) PT_REGS_IP(regs) #define UPT_RESTART_SYSCALL(r) (UPT_IP(r) -= 2) -#define UPT_SET_SYSCALL_RETURN(r, res) (UPT_AX(r) = (res)) +#define PT_REGS_SET_SYSCALL_RETURN(r, res) (PT_REGS_AX(r) = (res)) -static inline long 
regs_return_value(struct uml_pt_regs *regs) +static inline long regs_return_value(struct pt_regs *regs) { - return UPT_AX(regs); + return PT_REGS_AX(regs); } #endif /* __UM_X86_PTRACE_H */ diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index e7dee617358..f3b44a65fc7 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -31,27 +31,6 @@ EXPORT_SYMBOL_GPL(blkcg_root); static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS]; -struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) -{ - return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id), - struct blkcg, css); -} -EXPORT_SYMBOL_GPL(cgroup_to_blkcg); - -static struct blkcg *task_blkcg(struct task_struct *tsk) -{ - return container_of(task_subsys_state(tsk, blkio_subsys_id), - struct blkcg, css); -} - -struct blkcg *bio_blkcg(struct bio *bio) -{ - if (bio && bio->bi_css) - return container_of(bio->bi_css, struct blkcg, css); - return task_blkcg(current); -} -EXPORT_SYMBOL_GPL(bio_blkcg); - static bool blkcg_policy_enabled(struct request_queue *q, const struct blkcg_policy *pol) { @@ -84,6 +63,7 @@ static void blkg_free(struct blkcg_gq *blkg) kfree(pd); } + blk_exit_rl(&blkg->rl); kfree(blkg); } @@ -91,16 +71,18 @@ static void blkg_free(struct blkcg_gq *blkg) * blkg_alloc - allocate a blkg * @blkcg: block cgroup the new blkg is associated with * @q: request_queue the new blkg is associated with + * @gfp_mask: allocation mask to use * * Allocate a new blkg associating @blkcg and @q. */ -static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q) +static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q, + gfp_t gfp_mask) { struct blkcg_gq *blkg; int i; /* alloc and init base part */ - blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node); + blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node); if (!blkg) return NULL; @@ -109,6 +91,13 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q) blkg->blkcg = blkcg; blkg->refcnt = 1; + /* root blkg uses @q->root_rl, init rl only for !root blkgs */ + if (blkcg != &blkcg_root) { + if (blk_init_rl(&blkg->rl, q, gfp_mask)) + goto err_free; + blkg->rl.blkg = blkg; + } + for (i = 0; i < BLKCG_MAX_POLS; i++) { struct blkcg_policy *pol = blkcg_policy[i]; struct blkg_policy_data *pd; @@ -117,11 +106,9 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q) continue; /* alloc per-policy data and attach it to blkg */ - pd = kzalloc_node(pol->pd_size, GFP_ATOMIC, q->node); - if (!pd) { - blkg_free(blkg); - return NULL; - } + pd = kzalloc_node(pol->pd_size, gfp_mask, q->node); + if (!pd) + goto err_free; blkg->pd[i] = pd; pd->blkg = blkg; @@ -132,6 +119,10 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q) } return blkg; + +err_free: + blkg_free(blkg); + return NULL; } static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, @@ -175,9 +166,13 @@ struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q) } EXPORT_SYMBOL_GPL(blkg_lookup); +/* + * If @new_blkg is %NULL, this function tries to allocate a new one as + * necessary using %GFP_ATOMIC. @new_blkg is always consumed on return. 
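 *
 * (Illustrative note, not part of the original comment: callers that
 * may block can preallocate with blkg_alloc(blkcg, q, GFP_KERNEL) and
 * pass the result in, as blkcg_activate_policy() below does, while
 * atomic callers pass %NULL and rely on the GFP_ATOMIC fallback.)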
+ */ static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, - struct request_queue *q) - __releases(q->queue_lock) __acquires(q->queue_lock) + struct request_queue *q, + struct blkcg_gq *new_blkg) { struct blkcg_gq *blkg; int ret; @@ -189,24 +184,26 @@ static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, blkg = __blkg_lookup(blkcg, q); if (blkg) { rcu_assign_pointer(blkcg->blkg_hint, blkg); - return blkg; + goto out_free; } /* blkg holds a reference to blkcg */ - if (!css_tryget(&blkcg->css)) - return ERR_PTR(-EINVAL); + if (!css_tryget(&blkcg->css)) { + blkg = ERR_PTR(-EINVAL); + goto out_free; + } /* allocate */ - ret = -ENOMEM; - blkg = blkg_alloc(blkcg, q); - if (unlikely(!blkg)) - goto err_put; + if (!new_blkg) { + new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC); + if (unlikely(!new_blkg)) { + blkg = ERR_PTR(-ENOMEM); + goto out_put; + } + } + blkg = new_blkg; /* insert */ - ret = radix_tree_preload(GFP_ATOMIC); - if (ret) - goto err_free; - spin_lock(&blkcg->lock); ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg); if (likely(!ret)) { @@ -215,15 +212,15 @@ static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, } spin_unlock(&blkcg->lock); - radix_tree_preload_end(); - if (!ret) return blkg; -err_free: - blkg_free(blkg); -err_put: + + blkg = ERR_PTR(ret); +out_put: css_put(&blkcg->css); - return ERR_PTR(ret); +out_free: + blkg_free(new_blkg); + return blkg; } struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, @@ -235,7 +232,7 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, */ if (unlikely(blk_queue_bypass(q))) return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY); - return __blkg_lookup_create(blkcg, q); + return __blkg_lookup_create(blkcg, q, NULL); } EXPORT_SYMBOL_GPL(blkg_lookup_create); @@ -313,6 +310,38 @@ void __blkg_release(struct blkcg_gq *blkg) } EXPORT_SYMBOL_GPL(__blkg_release); +/* + * The next function used by blk_queue_for_each_rl(). It's a bit tricky + * because the root blkg uses @q->root_rl instead of its own rl. + */ +struct request_list *__blk_queue_next_rl(struct request_list *rl, + struct request_queue *q) +{ + struct list_head *ent; + struct blkcg_gq *blkg; + + /* + * Determine the current blkg list_head. The first entry is + * root_rl which is off @q->blkg_list and mapped to the head. 
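 *
 * (Illustrative note, not part of the original comment: the resulting
 * iteration order is @q->root_rl first, then the rl embedded in each
 * non-root blkg on @q->blkg_list; the root blkg itself is skipped
 * because its rl is @q->root_rl, and %NULL ends the walk once it
 * wraps back to the list head.)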
+ */ + if (rl == &q->root_rl) { + ent = &q->blkg_list; + } else { + blkg = container_of(rl, struct blkcg_gq, rl); + ent = &blkg->q_node; + } + + /* walk to the next list_head, skip root blkcg */ + ent = ent->next; + if (ent == &q->root_blkg->q_node) + ent = ent->next; + if (ent == &q->blkg_list) + return NULL; + + blkg = container_of(ent, struct blkcg_gq, q_node); + return &blkg->rl; +} + static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val) { @@ -734,24 +763,36 @@ int blkcg_activate_policy(struct request_queue *q, struct blkcg_gq *blkg; struct blkg_policy_data *pd, *n; int cnt = 0, ret; + bool preloaded; if (blkcg_policy_enabled(q, pol)) return 0; + /* preallocations for root blkg */ + blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL); + if (!blkg) + return -ENOMEM; + + preloaded = !radix_tree_preload(GFP_KERNEL); + blk_queue_bypass_start(q); /* make sure the root blkg exists and count the existing blkgs */ spin_lock_irq(q->queue_lock); rcu_read_lock(); - blkg = __blkg_lookup_create(&blkcg_root, q); + blkg = __blkg_lookup_create(&blkcg_root, q, blkg); rcu_read_unlock(); + if (preloaded) + radix_tree_preload_end(); + if (IS_ERR(blkg)) { ret = PTR_ERR(blkg); goto out_unlock; } q->root_blkg = blkg; + q->root_rl.blkg = blkg; list_for_each_entry(blkg, &q->blkg_list, q_node) cnt++; diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h index 8ac457ce778..24597309e23 100644 --- a/block/blk-cgroup.h +++ b/block/blk-cgroup.h @@ -17,6 +17,7 @@ #include <linux/u64_stats_sync.h> #include <linux/seq_file.h> #include <linux/radix-tree.h> +#include <linux/blkdev.h> /* Max limits for throttle policy */ #define THROTL_IOPS_MAX UINT_MAX @@ -93,6 +94,8 @@ struct blkcg_gq { struct list_head q_node; struct hlist_node blkcg_node; struct blkcg *blkcg; + /* request allocation list for this blkcg-q pair */ + struct request_list rl; /* reference count */ int refcnt; @@ -120,8 +123,6 @@ struct blkcg_policy { extern struct blkcg blkcg_root; -struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup); -struct blkcg *bio_blkcg(struct bio *bio); struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q); struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, struct request_queue *q); @@ -160,6 +161,25 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, void blkg_conf_finish(struct blkg_conf_ctx *ctx); +static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) +{ + return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id), + struct blkcg, css); +} + +static inline struct blkcg *task_blkcg(struct task_struct *tsk) +{ + return container_of(task_subsys_state(tsk, blkio_subsys_id), + struct blkcg, css); +} + +static inline struct blkcg *bio_blkcg(struct bio *bio) +{ + if (bio && bio->bi_css) + return container_of(bio->bi_css, struct blkcg, css); + return task_blkcg(current); +} + /** * blkg_to_pdata - get policy private data * @blkg: blkg of interest @@ -234,6 +254,95 @@ static inline void blkg_put(struct blkcg_gq *blkg) } /** + * blk_get_rl - get request_list to use + * @q: request_queue of interest + * @bio: bio which will be attached to the allocated request (may be %NULL) + * + * The caller wants to allocate a request from @q to use for @bio. Find + * the request_list to use and obtain a reference on it. Should be called + * under queue_lock. This function is guaranteed to return non-%NULL + * request_list. 
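 *
 * (Illustrative pairing, not part of the original comment, using only
 * the helpers defined in this header; declarations elided:)
 *
 *	spin_lock_irq(q->queue_lock);
 *	rl = blk_get_rl(q, bio);
 *	blk_rq_set_rl(rq, rl);
 *	spin_unlock_irq(q->queue_lock);
 *
 *	and later, when @rq is freed, again under queue_lock:
 *
 *	blk_put_rl(blk_rq_rl(rq));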
+ */ +static inline struct request_list *blk_get_rl(struct request_queue *q, + struct bio *bio) +{ + struct blkcg *blkcg; + struct blkcg_gq *blkg; + + rcu_read_lock(); + + blkcg = bio_blkcg(bio); + + /* bypass blkg lookup and use @q->root_rl directly for root */ + if (blkcg == &blkcg_root) + goto root_rl; + + /* + * Try to use blkg->rl. blkg lookup may fail under memory pressure + * or if either the blkcg or queue is going away. Fall back to + * root_rl in such cases. + */ + blkg = blkg_lookup_create(blkcg, q); + if (unlikely(IS_ERR(blkg))) + goto root_rl; + + blkg_get(blkg); + rcu_read_unlock(); + return &blkg->rl; +root_rl: + rcu_read_unlock(); + return &q->root_rl; +} + +/** + * blk_put_rl - put request_list + * @rl: request_list to put + * + * Put the reference acquired by blk_get_rl(). Should be called under + * queue_lock. + */ +static inline void blk_put_rl(struct request_list *rl) +{ + /* root_rl may not have blkg set */ + if (rl->blkg && rl->blkg->blkcg != &blkcg_root) + blkg_put(rl->blkg); +} + +/** + * blk_rq_set_rl - associate a request with a request_list + * @rq: request of interest + * @rl: target request_list + * + * Associate @rq with @rl so that accounting and freeing can know the + * request_list @rq came from. + */ +static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) +{ + rq->rl = rl; +} + +/** + * blk_rq_rl - return the request_list a request came from + * @rq: request of interest + * + * Return the request_list @rq is allocated from. + */ +static inline struct request_list *blk_rq_rl(struct request *rq) +{ + return rq->rl; +} + +struct request_list *__blk_queue_next_rl(struct request_list *rl, + struct request_queue *q); +/** + * blk_queue_for_each_rl - iterate through all request_lists of a request_queue + * + * Should be used under queue_lock. 
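/*
 * Sketch of the request_list lifetime the helpers above imply
 * (simplified; __get_request() and __blk_put_request() are the real
 * users). The reference taken by blk_get_rl() travels with the request
 * via blk_rq_set_rl() and is dropped when the request is freed. Both
 * sketches assume q->queue_lock is held by the caller.
 */
static struct request *rq_alloc_sketch(struct request_queue *q, struct bio *bio)
{
	struct request_list *rl = blk_get_rl(q, bio);	/* never NULL, takes a ref */
	struct request *rq = mempool_alloc(rl->rq_pool, GFP_ATOMIC);

	if (!rq) {
		blk_put_rl(rl);		/* allocation failed: drop the ref here */
		return NULL;
	}
	blk_rq_set_rl(rq, rl);		/* the ref now travels with @rq */
	return rq;
}

static void rq_free_sketch(struct request *rq)
{
	struct request_list *rl = blk_rq_rl(rq);

	mempool_free(rq, rl->rq_pool);
	blk_put_rl(rl);			/* and is dropped on free */
}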
+ */ +#define blk_queue_for_each_rl(rl, q) \ + for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q))) + +/** * blkg_stat_add - add a value to a blkg_stat * @stat: target blkg_stat * @val: value to add @@ -351,6 +460,7 @@ static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat) #else /* CONFIG_BLK_CGROUP */ struct cgroup; +struct blkcg; struct blkg_policy_data { }; @@ -361,8 +471,6 @@ struct blkcg_gq { struct blkcg_policy { }; -static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; } -static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; } static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; } static inline int blkcg_init_queue(struct request_queue *q) { return 0; } static inline void blkcg_drain_queue(struct request_queue *q) { } @@ -374,6 +482,9 @@ static inline int blkcg_activate_policy(struct request_queue *q, static inline void blkcg_deactivate_policy(struct request_queue *q, const struct blkcg_policy *pol) { } +static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; } +static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; } + static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg, struct blkcg_policy *pol) { return NULL; } static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; } @@ -381,5 +492,14 @@ static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; } static inline void blkg_get(struct blkcg_gq *blkg) { } static inline void blkg_put(struct blkcg_gq *blkg) { } +static inline struct request_list *blk_get_rl(struct request_queue *q, + struct bio *bio) { return &q->root_rl; } +static inline void blk_put_rl(struct request_list *rl) { } +static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { } +static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; } + +#define blk_queue_for_each_rl(rl, q) \ + for ((rl) = &(q)->root_rl; (rl); (rl) = NULL) + #endif /* CONFIG_BLK_CGROUP */ #endif /* _BLK_CGROUP_H */ diff --git a/block/blk-core.c b/block/blk-core.c index 93eb3e4f88c..4b4dbdfbca8 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -387,7 +387,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all) if (!list_empty(&q->queue_head) && q->request_fn) __blk_run_queue(q); - drain |= q->rq.elvpriv; + drain |= q->nr_rqs_elvpriv; /* * Unfortunately, requests are queued at and tracked from @@ -397,7 +397,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all) if (drain_all) { drain |= !list_empty(&q->queue_head); for (i = 0; i < 2; i++) { - drain |= q->rq.count[i]; + drain |= q->nr_rqs[i]; drain |= q->in_flight[i]; drain |= !list_empty(&q->flush_queue[i]); } @@ -416,9 +416,14 @@ void blk_drain_queue(struct request_queue *q, bool drain_all) * left with hung waiters. We need to wake up those waiters. 
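/*
 * Illustrative (not from this patch) use of blk_queue_for_each_rl():
 * sum the requests allocated from every per-blkcg pool of a queue.
 * Must run under q->queue_lock since the iterator walks q->blkg_list;
 * with !CONFIG_BLK_CGROUP it degenerates to visiting only q->root_rl.
 */
static int queue_total_requests(struct request_queue *q)
{
	struct request_list *rl;
	int total = 0;

	blk_queue_for_each_rl(rl, q)
		total += rl->count[BLK_RW_SYNC] + rl->count[BLK_RW_ASYNC];
	return total;
}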
*/ if (q->request_fn) { + struct request_list *rl; + spin_lock_irq(q->queue_lock); - for (i = 0; i < ARRAY_SIZE(q->rq.wait); i++) - wake_up_all(&q->rq.wait[i]); + + blk_queue_for_each_rl(rl, q) + for (i = 0; i < ARRAY_SIZE(rl->wait); i++) + wake_up_all(&rl->wait[i]); + spin_unlock_irq(q->queue_lock); } } @@ -517,28 +522,33 @@ void blk_cleanup_queue(struct request_queue *q) } EXPORT_SYMBOL(blk_cleanup_queue); -static int blk_init_free_list(struct request_queue *q) +int blk_init_rl(struct request_list *rl, struct request_queue *q, + gfp_t gfp_mask) { - struct request_list *rl = &q->rq; - if (unlikely(rl->rq_pool)) return 0; + rl->q = q; rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0; rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0; - rl->elvpriv = 0; init_waitqueue_head(&rl->wait[BLK_RW_SYNC]); init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]); rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, - mempool_free_slab, request_cachep, q->node); - + mempool_free_slab, request_cachep, + gfp_mask, q->node); if (!rl->rq_pool) return -ENOMEM; return 0; } +void blk_exit_rl(struct request_list *rl) +{ + if (rl->rq_pool) + mempool_destroy(rl->rq_pool); +} + struct request_queue *blk_alloc_queue(gfp_t gfp_mask) { return blk_alloc_queue_node(gfp_mask, -1); @@ -680,7 +690,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn, if (!q) return NULL; - if (blk_init_free_list(q)) + if (blk_init_rl(&q->root_rl, q, GFP_KERNEL)) return NULL; q->request_fn = rfn; @@ -722,15 +732,15 @@ bool blk_get_queue(struct request_queue *q) } EXPORT_SYMBOL(blk_get_queue); -static inline void blk_free_request(struct request_queue *q, struct request *rq) +static inline void blk_free_request(struct request_list *rl, struct request *rq) { if (rq->cmd_flags & REQ_ELVPRIV) { - elv_put_request(q, rq); + elv_put_request(rl->q, rq); if (rq->elv.icq) put_io_context(rq->elv.icq->ioc); } - mempool_free(rq, q->rq.rq_pool); + mempool_free(rq, rl->rq_pool); } /* @@ -767,18 +777,23 @@ static void ioc_set_batching(struct request_queue *q, struct io_context *ioc) ioc->last_waited = jiffies; } -static void __freed_request(struct request_queue *q, int sync) +static void __freed_request(struct request_list *rl, int sync) { - struct request_list *rl = &q->rq; + struct request_queue *q = rl->q; - if (rl->count[sync] < queue_congestion_off_threshold(q)) + /* + * bdi isn't aware of blkcg yet. As all async IOs end up root + * blkcg anyway, just use root blkcg state. + */ + if (rl == &q->root_rl && + rl->count[sync] < queue_congestion_off_threshold(q)) blk_clear_queue_congested(q, sync); if (rl->count[sync] + 1 <= q->nr_requests) { if (waitqueue_active(&rl->wait[sync])) wake_up(&rl->wait[sync]); - blk_clear_queue_full(q, sync); + blk_clear_rl_full(rl, sync); } } @@ -786,19 +801,20 @@ static void __freed_request(struct request_queue *q, int sync) * A request has just been released. Account for it, update the full and * congestion status, wake up any waiters. Called under q->queue_lock. 
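/*
 * Sketch of the new init/exit pairing (error handling trimmed): with
 * blk_init_rl() taking an explicit request_list and gfp mask, a mempool
 * can now be set up for any rl, not just the queue-embedded root_rl.
 */
static int rl_setup_sketch(struct request_list *rl, struct request_queue *q)
{
	int ret = blk_init_rl(rl, q, GFP_KERNEL);	/* creates rl->rq_pool */

	if (ret)
		return ret;
	/* ... allocate requests from rl->rq_pool via __get_request() ... */
	blk_exit_rl(rl);				/* destroys rl->rq_pool */
	return 0;
}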
*/ -static void freed_request(struct request_queue *q, unsigned int flags) +static void freed_request(struct request_list *rl, unsigned int flags) { - struct request_list *rl = &q->rq; + struct request_queue *q = rl->q; int sync = rw_is_sync(flags); + q->nr_rqs[sync]--; rl->count[sync]--; if (flags & REQ_ELVPRIV) - rl->elvpriv--; + q->nr_rqs_elvpriv--; - __freed_request(q, sync); + __freed_request(rl, sync); if (unlikely(rl->starved[sync ^ 1])) - __freed_request(q, sync ^ 1); + __freed_request(rl, sync ^ 1); } /* @@ -837,8 +853,8 @@ static struct io_context *rq_ioc(struct bio *bio) } /** - * get_request - get a free request - * @q: request_queue to allocate request from + * __get_request - get a free request + * @rl: request list to allocate from * @rw_flags: RW and SYNC flags * @bio: bio to allocate request for (can be %NULL) * @gfp_mask: allocation mask @@ -850,20 +866,16 @@ static struct io_context *rq_ioc(struct bio *bio) * Returns %NULL on failure, with @q->queue_lock held. * Returns !%NULL on success, with @q->queue_lock *not held*. */ -static struct request *get_request(struct request_queue *q, int rw_flags, - struct bio *bio, gfp_t gfp_mask) +static struct request *__get_request(struct request_list *rl, int rw_flags, + struct bio *bio, gfp_t gfp_mask) { + struct request_queue *q = rl->q; struct request *rq; - struct request_list *rl = &q->rq; - struct elevator_type *et; - struct io_context *ioc; + struct elevator_type *et = q->elevator->type; + struct io_context *ioc = rq_ioc(bio); struct io_cq *icq = NULL; const bool is_sync = rw_is_sync(rw_flags) != 0; - bool retried = false; int may_queue; -retry: - et = q->elevator->type; - ioc = rq_ioc(bio); if (unlikely(blk_queue_dead(q))) return NULL; @@ -875,28 +887,14 @@ retry: if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) { if (rl->count[is_sync]+1 >= q->nr_requests) { /* - * We want ioc to record batching state. If it's - * not already there, creating a new one requires - * dropping queue_lock, which in turn requires - * retesting conditions to avoid queue hang. - */ - if (!ioc && !retried) { - spin_unlock_irq(q->queue_lock); - create_io_context(gfp_mask, q->node); - spin_lock_irq(q->queue_lock); - retried = true; - goto retry; - } - - /* * The queue will fill after this allocation, so set * it as full, and mark this process as "batching". * This process will be allowed to complete a batch of * requests, others will be blocked. */ - if (!blk_queue_full(q, is_sync)) { + if (!blk_rl_full(rl, is_sync)) { ioc_set_batching(q, ioc); - blk_set_queue_full(q, is_sync); + blk_set_rl_full(rl, is_sync); } else { if (may_queue != ELV_MQUEUE_MUST && !ioc_batching(q, ioc)) { @@ -909,7 +907,12 @@ retry: } } } - blk_set_queue_congested(q, is_sync); + /* + * bdi isn't aware of blkcg yet. As all async IOs end up + * root blkcg anyway, just use root blkcg state. 
+ */ + if (rl == &q->root_rl) + blk_set_queue_congested(q, is_sync); } /* @@ -920,6 +923,7 @@ retry: if (rl->count[is_sync] >= (3 * q->nr_requests / 2)) return NULL; + q->nr_rqs[is_sync]++; rl->count[is_sync]++; rl->starved[is_sync] = 0; @@ -935,7 +939,7 @@ retry: */ if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) { rw_flags |= REQ_ELVPRIV; - rl->elvpriv++; + q->nr_rqs_elvpriv++; if (et->icq_cache && ioc) icq = ioc_lookup_icq(ioc, q); } @@ -945,22 +949,19 @@ retry: spin_unlock_irq(q->queue_lock); /* allocate and init request */ - rq = mempool_alloc(q->rq.rq_pool, gfp_mask); + rq = mempool_alloc(rl->rq_pool, gfp_mask); if (!rq) goto fail_alloc; blk_rq_init(q, rq); + blk_rq_set_rl(rq, rl); rq->cmd_flags = rw_flags | REQ_ALLOCED; /* init elvpriv */ if (rw_flags & REQ_ELVPRIV) { if (unlikely(et->icq_cache && !icq)) { - create_io_context(gfp_mask, q->node); - ioc = rq_ioc(bio); - if (!ioc) - goto fail_elvpriv; - - icq = ioc_create_icq(ioc, q, gfp_mask); + if (ioc) + icq = ioc_create_icq(ioc, q, gfp_mask); if (!icq) goto fail_elvpriv; } @@ -1000,7 +1001,7 @@ fail_elvpriv: rq->elv.icq = NULL; spin_lock_irq(q->queue_lock); - rl->elvpriv--; + q->nr_rqs_elvpriv--; spin_unlock_irq(q->queue_lock); goto out; @@ -1013,7 +1014,7 @@ fail_alloc: * queue, but this is pretty rare. */ spin_lock_irq(q->queue_lock); - freed_request(q, rw_flags); + freed_request(rl, rw_flags); /* * in the very unlikely event that allocation failed and no @@ -1029,56 +1030,58 @@ rq_starved: } /** - * get_request_wait - get a free request with retry + * get_request - get a free request * @q: request_queue to allocate request from * @rw_flags: RW and SYNC flags * @bio: bio to allocate request for (can be %NULL) + * @gfp_mask: allocation mask * - * Get a free request from @q. This function keeps retrying under memory - * pressure and fails iff @q is dead. + * Get a free request from @q. If %__GFP_WAIT is set in @gfp_mask, this + * function keeps retrying under memory pressure and fails iff @q is dead. * * Must be called with @q->queue_lock held. * Returns %NULL on failure, with @q->queue_lock held. * Returns !%NULL on success, with @q->queue_lock *not held*. */ -static struct request *get_request_wait(struct request_queue *q, int rw_flags, - struct bio *bio) +static struct request *get_request(struct request_queue *q, int rw_flags, + struct bio *bio, gfp_t gfp_mask) { const bool is_sync = rw_is_sync(rw_flags) != 0; + DEFINE_WAIT(wait); + struct request_list *rl; struct request *rq; - rq = get_request(q, rw_flags, bio, GFP_NOIO); - while (!rq) { - DEFINE_WAIT(wait); - struct request_list *rl = &q->rq; - - if (unlikely(blk_queue_dead(q))) - return NULL; + rl = blk_get_rl(q, bio); /* transferred to @rq on success */ +retry: + rq = __get_request(rl, rw_flags, bio, gfp_mask); + if (rq) + return rq; - prepare_to_wait_exclusive(&rl->wait[is_sync], &wait, - TASK_UNINTERRUPTIBLE); + if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dead(q))) { + blk_put_rl(rl); + return NULL; + } - trace_block_sleeprq(q, bio, rw_flags & 1); + /* wait on @rl and retry */ + prepare_to_wait_exclusive(&rl->wait[is_sync], &wait, + TASK_UNINTERRUPTIBLE); - spin_unlock_irq(q->queue_lock); - io_schedule(); + trace_block_sleeprq(q, bio, rw_flags & 1); - /* - * After sleeping, we become a "batching" process and - * will be able to allocate at least one request, and - * up to a big batch of them for a small period time.
- * See ioc_batching, ioc_set_batching - */ - create_io_context(GFP_NOIO, q->node); - ioc_set_batching(q, current->io_context); + spin_unlock_irq(q->queue_lock); + io_schedule(); - spin_lock_irq(q->queue_lock); - finish_wait(&rl->wait[is_sync], &wait); + /* + * After sleeping, we become a "batching" process and will be able + * to allocate at least one request, and up to a big batch of them + * for a small period of time. See ioc_batching, ioc_set_batching + */ + ioc_set_batching(q, current->io_context); - rq = get_request(q, rw_flags, bio, GFP_NOIO); - }; + spin_lock_irq(q->queue_lock); + finish_wait(&rl->wait[is_sync], &wait); - return rq; + goto retry; } struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) @@ -1087,11 +1090,11 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) BUG_ON(rw != READ && rw != WRITE); + /* create ioc upfront */ + create_io_context(gfp_mask, q->node); + spin_lock_irq(q->queue_lock); - if (gfp_mask & __GFP_WAIT) - rq = get_request_wait(q, rw, NULL); - else - rq = get_request(q, rw, NULL, gfp_mask); + rq = get_request(q, rw, NULL, gfp_mask); if (!rq) spin_unlock_irq(q->queue_lock); /* q->queue_lock is unlocked at this point */ @@ -1248,12 +1251,14 @@ void __blk_put_request(struct request_queue *q, struct request *req) */ if (req->cmd_flags & REQ_ALLOCED) { unsigned int flags = req->cmd_flags; + struct request_list *rl = blk_rq_rl(req); BUG_ON(!list_empty(&req->queuelist)); BUG_ON(!hlist_unhashed(&req->hash)); - blk_free_request(q, req); - freed_request(q, flags); + blk_free_request(rl, req); + freed_request(rl, flags); + blk_put_rl(rl); } } EXPORT_SYMBOL_GPL(__blk_put_request); @@ -1481,7 +1486,7 @@ get_rq: * Grab a free request. This might sleep but cannot fail. * Returns with the queue unlocked. */ - req = get_request_wait(q, rw_flags, bio); + req = get_request(q, rw_flags, bio, GFP_NOIO); if (unlikely(!req)) { bio_endio(bio, -ENODEV); /* @q is dead */ goto out_unlock; @@ -1702,6 +1707,14 @@ generic_make_request_checks(struct bio *bio) goto end_io; } + /* + * Various block parts want %current->io_context and lazy ioc + * allocation ends up trading a lot of pain for a small amount of + * memory. Just allocate it upfront. This may fail and the block + * layer knows how to live with it.
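/*
 * Caller-side view of the reworked blk_get_request() (illustrative):
 * with __GFP_WAIT in @gfp_mask the call sleeps until a request is
 * available and returns NULL only if the queue is dead; without it,
 * NULL can also mean the pool is simply exhausted right now.
 */
static int send_rq_sketch(struct request_queue *q)
{
	struct request *rq = blk_get_request(q, WRITE, GFP_KERNEL);

	if (!rq)
		return -ENODEV;	/* with __GFP_WAIT, only a dead queue fails */
	/* ... set up the request and insert it ... */
	blk_put_request(rq);
	return 0;
}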
+ */ + create_io_context(GFP_ATOMIC, q->node); + if (blk_throtl_bio(q, bio)) return false; /* throttled, will be resubmitted later */ @@ -2896,23 +2909,47 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth, } -static void flush_plug_callbacks(struct blk_plug *plug) +static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule) { LIST_HEAD(callbacks); - if (list_empty(&plug->cb_list)) - return; + while (!list_empty(&plug->cb_list)) { + list_splice_init(&plug->cb_list, &callbacks); - list_splice_init(&plug->cb_list, &callbacks); - - while (!list_empty(&callbacks)) { - struct blk_plug_cb *cb = list_first_entry(&callbacks, + while (!list_empty(&callbacks)) { + struct blk_plug_cb *cb = list_first_entry(&callbacks, struct blk_plug_cb, list); - list_del(&cb->list); - cb->callback(cb); + list_del(&cb->list); + cb->callback(cb, from_schedule); + } + } +} + +struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data, + int size) +{ + struct blk_plug *plug = current->plug; + struct blk_plug_cb *cb; + + if (!plug) + return NULL; + + list_for_each_entry(cb, &plug->cb_list, list) + if (cb->callback == unplug && cb->data == data) + return cb; + + /* Not currently on the callback list */ + BUG_ON(size < sizeof(*cb)); + cb = kzalloc(size, GFP_ATOMIC); + if (cb) { + cb->data = data; + cb->callback = unplug; + list_add(&cb->list, &plug->cb_list); } + return cb; } +EXPORT_SYMBOL(blk_check_plugged); void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) { @@ -2924,7 +2961,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) BUG_ON(plug->magic != PLUG_MAGIC); - flush_plug_callbacks(plug); + flush_plug_callbacks(plug, from_schedule); if (list_empty(&plug->list)) return; diff --git a/block/blk-ioc.c b/block/blk-ioc.c index 893b8007c65..fab4cdd3f7b 100644 --- a/block/blk-ioc.c +++ b/block/blk-ioc.c @@ -244,6 +244,7 @@ int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node) /* initialize */ atomic_long_set(&ioc->refcount, 1); + atomic_set(&ioc->nr_tasks, 1); atomic_set(&ioc->active_ref, 1); spin_lock_init(&ioc->lock); INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH); diff --git a/block/blk-settings.c b/block/blk-settings.c index d3234fc494a..565a6786032 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -143,8 +143,7 @@ void blk_set_stacking_limits(struct queue_limits *lim) lim->discard_zeroes_data = 1; lim->max_segments = USHRT_MAX; lim->max_hw_sectors = UINT_MAX; - - lim->max_sectors = BLK_DEF_MAX_SECTORS; + lim->max_sectors = UINT_MAX; } EXPORT_SYMBOL(blk_set_stacking_limits); diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index aa41b47c22d..9628b291f96 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -40,7 +40,7 @@ static ssize_t queue_requests_show(struct request_queue *q, char *page) static ssize_t queue_requests_store(struct request_queue *q, const char *page, size_t count) { - struct request_list *rl = &q->rq; + struct request_list *rl; unsigned long nr; int ret; @@ -55,6 +55,9 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count) q->nr_requests = nr; blk_queue_congestion_threshold(q); + /* congestion isn't cgroup aware and follows root blkcg for now */ + rl = &q->root_rl; + if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q)) blk_set_queue_congested(q, BLK_RW_SYNC); else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q)) @@ -65,19 +68,22 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count) 
else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q)) blk_clear_queue_congested(q, BLK_RW_ASYNC); - if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { - blk_set_queue_full(q, BLK_RW_SYNC); - } else { - blk_clear_queue_full(q, BLK_RW_SYNC); - wake_up(&rl->wait[BLK_RW_SYNC]); + blk_queue_for_each_rl(rl, q) { + if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { + blk_set_rl_full(rl, BLK_RW_SYNC); + } else { + blk_clear_rl_full(rl, BLK_RW_SYNC); + wake_up(&rl->wait[BLK_RW_SYNC]); + } + + if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { + blk_set_rl_full(rl, BLK_RW_ASYNC); + } else { + blk_clear_rl_full(rl, BLK_RW_ASYNC); + wake_up(&rl->wait[BLK_RW_ASYNC]); + } } - if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { - blk_set_queue_full(q, BLK_RW_ASYNC); - } else { - blk_clear_queue_full(q, BLK_RW_ASYNC); - wake_up(&rl->wait[BLK_RW_ASYNC]); - } spin_unlock_irq(q->queue_lock); return ret; } @@ -476,7 +482,6 @@ static void blk_release_queue(struct kobject *kobj) { struct request_queue *q = container_of(kobj, struct request_queue, kobj); - struct request_list *rl = &q->rq; blk_sync_queue(q); @@ -489,8 +494,7 @@ static void blk_release_queue(struct kobject *kobj) elevator_exit(q->elevator); } - if (rl->rq_pool) - mempool_destroy(rl->rq_pool); + blk_exit_rl(&q->root_rl); if (q->queue_tags) __blk_queue_free_tags(q); diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 5b065951204..e287c19908c 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -1123,9 +1123,6 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio) goto out; } - /* bio_associate_current() needs ioc, try creating */ - create_io_context(GFP_ATOMIC, q->node); - /* * A throtl_grp pointer retrieved under rcu can be used to access * basic fields like stats and io rates. If a group has no rules, diff --git a/block/blk.h b/block/blk.h index 85f6ae42f7d..2a0ea32d249 100644 --- a/block/blk.h +++ b/block/blk.h @@ -18,6 +18,9 @@ static inline void __blk_get_queue(struct request_queue *q) kobject_get(&q->kobj); } +int blk_init_rl(struct request_list *rl, struct request_queue *q, + gfp_t gfp_mask); +void blk_exit_rl(struct request_list *rl); void init_request_from_bio(struct request *req, struct bio *bio); void blk_rq_bio_prep(struct request_queue *q, struct request *rq, struct bio *bio); @@ -33,7 +36,6 @@ bool __blk_end_bidi_request(struct request *rq, int error, void blk_rq_timed_out_timer(unsigned long data); void blk_delete_timer(struct request *); void blk_add_timer(struct request *); -void __generic_unplug_device(struct request_queue *); /* * Internal atomic flags for request handling diff --git a/block/bsg-lib.c b/block/bsg-lib.c index 7ad49c88f6b..deee61fbb74 100644 --- a/block/bsg-lib.c +++ b/block/bsg-lib.c @@ -243,56 +243,3 @@ int bsg_setup_queue(struct device *dev, struct request_queue *q, return 0; } EXPORT_SYMBOL_GPL(bsg_setup_queue); - -/** - * bsg_remove_queue - Deletes the bsg dev from the q - * @q: the request_queue that is to be torn down. 
- * - * Notes: - * Before unregistering the queue empty any requests that are blocked - */ -void bsg_remove_queue(struct request_queue *q) -{ - struct request *req; /* block request */ - int counts; /* totals for request_list count and starved */ - - if (!q) - return; - - /* Stop taking in new requests */ - spin_lock_irq(q->queue_lock); - blk_stop_queue(q); - - /* drain all requests in the queue */ - while (1) { - /* need the lock to fetch a request - * this may fetch the same reqeust as the previous pass - */ - req = blk_fetch_request(q); - /* save requests in use and starved */ - counts = q->rq.count[0] + q->rq.count[1] + - q->rq.starved[0] + q->rq.starved[1]; - spin_unlock_irq(q->queue_lock); - /* any requests still outstanding? */ - if (counts == 0) - break; - - /* This may be the same req as the previous iteration, - * always send the blk_end_request_all after a prefetch. - * It is not okay to not end the request because the - * prefetch started the request. - */ - if (req) { - /* return -ENXIO to indicate that this queue is - * going away - */ - req->errors = -ENXIO; - blk_end_request_all(req, -ENXIO); - } - - msleep(200); /* allow bsg to possibly finish */ - spin_lock_irq(q->queue_lock); - } - bsg_unregister_queue(q); -} -EXPORT_SYMBOL_GPL(bsg_remove_queue); diff --git a/block/genhd.c b/block/genhd.c index 9cf5583c90f..cac7366957c 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -154,7 +154,7 @@ struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter) part = rcu_dereference(ptbl->part[piter->idx]); if (!part) continue; - if (!part->nr_sects && + if (!part_nr_sects_read(part) && !(piter->flags & DISK_PITER_INCL_EMPTY) && !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 && piter->idx == 0)) @@ -191,7 +191,7 @@ EXPORT_SYMBOL_GPL(disk_part_iter_exit); static inline int sector_in_part(struct hd_struct *part, sector_t sector) { return part->start_sect <= sector && - sector < part->start_sect + part->nr_sects; + sector < part->start_sect + part_nr_sects_read(part); } /** @@ -769,8 +769,8 @@ void __init printk_all_partitions(void) printk("%s%s %10llu %s %s", is_part0 ? "" : " ", bdevt_str(part_devt(part), devt_buf), - (unsigned long long)part->nr_sects >> 1, - disk_name(disk, part->partno, name_buf), + (unsigned long long)part_nr_sects_read(part) >> 1, + disk_name(disk, part->partno, name_buf), uuid_buf); if (is_part0) { if (disk->driverfs_dev != NULL && @@ -862,7 +862,7 @@ static int show_partition(struct seq_file *seqf, void *v) while ((part = disk_part_iter_next(&piter))) seq_printf(seqf, "%4d %7d %10llu %s\n", MAJOR(part_devt(part)), MINOR(part_devt(part)), - (unsigned long long)part->nr_sects >> 1, + (unsigned long long)part_nr_sects_read(part) >> 1, disk_name(sgp, part->partno, buf)); disk_part_iter_exit(&piter); @@ -1268,6 +1268,16 @@ struct gendisk *alloc_disk_node(int minors, int node_id) } disk->part_tbl->part[0] = &disk->part0; + /* + * set_capacity() and get_capacity() currently don't use + * seqcounter to read/update the part0->nr_sects. Still init + * the counter as we can read the sectors in the IO submission + * path using sequence counters. + * + * TODO: Ideally set_capacity() and get_capacity() should be + * converted to make use of bd_mutex and sequence counters. + */
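/*
 * Sketch of the seqcount pairing that the new part_nr_sects_read() and
 * part_nr_sects_write() helpers (added elsewhere in this series) are
 * built on. Simplified: the real helpers compile down to plain accesses
 * on configurations that do not need the seqcount, and writers are
 * additionally serialized by bd_mutex.
 */
static sector_t nr_sects_read_sketch(struct hd_struct *part)
{
	sector_t nr;
	unsigned seq;

	do {
		seq = read_seqcount_begin(&part->nr_sects_seq);
		nr = part->nr_sects;
	} while (read_seqcount_retry(&part->nr_sects_seq, seq));
	return nr;
}

static void nr_sects_write_sketch(struct hd_struct *part, sector_t size)
{
	write_seqcount_begin(&part->nr_sects_seq);	/* caller holds bd_mutex */
	part->nr_sects = size;
	write_seqcount_end(&part->nr_sects_seq);
}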
+ seqcount_init(&disk->part0.nr_sects_seq); hd_ref_init(&disk->part0); disk->minors = minors; diff --git a/block/ioctl.c b/block/ioctl.c index ba15b2dbfb9..4476e0e85d1 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -13,7 +13,7 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user { struct block_device *bdevp; struct gendisk *disk; - struct hd_struct *part; + struct hd_struct *part, *lpart; struct blkpg_ioctl_arg a; struct blkpg_partition p; struct disk_part_iter piter; @@ -36,8 +36,8 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user case BLKPG_ADD_PARTITION: start = p.start >> 9; length = p.length >> 9; - /* check for fit in a hd_struct */ - if (sizeof(sector_t) == sizeof(long) && + /* check for fit in a hd_struct */ + if (sizeof(sector_t) == sizeof(long) && sizeof(long long) > sizeof(long)) { long pstart = start, plength = length; if (pstart != start || plength != length @@ -92,6 +92,59 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user bdput(bdevp); return 0; + case BLKPG_RESIZE_PARTITION: + start = p.start >> 9; + /* new length of partition in bytes, converted to sectors */ + length = p.length >> 9; + /* check for fit in a hd_struct */ + if (sizeof(sector_t) == sizeof(long) && + sizeof(long long) > sizeof(long)) { + long pstart = start, plength = length; + if (pstart != start || plength != length + || pstart < 0 || plength < 0) + return -EINVAL; + } + part = disk_get_part(disk, partno); + if (!part) + return -ENXIO; + bdevp = bdget(part_devt(part)); + if (!bdevp) { + disk_put_part(part); + return -ENOMEM; + } + mutex_lock(&bdevp->bd_mutex); + mutex_lock_nested(&bdev->bd_mutex, 1); + if (start != part->start_sect) { + mutex_unlock(&bdevp->bd_mutex); + mutex_unlock(&bdev->bd_mutex); + bdput(bdevp); + disk_put_part(part); + return -EINVAL; + }
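/*
 * Userspace companion sketch for the BLKPG_RESIZE_PARTITION handler
 * added in this hunk (illustrative; needs a linux/blkpg.h that defines
 * BLKPG_RESIZE_PARTITION). start must match the partition's current
 * byte offset, only the length may change, and the kernel returns
 * EBUSY if the new size would overlap a neighbour (see the overlap
 * check just below).
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/blkpg.h>

static int resize_partition(const char *disk, int partno,
			    long long start_bytes, long long new_len_bytes)
{
	struct blkpg_partition part = {
		.start = start_bytes,		/* unchanged byte offset */
		.length = new_len_bytes,	/* new size in bytes */
		.pno = partno,
	};
	struct blkpg_ioctl_arg arg = {
		.op = BLKPG_RESIZE_PARTITION,
		.datalen = sizeof(part),
		.data = &part,
	};
	int fd = open(disk, O_RDWR);
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, BLKPG, &arg);
	close(fd);
	return ret;
}

+ /* overlap?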
*/ + disk_part_iter_init(&piter, disk, + DISK_PITER_INCL_EMPTY); + while ((lpart = disk_part_iter_next(&piter))) { + if (lpart->partno != partno && + !(start + length <= lpart->start_sect || + start >= lpart->start_sect + lpart->nr_sects) + ) { + disk_part_iter_exit(&piter); + mutex_unlock(&bdevp->bd_mutex); + mutex_unlock(&bdev->bd_mutex); + bdput(bdevp); + disk_put_part(part); + return -EBUSY; + } + } + disk_part_iter_exit(&piter); + part_nr_sects_write(part, (sector_t)length); + i_size_write(bdevp->bd_inode, p.length); + mutex_unlock(&bdevp->bd_mutex); + mutex_unlock(&bdev->bd_mutex); + bdput(bdevp); + disk_put_part(part); + return 0; default: return -EINVAL; } diff --git a/block/partition-generic.c b/block/partition-generic.c index 6df5d6928a4..f1d14519cc0 100644 --- a/block/partition-generic.c +++ b/block/partition-generic.c @@ -84,7 +84,7 @@ ssize_t part_size_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hd_struct *p = dev_to_part(dev); - return sprintf(buf, "%llu\n",(unsigned long long)p->nr_sects); + return sprintf(buf, "%llu\n",(unsigned long long)part_nr_sects_read(p)); } static ssize_t part_ro_show(struct device *dev, @@ -294,6 +294,8 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno, err = -ENOMEM; goto out_free; } + + seqcount_init(&p->nr_sects_seq); pdev = part_to_dev(p); p->start_sect = start; diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index d91a3a0b232..deb4a456cf8 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c @@ -156,9 +156,7 @@ static int dev_mkdir(const char *name, umode_t mode) if (!err) /* mark as kernel-created inode */ dentry->d_inode->i_private = &thread; - dput(dentry); - mutex_unlock(&path.dentry->d_inode->i_mutex); - path_put(&path); + done_path_create(&path, dentry); return err; } @@ -218,10 +216,7 @@ static int handle_create(const char *nodename, umode_t mode, struct device *dev) /* mark as kernel-created inode */ dentry->d_inode->i_private = &thread; } - dput(dentry); - - mutex_unlock(&path.dentry->d_inode->i_mutex); - path_put(&path); + done_path_create(&path, dentry); return err; } diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index e54e31b02b8..3fbef018ce5 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c @@ -411,7 +411,7 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused) + mdev->ldev->md.al_offset + mdev->al_tr_pos; if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) - drbd_chk_io_error(mdev, 1, true); + drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR); if (++mdev->al_tr_pos > div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT)) @@ -876,7 +876,11 @@ int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size, unsigned int enr, count = 0; struct lc_element *e; - if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) { + /* this should be an empty REQ_FLUSH */ + if (size == 0) + return 0; + + if (size < 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) { dev_err(DEV, "sector: %llus, size: %d\n", (unsigned long long)sector, size); return 0; diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index fcb956bb4b4..ba91b408aba 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -1096,7 +1096,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w if (ctx->error) { dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n"); - 
drbd_chk_io_error(mdev, 1, true); + drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR); err = -EIO; /* ctx->error ? */ } @@ -1212,7 +1212,7 @@ int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(loc wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done); if (ctx->error) - drbd_chk_io_error(mdev, 1, true); + drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR); /* that should force detach, so the in memory bitmap will be * gone in a moment as well. */ diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 02f013a073a..b2ca143d005 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -813,7 +813,6 @@ enum { SIGNAL_ASENDER, /* whether asender wants to be interrupted */ SEND_PING, /* whether asender should send a ping asap */ - UNPLUG_QUEUED, /* only relevant with kernel 2.4 */ UNPLUG_REMOTE, /* sending a "UnplugRemote" could help */ MD_DIRTY, /* current uuids and flags not yet on disk */ DISCARD_CONCURRENT, /* Set on one node, cleared on the peer! */ @@ -824,7 +823,6 @@ enum { CRASHED_PRIMARY, /* This node was a crashed primary. * Gets cleared when the state.conn * goes into C_CONNECTED state. */ - NO_BARRIER_SUPP, /* underlying block device doesn't implement barriers */ CONSIDER_RESYNC, MD_NO_FUA, /* Users wants us to not use FUA/FLUSH on meta data dev */ @@ -834,6 +832,7 @@ enum { BITMAP_IO_QUEUED, /* Started bitmap IO */ GO_DISKLESS, /* Disk is being detached, on io-error or admin request. */ WAS_IO_ERROR, /* Local disk failed returned IO error */ + FORCE_DETACH, /* Force-detach from local disk, aborting any pending local IO */ RESYNC_AFTER_NEG, /* Resync after online grow after the attach&negotiate finished. */ NET_CONGESTED, /* The data socket is congested */ @@ -851,6 +850,13 @@ enum { AL_SUSPENDED, /* Activity logging is currently suspended. */ AHEAD_TO_SYNC_SOURCE, /* Ahead -> SyncSource queued */ STATE_SENT, /* Do not change state/UUIDs while this is set */ + + CALLBACK_PENDING, /* Whether we have a call_usermodehelper(, UMH_WAIT_PROC) + * pending, from drbd worker context. + * If set, bdi_write_congested() returns true, + * so shrink_page_list() would not recurse into, + * and potentially deadlock on, this drbd worker. + */ }; struct drbd_bitmap; /* opaque for drbd_conf */ @@ -1130,8 +1136,8 @@ struct drbd_conf { int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */ int rs_planed; /* resync sectors already planned */ atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */ - int peer_max_bio_size; - int local_max_bio_size; + unsigned int peer_max_bio_size; + unsigned int local_max_bio_size; }; static inline struct drbd_conf *minor_to_mdev(unsigned int minor) @@ -1435,9 +1441,9 @@ struct bm_extent { * hash table. 
*/ #define HT_SHIFT 8 #define DRBD_MAX_BIO_SIZE (1U<<(9+HT_SHIFT)) -#define DRBD_MAX_BIO_SIZE_SAFE (1 << 12) /* Works always = 4k */ +#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12) /* Works always = 4k */ -#define DRBD_MAX_SIZE_H80_PACKET (1 << 15) /* The old header only allows packets up to 32Kib data */ +#define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* The old header only allows packets up to 32Kib data */ /* Number of elements in the app_reads_hash */ #define APP_R_HSIZE 15 @@ -1840,12 +1846,20 @@ static inline int drbd_request_state(struct drbd_conf *mdev, return _drbd_request_state(mdev, mask, val, CS_VERBOSE + CS_ORDERED); } +enum drbd_force_detach_flags { + DRBD_IO_ERROR, + DRBD_META_IO_ERROR, + DRBD_FORCE_DETACH, +}; + #define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__) -static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach, const char *where) +static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, + enum drbd_force_detach_flags forcedetach, + const char *where) { switch (mdev->ldev->dc.on_io_error) { case EP_PASS_ON: - if (!forcedetach) { + if (forcedetach == DRBD_IO_ERROR) { if (__ratelimit(&drbd_ratelimit_state)) dev_err(DEV, "Local IO failed in %s.\n", where); if (mdev->state.disk > D_INCONSISTENT) @@ -1856,6 +1870,8 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach, case EP_DETACH: case EP_CALL_HELPER: set_bit(WAS_IO_ERROR, &mdev->flags); + if (forcedetach == DRBD_FORCE_DETACH) + set_bit(FORCE_DETACH, &mdev->flags); if (mdev->state.disk > D_FAILED) { _drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL); dev_err(DEV, @@ -1875,7 +1891,7 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach, */ #define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__) static inline void drbd_chk_io_error_(struct drbd_conf *mdev, - int error, int forcedetach, const char *where) + int error, enum drbd_force_detach_flags forcedetach, const char *where) { if (error) { unsigned long flags; @@ -2405,15 +2421,17 @@ static inline void dec_ap_bio(struct drbd_conf *mdev) int ap_bio = atomic_dec_return(&mdev->ap_bio_cnt); D_ASSERT(ap_bio >= 0); + + if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) { + if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags)) + drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w); + } + /* this currently does wake_up for every dec_ap_bio! * maybe rather introduce some type of hysteresis? * e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */ if (ap_bio < mxb) wake_up(&mdev->misc_wait); - if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) { - if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags)) - drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w); - } } static inline int drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val) diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 920ede2829d..2e0e7fc1dbb 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -1514,6 +1514,13 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, /* Do not change the order of the if above and the two below... */ if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */ + /* we probably will start a resync soon. + * make sure those things are properly reset. 
*/ + mdev->rs_total = 0; + mdev->rs_failed = 0; + atomic_set(&mdev->rs_pending_cnt, 0); + drbd_rs_cancel_all(mdev); + drbd_send_uuids(mdev); drbd_send_state(mdev, ns); } @@ -1630,9 +1637,24 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, eh = mdev->ldev->dc.on_io_error; was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags); - /* Immediately allow completion of all application IO, that waits - for completion from the local disk. */ - tl_abort_disk_io(mdev); + if (was_io_error && eh == EP_CALL_HELPER) + drbd_khelper(mdev, "local-io-error"); + + /* Immediately allow completion of all application IO, + * that waits for completion from the local disk, + * if this was a force-detach due to disk_timeout + * or administrator request (drbdsetup detach --force). + * Do NOT abort otherwise. + * Aborting local requests may cause serious problems, + * if requests are completed to upper layers already, + * and then later the already submitted local bio completes. + * This can cause DMA into former bio pages that meanwhile + * have been re-used for other things. + * So aborting local requests may cause crashes, + * or even worse, silent data corruption. + */ + if (test_and_clear_bit(FORCE_DETACH, &mdev->flags)) + tl_abort_disk_io(mdev); /* current state still has to be D_FAILED, * there is only one way out: to D_DISKLESS, @@ -1653,9 +1675,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, drbd_md_sync(mdev); } put_ldev(mdev); - - if (was_io_error && eh == EP_CALL_HELPER) - drbd_khelper(mdev, "local-io-error"); } /* second half of local IO error, failure to attach, @@ -1669,10 +1688,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, "ASSERT FAILED: disk is %s while going diskless\n", drbd_disk_str(mdev->state.disk)); - mdev->rs_total = 0; - mdev->rs_failed = 0; - atomic_set(&mdev->rs_pending_cnt, 0); - if (ns.conn >= C_CONNECTED) drbd_send_state(mdev, ns); @@ -2194,7 +2209,8 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl { struct p_sizes p; sector_t d_size, u_size; - int q_order_type, max_bio_size; + int q_order_type; + unsigned int max_bio_size; int ok; if (get_ldev_if_state(mdev, D_NEGOTIATING)) { @@ -2203,7 +2219,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl u_size = mdev->ldev->dc.disk_size; q_order_type = drbd_queue_order_type(mdev); max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9; - max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE); + max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE); put_ldev(mdev); } else { d_size = 0; @@ -2214,7 +2230,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl /* Never allow old drbd (up to 8.3.7) to see more than 32KiB */ if (mdev->agreed_pro_version <= 94) - max_bio_size = min_t(int, max_bio_size, DRBD_MAX_SIZE_H80_PACKET); + max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET); p.d_size = cpu_to_be64(d_size); p.u_size = cpu_to_be64(u_size); @@ -3541,6 +3557,22 @@ static int drbd_congested(void *congested_data, int bdi_bits) goto out; } + if (test_bit(CALLBACK_PENDING, &mdev->flags)) { + r |= (1 << BDI_async_congested); + /* Without good local data, we would need to read from remote, + * and that would need the worker thread as well, which is + * currently blocked waiting for that usermode helper to + * finish. 
+ */ + if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) + r |= (1 << BDI_sync_congested); + else + put_ldev(mdev); + r &= bdi_bits; + reason = 'c'; + goto out; + } + if (get_ldev(mdev)) { q = bdev_get_queue(mdev->ldev->backing_bdev); r = bdi_congested(&q->backing_dev_info, bdi_bits); @@ -3604,6 +3636,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor) q->backing_dev_info.congested_data = mdev; blk_queue_make_request(q, drbd_make_request); + blk_queue_flush(q, REQ_FLUSH | REQ_FUA); /* Setting the max_hw_sectors to an odd value of 8kibyte here. This triggers a max_bio_size message upon first attach or connect */ blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8); @@ -3870,7 +3903,7 @@ void drbd_md_sync(struct drbd_conf *mdev) if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) { /* this was a try anyways ... */ dev_err(DEV, "meta data update failed!\n"); - drbd_chk_io_error(mdev, 1, true); + drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR); } /* Update mdev->ldev->md.la_size_sect, @@ -3950,9 +3983,9 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev) spin_lock_irq(&mdev->req_lock); if (mdev->state.conn < C_CONNECTED) { - int peer; + unsigned int peer; peer = be32_to_cpu(buffer->la_peer_max_bio_size); - peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE); + peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE); mdev->peer_max_bio_size = peer; } spin_unlock_irq(&mdev->req_lock); diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 6d4de6a72e8..fb9dce8daa2 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -147,6 +147,9 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd) char *argv[] = {usermode_helper, cmd, mb, NULL }; int ret; + if (current == mdev->worker.task) + set_bit(CALLBACK_PENDING, &mdev->flags); + snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev)); if (get_net_conf(mdev)) { @@ -189,6 +192,9 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd) usermode_helper, cmd, mb, (ret >> 8) & 0xff, ret); + if (current == mdev->worker.task) + clear_bit(CALLBACK_PENDING, &mdev->flags); + if (ret < 0) /* Ignore any ERRNOs we got. */ ret = 0; @@ -795,8 +801,8 @@ static int drbd_check_al_size(struct drbd_conf *mdev) static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size) { struct request_queue * const q = mdev->rq_queue; - int max_hw_sectors = max_bio_size >> 9; - int max_segments = 0; + unsigned int max_hw_sectors = max_bio_size >> 9; + unsigned int max_segments = 0; if (get_ldev_if_state(mdev, D_ATTACHING)) { struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue; @@ -829,7 +835,7 @@ static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev) { - int now, new, local, peer; + unsigned int now, new, local, peer; now = queue_max_hw_sectors(mdev->rq_queue) << 9; local = mdev->local_max_bio_size; /* The last known value, if any, from volatile memory */ @@ -840,13 +846,14 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev) mdev->local_max_bio_size = local; put_ldev(mdev); } + local = min(local, DRBD_MAX_BIO_SIZE); /* We may ignore peer limits if the peer is modern enough.
Because from 8.3.8 onwards the peer can use multiple BIOs for a single peer_request */ if (mdev->state.conn >= C_CONNECTED) { if (mdev->agreed_pro_version < 94) { - peer = min_t(int, mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET); + peer = min(mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET); /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */ } else if (mdev->agreed_pro_version == 94) peer = DRBD_MAX_SIZE_H80_PACKET; @@ -854,10 +861,10 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev) peer = DRBD_MAX_BIO_SIZE; } - new = min_t(int, local, peer); + new = min(local, peer); if (mdev->state.role == R_PRIMARY && new < now) - dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now); + dev_err(DEV, "ASSERT FAILED new < now; (%u < %u)\n", new, now); if (new != now) dev_info(DEV, "max BIO size = %u\n", new); @@ -950,6 +957,14 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp * to realize a "hot spare" feature (not that I'd recommend that) */ wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt)); + /* make sure there is no leftover from previous force-detach attempts */ + clear_bit(FORCE_DETACH, &mdev->flags); + + /* and no leftover from previously aborted resync or verify, either */ + mdev->rs_total = 0; + mdev->rs_failed = 0; + atomic_set(&mdev->rs_pending_cnt, 0); + /* allocation not in the IO path, cqueue thread context */ nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL); if (!nbc) { @@ -1345,6 +1360,7 @@ static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, } if (dt.detach_force) { + set_bit(FORCE_DETACH, &mdev->flags); drbd_force_state(mdev, NS(disk, D_FAILED)); reply->ret_code = SS_SUCCESS; goto out; @@ -1962,9 +1978,11 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl int retcode; /* If there is still bitmap IO pending, probably because of a previous - * resync just being finished, wait for it before requesting a new resync. */ + * resync just being finished, wait for it before requesting a new resync. + * Also wait for its after_state_ch(). */ drbd_suspend_io(mdev); wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); + drbd_flush_workqueue(mdev); retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED); @@ -2003,9 +2021,11 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re int retcode; /* If there is still bitmap IO pending, probably because of a previous - * resync just being finished, wait for it before requesting a new resync. */ + * resync just being finished, wait for it before requesting a new resync. + * Also wait for its after_state_ch().
*/ drbd_suspend_io(mdev); wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); + drbd_flush_workqueue(mdev); retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED); diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c index 869bada2ed0..5496104f90b 100644 --- a/drivers/block/drbd/drbd_proc.c +++ b/drivers/block/drbd/drbd_proc.c @@ -245,6 +245,9 @@ static int drbd_seq_show(struct seq_file *seq, void *v) mdev->state.role == R_SECONDARY) { seq_printf(seq, "%2d: cs:Unconfigured\n", i); } else { + /* reset mdev->congestion_reason */ + bdi_rw_congested(&mdev->rq_queue->backing_dev_info); + seq_printf(seq, "%2d: cs:%s ro:%s/%s ds:%s/%s %c %c%c%c%c%c%c\n" " ns:%u nr:%u dw:%u dr:%u al:%u bm:%u " diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index ea4836e0ae9..c74ca2df743 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -277,6 +277,9 @@ static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net) atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use; int i; + if (page == NULL) + return; + if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count) i = page_chain_free(page); else { @@ -316,7 +319,7 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev, gfp_t gfp_mask) __must_hold(local) { struct drbd_epoch_entry *e; - struct page *page; + struct page *page = NULL; unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT; if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE)) @@ -329,9 +332,11 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev, return NULL; } - page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT)); - if (!page) - goto fail; + if (data_size) { + page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT)); + if (!page) + goto fail; + } INIT_HLIST_NODE(&e->collision); e->epoch = NULL; @@ -1270,7 +1275,6 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __ data_size -= dgs; - ERR_IF(data_size == 0) return NULL; ERR_IF(data_size & 0x1ff) return NULL; ERR_IF(data_size > DRBD_MAX_BIO_SIZE) return NULL; @@ -1291,6 +1295,9 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __ if (!e) return NULL; + if (!data_size) + return e; + ds = data_size; page = e->pages; page_chain_for_each(page) { @@ -1715,6 +1722,10 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned dp_flags = be32_to_cpu(p->dp_flags); rw |= wire_flags_to_bio(mdev, dp_flags); + if (e->pages == NULL) { + D_ASSERT(e->size == 0); + D_ASSERT(dp_flags & DP_FLUSH); + } if (dp_flags & DP_MAY_SET_IN_SYNC) e->flags |= EE_MAY_SET_IN_SYNC; @@ -3801,11 +3812,18 @@ void drbd_free_tl_hash(struct drbd_conf *mdev) mdev->ee_hash = NULL; mdev->ee_hash_s = 0; - /* paranoia code */ - for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++) - if (h->first) - dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n", - (int)(h - mdev->tl_hash), h->first); + /* We may not have had the chance to wait for all locally pending + * application requests. The hlist_add_fake() prevents access after + * free on master bio completion. 
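/*
 * Sketch of why the hlist_add_fake() trick described here is safe
 * (illustrative): it makes a node appear hashed while pointing only at
 * itself, so a later hlist_del() on request completion touches no
 * freed table memory.
 */
static void detach_from_dying_table_sketch(struct hlist_node *n)
{
	hlist_del_init(n);	/* unlink from the soon-to-be-freed table */
	hlist_add_fake(n);	/* n->pprev = &n->next; self-referential */
	/* a later hlist_del(n) is now safe even after the table is gone */
}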
*/ + for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++) { + struct drbd_request *req; + struct hlist_node *pos, *n; + hlist_for_each_entry_safe(req, pos, n, h, collision) { + hlist_del_init(&req->collision); + hlist_add_fake(&req->collision); + } + } + kfree(mdev->tl_hash); mdev->tl_hash = NULL; mdev->tl_hash_s = 0; diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 8e93a6ac9bb..910335c3092 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -455,7 +455,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, req->rq_state |= RQ_LOCAL_COMPLETED; req->rq_state &= ~RQ_LOCAL_PENDING; - __drbd_chk_io_error(mdev, false); + __drbd_chk_io_error(mdev, DRBD_IO_ERROR); _req_may_be_done_not_susp(req, m); break; @@ -477,7 +477,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, break; } - __drbd_chk_io_error(mdev, false); + __drbd_chk_io_error(mdev, DRBD_IO_ERROR); goto_queue_for_net_read: @@ -1111,13 +1111,12 @@ void drbd_make_request(struct request_queue *q, struct bio *bio) /* * what we "blindly" assume: */ - D_ASSERT(bio->bi_size > 0); D_ASSERT((bio->bi_size & 0x1ff) == 0); /* to make some things easier, force alignment of requests within the * granularity of our hash tables */ s_enr = bio->bi_sector >> HT_SHIFT; - e_enr = (bio->bi_sector+(bio->bi_size>>9)-1) >> HT_SHIFT; + e_enr = bio->bi_size ? (bio->bi_sector+(bio->bi_size>>9)-1) >> HT_SHIFT : s_enr; if (likely(s_enr == e_enr)) { do { @@ -1275,7 +1274,7 @@ void request_timer_fn(unsigned long data) time_after(now, req->start_time + dt) && !time_in_range(now, mdev->last_reattach_jif, mdev->last_reattach_jif + dt)) { dev_warn(DEV, "Local backing device failed to meet the disk-timeout\n"); - __drbd_chk_io_error(mdev, 1); + __drbd_chk_io_error(mdev, DRBD_FORCE_DETACH); } nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et; spin_unlock_irq(&mdev->req_lock); diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 620c70ff223..6bce2cc179d 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -111,7 +111,7 @@ void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local) if (list_empty(&mdev->read_ee)) wake_up(&mdev->ee_wait); if (test_bit(__EE_WAS_ERROR, &e->flags)) - __drbd_chk_io_error(mdev, false); + __drbd_chk_io_error(mdev, DRBD_IO_ERROR); spin_unlock_irqrestore(&mdev->req_lock, flags); drbd_queue_work(&mdev->data.work, &e->w); @@ -154,7 +154,7 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(lo : list_empty(&mdev->active_ee); if (test_bit(__EE_WAS_ERROR, &e->flags)) - __drbd_chk_io_error(mdev, false); + __drbd_chk_io_error(mdev, DRBD_IO_ERROR); spin_unlock_irqrestore(&mdev->req_lock, flags); if (is_syncer_req) @@ -1501,14 +1501,6 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) return; } - if (mdev->state.conn < C_AHEAD) { - /* In case a previous resync run was aborted by an IO error/detach on the peer. */ - drbd_rs_cancel_all(mdev); - /* This should be done when we abort the resync. We definitely do not - want to have this for connections going back and forth between - Ahead/Behind and SyncSource/SyncTarget */ - } - if (side == C_SYNC_TARGET) { /* Since application IO was locked out during C_WF_BITMAP_T and C_WF_SYNC_UUID we are still unmodified. 
Before going to C_SYNC_TARGET diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 553f43a9095..a7d6347aaa7 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -191,6 +191,7 @@ static int print_unex = 1; #include <linux/mutex.h> #include <linux/io.h> #include <linux/uaccess.h> +#include <linux/async.h> /* * PS/2 floppies have much slower step rates than regular floppies. @@ -2516,8 +2517,7 @@ static int make_raw_rw_request(void) set_fdc((long)current_req->rq_disk->private_data); raw_cmd = &default_raw_cmd; - raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_DISK | FD_RAW_NEED_DISK | - FD_RAW_NEED_SEEK; + raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_DISK | FD_RAW_NEED_SEEK; raw_cmd->cmd_count = NR_RW; if (rq_data_dir(current_req) == READ) { raw_cmd->flags |= FD_RAW_READ; @@ -4123,7 +4123,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data) return get_disk(disks[drive]); } -static int __init floppy_init(void) +static int __init do_floppy_init(void) { int i, unit, drive; int err, dr; @@ -4338,6 +4338,24 @@ out_put_disk: return err; } +#ifndef MODULE +static __init void floppy_async_init(void *data, async_cookie_t cookie) +{ + do_floppy_init(); +} +#endif + +static int __init floppy_init(void) +{ +#ifdef MODULE + return do_floppy_init(); +#else + /* Don't hold up the bootup by the floppy initialization */ + async_schedule(floppy_async_init, NULL); + return 0; +#endif +} + static const struct io_region { int offset; int size; diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 76bc96fd01c..d07c9f7fded 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -485,7 +485,7 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req) nbd_end_request(req); } else { spin_lock(&nbd->queue_lock); - list_add(&req->queuelist, &nbd->queue_head); + list_add_tail(&req->queuelist, &nbd->queue_head); spin_unlock(&nbd->queue_lock); } diff --git a/drivers/block/umem.c b/drivers/block/umem.c index 9a72277a31d..eb0d8216f55 100644 --- a/drivers/block/umem.c +++ b/drivers/block/umem.c @@ -513,42 +513,19 @@ static void process_page(unsigned long data) } } -struct mm_plug_cb { - struct blk_plug_cb cb; - struct cardinfo *card; -}; - -static void mm_unplug(struct blk_plug_cb *cb) +static void mm_unplug(struct blk_plug_cb *cb, bool from_schedule) { - struct mm_plug_cb *mmcb = container_of(cb, struct mm_plug_cb, cb); + struct cardinfo *card = cb->data; - spin_lock_irq(&mmcb->card->lock); - activate(mmcb->card); - spin_unlock_irq(&mmcb->card->lock); - kfree(mmcb); + spin_lock_irq(&card->lock); + activate(card); + spin_unlock_irq(&card->lock); + kfree(cb); } static int mm_check_plugged(struct cardinfo *card) { - struct blk_plug *plug = current->plug; - struct mm_plug_cb *mmcb; - - if (!plug) - return 0; - - list_for_each_entry(mmcb, &plug->cb_list, cb.list) { - if (mmcb->cb.callback == mm_unplug && mmcb->card == card) - return 1; - } - /* Not currently on the callback list */ - mmcb = kmalloc(sizeof(*mmcb), GFP_ATOMIC); - if (!mmcb) - return 0; - - mmcb->card = card; - mmcb->cb.callback = mm_unplug; - list_add(&mmcb->cb.list, &plug->cb_list); - return 1; + return !!blk_check_plugged(mm_unplug, card, sizeof(struct blk_plug_cb)); } static void mm_make_request(struct request_queue *q, struct bio *bio) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index d45cf1bcbde..d06ea2950dd 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -53,6 +53,7 @@ config AMBA_PL08X bool "ARM PrimeCell PL080 or PL081 support" depends on ARM_AMBA && 
EXPERIMENTAL select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS help Platform has a PL08x DMAC device which can provide DMA engine support @@ -269,6 +270,7 @@ config DMA_SA11X0 tristate "SA-11x0 DMA support" depends on ARCH_SA1100 select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS help Support the DMA engine found on Intel StrongARM SA-1100 and SA-1110 SoCs. This DMA engine can only be used with on-chip @@ -284,9 +286,18 @@ config MMP_TDMA Say Y here if you enabled MMP ADMA, otherwise say N. +config DMA_OMAP + tristate "OMAP DMA support" + depends on ARCH_OMAP + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + config DMA_ENGINE bool +config DMA_VIRTUAL_CHANNELS + tristate + comment "DMA Clients" depends on DMA_ENGINE diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 640356add0a..4cf6b128ab9 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -2,6 +2,7 @@ ccflags-$(CONFIG_DMADEVICES_DEBUG) := -DDEBUG ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG obj-$(CONFIG_DMA_ENGINE) += dmaengine.o +obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o obj-$(CONFIG_NET_DMA) += iovlock.o obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o obj-$(CONFIG_DMATEST) += dmatest.o @@ -30,3 +31,4 @@ obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o +obj-$(CONFIG_DMA_OMAP) += omap-dma.o diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 49ecbbb8932..6fbeebb9486 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -86,10 +86,12 @@ #include <asm/hardware/pl080.h> #include "dmaengine.h" +#include "virt-dma.h" #define DRIVER_NAME "pl08xdmac" static struct amba_driver pl08x_amba_driver; +struct pl08x_driver_data; /** * struct vendor_data - vendor-specific config parameters for PL08x derivatives @@ -119,6 +121,123 @@ struct pl08x_lli { }; /** + * struct pl08x_bus_data - information of source or destination + * busses for a transfer + * @addr: current address + * @maxwidth: the maximum width of a transfer on this bus + * @buswidth: the width of this bus in bytes: 1, 2 or 4 + */ +struct pl08x_bus_data { + dma_addr_t addr; + u8 maxwidth; + u8 buswidth; +}; + +/** + * struct pl08x_phy_chan - holder for the physical channels + * @id: physical index to this channel + * @lock: a lock to use when altering an instance of this struct + * @serving: the virtual channel currently being served by this physical + * channel + * @locked: channel unavailable for the system, e.g. dedicated to secure + * world + */ +struct pl08x_phy_chan { + unsigned int id; + void __iomem *base; + spinlock_t lock; + struct pl08x_dma_chan *serving; + bool locked; +}; + +/** + * struct pl08x_sg - structure containing data per sg + * @src_addr: src address of sg + * @dst_addr: dst address of sg + * @len: transfer len in bytes + * @node: node for txd's dsg_list + */ +struct pl08x_sg { + dma_addr_t src_addr; + dma_addr_t dst_addr; + size_t len; + struct list_head node; +}; + +/** + * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor + * @vd: virtual DMA descriptor + * @dsg_list: list of children sg's + * @llis_bus: DMA memory address (physical) start for the LLIs + * @llis_va: virtual memory address start for the LLIs + * @cctl: control reg values for current txd + * @ccfg: config reg values for current txd + * @done: this marks completed descriptors, which should not have their + * mux released. 
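The structures below embed the new generic virt-dma objects (struct virt_dma_desc, struct virt_dma_chan) instead of carrying private descriptor and channel bookkeeping. A minimal, self-contained sketch of the embedding pattern this conversion relies on; virt_desc, driver_txd and to_driver_txd are illustrative stand-ins, not the kernel's names:

#include <stddef.h>
#include <stdio.h>

/* Same trick as the kernel's container_of() */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct virt_desc {              /* stand-in for struct virt_dma_desc */
        int cookie;
};

struct driver_txd {             /* stand-in for struct pl08x_txd */
        struct virt_desc vd;    /* embedded, never a pointer */
        unsigned long ccfg;
};

/* Recover the wrapper from the generic descriptor, as to_pl08x_txd() does */
static struct driver_txd *to_driver_txd(struct virt_desc *vd)
{
        return container_of(vd, struct driver_txd, vd);
}

int main(void)
{
        struct driver_txd txd = { .vd = { .cookie = 42 } };

        /* The core layer only ever sees &txd.vd; the driver gets txd back */
        printf("cookie=%d\n", to_driver_txd(&txd.vd)->vd.cookie);
        return 0;
}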
+ */ +struct pl08x_txd { + struct virt_dma_desc vd; + struct list_head dsg_list; + dma_addr_t llis_bus; + struct pl08x_lli *llis_va; + /* Default cctl value for LLIs */ + u32 cctl; + /* + * Settings to be put into the physical channel when we + * trigger this txd. Other registers are in llis_va[0]. + */ + u32 ccfg; + bool done; +}; + +/** + * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel + * states + * @PL08X_CHAN_IDLE: the channel is idle + * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport + * channel and is running a transfer on it + * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport + * channel, but the transfer is currently paused + * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport + * channel to become available (only pertains to memcpy channels) + */ +enum pl08x_dma_chan_state { + PL08X_CHAN_IDLE, + PL08X_CHAN_RUNNING, + PL08X_CHAN_PAUSED, + PL08X_CHAN_WAITING, +}; + +/** + * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel + * @vc: wrapped virtual channel + * @phychan: the physical channel utilized by this channel, if there is one + * @name: name of channel + * @cd: channel platform data + * @runtime_addr: address for RX/TX according to the runtime config + * @at: active transaction on this channel + * @lock: a lock for this channel data + * @host: a pointer to the host (internal use) + * @state: whether the channel is idle, paused, running etc + * @slave: whether this channel is a device (slave) or for memcpy + * @signal: the physical DMA request signal which this channel is using + * @mux_use: count of descriptors using this DMA request signal setting + */ +struct pl08x_dma_chan { + struct virt_dma_chan vc; + struct pl08x_phy_chan *phychan; + const char *name; + const struct pl08x_channel_data *cd; + struct dma_slave_config cfg; + struct pl08x_txd *at; + struct pl08x_driver_data *host; + enum pl08x_dma_chan_state state; + bool slave; + int signal; + unsigned mux_use; +}; + +/** * struct pl08x_driver_data - the local state holder for the PL08x * @slave: slave engine for this instance * @memcpy: memcpy engine for this instance @@ -128,7 +247,6 @@ struct pl08x_lli { * @pd: platform data passed in from the platform/machine * @phy_chans: array of data for the physical channels * @pool: a pool for the LLI descriptors - * @pool_ctr: counter of LLIs in the pool * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI * fetches * @mem_buses: set to indicate memory transfers on AHB2. @@ -143,10 +261,8 @@ struct pl08x_driver_data { struct pl08x_platform_data *pd; struct pl08x_phy_chan *phy_chans; struct dma_pool *pool; - int pool_ctr; u8 lli_buses; u8 mem_buses; - spinlock_t lock; }; /* @@ -162,12 +278,51 @@ struct pl08x_driver_data { static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan) { - return container_of(chan, struct pl08x_dma_chan, chan); + return container_of(chan, struct pl08x_dma_chan, vc.chan); } static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx) { - return container_of(tx, struct pl08x_txd, tx); + return container_of(tx, struct pl08x_txd, vd.tx); +} + +/* + * Mux handling. + * + * This gives us the DMA request input to the PL08x primecell which the + * peripheral described by the channel data will be routed to, possibly + * via a board/SoC specific external MUX. One important point to note + * here is that this does not depend on the physical channel.
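The contract implemented by the two helpers that follow is a plain reference count on the DMA request signal: the first in-flight descriptor routes the signal, the last one releases it. A hedged stand-alone model of that contract; mux, mux_get, mux_put, route and unroute are hypothetical names:

struct mux {
        unsigned int use;       /* in-flight descriptors using the signal */
        int signal;             /* routed request line, -1 when unrouted */
};

/* First user routes the signal; on failure the count is rolled back */
static int mux_get(struct mux *m, int (*route)(void))
{
        if (m->use++ == 0) {
                int ret = route();      /* e.g. pd->get_signal() */
                if (ret < 0) {
                        m->use = 0;
                        return ret;
                }
                m->signal = ret;
        }
        return 0;
}

/* Last user unroutes; every successful mux_get() must be paired */
static void mux_put(struct mux *m, void (*unroute)(int))
{
        if (m->signal >= 0 && --m->use == 0) {
                unroute(m->signal);     /* e.g. pd->put_signal() */
                m->signal = -1;
        }
}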
+ */ +static int pl08x_request_mux(struct pl08x_dma_chan *plchan) +{ + const struct pl08x_platform_data *pd = plchan->host->pd; + int ret; + + if (plchan->mux_use++ == 0 && pd->get_signal) { + ret = pd->get_signal(plchan->cd); + if (ret < 0) { + plchan->mux_use = 0; + return ret; + } + + plchan->signal = ret; + } + return 0; +} + +static void pl08x_release_mux(struct pl08x_dma_chan *plchan) +{ + const struct pl08x_platform_data *pd = plchan->host->pd; + + if (plchan->signal >= 0) { + WARN_ON(plchan->mux_use == 0); + + if (--plchan->mux_use == 0 && pd->put_signal) { + pd->put_signal(plchan->cd, plchan->signal); + plchan->signal = -1; + } + } } /* @@ -189,20 +344,25 @@ static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch) * been set when the LLIs were constructed. Poke them into the hardware * and start the transfer. */ -static void pl08x_start_txd(struct pl08x_dma_chan *plchan, - struct pl08x_txd *txd) +static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan) { struct pl08x_driver_data *pl08x = plchan->host; struct pl08x_phy_chan *phychan = plchan->phychan; - struct pl08x_lli *lli = &txd->llis_va[0]; + struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc); + struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); + struct pl08x_lli *lli; u32 val; + list_del(&txd->vd.node); + plchan->at = txd; /* Wait for channel inactive */ while (pl08x_phy_channel_busy(phychan)) cpu_relax(); + lli = &txd->llis_va[0]; + dev_vdbg(&pl08x->adev->dev, "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, " "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n", @@ -311,10 +471,8 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan) { struct pl08x_phy_chan *ch; struct pl08x_txd *txd; - unsigned long flags; size_t bytes = 0; - spin_lock_irqsave(&plchan->lock, flags); ch = plchan->phychan; txd = plchan->at; @@ -354,18 +512,6 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan) } } - /* Sum up all queued transactions */ - if (!list_empty(&plchan->pend_list)) { - struct pl08x_txd *txdi; - list_for_each_entry(txdi, &plchan->pend_list, node) { - struct pl08x_sg *dsg; - list_for_each_entry(dsg, &txd->dsg_list, node) - bytes += dsg->len; - } - } - - spin_unlock_irqrestore(&plchan->lock, flags); - return bytes; } @@ -391,7 +537,6 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x, if (!ch->locked && !ch->serving) { ch->serving = virt_chan; - ch->signal = -1; spin_unlock_irqrestore(&ch->lock, flags); break; } @@ -404,25 +549,114 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x, return NULL; } - pm_runtime_get_sync(&pl08x->adev->dev); return ch; } +/* Mark the physical channel as free. Note, this write is atomic. */ static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x, struct pl08x_phy_chan *ch) { - unsigned long flags; + ch->serving = NULL; +} + +/* + * Try to allocate a physical channel. When successful, assign it to + * this virtual channel, and initiate the next descriptor. The + * virtual channel lock must be held at this point. 
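The helper defined next must be entered with the virtual channel lock held; the issue_pending path added later in this patch is its canonical caller. Restated as a call-site sketch (example_issue is illustrative; the body mirrors pl08x_issue_pending below):

static void example_issue(struct pl08x_dma_chan *plchan)
{
        unsigned long flags;

        spin_lock_irqsave(&plchan->vc.lock, flags);
        /* Only grab hardware if there is work and none is assigned yet */
        if (vchan_issue_pending(&plchan->vc) &&
            !plchan->phychan && plchan->state != PL08X_CHAN_WAITING)
                pl08x_phy_alloc_and_start(plchan);      /* vc.lock held */
        spin_unlock_irqrestore(&plchan->vc.lock, flags);
}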
+ */ +static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan) +{ + struct pl08x_driver_data *pl08x = plchan->host; + struct pl08x_phy_chan *ch; - spin_lock_irqsave(&ch->lock, flags); + ch = pl08x_get_phy_channel(pl08x, plchan); + if (!ch) { + dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name); + plchan->state = PL08X_CHAN_WAITING; + return; + } - /* Stop the channel and clear its interrupts */ - pl08x_terminate_phy_chan(pl08x, ch); + dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n", + ch->id, plchan->name); - pm_runtime_put(&pl08x->adev->dev); + plchan->phychan = ch; + plchan->state = PL08X_CHAN_RUNNING; + pl08x_start_next_txd(plchan); +} - /* Mark it as free */ - ch->serving = NULL; - spin_unlock_irqrestore(&ch->lock, flags); +static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch, + struct pl08x_dma_chan *plchan) +{ + struct pl08x_driver_data *pl08x = plchan->host; + + dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n", + ch->id, plchan->name); + + /* + * We do this without taking the lock; we're really only concerned + * about whether this pointer is NULL or not, and we're guaranteed + * that this will only be called when it _already_ is non-NULL. + */ + ch->serving = plchan; + plchan->phychan = ch; + plchan->state = PL08X_CHAN_RUNNING; + pl08x_start_next_txd(plchan); +} + +/* + * Free a physical DMA channel, potentially reallocating it to another + * virtual channel if we have any pending. + */ +static void pl08x_phy_free(struct pl08x_dma_chan *plchan) +{ + struct pl08x_driver_data *pl08x = plchan->host; + struct pl08x_dma_chan *p, *next; + + retry: + next = NULL; + + /* Find a waiting virtual channel for the next transfer. */ + list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node) + if (p->state == PL08X_CHAN_WAITING) { + next = p; + break; + } + + if (!next) { + list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node) + if (p->state == PL08X_CHAN_WAITING) { + next = p; + break; + } + } + + /* Ensure that the physical channel is stopped */ + pl08x_terminate_phy_chan(pl08x, plchan->phychan); + + if (next) { + bool success; + + /* + * Eww. We know this isn't going to deadlock + * but lockdep probably doesn't. + */ + spin_lock(&next->vc.lock); + /* Re-check the state now that we have the lock */ + success = next->state == PL08X_CHAN_WAITING; + if (success) + pl08x_phy_reassign_start(plchan->phychan, next); + spin_unlock(&next->vc.lock); + + /* If the state changed, try to find another channel */ + if (!success) + goto retry; + } else { + /* No more jobs, so free up the physical channel */ + pl08x_put_phy_channel(pl08x, plchan->phychan); + } + + plchan->phychan = NULL; + plchan->state = PL08X_CHAN_IDLE; } /* @@ -585,8 +819,6 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, return 0; } - pl08x->pool_ctr++; - bd.txd = txd; bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? 
PL080_LLI_LM_AHB2 : 0; cctl = txd->cctl; @@ -802,18 +1034,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, return num_llis; } -/* You should call this with the struct pl08x lock held */ static void pl08x_free_txd(struct pl08x_driver_data *pl08x, struct pl08x_txd *txd) { struct pl08x_sg *dsg, *_dsg; - /* Free the LLI */ if (txd->llis_va) dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus); - pl08x->pool_ctr--; - list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) { list_del(&dsg->node); kfree(dsg); @@ -822,133 +1050,75 @@ static void pl08x_free_txd(struct pl08x_driver_data *pl08x, kfree(txd); } -static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x, - struct pl08x_dma_chan *plchan) +static void pl08x_unmap_buffers(struct pl08x_txd *txd) { - struct pl08x_txd *txdi = NULL; - struct pl08x_txd *next; - - if (!list_empty(&plchan->pend_list)) { - list_for_each_entry_safe(txdi, - next, &plchan->pend_list, node) { - list_del(&txdi->node); - pl08x_free_txd(pl08x, txdi); + struct device *dev = txd->vd.tx.chan->device->dev; + struct pl08x_sg *dsg; + + if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { + if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) + list_for_each_entry(dsg, &txd->dsg_list, node) + dma_unmap_single(dev, dsg->src_addr, dsg->len, + DMA_TO_DEVICE); + else { + list_for_each_entry(dsg, &txd->dsg_list, node) + dma_unmap_page(dev, dsg->src_addr, dsg->len, + DMA_TO_DEVICE); } } + if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { + if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) + list_for_each_entry(dsg, &txd->dsg_list, node) + dma_unmap_single(dev, dsg->dst_addr, dsg->len, + DMA_FROM_DEVICE); + else + list_for_each_entry(dsg, &txd->dsg_list, node) + dma_unmap_page(dev, dsg->dst_addr, dsg->len, + DMA_FROM_DEVICE); + } } -/* - * The DMA ENGINE API - */ -static int pl08x_alloc_chan_resources(struct dma_chan *chan) +static void pl08x_desc_free(struct virt_dma_desc *vd) { - return 0; -} + struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); + struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan); -static void pl08x_free_chan_resources(struct dma_chan *chan) -{ + if (!plchan->slave) + pl08x_unmap_buffers(txd); + + if (!txd->done) + pl08x_release_mux(plchan); + + pl08x_free_txd(plchan->host, txd); } -/* - * This should be called with the channel plchan->lock held - */ -static int prep_phy_channel(struct pl08x_dma_chan *plchan, - struct pl08x_txd *txd) +static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x, + struct pl08x_dma_chan *plchan) { - struct pl08x_driver_data *pl08x = plchan->host; - struct pl08x_phy_chan *ch; - int ret; - - /* Check if we already have a channel */ - if (plchan->phychan) { - ch = plchan->phychan; - goto got_channel; - } + LIST_HEAD(head); + struct pl08x_txd *txd; - ch = pl08x_get_phy_channel(pl08x, plchan); - if (!ch) { - /* No physical channel available, cope with it */ - dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name); - return -EBUSY; - } + vchan_get_all_descriptors(&plchan->vc, &head); - /* - * OK we have a physical channel: for memcpy() this is all we - * need, but for slaves the physical signals may be muxed! - * Can the platform allow us to use this channel? 
- */ - if (plchan->slave && pl08x->pd->get_signal) { - ret = pl08x->pd->get_signal(plchan); - if (ret < 0) { - dev_dbg(&pl08x->adev->dev, - "unable to use physical channel %d for transfer on %s due to platform restrictions\n", - ch->id, plchan->name); - /* Release physical channel & return */ - pl08x_put_phy_channel(pl08x, ch); - return -EBUSY; - } - ch->signal = ret; + while (!list_empty(&head)) { + txd = list_first_entry(&head, struct pl08x_txd, vd.node); + list_del(&txd->vd.node); + pl08x_desc_free(&txd->vd); } - - plchan->phychan = ch; - dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n", - ch->id, - ch->signal, - plchan->name); - -got_channel: - /* Assign the flow control signal to this channel */ - if (txd->direction == DMA_MEM_TO_DEV) - txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT; - else if (txd->direction == DMA_DEV_TO_MEM) - txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT; - - plchan->phychan_hold++; - - return 0; } -static void release_phy_channel(struct pl08x_dma_chan *plchan) +/* + * The DMA ENGINE API + */ +static int pl08x_alloc_chan_resources(struct dma_chan *chan) { - struct pl08x_driver_data *pl08x = plchan->host; - - if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) { - pl08x->pd->put_signal(plchan); - plchan->phychan->signal = -1; - } - pl08x_put_phy_channel(pl08x, plchan->phychan); - plchan->phychan = NULL; + return 0; } -static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx) +static void pl08x_free_chan_resources(struct dma_chan *chan) { - struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan); - struct pl08x_txd *txd = to_pl08x_txd(tx); - unsigned long flags; - dma_cookie_t cookie; - - spin_lock_irqsave(&plchan->lock, flags); - cookie = dma_cookie_assign(tx); - - /* Put this onto the pending list */ - list_add_tail(&txd->node, &plchan->pend_list); - - /* - * If there was no physical channel available for this memcpy, - * stack the request up and indicate that the channel is waiting - * for a free physical channel. - */ - if (!plchan->slave && !plchan->phychan) { - /* Do this memcpy whenever there is a channel ready */ - plchan->state = PL08X_CHAN_WAITING; - plchan->waiting = txd; - } else { - plchan->phychan_hold--; - } - - spin_unlock_irqrestore(&plchan->lock, flags); - - return cookie; + /* Ensure all queued descriptors are freed */ + vchan_free_chan_resources(to_virt_chan(chan)); } static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt( @@ -968,23 +1138,53 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); + struct virt_dma_desc *vd; + unsigned long flags; enum dma_status ret; + size_t bytes = 0; ret = dma_cookie_status(chan, cookie, txstate); if (ret == DMA_SUCCESS) return ret; /* + * There's no point calculating the residue if there's + * no txstate to store the value. 
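From the client side this means the residue is only computed when the caller actually supplies a dma_tx_state. A sketch of the usual polling pattern, assuming <linux/dmaengine.h>; check_progress is illustrative, and chan/cookie are assumed to come from an earlier prep and submit:

static void check_progress(struct dma_chan *chan, dma_cookie_t cookie)
{
        struct dma_tx_state state;
        enum dma_status status;

        /* Passing NULL instead of &state skips the residue work entirely */
        status = dmaengine_tx_status(chan, cookie, &state);
        if (status == DMA_IN_PROGRESS || status == DMA_PAUSED)
                pr_info("transfer pending, %u bytes left\n", state.residue);
}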
+ */ + if (!txstate) { + if (plchan->state == PL08X_CHAN_PAUSED) + ret = DMA_PAUSED; + return ret; + } + + spin_lock_irqsave(&plchan->vc.lock, flags); + ret = dma_cookie_status(chan, cookie, txstate); + if (ret != DMA_SUCCESS) { + vd = vchan_find_desc(&plchan->vc, cookie); + if (vd) { + /* On the issued list, so hasn't been processed yet */ + struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); + struct pl08x_sg *dsg; + + list_for_each_entry(dsg, &txd->dsg_list, node) + bytes += dsg->len; + } else { + bytes = pl08x_getbytes_chan(plchan); + } + } + spin_unlock_irqrestore(&plchan->vc.lock, flags); + + /* * This cookie not complete yet * Get number of bytes left in the active transactions and queue */ - dma_set_residue(txstate, pl08x_getbytes_chan(plchan)); + dma_set_residue(txstate, bytes); - if (plchan->state == PL08X_CHAN_PAUSED) - return DMA_PAUSED; + if (plchan->state == PL08X_CHAN_PAUSED && ret == DMA_IN_PROGRESS) + ret = DMA_PAUSED; /* Whether waiting or running, we're in progress */ - return DMA_IN_PROGRESS; + return ret; } /* PrimeCell DMA extension */ @@ -1080,38 +1280,14 @@ static u32 pl08x_burst(u32 maxburst) return burst_sizes[i].reg; } -static int dma_set_runtime_config(struct dma_chan *chan, - struct dma_slave_config *config) +static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan, + enum dma_slave_buswidth addr_width, u32 maxburst) { - struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); - struct pl08x_driver_data *pl08x = plchan->host; - enum dma_slave_buswidth addr_width; - u32 width, burst, maxburst; - u32 cctl = 0; - - if (!plchan->slave) - return -EINVAL; - - /* Transfer direction */ - plchan->runtime_direction = config->direction; - if (config->direction == DMA_MEM_TO_DEV) { - addr_width = config->dst_addr_width; - maxburst = config->dst_maxburst; - } else if (config->direction == DMA_DEV_TO_MEM) { - addr_width = config->src_addr_width; - maxburst = config->src_maxburst; - } else { - dev_err(&pl08x->adev->dev, - "bad runtime_config: alien transfer direction\n"); - return -EINVAL; - } + u32 width, burst, cctl = 0; width = pl08x_width(addr_width); - if (width == ~0) { - dev_err(&pl08x->adev->dev, - "bad runtime_config: alien address width\n"); - return -EINVAL; - } + if (width == ~0) + return ~0; cctl |= width << PL080_CONTROL_SWIDTH_SHIFT; cctl |= width << PL080_CONTROL_DWIDTH_SHIFT; @@ -1128,28 +1304,23 @@ static int dma_set_runtime_config(struct dma_chan *chan, cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT; cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT; - plchan->device_fc = config->device_fc; + return pl08x_cctl(cctl); +} - if (plchan->runtime_direction == DMA_DEV_TO_MEM) { - plchan->src_addr = config->src_addr; - plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR | - pl08x_select_bus(plchan->cd->periph_buses, - pl08x->mem_buses); - } else { - plchan->dst_addr = config->dst_addr; - plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR | - pl08x_select_bus(pl08x->mem_buses, - plchan->cd->periph_buses); - } +static int dma_set_runtime_config(struct dma_chan *chan, + struct dma_slave_config *config) +{ + struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); - dev_dbg(&pl08x->adev->dev, - "configured channel %s (%s) for %s, data width %d, " - "maxburst %d words, LE, CCTL=0x%08x\n", - dma_chan_name(chan), plchan->name, - (config->direction == DMA_DEV_TO_MEM) ? 
"RX" : "TX", - addr_width, - maxburst, - cctl); + if (!plchan->slave) + return -EINVAL; + + /* Reject definitely invalid configurations */ + if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || + config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) + return -EINVAL; + + plchan->cfg = *config; return 0; } @@ -1163,95 +1334,19 @@ static void pl08x_issue_pending(struct dma_chan *chan) struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); unsigned long flags; - spin_lock_irqsave(&plchan->lock, flags); - /* Something is already active, or we're waiting for a channel... */ - if (plchan->at || plchan->state == PL08X_CHAN_WAITING) { - spin_unlock_irqrestore(&plchan->lock, flags); - return; - } - - /* Take the first element in the queue and execute it */ - if (!list_empty(&plchan->pend_list)) { - struct pl08x_txd *next; - - next = list_first_entry(&plchan->pend_list, - struct pl08x_txd, - node); - list_del(&next->node); - plchan->state = PL08X_CHAN_RUNNING; - - pl08x_start_txd(plchan, next); + spin_lock_irqsave(&plchan->vc.lock, flags); + if (vchan_issue_pending(&plchan->vc)) { + if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING) + pl08x_phy_alloc_and_start(plchan); } - - spin_unlock_irqrestore(&plchan->lock, flags); -} - -static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan, - struct pl08x_txd *txd) -{ - struct pl08x_driver_data *pl08x = plchan->host; - unsigned long flags; - int num_llis, ret; - - num_llis = pl08x_fill_llis_for_desc(pl08x, txd); - if (!num_llis) { - spin_lock_irqsave(&plchan->lock, flags); - pl08x_free_txd(pl08x, txd); - spin_unlock_irqrestore(&plchan->lock, flags); - return -EINVAL; - } - - spin_lock_irqsave(&plchan->lock, flags); - - /* - * See if we already have a physical channel allocated, - * else this is the time to try to get one. - */ - ret = prep_phy_channel(plchan, txd); - if (ret) { - /* - * No physical channel was available. - * - * memcpy transfers can be sorted out at submission time. - * - * Slave transfers may have been denied due to platform - * channel muxing restrictions. Since there is no guarantee - * that this will ever be resolved, and the signal must be - * acquired AFTER acquiring the physical channel, we will let - * them be NACK:ed with -EBUSY here. The drivers can retry - * the prep() call if they are eager on doing this using DMA. - */ - if (plchan->slave) { - pl08x_free_txd_list(pl08x, plchan); - pl08x_free_txd(pl08x, txd); - spin_unlock_irqrestore(&plchan->lock, flags); - return -EBUSY; - } - } else - /* - * Else we're all set, paused and ready to roll, status - * will switch to PL08X_CHAN_RUNNING when we call - * issue_pending(). If there is something running on the - * channel already we don't change its state. 
- */ - if (plchan->state == PL08X_CHAN_IDLE) - plchan->state = PL08X_CHAN_PAUSED; - - spin_unlock_irqrestore(&plchan->lock, flags); - - return 0; + spin_unlock_irqrestore(&plchan->vc.lock, flags); } -static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan, - unsigned long flags) +static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan) { struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT); if (txd) { - dma_async_tx_descriptor_init(&txd->tx, &plchan->chan); - txd->tx.flags = flags; - txd->tx.tx_submit = pl08x_tx_submit; - INIT_LIST_HEAD(&txd->node); INIT_LIST_HEAD(&txd->dsg_list); /* Always enable error and terminal interrupts */ @@ -1274,7 +1369,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( struct pl08x_sg *dsg; int ret; - txd = pl08x_get_txd(plchan, flags); + txd = pl08x_get_txd(plchan); if (!txd) { dev_err(&pl08x->adev->dev, "%s no memory for descriptor\n", __func__); @@ -1290,14 +1385,13 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( } list_add_tail(&dsg->node, &txd->dsg_list); - txd->direction = DMA_NONE; dsg->src_addr = src; dsg->dst_addr = dest; dsg->len = len; /* Set platform data for m2m */ txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; - txd->cctl = pl08x->pd->memcpy_channel.cctl & + txd->cctl = pl08x->pd->memcpy_channel.cctl_memcpy & ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2); /* Both to be incremented or the code will break */ @@ -1307,11 +1401,13 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( txd->cctl |= pl08x_select_bus(pl08x->mem_buses, pl08x->mem_buses); - ret = pl08x_prep_channel_resources(plchan, txd); - if (ret) + ret = pl08x_fill_llis_for_desc(plchan->host, txd); + if (!ret) { + pl08x_free_txd(pl08x, txd); return NULL; + } - return &txd->tx; + return vchan_tx_prep(&plchan->vc, &txd->vd, flags); } static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( @@ -1324,36 +1420,40 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( struct pl08x_txd *txd; struct pl08x_sg *dsg; struct scatterlist *sg; + enum dma_slave_buswidth addr_width; dma_addr_t slave_addr; int ret, tmp; + u8 src_buses, dst_buses; + u32 maxburst, cctl; dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", __func__, sg_dma_len(sgl), plchan->name); - txd = pl08x_get_txd(plchan, flags); + txd = pl08x_get_txd(plchan); if (!txd) { dev_err(&pl08x->adev->dev, "%s no txd\n", __func__); return NULL; } - if (direction != plchan->runtime_direction) - dev_err(&pl08x->adev->dev, "%s DMA setup does not match " - "the direction configured for the PrimeCell\n", - __func__); - /* * Set up addresses, the PrimeCell configured address * will take precedence since this may configure the * channel target address dynamically at runtime. 
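For reference, the client-side sequence that feeds plchan->cfg here is: configure the channel, then prep the scatterlist. A sketch under the assumption of a 32-bit peripheral FIFO at fifo_addr; example_prep_tx and the burst value are illustrative:

static struct dma_async_tx_descriptor *
example_prep_tx(struct dma_chan *chan, dma_addr_t fifo_addr,
                struct scatterlist *sgl, unsigned int sg_len)
{
        struct dma_slave_config cfg = {
                .direction      = DMA_MEM_TO_DEV,
                .dst_addr       = fifo_addr,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_maxburst   = 4,
        };

        /* Lands in dma_set_runtime_config() and is copied to plchan->cfg */
        if (dmaengine_slave_config(chan, &cfg))
                return NULL;

        return dmaengine_prep_slave_sg(chan, sgl, sg_len,
                                       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
}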
*/ - txd->direction = direction; - if (direction == DMA_MEM_TO_DEV) { - txd->cctl = plchan->dst_cctl; - slave_addr = plchan->dst_addr; + cctl = PL080_CONTROL_SRC_INCR; + slave_addr = plchan->cfg.dst_addr; + addr_width = plchan->cfg.dst_addr_width; + maxburst = plchan->cfg.dst_maxburst; + src_buses = pl08x->mem_buses; + dst_buses = plchan->cd->periph_buses; } else if (direction == DMA_DEV_TO_MEM) { - txd->cctl = plchan->src_cctl; - slave_addr = plchan->src_addr; + cctl = PL080_CONTROL_DST_INCR; + slave_addr = plchan->cfg.src_addr; + addr_width = plchan->cfg.src_addr_width; + maxburst = plchan->cfg.src_maxburst; + src_buses = plchan->cd->periph_buses; + dst_buses = pl08x->mem_buses; } else { pl08x_free_txd(pl08x, txd); dev_err(&pl08x->adev->dev, @@ -1361,7 +1461,17 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( return NULL; } - if (plchan->device_fc) + cctl |= pl08x_get_cctl(plchan, addr_width, maxburst); + if (cctl == ~0) { + pl08x_free_txd(pl08x, txd); + dev_err(&pl08x->adev->dev, + "DMA slave configuration botched?\n"); + return NULL; + } + + txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses); + + if (plchan->cfg.device_fc) tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER : PL080_FLOW_PER2MEM_PER; else @@ -1370,9 +1480,28 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT; + ret = pl08x_request_mux(plchan); + if (ret < 0) { + pl08x_free_txd(pl08x, txd); + dev_dbg(&pl08x->adev->dev, + "unable to mux for transfer on %s due to platform restrictions\n", + plchan->name); + return NULL; + } + + dev_dbg(&pl08x->adev->dev, "allocated DMA request signal %d for xfer on %s\n", + plchan->signal, plchan->name); + + /* Assign the flow control signal to this channel */ + if (direction == DMA_MEM_TO_DEV) + txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT; + else + txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT; + for_each_sg(sgl, sg, sg_len, tmp) { dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT); if (!dsg) { + pl08x_release_mux(plchan); pl08x_free_txd(pl08x, txd); dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n", __func__); @@ -1390,11 +1519,14 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( } } - ret = pl08x_prep_channel_resources(plchan, txd); - if (ret) + ret = pl08x_fill_llis_for_desc(plchan->host, txd); + if (!ret) { + pl08x_release_mux(plchan); + pl08x_free_txd(pl08x, txd); return NULL; + } - return &txd->tx; + return vchan_tx_prep(&plchan->vc, &txd->vd, flags); } static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, @@ -1415,9 +1547,9 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, * Anything succeeds on channels with no physical allocation and * no queued transfers. 
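The control hook below is normally reached through the generic dmaengine wrappers; a terminate issued by a client looks like this (example_abort is illustrative):

static void example_abort(struct dma_chan *chan)
{
        /*
         * Wrapper around device_control(chan, DMA_TERMINATE_ALL, 0):
         * stops the physical channel (if any) and frees every queued
         * descriptor on the virtual channel.
         */
        dmaengine_terminate_all(chan);
}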
*/ - spin_lock_irqsave(&plchan->lock, flags); + spin_lock_irqsave(&plchan->vc.lock, flags); if (!plchan->phychan && !plchan->at) { - spin_unlock_irqrestore(&plchan->lock, flags); + spin_unlock_irqrestore(&plchan->vc.lock, flags); return 0; } @@ -1426,18 +1558,15 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, plchan->state = PL08X_CHAN_IDLE; if (plchan->phychan) { - pl08x_terminate_phy_chan(pl08x, plchan->phychan); - /* * Mark physical channel as free and free any slave * signal */ - release_phy_channel(plchan); - plchan->phychan_hold = 0; + pl08x_phy_free(plchan); } /* Dequeue jobs and free LLIs */ if (plchan->at) { - pl08x_free_txd(pl08x, plchan->at); + pl08x_desc_free(&plchan->at->vd); plchan->at = NULL; } /* Dequeue jobs not yet fired as well */ @@ -1457,7 +1586,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, break; } - spin_unlock_irqrestore(&plchan->lock, flags); + spin_unlock_irqrestore(&plchan->vc.lock, flags); return ret; } @@ -1494,123 +1623,6 @@ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG); } -static void pl08x_unmap_buffers(struct pl08x_txd *txd) -{ - struct device *dev = txd->tx.chan->device->dev; - struct pl08x_sg *dsg; - - if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_single(dev, dsg->src_addr, dsg->len, - DMA_TO_DEVICE); - else { - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_page(dev, dsg->src_addr, dsg->len, - DMA_TO_DEVICE); - } - } - if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_single(dev, dsg->dst_addr, dsg->len, - DMA_FROM_DEVICE); - else - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_page(dev, dsg->dst_addr, dsg->len, - DMA_FROM_DEVICE); - } -} - -static void pl08x_tasklet(unsigned long data) -{ - struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data; - struct pl08x_driver_data *pl08x = plchan->host; - struct pl08x_txd *txd; - unsigned long flags; - - spin_lock_irqsave(&plchan->lock, flags); - - txd = plchan->at; - plchan->at = NULL; - - if (txd) { - /* Update last completed */ - dma_cookie_complete(&txd->tx); - } - - /* If a new descriptor is queued, set it up plchan->at is NULL here */ - if (!list_empty(&plchan->pend_list)) { - struct pl08x_txd *next; - - next = list_first_entry(&plchan->pend_list, - struct pl08x_txd, - node); - list_del(&next->node); - - pl08x_start_txd(plchan, next); - } else if (plchan->phychan_hold) { - /* - * This channel is still in use - we have a new txd being - * prepared and will soon be queued. Don't give up the - * physical channel. - */ - } else { - struct pl08x_dma_chan *waiting = NULL; - - /* - * No more jobs, so free up the physical channel - * Free any allocated signal on slave transfers too - */ - release_phy_channel(plchan); - plchan->state = PL08X_CHAN_IDLE; - - /* - * And NOW before anyone else can grab that free:d up - * physical channel, see if there is some memcpy pending - * that seriously needs to start because of being stacked - * up while we were choking the physical channels with data. 
- */ - list_for_each_entry(waiting, &pl08x->memcpy.channels, - chan.device_node) { - if (waiting->state == PL08X_CHAN_WAITING && - waiting->waiting != NULL) { - int ret; - - /* This should REALLY not fail now */ - ret = prep_phy_channel(waiting, - waiting->waiting); - BUG_ON(ret); - waiting->phychan_hold--; - waiting->state = PL08X_CHAN_RUNNING; - waiting->waiting = NULL; - pl08x_issue_pending(&waiting->chan); - break; - } - } - } - - spin_unlock_irqrestore(&plchan->lock, flags); - - if (txd) { - dma_async_tx_callback callback = txd->tx.callback; - void *callback_param = txd->tx.callback_param; - - /* Don't try to unmap buffers on slave channels */ - if (!plchan->slave) - pl08x_unmap_buffers(txd); - - /* Free the descriptor */ - spin_lock_irqsave(&plchan->lock, flags); - pl08x_free_txd(pl08x, txd); - spin_unlock_irqrestore(&plchan->lock, flags); - - /* Callback to signal completion */ - if (callback) - callback(callback_param); - } -} - static irqreturn_t pl08x_irq(int irq, void *dev) { struct pl08x_driver_data *pl08x = dev; @@ -1635,6 +1647,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev) /* Locate physical channel */ struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; struct pl08x_dma_chan *plchan = phychan->serving; + struct pl08x_txd *tx; if (!plchan) { dev_err(&pl08x->adev->dev, @@ -1643,8 +1656,29 @@ static irqreturn_t pl08x_irq(int irq, void *dev) continue; } - /* Schedule tasklet on this channel */ - tasklet_schedule(&plchan->tasklet); + spin_lock(&plchan->vc.lock); + tx = plchan->at; + if (tx) { + plchan->at = NULL; + /* + * This descriptor is done, release its mux + * reservation. + */ + pl08x_release_mux(plchan); + tx->done = true; + vchan_cookie_complete(&tx->vd); + + /* + * And start the next descriptor (if any), + * otherwise free this channel. 
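All three drivers touched by this series end up with the same interrupt-time shape: complete the finished descriptor under the vc lock, then either launch the next one or give the hardware back. A generic sketch; my_chan, start_next and release_hw are hypothetical, while vchan_cookie_complete() and vchan_next_desc() are the real virt-dma helpers:

static void generic_dma_irq(struct my_chan *c)
{
        spin_lock(&c->vc.lock);
        if (c->at) {
                struct virt_dma_desc *vd = &c->at->vd;

                c->at = NULL;
                /* Marks the cookie complete; callback runs from a tasklet */
                vchan_cookie_complete(vd);

                if (vchan_next_desc(&c->vc))
                        start_next(c);          /* hypothetical */
                else
                        release_hw(c);          /* hypothetical */
        }
        spin_unlock(&c->vc.lock);
}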
+ */ + if (vchan_next_desc(&plchan->vc)) + pl08x_start_next_txd(plchan); + else + pl08x_phy_free(plchan); + } + spin_unlock(&plchan->vc.lock); + mask |= (1 << i); } } @@ -1654,16 +1688,10 @@ static irqreturn_t pl08x_irq(int irq, void *dev) static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan) { - u32 cctl = pl08x_cctl(chan->cd->cctl); - chan->slave = true; chan->name = chan->cd->bus_id; - chan->src_addr = chan->cd->addr; - chan->dst_addr = chan->cd->addr; - chan->src_cctl = cctl | PL080_CONTROL_DST_INCR | - pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses); - chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR | - pl08x_select_bus(chan->host->mem_buses, chan->cd->periph_buses); + chan->cfg.src_addr = chan->cd->addr; + chan->cfg.dst_addr = chan->cd->addr; } /* @@ -1693,6 +1721,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, chan->host = pl08x; chan->state = PL08X_CHAN_IDLE; + chan->signal = -1; if (slave) { chan->cd = &pl08x->pd->slave_channels[i]; @@ -1705,26 +1734,12 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, return -ENOMEM; } } - if (chan->cd->circular_buffer) { - dev_err(&pl08x->adev->dev, - "channel %s: circular buffers not supported\n", - chan->name); - kfree(chan); - continue; - } dev_dbg(&pl08x->adev->dev, "initialize virtual channel \"%s\"\n", chan->name); - chan->chan.device = dmadev; - dma_cookie_init(&chan->chan); - - spin_lock_init(&chan->lock); - INIT_LIST_HEAD(&chan->pend_list); - tasklet_init(&chan->tasklet, pl08x_tasklet, - (unsigned long) chan); - - list_add_tail(&chan->chan.device_node, &dmadev->channels); + chan->vc.desc_free = pl08x_desc_free; + vchan_init(&chan->vc, dmadev); } dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n", i, slave ? 
"slave" : "memcpy"); @@ -1737,8 +1752,8 @@ static void pl08x_free_virtual_channels(struct dma_device *dmadev) struct pl08x_dma_chan *next; list_for_each_entry_safe(chan, - next, &dmadev->channels, chan.device_node) { - list_del(&chan->chan.device_node); + next, &dmadev->channels, vc.chan.device_node) { + list_del(&chan->vc.chan.device_node); kfree(chan); } } @@ -1791,7 +1806,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data) seq_printf(s, "\nPL08x virtual memcpy channels:\n"); seq_printf(s, "CHANNEL:\tSTATE:\n"); seq_printf(s, "--------\t------\n"); - list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) { + list_for_each_entry(chan, &pl08x->memcpy.channels, vc.chan.device_node) { seq_printf(s, "%s\t\t%s\n", chan->name, pl08x_state_str(chan->state)); } @@ -1799,7 +1814,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data) seq_printf(s, "\nPL08x virtual slave channels:\n"); seq_printf(s, "CHANNEL:\tSTATE:\n"); seq_printf(s, "--------\t------\n"); - list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) { + list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) { seq_printf(s, "%s\t\t%s\n", chan->name, pl08x_state_str(chan->state)); } @@ -1851,9 +1866,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) goto out_no_pl08x; } - pm_runtime_set_active(&adev->dev); - pm_runtime_enable(&adev->dev); - /* Initialize memcpy engine */ dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); pl08x->memcpy.dev = &adev->dev; @@ -1903,8 +1915,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) goto out_no_lli_pool; } - spin_lock_init(&pl08x->lock); - pl08x->base = ioremap(adev->res.start, resource_size(&adev->res)); if (!pl08x->base) { ret = -ENOMEM; @@ -1942,7 +1952,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) ch->id = i; ch->base = pl08x->base + PL080_Cx_BASE(i); spin_lock_init(&ch->lock); - ch->signal = -1; /* * Nomadik variants can have channels that are locked @@ -2007,7 +2016,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) amba_part(adev), amba_rev(adev), (unsigned long long)adev->res.start, adev->irq[0]); - pm_runtime_put(&adev->dev); return 0; out_no_slave_reg: @@ -2026,9 +2034,6 @@ out_no_ioremap: dma_pool_destroy(pl08x->pool); out_no_lli_pool: out_no_platdata: - pm_runtime_put(&adev->dev); - pm_runtime_disable(&adev->dev); - kfree(pl08x); out_no_pl08x: amba_release_regions(adev); diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c new file mode 100644 index 00000000000..ae056182613 --- /dev/null +++ b/drivers/dma/omap-dma.c @@ -0,0 +1,669 @@ +/* + * OMAP DMAengine support + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/omap-dma.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +#include "virt-dma.h" +#include <plat/dma.h> + +struct omap_dmadev { + struct dma_device ddev; + spinlock_t lock; + struct tasklet_struct task; + struct list_head pending; +}; + +struct omap_chan { + struct virt_dma_chan vc; + struct list_head node; + + struct dma_slave_config cfg; + unsigned dma_sig; + bool cyclic; + + int dma_ch; + struct omap_desc *desc; + unsigned sgidx; +}; + +struct omap_sg { + dma_addr_t addr; + uint32_t en; /* number of elements (24-bit) */ + uint32_t fn; /* number of frames (16-bit) */ +}; + +struct omap_desc { + struct virt_dma_desc vd; + enum dma_transfer_direction dir; + dma_addr_t dev_addr; + + int16_t fi; /* for OMAP_DMA_SYNC_PACKET */ + uint8_t es; /* OMAP_DMA_DATA_TYPE_xxx */ + uint8_t sync_mode; /* OMAP_DMA_SYNC_xxx */ + uint8_t sync_type; /* OMAP_DMA_xxx_SYNC* */ + uint8_t periph_port; /* Peripheral port */ + + unsigned sglen; + struct omap_sg sg[0]; +}; + +static const unsigned es_bytes[] = { + [OMAP_DMA_DATA_TYPE_S8] = 1, + [OMAP_DMA_DATA_TYPE_S16] = 2, + [OMAP_DMA_DATA_TYPE_S32] = 4, +}; + +static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d) +{ + return container_of(d, struct omap_dmadev, ddev); +} + +static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c) +{ + return container_of(c, struct omap_chan, vc.chan); +} + +static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t) +{ + return container_of(t, struct omap_desc, vd.tx); +} + +static void omap_dma_desc_free(struct virt_dma_desc *vd) +{ + kfree(container_of(vd, struct omap_desc, vd)); +} + +static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d, + unsigned idx) +{ + struct omap_sg *sg = d->sg + idx; + + if (d->dir == DMA_DEV_TO_MEM) + omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF, + OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0); + else + omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF, + OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0); + + omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn, + d->sync_mode, c->dma_sig, d->sync_type); + + omap_start_dma(c->dma_ch); +} + +static void omap_dma_start_desc(struct omap_chan *c) +{ + struct virt_dma_desc *vd = vchan_next_desc(&c->vc); + struct omap_desc *d; + + if (!vd) { + c->desc = NULL; + return; + } + + list_del(&vd->node); + + c->desc = d = to_omap_dma_desc(&vd->tx); + c->sgidx = 0; + + if (d->dir == DMA_DEV_TO_MEM) + omap_set_dma_src_params(c->dma_ch, d->periph_port, + OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi); + else + omap_set_dma_dest_params(c->dma_ch, d->periph_port, + OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi); + + omap_dma_start_sg(c, d, 0); +} + +static void omap_dma_callback(int ch, u16 status, void *data) +{ + struct omap_chan *c = data; + struct omap_desc *d; + unsigned long flags; + + spin_lock_irqsave(&c->vc.lock, flags); + d = c->desc; + if (d) { + if (!c->cyclic) { + if (++c->sgidx < d->sglen) { + omap_dma_start_sg(c, d, c->sgidx); + } else { + omap_dma_start_desc(c); + vchan_cookie_complete(&d->vd); + } + } else { + vchan_cyclic_callback(&d->vd); + } + } + spin_unlock_irqrestore(&c->vc.lock, flags); +} + +/* + * This callback schedules all pending channels. 
We could be more + * clever here by postponing allocation of the real DMA channels to + * this point, and freeing them when our virtual channel becomes idle. + * + * We would then need to deal with 'all channels in-use' + */ +static void omap_dma_sched(unsigned long data) +{ + struct omap_dmadev *d = (struct omap_dmadev *)data; + LIST_HEAD(head); + + spin_lock_irq(&d->lock); + list_splice_tail_init(&d->pending, &head); + spin_unlock_irq(&d->lock); + + while (!list_empty(&head)) { + struct omap_chan *c = list_first_entry(&head, + struct omap_chan, node); + + spin_lock_irq(&c->vc.lock); + list_del_init(&c->node); + omap_dma_start_desc(c); + spin_unlock_irq(&c->vc.lock); + } +} + +static int omap_dma_alloc_chan_resources(struct dma_chan *chan) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + + dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig); + + return omap_request_dma(c->dma_sig, "DMA engine", + omap_dma_callback, c, &c->dma_ch); +} + +static void omap_dma_free_chan_resources(struct dma_chan *chan) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + + vchan_free_chan_resources(&c->vc); + omap_free_dma(c->dma_ch); + + dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig); +} + +static size_t omap_dma_sg_size(struct omap_sg *sg) +{ + return sg->en * sg->fn; +} + +static size_t omap_dma_desc_size(struct omap_desc *d) +{ + unsigned i; + size_t size; + + for (size = i = 0; i < d->sglen; i++) + size += omap_dma_sg_size(&d->sg[i]); + + return size * es_bytes[d->es]; +} + +static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr) +{ + unsigned i; + size_t size, es_size = es_bytes[d->es]; + + for (size = i = 0; i < d->sglen; i++) { + size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size; + + if (size) + size += this_size; + else if (addr >= d->sg[i].addr && + addr < d->sg[i].addr + this_size) + size += d->sg[i].addr + this_size - addr; + } + return size; +} + +static enum dma_status omap_dma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, struct dma_tx_state *txstate) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + struct virt_dma_desc *vd; + enum dma_status ret; + unsigned long flags; + + ret = dma_cookie_status(chan, cookie, txstate); + if (ret == DMA_SUCCESS || !txstate) + return ret; + + spin_lock_irqsave(&c->vc.lock, flags); + vd = vchan_find_desc(&c->vc, cookie); + if (vd) { + txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx)); + } else if (c->desc && c->desc->vd.tx.cookie == cookie) { + struct omap_desc *d = c->desc; + dma_addr_t pos; + + if (d->dir == DMA_MEM_TO_DEV) + pos = omap_get_dma_src_pos(c->dma_ch); + else if (d->dir == DMA_DEV_TO_MEM) + pos = omap_get_dma_dst_pos(c->dma_ch); + else + pos = 0; + + txstate->residue = omap_dma_desc_size_pos(d, pos); + } else { + txstate->residue = 0; + } + spin_unlock_irqrestore(&c->vc.lock, flags); + + return ret; +} + +static void omap_dma_issue_pending(struct dma_chan *chan) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&c->vc.lock, flags); + if (vchan_issue_pending(&c->vc) && !c->desc) { + struct omap_dmadev *d = to_omap_dma_dev(chan->device); + spin_lock(&d->lock); + if (list_empty(&c->node)) + list_add_tail(&c->node, &d->pending); + spin_unlock(&d->lock); + tasklet_schedule(&d->task); + } + spin_unlock_irqrestore(&c->vc.lock, flags); +} + +static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg( + struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen, + enum dma_transfer_direction dir, 
unsigned long tx_flags, void *context) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + enum dma_slave_buswidth dev_width; + struct scatterlist *sgent; + struct omap_desc *d; + dma_addr_t dev_addr; + unsigned i, j = 0, es, en, frame_bytes, sync_type; + u32 burst; + + if (dir == DMA_DEV_TO_MEM) { + dev_addr = c->cfg.src_addr; + dev_width = c->cfg.src_addr_width; + burst = c->cfg.src_maxburst; + sync_type = OMAP_DMA_SRC_SYNC; + } else if (dir == DMA_MEM_TO_DEV) { + dev_addr = c->cfg.dst_addr; + dev_width = c->cfg.dst_addr_width; + burst = c->cfg.dst_maxburst; + sync_type = OMAP_DMA_DST_SYNC; + } else { + dev_err(chan->device->dev, "%s: bad direction?\n", __func__); + return NULL; + } + + /* Bus width translates to the element size (ES) */ + switch (dev_width) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + es = OMAP_DMA_DATA_TYPE_S8; + break; + case DMA_SLAVE_BUSWIDTH_2_BYTES: + es = OMAP_DMA_DATA_TYPE_S16; + break; + case DMA_SLAVE_BUSWIDTH_4_BYTES: + es = OMAP_DMA_DATA_TYPE_S32; + break; + default: /* not reached */ + return NULL; + } + + /* Now allocate and setup the descriptor. */ + d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC); + if (!d) + return NULL; + + d->dir = dir; + d->dev_addr = dev_addr; + d->es = es; + d->sync_mode = OMAP_DMA_SYNC_FRAME; + d->sync_type = sync_type; + d->periph_port = OMAP_DMA_PORT_TIPB; + + /* + * Build our scatterlist entries: each contains the address, + * the number of elements (EN) in each frame, and the number of + * frames (FN). Number of bytes for this entry = ES * EN * FN. + * + * Burst size translates to number of elements with frame sync. + * Note: DMA engine defines burst to be the number of dev-width + * transfers. + */ + en = burst; + frame_bytes = es_bytes[es] * en; + for_each_sg(sgl, sgent, sglen, i) { + d->sg[j].addr = sg_dma_address(sgent); + d->sg[j].en = en; + d->sg[j].fn = sg_dma_len(sgent) / frame_bytes; + j++; + } + + d->sglen = j; + + return vchan_tx_prep(&c->vc, &d->vd, tx_flags); +} + +static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic( + struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_transfer_direction dir, void *context) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + enum dma_slave_buswidth dev_width; + struct omap_desc *d; + dma_addr_t dev_addr; + unsigned es, sync_type; + u32 burst; + + if (dir == DMA_DEV_TO_MEM) { + dev_addr = c->cfg.src_addr; + dev_width = c->cfg.src_addr_width; + burst = c->cfg.src_maxburst; + sync_type = OMAP_DMA_SRC_SYNC; + } else if (dir == DMA_MEM_TO_DEV) { + dev_addr = c->cfg.dst_addr; + dev_width = c->cfg.dst_addr_width; + burst = c->cfg.dst_maxburst; + sync_type = OMAP_DMA_DST_SYNC; + } else { + dev_err(chan->device->dev, "%s: bad direction?\n", __func__); + return NULL; + } + + /* Bus width translates to the element size (ES) */ + switch (dev_width) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + es = OMAP_DMA_DATA_TYPE_S8; + break; + case DMA_SLAVE_BUSWIDTH_2_BYTES: + es = OMAP_DMA_DATA_TYPE_S16; + break; + case DMA_SLAVE_BUSWIDTH_4_BYTES: + es = OMAP_DMA_DATA_TYPE_S32; + break; + default: /* not reached */ + return NULL; + } + + /* Now allocate and setup the descriptor. 
*/ + d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC); + if (!d) + return NULL; + + d->dir = dir; + d->dev_addr = dev_addr; + d->fi = burst; + d->es = es; + d->sync_mode = OMAP_DMA_SYNC_PACKET; + d->sync_type = sync_type; + d->periph_port = OMAP_DMA_PORT_MPUI; + d->sg[0].addr = buf_addr; + d->sg[0].en = period_len / es_bytes[es]; + d->sg[0].fn = buf_len / period_len; + d->sglen = 1; + + if (!c->cyclic) { + c->cyclic = true; + omap_dma_link_lch(c->dma_ch, c->dma_ch); + omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ); + omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ); + } + + if (!cpu_class_is_omap1()) { + omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16); + omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16); + } + + return vchan_tx_prep(&c->vc, &d->vd, DMA_CTRL_ACK | DMA_PREP_INTERRUPT); +} + +static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg) +{ + if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || + cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) + return -EINVAL; + + memcpy(&c->cfg, cfg, sizeof(c->cfg)); + + return 0; +} + +static int omap_dma_terminate_all(struct omap_chan *c) +{ + struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device); + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&c->vc.lock, flags); + + /* Prevent this channel being scheduled */ + spin_lock(&d->lock); + list_del_init(&c->node); + spin_unlock(&d->lock); + + /* + * Stop DMA activity: we assume the callback will not be called + * after omap_stop_dma() returns (even if it does, it will see + * c->desc is NULL and exit.) + */ + if (c->desc) { + c->desc = NULL; + omap_stop_dma(c->dma_ch); + } + + if (c->cyclic) { + c->cyclic = false; + omap_dma_unlink_lch(c->dma_ch, c->dma_ch); + } + + vchan_get_all_descriptors(&c->vc, &head); + spin_unlock_irqrestore(&c->vc.lock, flags); + vchan_dma_desc_free_list(&c->vc, &head); + + return 0; +} + +static int omap_dma_pause(struct omap_chan *c) +{ + /* FIXME: not supported by platform private API */ + return -EINVAL; +} + +static int omap_dma_resume(struct omap_chan *c) +{ + /* FIXME: not supported by platform private API */ + return -EINVAL; +} + +static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, + unsigned long arg) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + int ret; + + switch (cmd) { + case DMA_SLAVE_CONFIG: + ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg); + break; + + case DMA_TERMINATE_ALL: + ret = omap_dma_terminate_all(c); + break; + + case DMA_PAUSE: + ret = omap_dma_pause(c); + break; + + case DMA_RESUME: + ret = omap_dma_resume(c); + break; + + default: + ret = -ENXIO; + break; + } + + return ret; +} + +static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig) +{ + struct omap_chan *c; + + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) + return -ENOMEM; + + c->dma_sig = dma_sig; + c->vc.desc_free = omap_dma_desc_free; + vchan_init(&c->vc, &od->ddev); + INIT_LIST_HEAD(&c->node); + + od->ddev.chancnt++; + + return 0; +} + +static void omap_dma_free(struct omap_dmadev *od) +{ + tasklet_kill(&od->task); + while (!list_empty(&od->ddev.channels)) { + struct omap_chan *c = list_first_entry(&od->ddev.channels, + struct omap_chan, vc.chan.device_node); + + list_del(&c->vc.chan.device_node); + tasklet_kill(&c->vc.task); + kfree(c); + } + kfree(od); +} + +static int omap_dma_probe(struct platform_device *pdev) +{ + struct omap_dmadev *od; + int rc, i; + + od = kzalloc(sizeof(*od), GFP_KERNEL); + if (!od) + return -ENOMEM; + + 
dma_cap_set(DMA_SLAVE, od->ddev.cap_mask); + dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask); + od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources; + od->ddev.device_free_chan_resources = omap_dma_free_chan_resources; + od->ddev.device_tx_status = omap_dma_tx_status; + od->ddev.device_issue_pending = omap_dma_issue_pending; + od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg; + od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic; + od->ddev.device_control = omap_dma_control; + od->ddev.dev = &pdev->dev; + INIT_LIST_HEAD(&od->ddev.channels); + INIT_LIST_HEAD(&od->pending); + spin_lock_init(&od->lock); + + tasklet_init(&od->task, omap_dma_sched, (unsigned long)od); + + for (i = 0; i < 127; i++) { + rc = omap_dma_chan_init(od, i); + if (rc) { + omap_dma_free(od); + return rc; + } + } + + rc = dma_async_device_register(&od->ddev); + if (rc) { + pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n", + rc); + omap_dma_free(od); + } else { + platform_set_drvdata(pdev, od); + } + + dev_info(&pdev->dev, "OMAP DMA engine driver\n"); + + return rc; +} + +static int omap_dma_remove(struct platform_device *pdev) +{ + struct omap_dmadev *od = platform_get_drvdata(pdev); + + dma_async_device_unregister(&od->ddev); + omap_dma_free(od); + + return 0; +} + +static struct platform_driver omap_dma_driver = { + .probe = omap_dma_probe, + .remove = omap_dma_remove, + .driver = { + .name = "omap-dma-engine", + .owner = THIS_MODULE, + }, +}; + +bool omap_dma_filter_fn(struct dma_chan *chan, void *param) +{ + if (chan->device->dev->driver == &omap_dma_driver.driver) { + struct omap_chan *c = to_omap_dma_chan(chan); + unsigned req = *(unsigned *)param; + + return req == c->dma_sig; + } + return false; +} +EXPORT_SYMBOL_GPL(omap_dma_filter_fn); + +static struct platform_device *pdev; + +static const struct platform_device_info omap_dma_dev_info = { + .name = "omap-dma-engine", + .id = -1, + .dma_mask = DMA_BIT_MASK(32), +}; + +static int omap_dma_init(void) +{ + int rc = platform_driver_register(&omap_dma_driver); + + if (rc == 0) { + pdev = platform_device_register_full(&omap_dma_dev_info); + if (IS_ERR(pdev)) { + platform_driver_unregister(&omap_dma_driver); + rc = PTR_ERR(pdev); + } + } + return rc; +} +subsys_initcall(omap_dma_init); + +static void __exit omap_dma_exit(void) +{ + platform_device_unregister(pdev); + platform_driver_unregister(&omap_dma_driver); +} +module_exit(omap_dma_exit); + +MODULE_AUTHOR("Russell King"); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index ec78ccef913..f5a73606217 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c @@ -21,6 +21,8 @@ #include <linux/slab.h> #include <linux/spinlock.h> +#include "virt-dma.h" + #define NR_PHY_CHAN 6 #define DMA_ALIGN 3 #define DMA_MAX_SIZE 0x1fff @@ -72,12 +74,13 @@ struct sa11x0_dma_sg { }; struct sa11x0_dma_desc { - struct dma_async_tx_descriptor tx; + struct virt_dma_desc vd; + u32 ddar; size_t size; + unsigned period; + bool cyclic; - /* maybe protected by c->lock */ - struct list_head node; unsigned sglen; struct sa11x0_dma_sg sg[0]; }; @@ -85,15 +88,11 @@ struct sa11x0_dma_desc { struct sa11x0_dma_phy; struct sa11x0_dma_chan { - struct dma_chan chan; - spinlock_t lock; - dma_cookie_t lc; + struct virt_dma_chan vc; - /* protected by c->lock */ + /* protected by c->vc.lock */ struct sa11x0_dma_phy *phy; enum dma_status status; - struct list_head desc_submitted; - struct list_head desc_issued; /* protected by d->lock */ struct list_head node; 
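The sa11x0 conversion follows the same recipe as pl08x above: the per-channel lock, cookie tracking and submitted/issued descriptor lists all move into the embedded virt_dma_chan. Shape of a converted channel, sketched with hypothetical names (my_chan, my_desc_free, register_chan):

static void my_desc_free(struct virt_dma_desc *vd);  /* driver's kfree hook */

struct my_chan {
        struct virt_dma_chan vc;   /* lock + descriptor lists live here now */
        /* driver-specific state (phy pointer, status, ...) follows */
};

static void register_chan(struct my_chan *c, struct dma_device *dd)
{
        c->vc.desc_free = my_desc_free;
        vchan_init(&c->vc, dd);    /* hooks vc.chan into dd->channels */
}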
@@ -109,7 +108,7 @@ struct sa11x0_dma_phy { struct sa11x0_dma_chan *vchan; - /* Protected by c->lock */ + /* Protected by c->vc.lock */ unsigned sg_load; struct sa11x0_dma_desc *txd_load; unsigned sg_done; @@ -127,13 +126,12 @@ struct sa11x0_dma_dev { spinlock_t lock; struct tasklet_struct task; struct list_head chan_pending; - struct list_head desc_complete; struct sa11x0_dma_phy phy[NR_PHY_CHAN]; }; static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan) { - return container_of(chan, struct sa11x0_dma_chan, chan); + return container_of(chan, struct sa11x0_dma_chan, vc.chan); } static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev) @@ -141,27 +139,26 @@ static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev) return container_of(dmadev, struct sa11x0_dma_dev, slave); } -static struct sa11x0_dma_desc *to_sa11x0_dma_tx(struct dma_async_tx_descriptor *tx) +static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c) { - return container_of(tx, struct sa11x0_dma_desc, tx); + struct virt_dma_desc *vd = vchan_next_desc(&c->vc); + + return vd ? container_of(vd, struct sa11x0_dma_desc, vd) : NULL; } -static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c) +static void sa11x0_dma_free_desc(struct virt_dma_desc *vd) { - if (list_empty(&c->desc_issued)) - return NULL; - - return list_first_entry(&c->desc_issued, struct sa11x0_dma_desc, node); + kfree(container_of(vd, struct sa11x0_dma_desc, vd)); } static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd) { - list_del(&txd->node); + list_del(&txd->vd.node); p->txd_load = txd; p->sg_load = 0; dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n", - p->num, txd, txd->tx.cookie, txd->ddar); + p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar); } static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p, @@ -183,19 +180,24 @@ static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p, return; if (p->sg_load == txd->sglen) { - struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c); + if (!txd->cyclic) { + struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c); - /* - * We have reached the end of the current descriptor. - * Peek at the next descriptor, and if compatible with - * the current, start processing it. - */ - if (txn && txn->ddar == txd->ddar) { - txd = txn; - sa11x0_dma_start_desc(p, txn); + /* + * We have reached the end of the current descriptor. + * Peek at the next descriptor, and if compatible with + * the current, start processing it. 
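+ * A descriptor with a different DDAR value cannot be chained onto + * the running channel; in that case the channel is left to drain and + * the tasklet later restarts it with the new settings.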
+ */ + if (txn && txn->ddar == txd->ddar) { + txd = txn; + sa11x0_dma_start_desc(p, txn); + } else { + p->txd_load = NULL; + return; + } } else { - p->txd_load = NULL; - return; + /* Cyclic: reset back to beginning */ + p->sg_load = 0; } } @@ -229,21 +231,21 @@ static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd = p->txd_done; if (++p->sg_done == txd->sglen) { - struct sa11x0_dma_dev *d = p->dev; - - dev_vdbg(d->slave.dev, "pchan %u: txd %p[%x]: completed\n", - p->num, p->txd_done, p->txd_done->tx.cookie); - - c->lc = txd->tx.cookie; + if (!txd->cyclic) { + vchan_cookie_complete(&txd->vd); - spin_lock(&d->lock); - list_add_tail(&txd->node, &d->desc_complete); - spin_unlock(&d->lock); + p->sg_done = 0; + p->txd_done = p->txd_load; - p->sg_done = 0; - p->txd_done = p->txd_load; + if (!p->txd_done) + tasklet_schedule(&p->dev->task); + } else { + if ((p->sg_done % txd->period) == 0) + vchan_cyclic_callback(&txd->vd); - tasklet_schedule(&d->task); + /* Cyclic: reset back to beginning */ + p->sg_done = 0; + } } sa11x0_dma_start_sg(p, c); @@ -280,7 +282,7 @@ static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id) if (c) { unsigned long flags; - spin_lock_irqsave(&c->lock, flags); + spin_lock_irqsave(&c->vc.lock, flags); /* * Now that we're holding the lock, check that the vchan * really is associated with this pchan before touching the @@ -294,7 +296,7 @@ static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id) if (dcsr & DCSR_DONEB) sa11x0_dma_complete(p, c); } - spin_unlock_irqrestore(&c->lock, flags); + spin_unlock_irqrestore(&c->vc.lock, flags); } return IRQ_HANDLED; @@ -332,28 +334,15 @@ static void sa11x0_dma_tasklet(unsigned long arg) struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg; struct sa11x0_dma_phy *p; struct sa11x0_dma_chan *c; - struct sa11x0_dma_desc *txd, *txn; - LIST_HEAD(head); unsigned pch, pch_alloc = 0; dev_dbg(d->slave.dev, "tasklet enter\n"); - /* Get the completed tx descriptors */ - spin_lock_irq(&d->lock); - list_splice_init(&d->desc_complete, &head); - spin_unlock_irq(&d->lock); - - list_for_each_entry(txd, &head, node) { - c = to_sa11x0_dma_chan(txd->tx.chan); - - dev_dbg(d->slave.dev, "vchan %p: txd %p[%x] completed\n", - c, txd, txd->tx.cookie); - - spin_lock_irq(&c->lock); + list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) { + spin_lock_irq(&c->vc.lock); p = c->phy; - if (p) { - if (!p->txd_done) - sa11x0_dma_start_txd(c); + if (p && !p->txd_done) { + sa11x0_dma_start_txd(c); if (!p->txd_done) { /* No current txd associated with this channel */ dev_dbg(d->slave.dev, "pchan %u: free\n", p->num); @@ -363,7 +352,7 @@ static void sa11x0_dma_tasklet(unsigned long arg) p->vchan = NULL; } } - spin_unlock_irq(&c->lock); + spin_unlock_irq(&c->vc.lock); } spin_lock_irq(&d->lock); @@ -380,7 +369,7 @@ static void sa11x0_dma_tasklet(unsigned long arg) /* Mark this channel allocated */ p->vchan = c; - dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, c); + dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc); } } spin_unlock_irq(&d->lock); @@ -390,42 +379,18 @@ static void sa11x0_dma_tasklet(unsigned long arg) p = &d->phy[pch]; c = p->vchan; - spin_lock_irq(&c->lock); + spin_lock_irq(&c->vc.lock); c->phy = p; sa11x0_dma_start_txd(c); - spin_unlock_irq(&c->lock); + spin_unlock_irq(&c->vc.lock); } } - /* Now free the completed tx descriptor, and call their callbacks */ - list_for_each_entry_safe(txd, txn, &head, node) { - dma_async_tx_callback callback = txd->tx.callback; - void *callback_param = 
txd->tx.callback_param; - - dev_dbg(d->slave.dev, "txd %p[%x]: callback and free\n", - txd, txd->tx.cookie); - - kfree(txd); - - if (callback) - callback(callback_param); - } - dev_dbg(d->slave.dev, "tasklet exit\n"); } -static void sa11x0_dma_desc_free(struct sa11x0_dma_dev *d, struct list_head *head) -{ - struct sa11x0_dma_desc *txd, *txn; - - list_for_each_entry_safe(txd, txn, head, node) { - dev_dbg(d->slave.dev, "txd %p: freeing\n", txd); - kfree(txd); - } -} - static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan) { return 0; @@ -436,18 +401,12 @@ static void sa11x0_dma_free_chan_resources(struct dma_chan *chan) struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); unsigned long flags; - LIST_HEAD(head); - spin_lock_irqsave(&c->lock, flags); - spin_lock(&d->lock); + spin_lock_irqsave(&d->lock, flags); list_del_init(&c->node); - spin_unlock(&d->lock); - - list_splice_tail_init(&c->desc_submitted, &head); - list_splice_tail_init(&c->desc_issued, &head); - spin_unlock_irqrestore(&c->lock, flags); + spin_unlock_irqrestore(&d->lock, flags); - sa11x0_dma_desc_free(d, &head); + vchan_free_chan_resources(&c->vc); } static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p) @@ -472,33 +431,47 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan, struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); struct sa11x0_dma_phy *p; - struct sa11x0_dma_desc *txd; - dma_cookie_t last_used, last_complete; + struct virt_dma_desc *vd; unsigned long flags; enum dma_status ret; - size_t bytes = 0; - - last_used = c->chan.cookie; - last_complete = c->lc; - ret = dma_async_is_complete(cookie, last_complete, last_used); - if (ret == DMA_SUCCESS) { - dma_set_tx_state(state, last_complete, last_used, 0); + ret = dma_cookie_status(&c->vc.chan, cookie, state); + if (ret == DMA_SUCCESS) return ret; - } - spin_lock_irqsave(&c->lock, flags); + if (!state) + return c->status; + + spin_lock_irqsave(&c->vc.lock, flags); p = c->phy; - ret = c->status; - if (p) { - dma_addr_t addr = sa11x0_dma_pos(p); - dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr); + /* + * If the cookie is on our issue queue, then the residue is + * its total size. 
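+ * If it is the descriptor currently active on the physical channel, + * the residue is computed from the hardware position over the + * remaining scatterlist entries; with no physical channel assigned, + * or no matching descriptor at all, the residue is zero.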
+ */ + vd = vchan_find_desc(&c->vc, cookie); + if (vd) { + state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size; + } else if (!p) { + state->residue = 0; + } else { + struct sa11x0_dma_desc *txd; + size_t bytes = 0; - txd = p->txd_done; + if (p->txd_done && p->txd_done->vd.tx.cookie == cookie) + txd = p->txd_done; + else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie) + txd = p->txd_load; + else + txd = NULL; + + ret = c->status; if (txd) { + dma_addr_t addr = sa11x0_dma_pos(p); unsigned i; + dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr); + for (i = 0; i < txd->sglen; i++) { dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n", i, txd->sg[i].addr, txd->sg[i].len); @@ -521,17 +494,11 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan, bytes += txd->sg[i].len; } } - if (txd != p->txd_load && p->txd_load) - bytes += p->txd_load->size; - } - list_for_each_entry(txd, &c->desc_issued, node) { - bytes += txd->size; + state->residue = bytes; } - spin_unlock_irqrestore(&c->lock, flags); - - dma_set_tx_state(state, last_complete, last_used, bytes); + spin_unlock_irqrestore(&c->vc.lock, flags); - dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", bytes); + dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", state->residue); return ret; } @@ -547,40 +514,20 @@ static void sa11x0_dma_issue_pending(struct dma_chan *chan) struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); unsigned long flags; - spin_lock_irqsave(&c->lock, flags); - list_splice_tail_init(&c->desc_submitted, &c->desc_issued); - if (!list_empty(&c->desc_issued)) { - spin_lock(&d->lock); - if (!c->phy && list_empty(&c->node)) { - list_add_tail(&c->node, &d->chan_pending); - tasklet_schedule(&d->task); - dev_dbg(d->slave.dev, "vchan %p: issued\n", c); + spin_lock_irqsave(&c->vc.lock, flags); + if (vchan_issue_pending(&c->vc)) { + if (!c->phy) { + spin_lock(&d->lock); + if (list_empty(&c->node)) { + list_add_tail(&c->node, &d->chan_pending); + tasklet_schedule(&d->task); + dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc); + } + spin_unlock(&d->lock); } - spin_unlock(&d->lock); } else - dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", c); - spin_unlock_irqrestore(&c->lock, flags); -} - -static dma_cookie_t sa11x0_dma_tx_submit(struct dma_async_tx_descriptor *tx) -{ - struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(tx->chan); - struct sa11x0_dma_desc *txd = to_sa11x0_dma_tx(tx); - unsigned long flags; - - spin_lock_irqsave(&c->lock, flags); - c->chan.cookie += 1; - if (c->chan.cookie < 0) - c->chan.cookie = 1; - txd->tx.cookie = c->chan.cookie; - - list_add_tail(&txd->node, &c->desc_submitted); - spin_unlock_irqrestore(&c->lock, flags); - - dev_dbg(tx->chan->device->dev, "vchan %p: txd %p[%x]: submitted\n", - c, txd, txd->tx.cookie); - - return txd->tx.cookie; + dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc); + spin_unlock_irqrestore(&c->vc.lock, flags); } static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( @@ -596,7 +543,7 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( /* SA11x0 channels can only operate in their native direction */ if (dir != (c->ddar & DDAR_RW ? 
DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) { dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n", - c, c->ddar, dir); + &c->vc, c->ddar, dir); return NULL; } @@ -612,14 +559,14 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1; if (addr & DMA_ALIGN) { dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n", - c, addr); + &c->vc, addr); return NULL; } } txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC); if (!txd) { - dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", c); + dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc); return NULL; } @@ -655,17 +602,73 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( } while (len); } - dma_async_tx_descriptor_init(&txd->tx, &c->chan); - txd->tx.flags = flags; - txd->tx.tx_submit = sa11x0_dma_tx_submit; txd->ddar = c->ddar; txd->size = size; txd->sglen = j; dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n", - c, txd, txd->size, txd->sglen); + &c->vc, &txd->vd, txd->size, txd->sglen); - return &txd->tx; + return vchan_tx_prep(&c->vc, &txd->vd, flags); +} + +static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic( + struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period, + enum dma_transfer_direction dir, void *context) +{ + struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); + struct sa11x0_dma_desc *txd; + unsigned i, j, k, sglen, sgperiod; + + /* SA11x0 channels can only operate in their native direction */ + if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) { + dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n", + &c->vc, c->ddar, dir); + return NULL; + } + + sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN); + sglen = size * sgperiod / period; + + /* Do not allow zero-sized txds */ + if (sglen == 0) + return NULL; + + txd = kzalloc(sizeof(*txd) + sglen * sizeof(txd->sg[0]), GFP_ATOMIC); + if (!txd) { + dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc); + return NULL; + } + + for (i = k = 0; i < size / period; i++) { + size_t tlen, len = period; + + for (j = 0; j < sgperiod; j++, k++) { + tlen = len; + + if (tlen > DMA_MAX_SIZE) { + unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN); + tlen = (tlen / mult) & ~DMA_ALIGN; + } + + txd->sg[k].addr = addr; + txd->sg[k].len = tlen; + addr += tlen; + len -= tlen; + } + + WARN_ON(len != 0); + } + + WARN_ON(k != sglen); + + txd->ddar = c->ddar; + txd->size = size; + txd->sglen = sglen; + txd->cyclic = 1; + txd->period = sgperiod; + + return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); } static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg) @@ -695,8 +698,8 @@ static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_c if (maxburst == 8) ddar |= DDAR_BS; - dev_dbg(c->chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n", - c, addr, width, maxburst); + dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n", + &c->vc, addr, width, maxburst); c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6; @@ -718,16 +721,13 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg); case DMA_TERMINATE_ALL: - dev_dbg(d->slave.dev, "vchan %p: terminate all\n", c); + dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc); /* Clear 
the tx descriptor lists */ - spin_lock_irqsave(&c->lock, flags); - list_splice_tail_init(&c->desc_submitted, &head); - list_splice_tail_init(&c->desc_issued, &head); + spin_lock_irqsave(&c->vc.lock, flags); + vchan_get_all_descriptors(&c->vc, &head); p = c->phy; if (p) { - struct sa11x0_dma_desc *txd, *txn; - dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num); /* vchan is assigned to a pchan - stop the channel */ writel(DCSR_RUN | DCSR_IE | @@ -735,17 +735,13 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, DCSR_STRTB | DCSR_DONEB, p->base + DMA_DCSR_C); - list_for_each_entry_safe(txd, txn, &d->desc_complete, node) - if (txd->tx.chan == &c->chan) - list_move(&txd->node, &head); - if (p->txd_load) { if (p->txd_load != p->txd_done) - list_add_tail(&p->txd_load->node, &head); + list_add_tail(&p->txd_load->vd.node, &head); p->txd_load = NULL; } if (p->txd_done) { - list_add_tail(&p->txd_done->node, &head); + list_add_tail(&p->txd_done->vd.node, &head); p->txd_done = NULL; } c->phy = NULL; @@ -754,14 +750,14 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, spin_unlock(&d->lock); tasklet_schedule(&d->task); } - spin_unlock_irqrestore(&c->lock, flags); - sa11x0_dma_desc_free(d, &head); + spin_unlock_irqrestore(&c->vc.lock, flags); + vchan_dma_desc_free_list(&c->vc, &head); ret = 0; break; case DMA_PAUSE: - dev_dbg(d->slave.dev, "vchan %p: pause\n", c); - spin_lock_irqsave(&c->lock, flags); + dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc); + spin_lock_irqsave(&c->vc.lock, flags); if (c->status == DMA_IN_PROGRESS) { c->status = DMA_PAUSED; @@ -774,26 +770,26 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, spin_unlock(&d->lock); } } - spin_unlock_irqrestore(&c->lock, flags); + spin_unlock_irqrestore(&c->vc.lock, flags); ret = 0; break; case DMA_RESUME: - dev_dbg(d->slave.dev, "vchan %p: resume\n", c); - spin_lock_irqsave(&c->lock, flags); + dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc); + spin_lock_irqsave(&c->vc.lock, flags); if (c->status == DMA_PAUSED) { c->status = DMA_IN_PROGRESS; p = c->phy; if (p) { writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S); - } else if (!list_empty(&c->desc_issued)) { + } else if (!list_empty(&c->vc.desc_issued)) { spin_lock(&d->lock); list_add_tail(&c->node, &d->chan_pending); spin_unlock(&d->lock); } } - spin_unlock_irqrestore(&c->lock, flags); + spin_unlock_irqrestore(&c->vc.lock, flags); ret = 0; break; @@ -853,15 +849,13 @@ static int __devinit sa11x0_dma_init_dmadev(struct dma_device *dmadev, return -ENOMEM; } - c->chan.device = dmadev; c->status = DMA_IN_PROGRESS; c->ddar = chan_desc[i].ddar; c->name = chan_desc[i].name; - spin_lock_init(&c->lock); - INIT_LIST_HEAD(&c->desc_submitted); - INIT_LIST_HEAD(&c->desc_issued); INIT_LIST_HEAD(&c->node); - list_add_tail(&c->chan.device_node, &dmadev->channels); + + c->vc.desc_free = sa11x0_dma_free_desc; + vchan_init(&c->vc, dmadev); } return dma_async_device_register(dmadev); @@ -890,8 +884,9 @@ static void sa11x0_dma_free_channels(struct dma_device *dmadev) { struct sa11x0_dma_chan *c, *cn; - list_for_each_entry_safe(c, cn, &dmadev->channels, chan.device_node) { - list_del(&c->chan.device_node); + list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) { + list_del(&c->vc.chan.device_node); + tasklet_kill(&c->vc.task); kfree(c); } } @@ -915,7 +910,6 @@ static int __devinit sa11x0_dma_probe(struct platform_device *pdev) spin_lock_init(&d->lock); INIT_LIST_HEAD(&d->chan_pending); - 
INIT_LIST_HEAD(&d->desc_complete); d->base = ioremap(res->start, resource_size(res)); if (!d->base) { @@ -947,7 +941,9 @@ static int __devinit sa11x0_dma_probe(struct platform_device *pdev) } dma_cap_set(DMA_SLAVE, d->slave.cap_mask); + dma_cap_set(DMA_CYCLIC, d->slave.cap_mask); d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg; + d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic; ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev); if (ret) { dev_warn(d->slave.dev, "failed to register slave async device: %d\n", diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c new file mode 100644 index 00000000000..6f80432a3f0 --- /dev/null +++ b/drivers/dma/virt-dma.c @@ -0,0 +1,123 @@ +/* + * Virtual DMA channel support for DMAengine + * + * Copyright (C) 2012 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include <linux/device.h> +#include <linux/dmaengine.h> +#include <linux/module.h> +#include <linux/spinlock.h> + +#include "virt-dma.h" + +static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx) +{ + return container_of(tx, struct virt_dma_desc, tx); +} + +dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct virt_dma_chan *vc = to_virt_chan(tx->chan); + struct virt_dma_desc *vd = to_virt_desc(tx); + unsigned long flags; + dma_cookie_t cookie; + + spin_lock_irqsave(&vc->lock, flags); + cookie = dma_cookie_assign(tx); + + list_add_tail(&vd->node, &vc->desc_submitted); + spin_unlock_irqrestore(&vc->lock, flags); + + dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n", + vc, vd, cookie); + + return cookie; +} +EXPORT_SYMBOL_GPL(vchan_tx_submit); + +struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc, + dma_cookie_t cookie) +{ + struct virt_dma_desc *vd; + + list_for_each_entry(vd, &vc->desc_issued, node) + if (vd->tx.cookie == cookie) + return vd; + + return NULL; +} +EXPORT_SYMBOL_GPL(vchan_find_desc); + +/* + * This tasklet handles the completion of a DMA descriptor by + * calling its callback and freeing it. 
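+ * + * A cyclic descriptor is treated specially: its callback is invoked + * here when a period completes (see vchan_cyclic_callback()), but the + * descriptor itself is not freed. Completed non-cyclic descriptors + * are unlinked, handed to the channel's desc_free method, and their + * callbacks then called.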
+ */ +static void vchan_complete(unsigned long arg) +{ + struct virt_dma_chan *vc = (struct virt_dma_chan *)arg; + struct virt_dma_desc *vd; + dma_async_tx_callback cb = NULL; + void *cb_data = NULL; + LIST_HEAD(head); + + spin_lock_irq(&vc->lock); + list_splice_tail_init(&vc->desc_completed, &head); + vd = vc->cyclic; + if (vd) { + vc->cyclic = NULL; + cb = vd->tx.callback; + cb_data = vd->tx.callback_param; + } + spin_unlock_irq(&vc->lock); + + if (cb) + cb(cb_data); + + while (!list_empty(&head)) { + vd = list_first_entry(&head, struct virt_dma_desc, node); + cb = vd->tx.callback; + cb_data = vd->tx.callback_param; + + list_del(&vd->node); + + vc->desc_free(vd); + + if (cb) + cb(cb_data); + } +} + +void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head) +{ + while (!list_empty(head)) { + struct virt_dma_desc *vd = list_first_entry(head, + struct virt_dma_desc, node); + list_del(&vd->node); + dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd); + vc->desc_free(vd); + } +} +EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list); + +void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev) +{ + dma_cookie_init(&vc->chan); + + spin_lock_init(&vc->lock); + INIT_LIST_HEAD(&vc->desc_submitted); + INIT_LIST_HEAD(&vc->desc_issued); + INIT_LIST_HEAD(&vc->desc_completed); + + tasklet_init(&vc->task, vchan_complete, (unsigned long)vc); + + vc->chan.device = dmadev; + list_add_tail(&vc->chan.device_node, &dmadev->channels); +} +EXPORT_SYMBOL_GPL(vchan_init); + +MODULE_AUTHOR("Russell King"); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h new file mode 100644 index 00000000000..85c19d63f9f --- /dev/null +++ b/drivers/dma/virt-dma.h @@ -0,0 +1,152 @@ +/* + * Virtual DMA channel support for DMAengine + * + * Copyright (C) 2012 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef VIRT_DMA_H +#define VIRT_DMA_H + +#include <linux/dmaengine.h> +#include <linux/interrupt.h> + +#include "dmaengine.h" + +struct virt_dma_desc { + struct dma_async_tx_descriptor tx; + /* protected by vc.lock */ + struct list_head node; +}; + +struct virt_dma_chan { + struct dma_chan chan; + struct tasklet_struct task; + void (*desc_free)(struct virt_dma_desc *); + + spinlock_t lock; + + /* protected by vc.lock */ + struct list_head desc_submitted; + struct list_head desc_issued; + struct list_head desc_completed; + + struct virt_dma_desc *cyclic; +}; + +static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan) +{ + return container_of(chan, struct virt_dma_chan, chan); +} + +void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head); +void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev); +struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t); + +/** + * vchan_tx_prep - prepare a descriptor + * vc: virtual channel allocating this descriptor + * vd: virtual descriptor to prepare + * tx_flags: flags argument passed in to prepare function + */ +static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc, + struct virt_dma_desc *vd, unsigned long tx_flags) +{ + extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *); + + dma_async_tx_descriptor_init(&vd->tx, &vc->chan); + vd->tx.flags = tx_flags; + vd->tx.tx_submit = vchan_tx_submit; + + return &vd->tx; +} + +/** + * vchan_issue_pending - move submitted descriptors to issued list + * vc: virtual channel to update + * + * vc.lock must be held by caller + */ +static inline bool vchan_issue_pending(struct virt_dma_chan *vc) +{ + list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued); + return !list_empty(&vc->desc_issued); +} + +/** + * vchan_cookie_complete - report completion of a descriptor + * vd: virtual descriptor to update + * + * vc.lock must be held by caller + */ +static inline void vchan_cookie_complete(struct virt_dma_desc *vd) +{ + struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); + + dma_cookie_complete(&vd->tx); + dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n", + vd, vd->tx.cookie); + list_add_tail(&vd->node, &vc->desc_completed); + + tasklet_schedule(&vc->task); +} + +/** + * vchan_cyclic_callback - report the completion of a period + * vd: virtual descriptor + */ +static inline void vchan_cyclic_callback(struct virt_dma_desc *vd) +{ + struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); + + vc->cyclic = vd; + tasklet_schedule(&vc->task); +} + +/** + * vchan_next_desc - peek at the next descriptor to be processed + * vc: virtual channel to obtain descriptor from + * + * vc.lock must be held by caller + */ +static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc) +{ + if (list_empty(&vc->desc_issued)) + return NULL; + + return list_first_entry(&vc->desc_issued, struct virt_dma_desc, node); +} + +/** + * vchan_get_all_descriptors - obtain all submitted and issued descriptors + * vc: virtual channel to get descriptors from + * head: list of descriptors found + * + * vc.lock must be held by caller + * + * Removes all submitted and issued descriptors from internal lists, and + * provides a list of all descriptors found + */ +static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, + struct list_head *head) +{ + list_splice_tail_init(&vc->desc_submitted, head); + list_splice_tail_init(&vc->desc_issued, head); + list_splice_tail_init(&vc->desc_completed, 
head); +} + +static inline void vchan_free_chan_resources(struct virt_dma_chan *vc) +{ + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&vc->lock, flags); + vchan_get_all_descriptors(vc, &head); + spin_unlock_irqrestore(&vc->lock, flags); + + vchan_dma_desc_free_list(vc, &head); +} + +#endif diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 1eee45b69b7..d949b781f6f 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -268,13 +268,14 @@ config DM_MIRROR needed for live data migration tools such as 'pvmove'. config DM_RAID - tristate "RAID 1/4/5/6 target" + tristate "RAID 1/4/5/6/10 target" depends on BLK_DEV_DM select MD_RAID1 + select MD_RAID10 select MD_RAID456 select BLK_DEV_MD ---help--- - A dm target that supports RAID1, RAID4, RAID5 and RAID6 mappings + A dm target that supports RAID1, RAID10, RAID4, RAID5 and RAID6 mappings A RAID-5 set of N drives with a capacity of C MB per drive provides the capacity of C * (N - 1) MB, and protects against a failure diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 15dbe03117e..94e7f6ba2e1 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -1305,7 +1305,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect prepare_to_wait(&bitmap->overflow_wait, &__wait, TASK_UNINTERRUPTIBLE); spin_unlock_irq(&bitmap->counts.lock); - io_schedule(); + schedule(); finish_wait(&bitmap->overflow_wait, &__wait); continue; } diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index f2f29c52654..982e3e390c4 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -11,6 +11,7 @@ #include "md.h" #include "raid1.h" #include "raid5.h" +#include "raid10.h" #include "bitmap.h" #include <linux/device-mapper.h> @@ -52,7 +53,10 @@ struct raid_dev { #define DMPF_MAX_RECOVERY_RATE 0x20 #define DMPF_MAX_WRITE_BEHIND 0x40 #define DMPF_STRIPE_CACHE 0x80 -#define DMPF_REGION_SIZE 0X100 +#define DMPF_REGION_SIZE 0x100 +#define DMPF_RAID10_COPIES 0x200 +#define DMPF_RAID10_FORMAT 0x400 + struct raid_set { struct dm_target *ti; @@ -76,6 +80,7 @@ static struct raid_type { const unsigned algorithm; /* RAID algorithm. */ } raid_types[] = { {"raid1", "RAID1 (mirroring)", 0, 2, 1, 0 /* NONE */}, + {"raid10", "RAID10 (striped mirrors)", 0, 2, 10, UINT_MAX /* Varies */}, {"raid4", "RAID4 (dedicated parity disk)", 1, 2, 5, ALGORITHM_PARITY_0}, {"raid5_la", "RAID5 (left asymmetric)", 1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC}, {"raid5_ra", "RAID5 (right asymmetric)", 1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC}, @@ -86,6 +91,17 @@ static struct raid_type { {"raid6_nc", "RAID6 (N continue)", 2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE} }; +static unsigned raid10_md_layout_to_copies(int layout) +{ + return layout & 0xFF; +} + +static int raid10_format_to_md_layout(char *format, unsigned copies) +{ + /* 1 "far" copy, and 'copies' "near" copies */ + return (1 << 8) | (copies & 0xFF); +} + static struct raid_type *get_raid_type(char *name) { int i; @@ -339,10 +355,16 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size) * [max_write_behind <sectors>] See '-write-behind=' (man mdadm) * [stripe_cache <sectors>] Stripe cache size for higher RAIDs * [region_size <sectors>] Defines granularity of bitmap + * + * RAID10-only options: + * [raid10_copies <# copies>] Number of copies. (Default: 2) + * [raid10_format <near>] Layout algorithm. 
(Default: near) */ static int parse_raid_params(struct raid_set *rs, char **argv, unsigned num_raid_params) { + char *raid10_format = "near"; + unsigned raid10_copies = 2; unsigned i, rebuild_cnt = 0; unsigned long value, region_size = 0; sector_t sectors_per_dev = rs->ti->len; @@ -416,11 +438,28 @@ static int parse_raid_params(struct raid_set *rs, char **argv, } key = argv[i++]; + + /* Parameters that take a string value are checked here. */ + if (!strcasecmp(key, "raid10_format")) { + if (rs->raid_type->level != 10) { + rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type"; + return -EINVAL; + } + if (strcmp("near", argv[i])) { + rs->ti->error = "Invalid 'raid10_format' value given"; + return -EINVAL; + } + raid10_format = argv[i]; + rs->print_flags |= DMPF_RAID10_FORMAT; + continue; + } + if (strict_strtoul(argv[i], 10, &value) < 0) { rs->ti->error = "Bad numerical argument given in raid params"; return -EINVAL; } + /* Parameters that take a numeric value are checked here */ if (!strcasecmp(key, "rebuild")) { rebuild_cnt++; @@ -439,6 +478,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv, return -EINVAL; } break; + case 10: default: DMERR("The rebuild parameter is not supported for %s", rs->raid_type->name); rs->ti->error = "Rebuild not supported for this RAID type"; @@ -495,7 +535,8 @@ static int parse_raid_params(struct raid_set *rs, char **argv, */ value /= 2; - if (rs->raid_type->level < 5) { + if ((rs->raid_type->level != 5) && + (rs->raid_type->level != 6)) { rs->ti->error = "Inappropriate argument: stripe_cache"; return -EINVAL; } @@ -520,6 +561,14 @@ static int parse_raid_params(struct raid_set *rs, char **argv, } else if (!strcasecmp(key, "region_size")) { rs->print_flags |= DMPF_REGION_SIZE; region_size = value; + } else if (!strcasecmp(key, "raid10_copies") && + (rs->raid_type->level == 10)) { + if ((value < 2) || (value > 0xFF)) { + rs->ti->error = "Bad value for 'raid10_copies'"; + return -EINVAL; + } + rs->print_flags |= DMPF_RAID10_COPIES; + raid10_copies = value; } else { DMERR("Unable to parse RAID parameter: %s", key); rs->ti->error = "Unable to parse RAID parameters"; @@ -538,8 +587,22 @@ static int parse_raid_params(struct raid_set *rs, char **argv, if (dm_set_target_max_io_len(rs->ti, max_io_len)) return -EINVAL; - if ((rs->raid_type->level > 1) && - sector_div(sectors_per_dev, (rs->md.raid_disks - rs->raid_type->parity_devs))) { + if (rs->raid_type->level == 10) { + if (raid10_copies > rs->md.raid_disks) { + rs->ti->error = "Not enough devices to satisfy specification"; + return -EINVAL; + } + + /* (Len * #mirrors) / #devices */ + sectors_per_dev = rs->ti->len * raid10_copies; + sector_div(sectors_per_dev, rs->md.raid_disks); + + rs->md.layout = raid10_format_to_md_layout(raid10_format, + raid10_copies); + rs->md.new_layout = rs->md.layout; + } else if ((rs->raid_type->level > 1) && + sector_div(sectors_per_dev, + (rs->md.raid_disks - rs->raid_type->parity_devs))) { rs->ti->error = "Target length not divisible by number of data devices"; return -EINVAL; } @@ -566,6 +629,9 @@ static int raid_is_congested(struct dm_target_callbacks *cb, int bits) if (rs->raid_type->level == 1) return md_raid1_congested(&rs->md, bits); + if (rs->raid_type->level == 10) + return md_raid10_congested(&rs->md, bits); + return md_raid5_congested(&rs->md, bits); } @@ -884,6 +950,9 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) case 6: redundancy = rs->raid_type->parity_devs; break; + case 10: + redundancy = 
raid10_md_layout_to_copies(mddev->layout) - 1; + break; default: ti->error = "Unknown RAID type"; return -EINVAL; @@ -1049,12 +1118,19 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv) goto bad; } + if (ti->len != rs->md.array_sectors) { + ti->error = "Array size does not match requested target length"; + ret = -EINVAL; + goto size_mismatch; + } rs->callbacks.congested_fn = raid_is_congested; dm_table_add_target_callbacks(ti->table, &rs->callbacks); mddev_suspend(&rs->md); return 0; +size_mismatch: + md_stop(&rs->md); bad: context_free(rs); @@ -1203,6 +1279,13 @@ static int raid_status(struct dm_target *ti, status_type_t type, DMEMIT(" region_size %lu", rs->md.bitmap_info.chunksize >> 9); + if (rs->print_flags & DMPF_RAID10_COPIES) + DMEMIT(" raid10_copies %u", + raid10_md_layout_to_copies(rs->md.layout)); + + if (rs->print_flags & DMPF_RAID10_FORMAT) + DMEMIT(" raid10_format near"); + DMEMIT(" %d", rs->md.raid_disks); for (i = 0; i < rs->md.raid_disks; i++) { if (rs->dev[i].meta_dev) @@ -1277,7 +1360,7 @@ static void raid_resume(struct dm_target *ti) static struct target_type raid_target = { .name = "raid", - .version = {1, 2, 0}, + .version = {1, 3, 0}, .module = THIS_MODULE, .ctr = raid_ctr, .dtr = raid_dtr, @@ -1304,6 +1387,8 @@ module_init(dm_raid_init); module_exit(dm_raid_exit); MODULE_DESCRIPTION(DM_NAME " raid4/5/6 target"); +MODULE_ALIAS("dm-raid1"); +MODULE_ALIAS("dm-raid10"); MODULE_ALIAS("dm-raid4"); MODULE_ALIAS("dm-raid5"); MODULE_ALIAS("dm-raid6"); diff --git a/drivers/md/md.c b/drivers/md/md.c index d5ab4493c8b..fcd098794d3 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -498,61 +498,13 @@ void md_flush_request(struct mddev *mddev, struct bio *bio) } EXPORT_SYMBOL(md_flush_request); -/* Support for plugging. - * This mirrors the plugging support in request_queue, but does not - * require having a whole queue or request structures. - * We allocate an md_plug_cb for each md device and each thread it gets - * plugged on. This links tot the private plug_handle structure in the - * personality data where we keep a count of the number of outstanding - * plugs so other code can see if a plug is active. - */ -struct md_plug_cb { - struct blk_plug_cb cb; - struct mddev *mddev; -}; - -static void plugger_unplug(struct blk_plug_cb *cb) -{ - struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb); - if (atomic_dec_and_test(&mdcb->mddev->plug_cnt)) - md_wakeup_thread(mdcb->mddev->thread); - kfree(mdcb); -} - -/* Check that an unplug wakeup will come shortly. 
- * If not, wakeup the md thread immediately - */ -int mddev_check_plugged(struct mddev *mddev) +void md_unplug(struct blk_plug_cb *cb, bool from_schedule) { - struct blk_plug *plug = current->plug; - struct md_plug_cb *mdcb; - - if (!plug) - return 0; - - list_for_each_entry(mdcb, &plug->cb_list, cb.list) { - if (mdcb->cb.callback == plugger_unplug && - mdcb->mddev == mddev) { - /* Already on the list, move to top */ - if (mdcb != list_first_entry(&plug->cb_list, - struct md_plug_cb, - cb.list)) - list_move(&mdcb->cb.list, &plug->cb_list); - return 1; - } - } - /* Not currently on the callback list */ - mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC); - if (!mdcb) - return 0; - - mdcb->mddev = mddev; - mdcb->cb.callback = plugger_unplug; - atomic_inc(&mddev->plug_cnt); - list_add(&mdcb->cb.list, &plug->cb_list); - return 1; + struct mddev *mddev = cb->data; + md_wakeup_thread(mddev->thread); + kfree(cb); } -EXPORT_SYMBOL_GPL(mddev_check_plugged); +EXPORT_SYMBOL(md_unplug); static inline struct mddev *mddev_get(struct mddev *mddev) { @@ -602,7 +554,6 @@ void mddev_init(struct mddev *mddev) atomic_set(&mddev->active, 1); atomic_set(&mddev->openers, 0); atomic_set(&mddev->active_io, 0); - atomic_set(&mddev->plug_cnt, 0); spin_lock_init(&mddev->write_lock); atomic_set(&mddev->flush_pending, 0); init_waitqueue_head(&mddev->sb_wait); @@ -3942,17 +3893,13 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len) break; case clear: /* stopping an active array */ - if (atomic_read(&mddev->openers) > 0) - return -EBUSY; err = do_md_stop(mddev, 0, NULL); break; case inactive: /* stopping an active array */ - if (mddev->pers) { - if (atomic_read(&mddev->openers) > 0) - return -EBUSY; + if (mddev->pers) err = do_md_stop(mddev, 2, NULL); - } else + else err = 0; /* already inactive */ break; case suspended: diff --git a/drivers/md/md.h b/drivers/md/md.h index 7b4a3c318ca..f385b038589 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -266,9 +266,6 @@ struct mddev { int new_chunk_sectors; int reshape_backwards; - atomic_t plug_cnt; /* If device is expecting - * more bios soon. - */ struct md_thread *thread; /* management thread */ struct md_thread *sync_thread; /* doing resync or reconstruct */ sector_t curr_resync; /* last block scheduled */ @@ -630,6 +627,12 @@ extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask, struct mddev *mddev); extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, struct mddev *mddev); -extern int mddev_check_plugged(struct mddev *mddev); extern void md_trim_bio(struct bio *bio, int offset, int size); + +extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule); +static inline int mddev_check_plugged(struct mddev *mddev) +{ + return !!blk_check_plugged(md_unplug, mddev, + sizeof(struct blk_plug_cb)); +} #endif /* _MD_MD_H */ diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index cacd008d686..611b5f79761 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -46,6 +46,20 @@ */ #define NR_RAID1_BIOS 256 +/* when we get a read error on a read-only array, we redirect to another + * device without failing the first device, or trying to over-write to + * correct the read error. To keep track of bad blocks on a per-bio + * level, we store IO_BLOCKED in the appropriate 'bios' pointer + */ +#define IO_BLOCKED ((struct bio *)1) +/* When we successfully write to a known bad-block, we need to remove the + * bad-block marking which must be done from process context. 
So we record + * the success by setting devs[n].bio to IO_MADE_GOOD + */ +#define IO_MADE_GOOD ((struct bio *)2) + +#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2) + /* When there are this many requests queue to be written by * the raid1 thread, we become 'congested' to provide back-pressure * for writeback. @@ -483,12 +497,14 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect const sector_t this_sector = r1_bio->sector; int sectors; int best_good_sectors; - int start_disk; - int best_disk; - int i; + int best_disk, best_dist_disk, best_pending_disk; + int has_nonrot_disk; + int disk; sector_t best_dist; + unsigned int min_pending; struct md_rdev *rdev; int choose_first; + int choose_next_idle; rcu_read_lock(); /* @@ -499,26 +515,26 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect retry: sectors = r1_bio->sectors; best_disk = -1; + best_dist_disk = -1; best_dist = MaxSector; + best_pending_disk = -1; + min_pending = UINT_MAX; best_good_sectors = 0; + has_nonrot_disk = 0; + choose_next_idle = 0; if (conf->mddev->recovery_cp < MaxSector && - (this_sector + sectors >= conf->next_resync)) { + (this_sector + sectors >= conf->next_resync)) choose_first = 1; - start_disk = 0; - } else { + else choose_first = 0; - start_disk = conf->last_used; - } - for (i = 0 ; i < conf->raid_disks * 2 ; i++) { + for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) { sector_t dist; sector_t first_bad; int bad_sectors; - - int disk = start_disk + i; - if (disk >= conf->raid_disks * 2) - disk -= conf->raid_disks * 2; + unsigned int pending; + bool nonrot; rdev = rcu_dereference(conf->mirrors[disk].rdev); if (r1_bio->bios[disk] == IO_BLOCKED @@ -577,22 +593,77 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect } else best_good_sectors = sectors; + nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev)); + has_nonrot_disk |= nonrot; + pending = atomic_read(&rdev->nr_pending); dist = abs(this_sector - conf->mirrors[disk].head_position); - if (choose_first - /* Don't change to another disk for sequential reads */ - || conf->next_seq_sect == this_sector - || dist == 0 - /* If device is idle, use it */ - || atomic_read(&rdev->nr_pending) == 0) { + if (choose_first) { + best_disk = disk; + break; + } + /* Don't change to another disk for sequential reads */ + if (conf->mirrors[disk].next_seq_sect == this_sector + || dist == 0) { + int opt_iosize = bdev_io_opt(rdev->bdev) >> 9; + struct raid1_info *mirror = &conf->mirrors[disk]; + + best_disk = disk; + /* + * If buffered sequential IO size exceeds optimal + * iosize, check if there is idle disk. If yes, choose + * the idle disk. read_balance could already choose an + * idle disk before noticing it's a sequential IO in + * this disk. This doesn't matter because this disk + * will idle, next time it will be utilized after the + * first disk has IO size exceeds optimal iosize. In + * this way, iosize of the first disk will be optimal + * iosize at least. iosize of the second disk might be + * small, but not a big deal since when the second disk + * starts IO, the first disk is likely still busy. 
+ */ + if (nonrot && opt_iosize > 0 && + mirror->seq_start != MaxSector && + mirror->next_seq_sect > opt_iosize && + mirror->next_seq_sect - opt_iosize >= + mirror->seq_start) { + choose_next_idle = 1; + continue; + } + break; + } + /* If device is idle, use it */ + if (pending == 0) { best_disk = disk; break; } + + if (choose_next_idle) + continue; + + if (min_pending > pending) { + min_pending = pending; + best_pending_disk = disk; + } + if (dist < best_dist) { best_dist = dist; - best_disk = disk; + best_dist_disk = disk; } } + /* + * If all disks are rotational, choose the closest disk. If any disk is + * non-rotational, choose the disk with the fewest pending requests, + * even if that disk is rotational; this may or may not be optimal for + * arrays with mixed rotational/non-rotational disks, depending on + * workload. + */ + if (best_disk == -1) { + if (has_nonrot_disk) + best_disk = best_pending_disk; + else + best_disk = best_dist_disk; + } + if (best_disk >= 0) { rdev = rcu_dereference(conf->mirrors[best_disk].rdev); if (!rdev) @@ -606,8 +677,11 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect goto retry; } sectors = best_good_sectors; - conf->next_seq_sect = this_sector + sectors; - conf->last_used = best_disk; + + if (conf->mirrors[best_disk].next_seq_sect != this_sector) + conf->mirrors[best_disk].seq_start = this_sector; + + conf->mirrors[best_disk].next_seq_sect = this_sector + sectors; } rcu_read_unlock(); *max_sectors = sectors; @@ -870,10 +944,48 @@ do_sync_io: pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size); } +struct raid1_plug_cb { + struct blk_plug_cb cb; + struct bio_list pending; + int pending_cnt; +}; + +static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule) +{ + struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb, + cb); + struct mddev *mddev = plug->cb.data; + struct r1conf *conf = mddev->private; + struct bio *bio; + + if (from_schedule) { + spin_lock_irq(&conf->device_lock); + bio_list_merge(&conf->pending_bio_list, &plug->pending); + conf->pending_count += plug->pending_cnt; + spin_unlock_irq(&conf->device_lock); + md_wakeup_thread(mddev->thread); + kfree(plug); + return; + } + + /* we aren't scheduling, so we can do the write-out directly.
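+ * (This path runs in process context via blk_finish_plug(), so it is + * safe to call generic_make_request() here rather than deferring the + * writes to the raid1d thread.)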
*/ + bio = bio_list_get(&plug->pending); + bitmap_unplug(mddev->bitmap); + wake_up(&conf->wait_barrier); + + while (bio) { /* submit pending writes */ + struct bio *next = bio->bi_next; + bio->bi_next = NULL; + generic_make_request(bio); + bio = next; + } + kfree(plug); +} + static void make_request(struct mddev *mddev, struct bio * bio) { struct r1conf *conf = mddev->private; - struct mirror_info *mirror; + struct raid1_info *mirror; struct r1bio *r1_bio; struct bio *read_bio; int i, disks; @@ -883,6 +995,8 @@ static void make_request(struct mddev *mddev, struct bio * bio) const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA)); struct md_rdev *blocked_rdev; + struct blk_plug_cb *cb; + struct raid1_plug_cb *plug = NULL; int first_clone; int sectors_handled; int max_sectors; @@ -1185,11 +1299,22 @@ read_again: mbio->bi_private = r1_bio; atomic_inc(&r1_bio->remaining); + + cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug)); + if (cb) + plug = container_of(cb, struct raid1_plug_cb, cb); + else + plug = NULL; spin_lock_irqsave(&conf->device_lock, flags); - bio_list_add(&conf->pending_bio_list, mbio); - conf->pending_count++; + if (plug) { + bio_list_add(&plug->pending, mbio); + plug->pending_cnt++; + } else { + bio_list_add(&conf->pending_bio_list, mbio); + conf->pending_count++; + } spin_unlock_irqrestore(&conf->device_lock, flags); - if (!mddev_check_plugged(mddev)) + if (!plug) md_wakeup_thread(mddev->thread); } /* Mustn't call r1_bio_write_done before this next test, @@ -1364,7 +1489,7 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) struct r1conf *conf = mddev->private; int err = -EEXIST; int mirror = 0; - struct mirror_info *p; + struct raid1_info *p; int first = 0; int last = conf->raid_disks - 1; struct request_queue *q = bdev_get_queue(rdev->bdev); @@ -1433,7 +1558,7 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev) struct r1conf *conf = mddev->private; int err = 0; int number = rdev->raid_disk; - struct mirror_info *p = conf->mirrors+ number; + struct raid1_info *p = conf->mirrors + number; if (rdev != p->rdev) p = conf->mirrors + conf->raid_disks + number; @@ -2173,8 +2298,7 @@ static void raid1d(struct mddev *mddev) blk_start_plug(&plug); for (;;) { - if (atomic_read(&mddev->plug_cnt) == 0) - flush_pending_writes(conf); + flush_pending_writes(conf); spin_lock_irqsave(&conf->device_lock, flags); if (list_empty(head)) { @@ -2371,6 +2495,18 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp bio->bi_rw = READ; bio->bi_end_io = end_sync_read; read_targets++; + } else if (!test_bit(WriteErrorSeen, &rdev->flags) && + test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && + !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { + /* + * The device is suitable for reading (InSync), + * but has bad block(s) here. Let's try to correct them, + * if we are doing resync or repair. Otherwise, leave + * this device alone for this sync request. 
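+ * (A write that succeeds here lets the stale bad-block marking be + * cleared later via the IO_MADE_GOOD handling above.)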
+ */ + bio->bi_rw = WRITE; + bio->bi_end_io = end_sync_write; + write_targets++; } } if (bio->bi_end_io) { @@ -2428,7 +2564,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp /* There is nowhere to write, so all non-sync * drives must be failed - so we are finished */ - sector_t rv = max_sector - sector_nr; + sector_t rv; + if (min_bad > 0) + max_sector = sector_nr + min_bad; + rv = max_sector - sector_nr; *skipped = 1; put_buf(r1_bio); return rv; @@ -2521,7 +2660,7 @@ static struct r1conf *setup_conf(struct mddev *mddev) { struct r1conf *conf; int i; - struct mirror_info *disk; + struct raid1_info *disk; struct md_rdev *rdev; int err = -ENOMEM; @@ -2529,7 +2668,7 @@ static struct r1conf *setup_conf(struct mddev *mddev) if (!conf) goto abort; - conf->mirrors = kzalloc(sizeof(struct mirror_info) + conf->mirrors = kzalloc(sizeof(struct raid1_info) * mddev->raid_disks * 2, GFP_KERNEL); if (!conf->mirrors) @@ -2572,6 +2711,7 @@ static struct r1conf *setup_conf(struct mddev *mddev) mddev->merge_check_needed = 1; disk->head_position = 0; + disk->seq_start = MaxSector; } conf->raid_disks = mddev->raid_disks; conf->mddev = mddev; @@ -2585,7 +2725,6 @@ static struct r1conf *setup_conf(struct mddev *mddev) conf->recovery_disabled = mddev->recovery_disabled - 1; err = -EIO; - conf->last_used = -1; for (i = 0; i < conf->raid_disks * 2; i++) { disk = conf->mirrors + i; @@ -2611,19 +2750,9 @@ static struct r1conf *setup_conf(struct mddev *mddev) if (disk->rdev && (disk->rdev->saved_raid_disk < 0)) conf->fullsync = 1; - } else if (conf->last_used < 0) - /* - * The first working device is used as a - * starting point to read balancing. - */ - conf->last_used = i; + } } - if (conf->last_used < 0) { - printk(KERN_ERR "md/raid1:%s: no operational mirrors\n", - mdname(mddev)); - goto abort; - } err = -ENOMEM; conf->thread = md_register_thread(raid1d, mddev, "raid1"); if (!conf->thread) { @@ -2798,7 +2927,7 @@ static int raid1_reshape(struct mddev *mddev) */ mempool_t *newpool, *oldpool; struct pool_info *newpoolinfo; - struct mirror_info *newmirrors; + struct raid1_info *newmirrors; struct r1conf *conf = mddev->private; int cnt, raid_disks; unsigned long flags; @@ -2841,7 +2970,7 @@ static int raid1_reshape(struct mddev *mddev) kfree(newpoolinfo); return -ENOMEM; } - newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks * 2, + newmirrors = kzalloc(sizeof(struct raid1_info) * raid_disks * 2, GFP_KERNEL); if (!newmirrors) { kfree(newpoolinfo); @@ -2880,7 +3009,6 @@ static int raid1_reshape(struct mddev *mddev) conf->raid_disks = mddev->raid_disks = raid_disks; mddev->delta_disks = 0; - conf->last_used = 0; /* just make sure it is in-range */ lower_barrier(conf); set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h index 80ded139314..0ff3715fb7e 100644 --- a/drivers/md/raid1.h +++ b/drivers/md/raid1.h @@ -1,9 +1,15 @@ #ifndef _RAID1_H #define _RAID1_H -struct mirror_info { +struct raid1_info { struct md_rdev *rdev; sector_t head_position; + + /* When choosing the best device for a read (read_balance()) + * we try to keep sequential reads on the same device + */ + sector_t next_seq_sect; + sector_t seq_start; }; /* @@ -24,17 +30,11 @@ struct pool_info { struct r1conf { struct mddev *mddev; - struct mirror_info *mirrors; /* twice 'raid_disks' to + struct raid1_info *mirrors; /* twice 'raid_disks' to * allow for replacements.
*/ int raid_disks; - /* When choose the best device for a read (read_balance()) - * we try to keep sequential reads one the same device - * using 'last_used' and 'next_seq_sect' - */ - int last_used; - sector_t next_seq_sect; /* During resync, read_balancing is only allowed on the part * of the array that has been resynced. 'next_resync' tells us * where that is. @@ -135,20 +135,6 @@ struct r1bio { /* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously alloced*/ }; -/* when we get a read error on a read-only array, we redirect to another - * device without failing the first device, or trying to over-write to - * correct the read error. To keep track of bad blocks on a per-bio - * level, we store IO_BLOCKED in the appropriate 'bios' pointer - */ -#define IO_BLOCKED ((struct bio *)1) -/* When we successfully write to a known bad-block, we need to remove the - * bad-block marking which must be done from process context. So we record - * the success by setting bios[n] to IO_MADE_GOOD - */ -#define IO_MADE_GOOD ((struct bio *)2) - -#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2) - /* bits for r1bio.state */ #define R1BIO_Uptodate 0 #define R1BIO_IsSync 1 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 8da6282254c..de5ed6fd880 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -60,7 +60,21 @@ */ #define NR_RAID10_BIOS 256 -/* When there are this many requests queue to be written by +/* when we get a read error on a read-only array, we redirect to another + * device without failing the first device, or trying to over-write to + * correct the read error. To keep track of bad blocks on a per-bio + * level, we store IO_BLOCKED in the appropriate 'bios' pointer + */ +#define IO_BLOCKED ((struct bio *)1) +/* When we successfully write to a known bad-block, we need to remove the + * bad-block marking which must be done from process context. So we record + * the success by setting devs[n].bio to IO_MADE_GOOD + */ +#define IO_MADE_GOOD ((struct bio *)2) + +#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2) + +/* When there are this many requests queued to be written by * the raid10 thread, we become 'congested' to provide back-pressure * for writeback. 
*/ @@ -717,7 +731,7 @@ static struct md_rdev *read_balance(struct r10conf *conf, int sectors = r10_bio->sectors; int best_good_sectors; sector_t new_distance, best_dist; - struct md_rdev *rdev, *best_rdev; + struct md_rdev *best_rdev, *rdev = NULL; int do_balance; int best_slot; struct geom *geo = &conf->geo; @@ -839,9 +853,8 @@ retry: return rdev; } -static int raid10_congested(void *data, int bits) +int md_raid10_congested(struct mddev *mddev, int bits) { - struct mddev *mddev = data; struct r10conf *conf = mddev->private; int i, ret = 0; @@ -849,8 +862,6 @@ static int raid10_congested(void *data, int bits) conf->pending_count >= max_queued_requests) return 1; - if (mddev_congested(mddev, bits)) - return 1; rcu_read_lock(); for (i = 0; (i < conf->geo.raid_disks || i < conf->prev.raid_disks) @@ -866,6 +877,15 @@ static int raid10_congested(void *data, int bits) rcu_read_unlock(); return ret; } +EXPORT_SYMBOL_GPL(md_raid10_congested); + +static int raid10_congested(void *data, int bits) +{ + struct mddev *mddev = data; + + return mddev_congested(mddev, bits) || + md_raid10_congested(mddev, bits); +} static void flush_pending_writes(struct r10conf *conf) { @@ -1546,7 +1566,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev) static void print_conf(struct r10conf *conf) { int i; - struct mirror_info *tmp; + struct raid10_info *tmp; printk(KERN_DEBUG "RAID10 conf printout:\n"); if (!conf) { @@ -1580,7 +1600,7 @@ static int raid10_spare_active(struct mddev *mddev) { int i; struct r10conf *conf = mddev->private; - struct mirror_info *tmp; + struct raid10_info *tmp; int count = 0; unsigned long flags; @@ -1655,7 +1675,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) else mirror = first; for ( ; mirror <= last ; mirror++) { - struct mirror_info *p = &conf->mirrors[mirror]; + struct raid10_info *p = &conf->mirrors[mirror]; if (p->recovery_disabled == mddev->recovery_disabled) continue; if (p->rdev) { @@ -1709,7 +1729,7 @@ static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev) int err = 0; int number = rdev->raid_disk; struct md_rdev **rdevp; - struct mirror_info *p = conf->mirrors + number; + struct raid10_info *p = conf->mirrors + number; print_conf(conf); if (rdev == p->rdev) @@ -2660,8 +2680,7 @@ static void raid10d(struct mddev *mddev) blk_start_plug(&plug); for (;;) { - if (atomic_read(&mddev->plug_cnt) == 0) - flush_pending_writes(conf); + flush_pending_writes(conf); spin_lock_irqsave(&conf->device_lock, flags); if (list_empty(head)) { @@ -2876,7 +2895,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, sector_t sect; int must_sync; int any_working; - struct mirror_info *mirror = &conf->mirrors[i]; + struct raid10_info *mirror = &conf->mirrors[i]; if ((mirror->rdev == NULL || test_bit(In_sync, &mirror->rdev->flags)) @@ -3388,7 +3407,7 @@ static struct r10conf *setup_conf(struct mddev *mddev) goto out; /* FIXME calc properly */ - conf->mirrors = kzalloc(sizeof(struct mirror_info)*(mddev->raid_disks + + conf->mirrors = kzalloc(sizeof(struct raid10_info)*(mddev->raid_disks + max(0,mddev->delta_disks)), GFP_KERNEL); if (!conf->mirrors) @@ -3452,7 +3471,7 @@ static int run(struct mddev *mddev) { struct r10conf *conf; int i, disk_idx, chunk_size; - struct mirror_info *disk; + struct raid10_info *disk; struct md_rdev *rdev; sector_t size; sector_t min_offset_diff = 0; @@ -3472,12 +3491,14 @@ static int run(struct mddev *mddev) conf->thread = NULL; chunk_size = mddev->chunk_sectors << 9; - blk_queue_io_min(mddev->queue, 
chunk_size); - if (conf->geo.raid_disks % conf->geo.near_copies) - blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); - else - blk_queue_io_opt(mddev->queue, chunk_size * - (conf->geo.raid_disks / conf->geo.near_copies)); + if (mddev->queue) { + blk_queue_io_min(mddev->queue, chunk_size); + if (conf->geo.raid_disks % conf->geo.near_copies) + blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); + else + blk_queue_io_opt(mddev->queue, chunk_size * + (conf->geo.raid_disks / conf->geo.near_copies)); + } rdev_for_each(rdev, mddev) { long long diff; @@ -3511,8 +3532,9 @@ static int run(struct mddev *mddev) if (first || diff < min_offset_diff) min_offset_diff = diff; - disk_stack_limits(mddev->gendisk, rdev->bdev, - rdev->data_offset << 9); + if (mddev->gendisk) + disk_stack_limits(mddev->gendisk, rdev->bdev, + rdev->data_offset << 9); disk->head_position = 0; } @@ -3575,22 +3597,22 @@ static int run(struct mddev *mddev) md_set_array_sectors(mddev, size); mddev->resync_max_sectors = size; - mddev->queue->backing_dev_info.congested_fn = raid10_congested; - mddev->queue->backing_dev_info.congested_data = mddev; - - /* Calculate max read-ahead size. - * We need to readahead at least twice a whole stripe.... - * maybe... - */ - { + if (mddev->queue) { int stripe = conf->geo.raid_disks * ((mddev->chunk_sectors << 9) / PAGE_SIZE); + mddev->queue->backing_dev_info.congested_fn = raid10_congested; + mddev->queue->backing_dev_info.congested_data = mddev; + + /* Calculate max read-ahead size. + * We need to readahead at least twice a whole stripe.... + * maybe... + */ stripe /= conf->geo.near_copies; if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) mddev->queue->backing_dev_info.ra_pages = 2 * stripe; + blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec); } - blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec); if (md_integrity_register(mddev)) goto out_free_conf; @@ -3641,7 +3663,10 @@ static int stop(struct mddev *mddev) lower_barrier(conf); md_unregister_thread(&mddev->thread); - blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ + if (mddev->queue) + /* the unplug fn references 'conf'*/ + blk_sync_queue(mddev->queue); + if (conf->r10bio_pool) mempool_destroy(conf->r10bio_pool); kfree(conf->mirrors); @@ -3805,7 +3830,7 @@ static int raid10_check_reshape(struct mddev *mddev) if (mddev->delta_disks > 0) { /* allocate new 'mirrors' list */ conf->mirrors_new = kzalloc( - sizeof(struct mirror_info) + sizeof(struct raid10_info) *(mddev->raid_disks + mddev->delta_disks), GFP_KERNEL); @@ -3930,7 +3955,7 @@ static int raid10_start_reshape(struct mddev *mddev) spin_lock_irq(&conf->device_lock); if (conf->mirrors_new) { memcpy(conf->mirrors_new, conf->mirrors, - sizeof(struct mirror_info)*conf->prev.raid_disks); + sizeof(struct raid10_info)*conf->prev.raid_disks); smp_mb(); kfree(conf->mirrors_old); /* FIXME and elsewhere */ conf->mirrors_old = conf->mirrors; diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h index 135b1b0a155..007c2c68dd8 100644 --- a/drivers/md/raid10.h +++ b/drivers/md/raid10.h @@ -1,7 +1,7 @@ #ifndef _RAID10_H #define _RAID10_H -struct mirror_info { +struct raid10_info { struct md_rdev *rdev, *replacement; sector_t head_position; int recovery_disabled; /* matches @@ -13,8 +13,8 @@ struct mirror_info { struct r10conf { struct mddev *mddev; - struct mirror_info *mirrors; - struct mirror_info *mirrors_new, *mirrors_old; + struct raid10_info *mirrors; + struct raid10_info *mirrors_new, *mirrors_old; spinlock_t device_lock; /* 
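
[All of the blk_queue_*() and backing_dev_info setup in run() is now conditional on mddev->queue, and disk_stack_limits() on mddev->gendisk, evidently so the personality can be instantiated without md owning a queue or gendisk, as when stacked under a dm target. For the read-ahead sizing that moved inside the guard, the arithmetic works out as below; the numbers are an assumed example (raid_disks, chunk_sectors and bdi are stand-in locals), not values from this patch:

/* Assumed: 4 disks, 512 KiB chunks, near_copies = 2, PAGE_SIZE = 4096 */
int stripe = raid_disks * ((chunk_sectors << 9) / PAGE_SIZE);
                                /* 4 * (524288 / 4096) = 512 pages */
stripe /= near_copies;          /* 256 pages of distinct data per stripe */
if (bdi->ra_pages < 2 * stripe)
        bdi->ra_pages = 2 * stripe;     /* 512 pages = 2 MiB read-ahead */
]
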
geometry */ @@ -123,20 +123,6 @@ struct r10bio { } devs[0]; }; -/* when we get a read error on a read-only array, we redirect to another - * device without failing the first device, or trying to over-write to - * correct the read error. To keep track of bad blocks on a per-bio - * level, we store IO_BLOCKED in the appropriate 'bios' pointer - */ -#define IO_BLOCKED ((struct bio*)1) -/* When we successfully write to a known bad-block, we need to remove the - * bad-block marking which must be done from process context. So we record - * the success by setting devs[n].bio to IO_MADE_GOOD - */ -#define IO_MADE_GOOD ((struct bio *)2) - -#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2) - /* bits for r10bio.state */ enum r10bio_state { R10BIO_Uptodate, @@ -159,4 +145,7 @@ enum r10bio_state { */ R10BIO_Previous, }; + +extern int md_raid10_congested(struct mddev *mddev, int bits); + #endif diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 04348d76bb3..adda94df5eb 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -99,34 +99,40 @@ static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector) * We maintain a biased count of active stripes in the bottom 16 bits of * bi_phys_segments, and a count of processed stripes in the upper 16 bits */ -static inline int raid5_bi_phys_segments(struct bio *bio) +static inline int raid5_bi_processed_stripes(struct bio *bio) { - return bio->bi_phys_segments & 0xffff; + atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; + return (atomic_read(segments) >> 16) & 0xffff; } -static inline int raid5_bi_hw_segments(struct bio *bio) +static inline int raid5_dec_bi_active_stripes(struct bio *bio) { - return (bio->bi_phys_segments >> 16) & 0xffff; + atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; + return atomic_sub_return(1, segments) & 0xffff; } -static inline int raid5_dec_bi_phys_segments(struct bio *bio) +static inline void raid5_inc_bi_active_stripes(struct bio *bio) { - --bio->bi_phys_segments; - return raid5_bi_phys_segments(bio); + atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; + atomic_inc(segments); } -static inline int raid5_dec_bi_hw_segments(struct bio *bio) +static inline void raid5_set_bi_processed_stripes(struct bio *bio, + unsigned int cnt) { - unsigned short val = raid5_bi_hw_segments(bio); + atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; + int old, new; - --val; - bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio); - return val; + do { + old = atomic_read(segments); + new = (old & 0xffff) | (cnt << 16); + } while (atomic_cmpxchg(segments, old, new) != old); } -static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt) +static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt) { - bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16); + atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; + atomic_set(segments, cnt); } /* Find first data disk in a raid6 stripe */ @@ -190,49 +196,56 @@ static int stripe_operations_active(struct stripe_head *sh) test_bit(STRIPE_COMPUTE_RUN, &sh->state); } -static void __release_stripe(struct r5conf *conf, struct stripe_head *sh) +static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh) { - if (atomic_dec_and_test(&sh->count)) { - BUG_ON(!list_empty(&sh->lru)); - BUG_ON(atomic_read(&conf->active_stripes)==0); - if (test_bit(STRIPE_HANDLE, &sh->state)) { - if (test_bit(STRIPE_DELAYED, &sh->state) && - !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) - list_add_tail(&sh->lru, 
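
[The raid5 helpers above stop hiding bi_phys_segments behind device_lock and instead treat the field as an atomic_t holding two packed 16-bit counts: active stripes in the low half, processed stripes in the high half. Plain atomic ops suffice for the low half; rewriting the high half needs a cmpxchg loop so a concurrent low-half update is not lost. The idiom, reduced to a self-contained sketch:

#include <linux/atomic.h>

/* low 16 bits: active-stripe count; high 16 bits: processed stripes */
static inline int dec_active(atomic_t *segments)
{
        return atomic_sub_return(1, segments) & 0xffff;
}

static inline void set_processed(atomic_t *segments, unsigned int cnt)
{
        int old, new;

        do {    /* rewrite the high half, preserving the low half */
                old = atomic_read(segments);
                new = (old & 0xffff) | (cnt << 16);
        } while (atomic_cmpxchg(segments, old, new) != old);
}
]
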
&conf->delayed_list); - else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && - sh->bm_seq - conf->seq_write > 0) - list_add_tail(&sh->lru, &conf->bitmap_list); - else { - clear_bit(STRIPE_DELAYED, &sh->state); - clear_bit(STRIPE_BIT_DELAY, &sh->state); - list_add_tail(&sh->lru, &conf->handle_list); - } - md_wakeup_thread(conf->mddev->thread); - } else { - BUG_ON(stripe_operations_active(sh)); - if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) - if (atomic_dec_return(&conf->preread_active_stripes) - < IO_THRESHOLD) - md_wakeup_thread(conf->mddev->thread); - atomic_dec(&conf->active_stripes); - if (!test_bit(STRIPE_EXPANDING, &sh->state)) { - list_add_tail(&sh->lru, &conf->inactive_list); - wake_up(&conf->wait_for_stripe); - if (conf->retry_read_aligned) - md_wakeup_thread(conf->mddev->thread); - } + BUG_ON(!list_empty(&sh->lru)); + BUG_ON(atomic_read(&conf->active_stripes)==0); + if (test_bit(STRIPE_HANDLE, &sh->state)) { + if (test_bit(STRIPE_DELAYED, &sh->state) && + !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) + list_add_tail(&sh->lru, &conf->delayed_list); + else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && + sh->bm_seq - conf->seq_write > 0) + list_add_tail(&sh->lru, &conf->bitmap_list); + else { + clear_bit(STRIPE_DELAYED, &sh->state); + clear_bit(STRIPE_BIT_DELAY, &sh->state); + list_add_tail(&sh->lru, &conf->handle_list); + } + md_wakeup_thread(conf->mddev->thread); + } else { + BUG_ON(stripe_operations_active(sh)); + if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) + if (atomic_dec_return(&conf->preread_active_stripes) + < IO_THRESHOLD) + md_wakeup_thread(conf->mddev->thread); + atomic_dec(&conf->active_stripes); + if (!test_bit(STRIPE_EXPANDING, &sh->state)) { + list_add_tail(&sh->lru, &conf->inactive_list); + wake_up(&conf->wait_for_stripe); + if (conf->retry_read_aligned) + md_wakeup_thread(conf->mddev->thread); } } } +static void __release_stripe(struct r5conf *conf, struct stripe_head *sh) +{ + if (atomic_dec_and_test(&sh->count)) + do_release_stripe(conf, sh); +} + static void release_stripe(struct stripe_head *sh) { struct r5conf *conf = sh->raid_conf; unsigned long flags; - spin_lock_irqsave(&conf->device_lock, flags); - __release_stripe(conf, sh); - spin_unlock_irqrestore(&conf->device_lock, flags); + local_irq_save(flags); + if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) { + do_release_stripe(conf, sh); + spin_unlock(&conf->device_lock); + } + local_irq_restore(flags); } static inline void remove_hash(struct stripe_head *sh) @@ -471,7 +484,8 @@ get_active_stripe(struct r5conf *conf, sector_t sector, } else { if (atomic_read(&sh->count)) { BUG_ON(!list_empty(&sh->lru) - && !test_bit(STRIPE_EXPANDING, &sh->state)); + && !test_bit(STRIPE_EXPANDING, &sh->state) + && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)); } else { if (!test_bit(STRIPE_HANDLE, &sh->state)) atomic_inc(&conf->active_stripes); @@ -640,6 +654,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) else bi->bi_sector = (sh->sector + rdev->data_offset); + if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) + bi->bi_rw |= REQ_FLUSH; + bi->bi_flags = 1 << BIO_UPTODATE; bi->bi_idx = 0; bi->bi_io_vec[0].bv_len = STRIPE_SIZE; @@ -749,14 +766,12 @@ static void ops_complete_biofill(void *stripe_head_ref) { struct stripe_head *sh = stripe_head_ref; struct bio *return_bi = NULL; - struct r5conf *conf = sh->raid_conf; int i; pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); /* clear completed biofills */ - spin_lock_irq(&conf->device_lock); for 
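
[release_stripe() above is the classic atomic_dec_and_lock() refactor: __release_stripe() keeps its semantics for callers already holding device_lock, do_release_stripe() carries the shared body, and the common fast path (the count stays above zero) no longer touches the lock at all. Reduced to its shape:

local_irq_save(flags);
if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
        /* we decremented to zero and now hold the lock */
        do_release_stripe(conf, sh);
        spin_unlock(&conf->device_lock);
}
local_irq_restore(flags);
]
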
(i = sh->disks; i--; ) { struct r5dev *dev = &sh->dev[i]; @@ -774,7 +789,7 @@ static void ops_complete_biofill(void *stripe_head_ref) while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) { rbi2 = r5_next_bio(rbi, dev->sector); - if (!raid5_dec_bi_phys_segments(rbi)) { + if (!raid5_dec_bi_active_stripes(rbi)) { rbi->bi_next = return_bi; return_bi = rbi; } @@ -782,7 +797,6 @@ static void ops_complete_biofill(void *stripe_head_ref) } } } - spin_unlock_irq(&conf->device_lock); clear_bit(STRIPE_BIOFILL_RUN, &sh->state); return_io(return_bi); @@ -794,7 +808,6 @@ static void ops_complete_biofill(void *stripe_head_ref) static void ops_run_biofill(struct stripe_head *sh) { struct dma_async_tx_descriptor *tx = NULL; - struct r5conf *conf = sh->raid_conf; struct async_submit_ctl submit; int i; @@ -805,10 +818,10 @@ static void ops_run_biofill(struct stripe_head *sh) struct r5dev *dev = &sh->dev[i]; if (test_bit(R5_Wantfill, &dev->flags)) { struct bio *rbi; - spin_lock_irq(&conf->device_lock); + spin_lock_irq(&sh->stripe_lock); dev->read = rbi = dev->toread; dev->toread = NULL; - spin_unlock_irq(&conf->device_lock); + spin_unlock_irq(&sh->stripe_lock); while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) { tx = async_copy_data(0, rbi, dev->page, @@ -1144,12 +1157,12 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) { struct bio *wbi; - spin_lock_irq(&sh->raid_conf->device_lock); + spin_lock_irq(&sh->stripe_lock); chosen = dev->towrite; dev->towrite = NULL; BUG_ON(dev->written); wbi = dev->written = chosen; - spin_unlock_irq(&sh->raid_conf->device_lock); + spin_unlock_irq(&sh->stripe_lock); while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) { @@ -1454,6 +1467,8 @@ static int grow_one_stripe(struct r5conf *conf) init_waitqueue_head(&sh->ops.wait_for_ops); #endif + spin_lock_init(&sh->stripe_lock); + if (grow_buffers(sh)) { shrink_buffers(sh); kmem_cache_free(conf->slab_cache, sh); @@ -1739,7 +1754,9 @@ static void raid5_end_read_request(struct bio * bi, int error) atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); clear_bit(R5_ReadError, &sh->dev[i].flags); clear_bit(R5_ReWrite, &sh->dev[i].flags); - } + } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) + clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); + if (atomic_read(&rdev->read_errors)) atomic_set(&rdev->read_errors, 0); } else { @@ -1784,7 +1801,11 @@ static void raid5_end_read_request(struct bio * bi, int error) else retry = 1; if (retry) - set_bit(R5_ReadError, &sh->dev[i].flags); + if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { + set_bit(R5_ReadError, &sh->dev[i].flags); + clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); + } else + set_bit(R5_ReadNoMerge, &sh->dev[i].flags); else { clear_bit(R5_ReadError, &sh->dev[i].flags); clear_bit(R5_ReWrite, &sh->dev[i].flags); @@ -2340,11 +2361,18 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in (unsigned long long)bi->bi_sector, (unsigned long long)sh->sector); - - spin_lock_irq(&conf->device_lock); + /* + * If several bio share a stripe. The bio bi_phys_segments acts as a + * reference count to avoid race. The reference count should already be + * increased before this function is called (for example, in + * make_request()), so other bio sharing this stripe will not free the + * stripe. If a stripe is owned by one stripe, the stripe lock will + * protect it. 
+ */ + spin_lock_irq(&sh->stripe_lock); if (forwrite) { bip = &sh->dev[dd_idx].towrite; - if (*bip == NULL && sh->dev[dd_idx].written == NULL) + if (*bip == NULL) firstwrite = 1; } else bip = &sh->dev[dd_idx].toread; @@ -2360,7 +2388,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in if (*bip) bi->bi_next = *bip; *bip = bi; - bi->bi_phys_segments++; + raid5_inc_bi_active_stripes(bi); if (forwrite) { /* check if page is covered */ @@ -2375,7 +2403,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags); } - spin_unlock_irq(&conf->device_lock); + spin_unlock_irq(&sh->stripe_lock); pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", (unsigned long long)(*bip)->bi_sector, @@ -2391,7 +2419,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in overlap: set_bit(R5_Overlap, &sh->dev[dd_idx].flags); - spin_unlock_irq(&conf->device_lock); + spin_unlock_irq(&sh->stripe_lock); return 0; } @@ -2441,10 +2469,11 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, rdev_dec_pending(rdev, conf->mddev); } } - spin_lock_irq(&conf->device_lock); + spin_lock_irq(&sh->stripe_lock); /* fail all writes first */ bi = sh->dev[i].towrite; sh->dev[i].towrite = NULL; + spin_unlock_irq(&sh->stripe_lock); if (bi) { s->to_write--; bitmap_end = 1; @@ -2457,13 +2486,17 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, sh->dev[i].sector + STRIPE_SECTORS) { struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); clear_bit(BIO_UPTODATE, &bi->bi_flags); - if (!raid5_dec_bi_phys_segments(bi)) { + if (!raid5_dec_bi_active_stripes(bi)) { md_write_end(conf->mddev); bi->bi_next = *return_bi; *return_bi = bi; } bi = nextbi; } + if (bitmap_end) + bitmap_endwrite(conf->mddev->bitmap, sh->sector, + STRIPE_SECTORS, 0, 0); + bitmap_end = 0; /* and fail all 'written' */ bi = sh->dev[i].written; sh->dev[i].written = NULL; @@ -2472,7 +2505,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, sh->dev[i].sector + STRIPE_SECTORS) { struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); clear_bit(BIO_UPTODATE, &bi->bi_flags); - if (!raid5_dec_bi_phys_segments(bi)) { + if (!raid5_dec_bi_active_stripes(bi)) { md_write_end(conf->mddev); bi->bi_next = *return_bi; *return_bi = bi; @@ -2496,14 +2529,13 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); clear_bit(BIO_UPTODATE, &bi->bi_flags); - if (!raid5_dec_bi_phys_segments(bi)) { + if (!raid5_dec_bi_active_stripes(bi)) { bi->bi_next = *return_bi; *return_bi = bi; } bi = nextbi; } } - spin_unlock_irq(&conf->device_lock); if (bitmap_end) bitmap_endwrite(conf->mddev->bitmap, sh->sector, STRIPE_SECTORS, 0, 0); @@ -2707,30 +2739,23 @@ static void handle_stripe_clean_event(struct r5conf *conf, test_bit(R5_UPTODATE, &dev->flags)) { /* We can return any write requests */ struct bio *wbi, *wbi2; - int bitmap_end = 0; pr_debug("Return write for disc %d\n", i); - spin_lock_irq(&conf->device_lock); wbi = dev->written; dev->written = NULL; while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) { wbi2 = r5_next_bio(wbi, dev->sector); - if (!raid5_dec_bi_phys_segments(wbi)) { + if (!raid5_dec_bi_active_stripes(wbi)) { md_write_end(conf->mddev); wbi->bi_next = *return_bi; *return_bi = wbi; } wbi = wbi2; } - if (dev->towrite == NULL) - bitmap_end = 1; - 
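
[With the new sh->stripe_lock, add_stripe_bio() above serialises only against other users of the same stripe rather than the whole array, and the bio is pinned by its active-stripes count before the lock is dropped. The ordering, condensed; the overlap test is folded into an assumed helper "bio_overlaps_queued":

spin_lock_irq(&sh->stripe_lock);
if (bio_overlaps_queued(sh, dd_idx, bi)) {      /* assumed helper */
        set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
        spin_unlock_irq(&sh->stripe_lock);
        return 0;                       /* caller waits and retries */
}
bi->bi_next = *bip;
*bip = bi;                              /* link into toread/towrite */
raid5_inc_bi_active_stripes(bi);        /* pin: one reference per stripe */
spin_unlock_irq(&sh->stripe_lock);
return 1;
]
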
spin_unlock_irq(&conf->device_lock); - if (bitmap_end) - bitmap_endwrite(conf->mddev->bitmap, - sh->sector, - STRIPE_SECTORS, + bitmap_endwrite(conf->mddev->bitmap, sh->sector, + STRIPE_SECTORS, !test_bit(STRIPE_DEGRADED, &sh->state), - 0); + 0); } } @@ -3182,7 +3207,6 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) /* Now to look around and see what can be done */ rcu_read_lock(); - spin_lock_irq(&conf->device_lock); for (i=disks; i--; ) { struct md_rdev *rdev; sector_t first_bad; @@ -3328,7 +3352,6 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) do_recovery = 1; } } - spin_unlock_irq(&conf->device_lock); if (test_bit(STRIPE_SYNCING, &sh->state)) { /* If there is a failed device being replaced, * we must be recovering. @@ -3791,7 +3814,7 @@ static struct bio *remove_bio_from_retry(struct r5conf *conf) * this sets the active strip count to 1 and the processed * strip count to zero (upper 8 bits) */ - bi->bi_phys_segments = 1; /* biased count of active stripes */ + raid5_set_bi_stripes(bi, 1); /* biased count of active stripes */ } return bi; @@ -3988,6 +4011,62 @@ static struct stripe_head *__get_priority_stripe(struct r5conf *conf) return sh; } +struct raid5_plug_cb { + struct blk_plug_cb cb; + struct list_head list; +}; + +static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) +{ + struct raid5_plug_cb *cb = container_of( + blk_cb, struct raid5_plug_cb, cb); + struct stripe_head *sh; + struct mddev *mddev = cb->cb.data; + struct r5conf *conf = mddev->private; + + if (cb->list.next && !list_empty(&cb->list)) { + spin_lock_irq(&conf->device_lock); + while (!list_empty(&cb->list)) { + sh = list_first_entry(&cb->list, struct stripe_head, lru); + list_del_init(&sh->lru); + /* + * avoid race release_stripe_plug() sees + * STRIPE_ON_UNPLUG_LIST clear but the stripe + * is still in our list + */ + smp_mb__before_clear_bit(); + clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state); + __release_stripe(conf, sh); + } + spin_unlock_irq(&conf->device_lock); + } + kfree(cb); +} + +static void release_stripe_plug(struct mddev *mddev, + struct stripe_head *sh) +{ + struct blk_plug_cb *blk_cb = blk_check_plugged( + raid5_unplug, mddev, + sizeof(struct raid5_plug_cb)); + struct raid5_plug_cb *cb; + + if (!blk_cb) { + release_stripe(sh); + return; + } + + cb = container_of(blk_cb, struct raid5_plug_cb, cb); + + if (cb->list.next == NULL) + INIT_LIST_HEAD(&cb->list); + + if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)) + list_add_tail(&sh->lru, &cb->list); + else + release_stripe(sh); +} + static void make_request(struct mddev *mddev, struct bio * bi) { struct r5conf *conf = mddev->private; @@ -4113,11 +4192,10 @@ static void make_request(struct mddev *mddev, struct bio * bi) finish_wait(&conf->wait_for_overlap, &w); set_bit(STRIPE_HANDLE, &sh->state); clear_bit(STRIPE_DELAYED, &sh->state); - if ((bi->bi_rw & REQ_SYNC) && + if ((bi->bi_rw & REQ_NOIDLE) && !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) atomic_inc(&conf->preread_active_stripes); - mddev_check_plugged(mddev); - release_stripe(sh); + release_stripe_plug(mddev, sh); } else { /* cannot get stripe for read-ahead, just give-up */ clear_bit(BIO_UPTODATE, &bi->bi_flags); @@ -4126,9 +4204,7 @@ static void make_request(struct mddev *mddev, struct bio * bi) } } - spin_lock_irq(&conf->device_lock); - remaining = raid5_dec_bi_phys_segments(bi); - spin_unlock_irq(&conf->device_lock); + remaining = raid5_dec_bi_active_stripes(bi); if (remaining == 0) { if ( rw == 
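
[raid5_unplug() and release_stripe_plug() above replace the old plug_cnt throttling: blk_check_plugged() finds or allocates a per-task callback while a blk_plug is active, stripes accumulate on its list, and the whole batch is released under one device_lock acquisition at unplug time. The consumer side of that API in outline, with names taken from the diff:

blk_cb = blk_check_plugged(raid5_unplug, mddev,
                           sizeof(struct raid5_plug_cb));
if (!blk_cb) {
        release_stripe(sh);     /* no plug in progress: release now */
        return;
}
cb = container_of(blk_cb, struct raid5_plug_cb, cb);
if (cb->list.next == NULL)      /* first use of a fresh callback */
        INIT_LIST_HEAD(&cb->list);
if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state))
        list_add_tail(&sh->lru, &cb->list);
else
        release_stripe(sh);     /* already on an unplug list: drop our ref */
]
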
WRITE ) @@ -4484,7 +4560,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) sector += STRIPE_SECTORS, scnt++) { - if (scnt < raid5_bi_hw_segments(raid_bio)) + if (scnt < raid5_bi_processed_stripes(raid_bio)) /* already done this stripe */ continue; @@ -4492,25 +4568,24 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) if (!sh) { /* failed to get a stripe - must wait */ - raid5_set_bi_hw_segments(raid_bio, scnt); + raid5_set_bi_processed_stripes(raid_bio, scnt); conf->retry_read_aligned = raid_bio; return handled; } if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) { release_stripe(sh); - raid5_set_bi_hw_segments(raid_bio, scnt); + raid5_set_bi_processed_stripes(raid_bio, scnt); conf->retry_read_aligned = raid_bio; return handled; } + set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags); handle_stripe(sh); release_stripe(sh); handled++; } - spin_lock_irq(&conf->device_lock); - remaining = raid5_dec_bi_phys_segments(raid_bio); - spin_unlock_irq(&conf->device_lock); + remaining = raid5_dec_bi_active_stripes(raid_bio); if (remaining == 0) bio_endio(raid_bio, 0); if (atomic_dec_and_test(&conf->active_aligned_reads)) @@ -4518,6 +4593,30 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) return handled; } +#define MAX_STRIPE_BATCH 8 +static int handle_active_stripes(struct r5conf *conf) +{ + struct stripe_head *batch[MAX_STRIPE_BATCH], *sh; + int i, batch_size = 0; + + while (batch_size < MAX_STRIPE_BATCH && + (sh = __get_priority_stripe(conf)) != NULL) + batch[batch_size++] = sh; + + if (batch_size == 0) + return batch_size; + spin_unlock_irq(&conf->device_lock); + + for (i = 0; i < batch_size; i++) + handle_stripe(batch[i]); + + cond_resched(); + + spin_lock_irq(&conf->device_lock); + for (i = 0; i < batch_size; i++) + __release_stripe(conf, batch[i]); + return batch_size; +} /* * This is our raid5 kernel thread. 
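
[handle_active_stripes() above amortises the lock traffic in raid5d(): up to MAX_STRIPE_BATCH stripes are dequeued under one device_lock hold, handled with the lock dropped, then all released under a second hold. The resulting main-loop shape, in outline:

spin_lock_irq(&conf->device_lock);
while (1) {
        /* ... flush bitmap updates, retry queued aligned reads ... */

        batch_size = handle_active_stripes(conf); /* drops/retakes lock */
        if (!batch_size)
                break;
        handled += batch_size;
}
spin_unlock_irq(&conf->device_lock);
]
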
@@ -4528,7 +4627,6 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) */ static void raid5d(struct mddev *mddev) { - struct stripe_head *sh; struct r5conf *conf = mddev->private; int handled; struct blk_plug plug; @@ -4542,8 +4640,9 @@ static void raid5d(struct mddev *mddev) spin_lock_irq(&conf->device_lock); while (1) { struct bio *bio; + int batch_size; - if (atomic_read(&mddev->plug_cnt) == 0 && + if ( !list_empty(&conf->bitmap_list)) { /* Now is a good time to flush some bitmap updates */ conf->seq_flush++; @@ -4553,8 +4652,7 @@ static void raid5d(struct mddev *mddev) conf->seq_write = conf->seq_flush; activate_bit_delay(conf); } - if (atomic_read(&mddev->plug_cnt) == 0) - raid5_activate_delayed(conf); + raid5_activate_delayed(conf); while ((bio = remove_bio_from_retry(conf))) { int ok; @@ -4566,21 +4664,16 @@ static void raid5d(struct mddev *mddev) handled++; } - sh = __get_priority_stripe(conf); - - if (!sh) + batch_size = handle_active_stripes(conf); + if (!batch_size) break; - spin_unlock_irq(&conf->device_lock); - - handled++; - handle_stripe(sh); - release_stripe(sh); - cond_resched(); + handled += batch_size; - if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) + if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) { + spin_unlock_irq(&conf->device_lock); md_check_recovery(mddev); - - spin_lock_irq(&conf->device_lock); + spin_lock_irq(&conf->device_lock); + } } pr_debug("%d stripes handled\n", handled); diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 2164021f3b5..a9fc24901ed 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -210,6 +210,7 @@ struct stripe_head { int disks; /* disks in stripe */ enum check_states check_state; enum reconstruct_states reconstruct_state; + spinlock_t stripe_lock; /** * struct stripe_operations * @target - STRIPE_OP_COMPUTE_BLK target @@ -273,6 +274,7 @@ enum r5dev_flags { R5_Wantwrite, R5_Overlap, /* There is a pending overlapping request * on this block */ + R5_ReadNoMerge, /* prevent bio from merging in block-layer */ R5_ReadError, /* seen a read error here recently */ R5_ReWrite, /* have tried to over-write the readerror */ @@ -319,6 +321,7 @@ enum { STRIPE_BIOFILL_RUN, STRIPE_COMPUTE_RUN, STRIPE_OPS_REQ_PENDING, + STRIPE_ON_UNPLUG_LIST, }; /* diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index 3e8dcf8d2e0..50e08f03aa6 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c @@ -17,10 +17,12 @@ #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/interrupt.h> +#include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/spinlock.h> #include <linux/timer.h> +#include <linux/omap-dma.h> #include <linux/mmc/host.h> #include <linux/mmc/card.h> #include <linux/clk.h> @@ -128,6 +130,10 @@ struct mmc_omap_host { unsigned char id; /* 16xx chips have 2 MMC blocks */ struct clk * iclk; struct clk * fclk; + struct dma_chan *dma_rx; + u32 dma_rx_burst; + struct dma_chan *dma_tx; + u32 dma_tx_burst; struct resource *mem_res; void __iomem *virt_base; unsigned int phys_base; @@ -153,12 +159,8 @@ struct mmc_omap_host { unsigned use_dma:1; unsigned brs_received:1, dma_done:1; - unsigned dma_is_read:1; unsigned dma_in_use:1; - int dma_ch; spinlock_t dma_lock; - struct timer_list dma_timer; - unsigned dma_len; struct mmc_omap_slot *slots[OMAP_MMC_MAX_SLOTS]; struct mmc_omap_slot *current_slot; @@ -406,18 +408,25 @@ mmc_omap_release_dma(struct mmc_omap_host *host, struct mmc_data *data, int abort) { enum dma_data_direction dma_data_dir; + struct 
device *dev = mmc_dev(host->mmc); + struct dma_chan *c; - BUG_ON(host->dma_ch < 0); - if (data->error) - omap_stop_dma(host->dma_ch); - /* Release DMA channel lazily */ - mod_timer(&host->dma_timer, jiffies + HZ); - if (data->flags & MMC_DATA_WRITE) + if (data->flags & MMC_DATA_WRITE) { dma_data_dir = DMA_TO_DEVICE; - else + c = host->dma_tx; + } else { dma_data_dir = DMA_FROM_DEVICE; - dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len, - dma_data_dir); + c = host->dma_rx; + } + if (c) { + if (data->error) { + dmaengine_terminate_all(c); + /* Claim nothing transferred on error... */ + data->bytes_xfered = 0; + } + dev = c->device->dev; + } + dma_unmap_sg(dev, data->sg, host->sg_len, dma_data_dir); } static void mmc_omap_send_stop_work(struct work_struct *work) @@ -525,16 +534,6 @@ mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data) } static void -mmc_omap_dma_timer(unsigned long data) -{ - struct mmc_omap_host *host = (struct mmc_omap_host *) data; - - BUG_ON(host->dma_ch < 0); - omap_free_dma(host->dma_ch); - host->dma_ch = -1; -} - -static void mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data) { unsigned long flags; @@ -891,159 +890,15 @@ static void mmc_omap_cover_handler(unsigned long param) jiffies + msecs_to_jiffies(OMAP_MMC_COVER_POLL_DELAY)); } -/* Prepare to transfer the next segment of a scatterlist */ -static void -mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data) +static void mmc_omap_dma_callback(void *priv) { - int dma_ch = host->dma_ch; - unsigned long data_addr; - u16 buf, frame; - u32 count; - struct scatterlist *sg = &data->sg[host->sg_idx]; - int src_port = 0; - int dst_port = 0; - int sync_dev = 0; - - data_addr = host->phys_base + OMAP_MMC_REG(host, DATA); - frame = data->blksz; - count = sg_dma_len(sg); - - if ((data->blocks == 1) && (count > data->blksz)) - count = frame; - - host->dma_len = count; - - /* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx and 24xx. - * Use 16 or 32 word frames when the blocksize is at least that large. - * Blocksize is usually 512 bytes; but not for some SD reads. 
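
[The FIFO sizing rule in the legacy comment above survives in the dmaengine version as a max-burst computation. A worked example under the comment's own assumptions (blksz = 512 on a 16xx-class 32x2-byte FIFO):

u32 burst = cpu_is_omap15xx() ? 32 : 64;        /* FIFO frame, bytes */
if (burst > data->blksz)        /* small SD reads: shrink to blksz */
        burst = data->blksz;
burst >>= 1;                    /* 16-bit FIFO words: 64 -> 32 */
]
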
- */ - if (cpu_is_omap15xx() && frame > 32) - frame = 32; - else if (frame > 64) - frame = 64; - count /= frame; - frame >>= 1; - - if (!(data->flags & MMC_DATA_WRITE)) { - buf = 0x800f | ((frame - 1) << 8); - - if (cpu_class_is_omap1()) { - src_port = OMAP_DMA_PORT_TIPB; - dst_port = OMAP_DMA_PORT_EMIFF; - } - if (cpu_is_omap24xx()) - sync_dev = OMAP24XX_DMA_MMC1_RX; - - omap_set_dma_src_params(dma_ch, src_port, - OMAP_DMA_AMODE_CONSTANT, - data_addr, 0, 0); - omap_set_dma_dest_params(dma_ch, dst_port, - OMAP_DMA_AMODE_POST_INC, - sg_dma_address(sg), 0, 0); - omap_set_dma_dest_data_pack(dma_ch, 1); - omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4); - } else { - buf = 0x0f80 | ((frame - 1) << 0); - - if (cpu_class_is_omap1()) { - src_port = OMAP_DMA_PORT_EMIFF; - dst_port = OMAP_DMA_PORT_TIPB; - } - if (cpu_is_omap24xx()) - sync_dev = OMAP24XX_DMA_MMC1_TX; - - omap_set_dma_dest_params(dma_ch, dst_port, - OMAP_DMA_AMODE_CONSTANT, - data_addr, 0, 0); - omap_set_dma_src_params(dma_ch, src_port, - OMAP_DMA_AMODE_POST_INC, - sg_dma_address(sg), 0, 0); - omap_set_dma_src_data_pack(dma_ch, 1); - omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4); - } + struct mmc_omap_host *host = priv; + struct mmc_data *data = host->data; - /* Max limit for DMA frame count is 0xffff */ - BUG_ON(count > 0xffff); + /* If we got to the end of DMA, assume everything went well */ + data->bytes_xfered += data->blocks * data->blksz; - OMAP_MMC_WRITE(host, BUF, buf); - omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S16, - frame, count, OMAP_DMA_SYNC_FRAME, - sync_dev, 0); -} - -/* A scatterlist segment completed */ -static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data) -{ - struct mmc_omap_host *host = (struct mmc_omap_host *) data; - struct mmc_data *mmcdat = host->data; - - if (unlikely(host->dma_ch < 0)) { - dev_err(mmc_dev(host->mmc), - "DMA callback while DMA not enabled\n"); - return; - } - /* FIXME: We really should do something to _handle_ the errors */ - if (ch_status & OMAP1_DMA_TOUT_IRQ) { - dev_err(mmc_dev(host->mmc),"DMA timeout\n"); - return; - } - if (ch_status & OMAP_DMA_DROP_IRQ) { - dev_err(mmc_dev(host->mmc), "DMA sync error\n"); - return; - } - if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) { - return; - } - mmcdat->bytes_xfered += host->dma_len; - host->sg_idx++; - if (host->sg_idx < host->sg_len) { - mmc_omap_prepare_dma(host, host->data); - omap_start_dma(host->dma_ch); - } else - mmc_omap_dma_done(host, host->data); -} - -static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data *data) -{ - const char *dma_dev_name; - int sync_dev, dma_ch, is_read, r; - - is_read = !(data->flags & MMC_DATA_WRITE); - del_timer_sync(&host->dma_timer); - if (host->dma_ch >= 0) { - if (is_read == host->dma_is_read) - return 0; - omap_free_dma(host->dma_ch); - host->dma_ch = -1; - } - - if (is_read) { - if (host->id == 0) { - sync_dev = OMAP_DMA_MMC_RX; - dma_dev_name = "MMC1 read"; - } else { - sync_dev = OMAP_DMA_MMC2_RX; - dma_dev_name = "MMC2 read"; - } - } else { - if (host->id == 0) { - sync_dev = OMAP_DMA_MMC_TX; - dma_dev_name = "MMC1 write"; - } else { - sync_dev = OMAP_DMA_MMC2_TX; - dma_dev_name = "MMC2 write"; - } - } - r = omap_request_dma(sync_dev, dma_dev_name, mmc_omap_dma_cb, - host, &dma_ch); - if (r != 0) { - dev_dbg(mmc_dev(host->mmc), "omap_request_dma() failed with %d\n", r); - return r; - } - host->dma_ch = dma_ch; - host->dma_is_read = is_read; - - return 0; + mmc_omap_dma_done(host, data); } static inline void set_cmd_timeout(struct 
mmc_omap_host *host, struct mmc_request *req) @@ -1118,33 +973,85 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req) host->sg_idx = 0; if (use_dma) { - if (mmc_omap_get_dma_channel(host, data) == 0) { - enum dma_data_direction dma_data_dir; - - if (data->flags & MMC_DATA_WRITE) - dma_data_dir = DMA_TO_DEVICE; - else - dma_data_dir = DMA_FROM_DEVICE; - - host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, - sg_len, dma_data_dir); - host->total_bytes_left = 0; - mmc_omap_prepare_dma(host, req->data); - host->brs_received = 0; - host->dma_done = 0; - host->dma_in_use = 1; - } else - use_dma = 0; + enum dma_data_direction dma_data_dir; + struct dma_async_tx_descriptor *tx; + struct dma_chan *c; + u32 burst, *bp; + u16 buf; + + /* + * FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx + * and 24xx. Use 16 or 32 word frames when the + * blocksize is at least that large. Blocksize is + * usually 512 bytes; but not for some SD reads. + */ + burst = cpu_is_omap15xx() ? 32 : 64; + if (burst > data->blksz) + burst = data->blksz; + + burst >>= 1; + + if (data->flags & MMC_DATA_WRITE) { + c = host->dma_tx; + bp = &host->dma_tx_burst; + buf = 0x0f80 | (burst - 1) << 0; + dma_data_dir = DMA_TO_DEVICE; + } else { + c = host->dma_rx; + bp = &host->dma_rx_burst; + buf = 0x800f | (burst - 1) << 8; + dma_data_dir = DMA_FROM_DEVICE; + } + + if (!c) + goto use_pio; + + /* Only reconfigure if we have a different burst size */ + if (*bp != burst) { + struct dma_slave_config cfg; + + cfg.src_addr = host->phys_base + OMAP_MMC_REG(host, DATA); + cfg.dst_addr = host->phys_base + OMAP_MMC_REG(host, DATA); + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; + cfg.src_maxburst = burst; + cfg.dst_maxburst = burst; + + if (dmaengine_slave_config(c, &cfg)) + goto use_pio; + + *bp = burst; + } + + host->sg_len = dma_map_sg(c->device->dev, data->sg, sg_len, + dma_data_dir); + if (host->sg_len == 0) + goto use_pio; + + tx = dmaengine_prep_slave_sg(c, data->sg, host->sg_len, + data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!tx) + goto use_pio; + + OMAP_MMC_WRITE(host, BUF, buf); + + tx->callback = mmc_omap_dma_callback; + tx->callback_param = host; + dmaengine_submit(tx); + host->brs_received = 0; + host->dma_done = 0; + host->dma_in_use = 1; + return; } + use_pio: /* Revert to PIO? */ - if (!use_dma) { - OMAP_MMC_WRITE(host, BUF, 0x1f1f); - host->total_bytes_left = data->blocks * block_size; - host->sg_len = sg_len; - mmc_omap_sg_to_buf(host); - host->dma_in_use = 0; - } + OMAP_MMC_WRITE(host, BUF, 0x1f1f); + host->total_bytes_left = data->blocks * block_size; + host->sg_len = sg_len; + mmc_omap_sg_to_buf(host); + host->dma_in_use = 0; } static void mmc_omap_start_request(struct mmc_omap_host *host, @@ -1157,8 +1064,12 @@ static void mmc_omap_start_request(struct mmc_omap_host *host, /* only touch fifo AFTER the controller readies it */ mmc_omap_prepare_data(host, req); mmc_omap_start_command(host, req->cmd); - if (host->dma_in_use) - omap_start_dma(host->dma_ch); + if (host->dma_in_use) { + struct dma_chan *c = host->data->flags & MMC_DATA_WRITE ? 
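
[mmc_omap_prepare_data() above now follows the canonical dmaengine slave sequence: slave_config, dma_map_sg(), prep_slave_sg(), attach the callback, submit, and finally issue_pending once the controller is ready. Stripped of the driver specifics; fifo_phys, burst, dir, xfer_dir and done_callback are assumed locals:

struct dma_slave_config cfg = {
        .src_addr       = fifo_phys,    /* device FIFO (assumed) */
        .dst_addr       = fifo_phys,
        .src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
        .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
        .src_maxburst   = burst,
        .dst_maxburst   = burst,
};
struct dma_async_tx_descriptor *tx;

if (dmaengine_slave_config(c, &cfg))
        goto use_pio;
if (!dma_map_sg(c->device->dev, sg, sg_len, dir))
        goto use_pio;
tx = dmaengine_prep_slave_sg(c, sg, sg_len, xfer_dir,
                             DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!tx)
        goto use_pio;
tx->callback = done_callback;   /* completion handler (assumed) */
tx->callback_param = host;
dmaengine_submit(tx);
/* ... program the controller ... */
dma_async_issue_pending(c);     /* nothing moves until this */
]
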
+ host->dma_tx : host->dma_rx; + + dma_async_issue_pending(c); + } } static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req) @@ -1400,6 +1311,8 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev) struct omap_mmc_platform_data *pdata = pdev->dev.platform_data; struct mmc_omap_host *host = NULL; struct resource *res; + dma_cap_mask_t mask; + unsigned sig; int i, ret = 0; int irq; @@ -1439,7 +1352,6 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev) setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host); spin_lock_init(&host->dma_lock); - setup_timer(&host->dma_timer, mmc_omap_dma_timer, (unsigned long) host); spin_lock_init(&host->slot_lock); init_waitqueue_head(&host->slot_wq); @@ -1450,11 +1362,7 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev) host->id = pdev->id; host->mem_res = res; host->irq = irq; - host->use_dma = 1; - host->dev->dma_mask = &pdata->dma_mask; - host->dma_ch = -1; - host->irq = irq; host->phys_base = host->mem_res->start; host->virt_base = ioremap(res->start, resource_size(res)); @@ -1474,9 +1382,48 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev) goto err_free_iclk; } + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + host->dma_tx_burst = -1; + host->dma_rx_burst = -1; + + if (cpu_is_omap24xx()) + sig = host->id == 0 ? OMAP24XX_DMA_MMC1_TX : OMAP24XX_DMA_MMC2_TX; + else + sig = host->id == 0 ? OMAP_DMA_MMC_TX : OMAP_DMA_MMC2_TX; + host->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig); +#if 0 + if (!host->dma_tx) { + dev_err(host->dev, "unable to obtain TX DMA engine channel %u\n", + sig); + goto err_dma; + } +#else + if (!host->dma_tx) + dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n", + sig); +#endif + if (cpu_is_omap24xx()) + sig = host->id == 0 ? OMAP24XX_DMA_MMC1_RX : OMAP24XX_DMA_MMC2_RX; + else + sig = host->id == 0 ? 
OMAP_DMA_MMC_RX : OMAP_DMA_MMC2_RX; + host->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig); +#if 0 + if (!host->dma_rx) { + dev_err(host->dev, "unable to obtain RX DMA engine channel %u\n", + sig); + goto err_dma; + } +#else + if (!host->dma_rx) + dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n", + sig); +#endif + ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host); if (ret) - goto err_free_fclk; + goto err_free_dma; if (pdata->init != NULL) { ret = pdata->init(&pdev->dev); @@ -1510,7 +1457,11 @@ err_plat_cleanup: pdata->cleanup(&pdev->dev); err_free_irq: free_irq(host->irq, host); -err_free_fclk: +err_free_dma: + if (host->dma_tx) + dma_release_channel(host->dma_tx); + if (host->dma_rx) + dma_release_channel(host->dma_rx); clk_put(host->fclk); err_free_iclk: clk_disable(host->iclk); @@ -1545,6 +1496,11 @@ static int __devexit mmc_omap_remove(struct platform_device *pdev) clk_disable(host->iclk); clk_put(host->iclk); + if (host->dma_tx) + dma_release_channel(host->dma_tx); + if (host->dma_rx) + dma_release_channel(host->dma_rx); + iounmap(host->virt_base); release_mem_region(pdev->resource[0].start, pdev->resource[0].end - pdev->resource[0].start + 1); diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index bc28627af66..3a09f93cc3b 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -19,6 +19,7 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/debugfs.h> +#include <linux/dmaengine.h> #include <linux/seq_file.h> #include <linux/interrupt.h> #include <linux/delay.h> @@ -29,6 +30,7 @@ #include <linux/of.h> #include <linux/of_gpio.h> #include <linux/of_device.h> +#include <linux/omap-dma.h> #include <linux/mmc/host.h> #include <linux/mmc/core.h> #include <linux/mmc/mmc.h> @@ -37,7 +39,6 @@ #include <linux/gpio.h> #include <linux/regulator/consumer.h> #include <linux/pm_runtime.h> -#include <plat/dma.h> #include <mach/hardware.h> #include <plat/board.h> #include <plat/mmc.h> @@ -166,7 +167,8 @@ struct omap_hsmmc_host { int suspended; int irq; int use_dma, dma_ch; - int dma_line_tx, dma_line_rx; + struct dma_chan *tx_chan; + struct dma_chan *rx_chan; int slot_id; int response_busy; int context_loss; @@ -797,6 +799,12 @@ omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data) return DMA_FROM_DEVICE; } +static struct dma_chan *omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host, + struct mmc_data *data) +{ + return data->flags & MMC_DATA_WRITE ? 
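
[Channel acquisition above uses the generic allocator with an OMAP-specific filter: the mask asks for any DMA_SLAVE-capable channel, and omap_dma_filter_fn() narrows the match to the channel wired to the given request line. Note the #if 0 blocks: the hard-failure path is kept in the source, but for now a missing channel only earns a dev_warn() and the driver falls back to PIO. The request pattern, reduced:

dma_cap_mask_t mask;
unsigned sig = OMAP24XX_DMA_MMC1_TX;    /* DMA request line */

dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
if (!chan)
        dev_warn(dev, "no TX DMA channel, using PIO\n");
]
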
host->tx_chan : host->rx_chan; +} + static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq) { int dma_ch; @@ -889,10 +897,13 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno) spin_unlock_irqrestore(&host->irq_lock, flags); if (host->use_dma && dma_ch != -1) { - dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, - host->data->sg_len, + struct dma_chan *chan = omap_hsmmc_get_dma_chan(host, host->data); + + dmaengine_terminate_all(chan); + dma_unmap_sg(chan->device->dev, + host->data->sg, host->data->sg_len, omap_hsmmc_get_dma_dir(host, host->data)); - omap_free_dma(dma_ch); + host->data->host_cookie = 0; } host->data = NULL; @@ -1190,90 +1201,29 @@ static irqreturn_t omap_hsmmc_detect(int irq, void *dev_id) return IRQ_HANDLED; } -static int omap_hsmmc_get_dma_sync_dev(struct omap_hsmmc_host *host, - struct mmc_data *data) -{ - int sync_dev; - - if (data->flags & MMC_DATA_WRITE) - sync_dev = host->dma_line_tx; - else - sync_dev = host->dma_line_rx; - return sync_dev; -} - -static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host, - struct mmc_data *data, - struct scatterlist *sgl) -{ - int blksz, nblk, dma_ch; - - dma_ch = host->dma_ch; - if (data->flags & MMC_DATA_WRITE) { - omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, - (host->mapbase + OMAP_HSMMC_DATA), 0, 0); - omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC, - sg_dma_address(sgl), 0, 0); - } else { - omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, - (host->mapbase + OMAP_HSMMC_DATA), 0, 0); - omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC, - sg_dma_address(sgl), 0, 0); - } - - blksz = host->data->blksz; - nblk = sg_dma_len(sgl) / blksz; - - omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32, - blksz / 4, nblk, OMAP_DMA_SYNC_FRAME, - omap_hsmmc_get_dma_sync_dev(host, data), - !(data->flags & MMC_DATA_WRITE)); - - omap_start_dma(dma_ch); -} - -/* - * DMA call back function - */ -static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data) +static void omap_hsmmc_dma_callback(void *param) { - struct omap_hsmmc_host *host = cb_data; + struct omap_hsmmc_host *host = param; + struct dma_chan *chan; struct mmc_data *data; - int dma_ch, req_in_progress; - unsigned long flags; - - if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) { - dev_warn(mmc_dev(host->mmc), "unexpected dma status %x\n", - ch_status); - return; - } + int req_in_progress; - spin_lock_irqsave(&host->irq_lock, flags); + spin_lock_irq(&host->irq_lock); if (host->dma_ch < 0) { - spin_unlock_irqrestore(&host->irq_lock, flags); + spin_unlock_irq(&host->irq_lock); return; } data = host->mrq->data; - host->dma_sg_idx++; - if (host->dma_sg_idx < host->dma_len) { - /* Fire up the next transfer. 
*/ - omap_hsmmc_config_dma_params(host, data, - data->sg + host->dma_sg_idx); - spin_unlock_irqrestore(&host->irq_lock, flags); - return; - } - + chan = omap_hsmmc_get_dma_chan(host, data); if (!data->host_cookie) - dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, + dma_unmap_sg(chan->device->dev, + data->sg, data->sg_len, omap_hsmmc_get_dma_dir(host, data)); req_in_progress = host->req_in_progress; - dma_ch = host->dma_ch; host->dma_ch = -1; - spin_unlock_irqrestore(&host->irq_lock, flags); - - omap_free_dma(dma_ch); + spin_unlock_irq(&host->irq_lock); /* If DMA has finished after TC, complete the request */ if (!req_in_progress) { @@ -1286,7 +1236,8 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data) static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host, struct mmc_data *data, - struct omap_hsmmc_next *next) + struct omap_hsmmc_next *next, + struct dma_chan *chan) { int dma_len; @@ -1301,8 +1252,7 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host, /* Check if next job is already prepared */ if (next || (!next && data->host_cookie != host->next_data.cookie)) { - dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, - data->sg_len, + dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len, omap_hsmmc_get_dma_dir(host, data)); } else { @@ -1329,8 +1279,11 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host, static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host, struct mmc_request *req) { - int dma_ch = 0, ret = 0, i; + struct dma_slave_config cfg; + struct dma_async_tx_descriptor *tx; + int ret = 0, i; struct mmc_data *data = req->data; + struct dma_chan *chan; /* Sanity check: all the SG entries must be aligned by block size. */ for (i = 0; i < data->sg_len; i++) { @@ -1348,22 +1301,41 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host, BUG_ON(host->dma_ch != -1); - ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data), - "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch); - if (ret != 0) { - dev_err(mmc_dev(host->mmc), - "%s: omap_request_dma() failed with %d\n", - mmc_hostname(host->mmc), ret); + chan = omap_hsmmc_get_dma_chan(host, data); + + cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA; + cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA; + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.src_maxburst = data->blksz / 4; + cfg.dst_maxburst = data->blksz / 4; + + ret = dmaengine_slave_config(chan, &cfg); + if (ret) return ret; - } - ret = omap_hsmmc_pre_dma_transfer(host, data, NULL); + + ret = omap_hsmmc_pre_dma_transfer(host, data, NULL, chan); if (ret) return ret; - host->dma_ch = dma_ch; - host->dma_sg_idx = 0; + tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, + data->flags & MMC_DATA_WRITE ? 
DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!tx) { + dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n"); + /* FIXME: cleanup */ + return -1; + } + + tx->callback = omap_hsmmc_dma_callback; + tx->callback_param = host; - omap_hsmmc_config_dma_params(host, data, data->sg); + /* Does not fail */ + dmaengine_submit(tx); + + host->dma_ch = 1; + + dma_async_issue_pending(chan); return 0; } @@ -1445,11 +1417,11 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq, struct omap_hsmmc_host *host = mmc_priv(mmc); struct mmc_data *data = mrq->data; - if (host->use_dma) { - if (data->host_cookie) - dma_unmap_sg(mmc_dev(host->mmc), data->sg, - data->sg_len, - omap_hsmmc_get_dma_dir(host, data)); + if (host->use_dma && data->host_cookie) { + struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data); + + dma_unmap_sg(c->device->dev, data->sg, data->sg_len, + omap_hsmmc_get_dma_dir(host, data)); data->host_cookie = 0; } } @@ -1464,10 +1436,13 @@ static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq, return ; } - if (host->use_dma) + if (host->use_dma) { + struct dma_chan *c = omap_hsmmc_get_dma_chan(host, mrq->data); + if (omap_hsmmc_pre_dma_transfer(host, mrq->data, - &host->next_data)) + &host->next_data, c)) mrq->data->host_cookie = 0; + } } /* @@ -1800,6 +1775,8 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev) struct resource *res; int ret, irq; const struct of_device_id *match; + dma_cap_mask_t mask; + unsigned tx_req, rx_req; match = of_match_device(of_match_ptr(omap_mmc_of_match), &pdev->dev); if (match) { @@ -1844,7 +1821,6 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev) host->pdata = pdata; host->dev = &pdev->dev; host->use_dma = 1; - host->dev->dma_mask = &pdata->dma_mask; host->dma_ch = -1; host->irq = irq; host->slot_id = 0; @@ -1934,7 +1910,7 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev) ret = -ENXIO; goto err_irq; } - host->dma_line_tx = res->start; + tx_req = res->start; res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx"); if (!res) { @@ -1942,7 +1918,24 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev) ret = -ENXIO; goto err_irq; } - host->dma_line_rx = res->start; + rx_req = res->start; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + host->rx_chan = dma_request_channel(mask, omap_dma_filter_fn, &rx_req); + if (!host->rx_chan) { + dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req); + ret = -ENXIO; + goto err_irq; + } + + host->tx_chan = dma_request_channel(mask, omap_dma_filter_fn, &tx_req); + if (!host->tx_chan) { + dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req); + ret = -ENXIO; + goto err_irq; + } /* Request IRQ for MMC operations */ ret = request_irq(host->irq, omap_hsmmc_irq, 0, @@ -2021,6 +2014,10 @@ err_reg: err_irq_cd_init: free_irq(host->irq, host); err_irq: + if (host->tx_chan) + dma_release_channel(host->tx_chan); + if (host->rx_chan) + dma_release_channel(host->rx_chan); pm_runtime_put_sync(host->dev); pm_runtime_disable(host->dev); clk_put(host->fclk); @@ -2056,6 +2053,11 @@ static int __devexit omap_hsmmc_remove(struct platform_device *pdev) if (mmc_slot(host).card_detect_irq) free_irq(mmc_slot(host).card_detect_irq, host); + if (host->tx_chan) + dma_release_channel(host->tx_chan); + if (host->rx_chan) + dma_release_channel(host->rx_chan); + pm_runtime_put_sync(host->dev); pm_runtime_disable(host->dev); 
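
[As the omap_hsmmc probe/remove hunks above show, each dma_request_channel() must be paired with a dma_release_channel() on both the error unwind and the remove path; the NULL checks let one error label cover a partial acquisition. In outline:

host->rx_chan = dma_request_channel(mask, omap_dma_filter_fn, &rx_req);
if (!host->rx_chan) {
        ret = -ENXIO;
        goto err_irq;
}
host->tx_chan = dma_request_channel(mask, omap_dma_filter_fn, &tx_req);
if (!host->tx_chan) {
        ret = -ENXIO;
        goto err_irq;
}
/* ... */
err_irq:
        if (host->tx_chan)
                dma_release_channel(host->tx_chan);
        if (host->rx_chan)
                dma_release_channel(host->rx_chan);
]
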
clk_put(host->fclk); diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index d7f681d0c9b..e9309b3659e 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c @@ -9,6 +9,7 @@ */ #include <linux/platform_device.h> +#include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/module.h> @@ -18,6 +19,7 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> +#include <linux/omap-dma.h> #include <linux/io.h> #include <linux/slab.h> @@ -123,7 +125,7 @@ struct omap_nand_info { int gpmc_cs; unsigned long phys_base; struct completion comp; - int dma_ch; + struct dma_chan *dma; int gpmc_irq; enum { OMAP_NAND_IO_READ = 0, /* read */ @@ -336,12 +338,10 @@ static void omap_write_buf_pref(struct mtd_info *mtd, } /* - * omap_nand_dma_cb: callback on the completion of dma transfer - * @lch: logical channel - * @ch_satuts: channel status + * omap_nand_dma_callback: callback on the completion of dma transfer * @data: pointer to completion data structure */ -static void omap_nand_dma_cb(int lch, u16 ch_status, void *data) +static void omap_nand_dma_callback(void *data) { complete((struct completion *) data); } @@ -358,17 +358,13 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, { struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, mtd); + struct dma_async_tx_descriptor *tx; enum dma_data_direction dir = is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE; - dma_addr_t dma_addr; - int ret; + struct scatterlist sg; unsigned long tim, limit; - - /* The fifo depth is 64 bytes max. - * But configure the FIFO-threahold to 32 to get a sync at each frame - * and frame length is 32 bytes. - */ - int buf_len = len >> 6; + unsigned n; + int ret; if (addr >= high_memory) { struct page *p1; @@ -382,40 +378,33 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK); } - dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir); - if (dma_mapping_error(&info->pdev->dev, dma_addr)) { + sg_init_one(&sg, addr, len); + n = dma_map_sg(info->dma->device->dev, &sg, 1, dir); + if (n == 0) { dev_err(&info->pdev->dev, "Couldn't DMA map a %d byte buffer\n", len); goto out_copy; } - if (is_write) { - omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, - info->phys_base, 0, 0); - omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC, - dma_addr, 0, 0); - omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32, - 0x10, buf_len, OMAP_DMA_SYNC_FRAME, - OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC); - } else { - omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, - info->phys_base, 0, 0); - omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC, - dma_addr, 0, 0); - omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32, - 0x10, buf_len, OMAP_DMA_SYNC_FRAME, - OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC); - } - /* configure and start prefetch transfer */ + tx = dmaengine_prep_slave_sg(info->dma, &sg, n, + is_write ? 
DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!tx) + goto out_copy_unmap; + + tx->callback = omap_nand_dma_callback; + tx->callback_param = &info->comp; + dmaengine_submit(tx); + + /* configure and start prefetch transfer */ ret = gpmc_prefetch_enable(info->gpmc_cs, - PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write); + PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write); if (ret) /* PFPW engine is busy, use cpu copy method */ goto out_copy_unmap; init_completion(&info->comp); - - omap_start_dma(info->dma_ch); + dma_async_issue_pending(info->dma); /* setup and start DMA using dma_addr */ wait_for_completion(&info->comp); @@ -427,11 +416,11 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, /* disable and stop the PFPW engine */ gpmc_prefetch_reset(info->gpmc_cs); - dma_unmap_single(&info->pdev->dev, dma_addr, len, dir); + dma_unmap_sg(info->dma->device->dev, &sg, 1, dir); return 0; out_copy_unmap: - dma_unmap_single(&info->pdev->dev, dma_addr, len, dir); + dma_unmap_sg(info->dma->device->dev, &sg, 1, dir); out_copy: if (info->nand.options & NAND_BUSWIDTH_16) is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len) @@ -1164,6 +1153,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) struct omap_nand_platform_data *pdata; int err; int i, offset; + dma_cap_mask_t mask; + unsigned sig; pdata = pdev->dev.platform_data; if (pdata == NULL) { @@ -1244,18 +1235,31 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) break; case NAND_OMAP_PREFETCH_DMA: - err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND", - omap_nand_dma_cb, &info->comp, &info->dma_ch); - if (err < 0) { - info->dma_ch = -1; - dev_err(&pdev->dev, "DMA request failed!\n"); + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + sig = OMAP24XX_DMA_GPMC; + info->dma = dma_request_channel(mask, omap_dma_filter_fn, &sig); + if (!info->dma) { + dev_err(&pdev->dev, "DMA engine request failed\n"); + err = -ENXIO; goto out_release_mem_region; } else { - omap_set_dma_dest_burst_mode(info->dma_ch, - OMAP_DMA_DATA_BURST_16); - omap_set_dma_src_burst_mode(info->dma_ch, - OMAP_DMA_DATA_BURST_16); - + struct dma_slave_config cfg; + int rc; + + memset(&cfg, 0, sizeof(cfg)); + cfg.src_addr = info->phys_base; + cfg.dst_addr = info->phys_base; + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.src_maxburst = 16; + cfg.dst_maxburst = 16; + rc = dmaengine_slave_config(info->dma, &cfg); + if (rc) { + dev_err(&pdev->dev, "DMA engine slave config failed: %d\n", + rc); + goto out_release_mem_region; + } info->nand.read_buf = omap_read_buf_dma_pref; info->nand.write_buf = omap_write_buf_dma_pref; } @@ -1358,6 +1362,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) return 0; out_release_mem_region: + if (info->dma) + dma_release_channel(info->dma); release_mem_region(info->phys_base, NAND_IO_SIZE); out_free_info: kfree(info); @@ -1373,8 +1379,8 @@ static int omap_nand_remove(struct platform_device *pdev) omap3_free_bch(&info->mtd); platform_set_drvdata(pdev, NULL); - if (info->dma_ch != -1) - omap_free_dma(info->dma_ch); + if (info->dma) + dma_release_channel(info->dma); if (info->gpmc_irq) free_irq(info->gpmc_irq, info); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c index 57bf1d7ee80..9ab24528f9b 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c @@ 
-1188,7 +1188,7 @@ exit: kfree(buf); /* close file before return */ if (fp) - filp_close(fp, current->files); + filp_close(fp, NULL); /* restore previous address limit */ set_fs(old_fs); diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 2d1e68db9b3..e894ca7b54c 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -4146,45 +4146,7 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport) static void fc_bsg_remove(struct request_queue *q) { - struct request *req; /* block request */ - int counts; /* totals for request_list count and starved */ - if (q) { - /* Stop taking in new requests */ - spin_lock_irq(q->queue_lock); - blk_stop_queue(q); - - /* drain all requests in the queue */ - while (1) { - /* need the lock to fetch a request - * this may fetch the same reqeust as the previous pass - */ - req = blk_fetch_request(q); - /* save requests in use and starved */ - counts = q->rq.count[0] + q->rq.count[1] + - q->rq.starved[0] + q->rq.starved[1]; - spin_unlock_irq(q->queue_lock); - /* any requests still outstanding? */ - if (counts == 0) - break; - - /* This may be the same req as the previous iteration, - * always send the blk_end_request_all after a prefetch. - * It is not okay to not end the request because the - * prefetch started the request. - */ - if (req) { - /* return -ENXIO to indicate that this queue is - * going away - */ - req->errors = -ENXIO; - blk_end_request_all(req, -ENXIO); - } - - msleep(200); /* allow bsg to possibly finish */ - spin_lock_irq(q->queue_lock); - } - bsg_unregister_queue(q); blk_cleanup_queue(q); } diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 09809d06ecc..fa1dfaa83e3 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -575,7 +575,7 @@ static int iscsi_remove_host(struct transport_container *tc, struct iscsi_cls_host *ihost = shost->shost_data; if (ihost->bsg_q) { - bsg_remove_queue(ihost->bsg_q); + bsg_unregister_queue(ihost->bsg_q); blk_cleanup_queue(ihost->bsg_q); } return 0; diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index 7d46b15e152..bc4778175e3 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -28,6 +28,8 @@ #include <linux/device.h> #include <linux/delay.h> #include <linux/dma-mapping.h> +#include <linux/dmaengine.h> +#include <linux/omap-dma.h> #include <linux/platform_device.h> #include <linux/err.h> #include <linux/clk.h> @@ -39,7 +41,6 @@ #include <linux/spi/spi.h> -#include <plat/dma.h> #include <plat/clock.h> #include <plat/mcspi.h> @@ -93,8 +94,8 @@ /* We have 2 DMA channels per CS, one for RX and one for TX */ struct omap2_mcspi_dma { - int dma_tx_channel; - int dma_rx_channel; + struct dma_chan *dma_tx; + struct dma_chan *dma_rx; int dma_tx_sync_dev; int dma_rx_sync_dev; @@ -300,20 +301,46 @@ static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit) return 0; } +static void omap2_mcspi_rx_callback(void *data) +{ + struct spi_device *spi = data; + struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master); + struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select]; + + complete(&mcspi_dma->dma_rx_completion); + + /* We must disable the DMA RX request */ + omap2_mcspi_set_dma_req(spi, 1, 0); +} + +static void omap2_mcspi_tx_callback(void *data) +{ + struct spi_device *spi = data; + struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master); + struct omap2_mcspi_dma 
*mcspi_dma = &mcspi->dma_channels[spi->chip_select]; + + complete(&mcspi_dma->dma_tx_completion); + + /* We must disable the DMA TX request */ + omap2_mcspi_set_dma_req(spi, 0, 0); +} + static unsigned omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) { struct omap2_mcspi *mcspi; struct omap2_mcspi_cs *cs = spi->controller_state; struct omap2_mcspi_dma *mcspi_dma; - unsigned int count, c; - unsigned long base, tx_reg, rx_reg; - int word_len, data_type, element_count; + unsigned int count; + int word_len, element_count; int elements = 0; u32 l; u8 * rx; const u8 * tx; void __iomem *chstat_reg; + struct dma_slave_config cfg; + enum dma_slave_buswidth width; + unsigned es; mcspi = spi_master_get_devdata(spi->master); mcspi_dma = &mcspi->dma_channels[spi->chip_select]; @@ -321,68 +348,92 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0; + if (cs->word_len <= 8) { + width = DMA_SLAVE_BUSWIDTH_1_BYTE; + es = 1; + } else if (cs->word_len <= 16) { + width = DMA_SLAVE_BUSWIDTH_2_BYTES; + es = 2; + } else { + width = DMA_SLAVE_BUSWIDTH_4_BYTES; + es = 4; + } + + memset(&cfg, 0, sizeof(cfg)); + cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0; + cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0; + cfg.src_addr_width = width; + cfg.dst_addr_width = width; + cfg.src_maxburst = 1; + cfg.dst_maxburst = 1; + + if (xfer->tx_buf && mcspi_dma->dma_tx) { + struct dma_async_tx_descriptor *tx; + struct scatterlist sg; + + dmaengine_slave_config(mcspi_dma->dma_tx, &cfg); + + sg_init_table(&sg, 1); + sg_dma_address(&sg) = xfer->tx_dma; + sg_dma_len(&sg) = xfer->len; + + tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1, + DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (tx) { + tx->callback = omap2_mcspi_tx_callback; + tx->callback_param = spi; + dmaengine_submit(tx); + } else { + /* FIXME: fall back to PIO? */ + } + } + + if (xfer->rx_buf && mcspi_dma->dma_rx) { + struct dma_async_tx_descriptor *tx; + struct scatterlist sg; + size_t len = xfer->len - es; + + dmaengine_slave_config(mcspi_dma->dma_rx, &cfg); + + if (l & OMAP2_MCSPI_CHCONF_TURBO) + len -= es; + + sg_init_table(&sg, 1); + sg_dma_address(&sg) = xfer->rx_dma; + sg_dma_len(&sg) = len; + + tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1, + DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (tx) { + tx->callback = omap2_mcspi_rx_callback; + tx->callback_param = spi; + dmaengine_submit(tx); + } else { + /* FIXME: fall back to PIO? 
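The McSPI conversion above hands dmaengine a one-entry scatterlist built around a buffer the SPI core has already DMA-mapped (xfer->tx_dma / xfer->rx_dma), so it fills in the DMA address and length directly instead of calling dma_map_sg(). A condensed sketch of that pattern with hypothetical names; the on-stack sg only has to live until the prep call returns, which is what the driver relies on here:

#include <linux/scatterlist.h>
#include <linux/dmaengine.h>

static struct dma_async_tx_descriptor *
example_prep_mapped(struct dma_chan *chan, dma_addr_t addr, size_t len,
		    enum dma_transfer_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_dma_address(&sg) = addr;	/* already mapped by the caller */
	sg_dma_len(&sg) = len;

	return dmaengine_prep_slave_sg(chan, &sg, 1, dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}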
*/ + } + } + count = xfer->len; - c = count; word_len = cs->word_len; - base = cs->phys; - tx_reg = base + OMAP2_MCSPI_TX0; - rx_reg = base + OMAP2_MCSPI_RX0; rx = xfer->rx_buf; tx = xfer->tx_buf; if (word_len <= 8) { - data_type = OMAP_DMA_DATA_TYPE_S8; element_count = count; } else if (word_len <= 16) { - data_type = OMAP_DMA_DATA_TYPE_S16; element_count = count >> 1; } else /* word_len <= 32 */ { - data_type = OMAP_DMA_DATA_TYPE_S32; element_count = count >> 2; } if (tx != NULL) { - omap_set_dma_transfer_params(mcspi_dma->dma_tx_channel, - data_type, element_count, 1, - OMAP_DMA_SYNC_ELEMENT, - mcspi_dma->dma_tx_sync_dev, 0); - - omap_set_dma_dest_params(mcspi_dma->dma_tx_channel, 0, - OMAP_DMA_AMODE_CONSTANT, - tx_reg, 0, 0); - - omap_set_dma_src_params(mcspi_dma->dma_tx_channel, 0, - OMAP_DMA_AMODE_POST_INC, - xfer->tx_dma, 0, 0); - } - - if (rx != NULL) { - elements = element_count - 1; - if (l & OMAP2_MCSPI_CHCONF_TURBO) - elements--; - - omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel, - data_type, elements, 1, - OMAP_DMA_SYNC_ELEMENT, - mcspi_dma->dma_rx_sync_dev, 1); - - omap_set_dma_src_params(mcspi_dma->dma_rx_channel, 0, - OMAP_DMA_AMODE_CONSTANT, - rx_reg, 0, 0); - - omap_set_dma_dest_params(mcspi_dma->dma_rx_channel, 0, - OMAP_DMA_AMODE_POST_INC, - xfer->rx_dma, 0, 0); - } - - if (tx != NULL) { - omap_start_dma(mcspi_dma->dma_tx_channel); + dma_async_issue_pending(mcspi_dma->dma_tx); omap2_mcspi_set_dma_req(spi, 0, 1); } if (rx != NULL) { - omap_start_dma(mcspi_dma->dma_rx_channel); + dma_async_issue_pending(mcspi_dma->dma_rx); omap2_mcspi_set_dma_req(spi, 1, 1); } @@ -408,7 +459,10 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) DMA_FROM_DEVICE); omap2_mcspi_set_enable(spi, 0); + elements = element_count - 1; + if (l & OMAP2_MCSPI_CHCONF_TURBO) { + elements--; if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0) & OMAP2_MCSPI_CHSTAT_RXS)) { @@ -725,64 +779,38 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi, return 0; } -static void omap2_mcspi_dma_rx_callback(int lch, u16 ch_status, void *data) -{ - struct spi_device *spi = data; - struct omap2_mcspi *mcspi; - struct omap2_mcspi_dma *mcspi_dma; - - mcspi = spi_master_get_devdata(spi->master); - mcspi_dma = &(mcspi->dma_channels[spi->chip_select]); - - complete(&mcspi_dma->dma_rx_completion); - - /* We must disable the DMA RX request */ - omap2_mcspi_set_dma_req(spi, 1, 0); -} - -static void omap2_mcspi_dma_tx_callback(int lch, u16 ch_status, void *data) -{ - struct spi_device *spi = data; - struct omap2_mcspi *mcspi; - struct omap2_mcspi_dma *mcspi_dma; - - mcspi = spi_master_get_devdata(spi->master); - mcspi_dma = &(mcspi->dma_channels[spi->chip_select]); - - complete(&mcspi_dma->dma_tx_completion); - - /* We must disable the DMA TX request */ - omap2_mcspi_set_dma_req(spi, 0, 0); -} - static int omap2_mcspi_request_dma(struct spi_device *spi) { struct spi_master *master = spi->master; struct omap2_mcspi *mcspi; struct omap2_mcspi_dma *mcspi_dma; + dma_cap_mask_t mask; + unsigned sig; mcspi = spi_master_get_devdata(master); mcspi_dma = mcspi->dma_channels + spi->chip_select; - if (omap_request_dma(mcspi_dma->dma_rx_sync_dev, "McSPI RX", - omap2_mcspi_dma_rx_callback, spi, - &mcspi_dma->dma_rx_channel)) { - dev_err(&spi->dev, "no RX DMA channel for McSPI\n"); + init_completion(&mcspi_dma->dma_rx_completion); + init_completion(&mcspi_dma->dma_tx_completion); + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + sig = mcspi_dma->dma_rx_sync_dev; + mcspi_dma->dma_rx = 
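Both this driver and the NAND driver request their channels through the capability-mask plus filter-function interface, using the legacy OMAP DMA request line as the match key; note how the TX failure path releases the RX channel it already holds. A sketch of that pairing, assuming the omap_dma_filter_fn helper this series introduces:

#include <linux/dmaengine.h>
#include <linux/omap-dma.h>	/* omap_dma_filter_fn, per this series */

static int example_request_pair(struct dma_chan **rx, struct dma_chan **tx,
				unsigned int rx_sig, unsigned int tx_sig)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* The filter matches on the request-line number; the pointer is
	 * only dereferenced during this synchronous call. */
	*rx = dma_request_channel(mask, omap_dma_filter_fn, &rx_sig);
	if (!*rx)
		return -EAGAIN;

	*tx = dma_request_channel(mask, omap_dma_filter_fn, &tx_sig);
	if (!*tx) {
		dma_release_channel(*rx);	/* roll back partial setup */
		*rx = NULL;
		return -EAGAIN;
	}
	return 0;
}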
dma_request_channel(mask, omap_dma_filter_fn, &sig); + if (!mcspi_dma->dma_rx) { + dev_err(&spi->dev, "no RX DMA engine channel for McSPI\n"); return -EAGAIN; } - if (omap_request_dma(mcspi_dma->dma_tx_sync_dev, "McSPI TX", - omap2_mcspi_dma_tx_callback, spi, - &mcspi_dma->dma_tx_channel)) { - omap_free_dma(mcspi_dma->dma_rx_channel); - mcspi_dma->dma_rx_channel = -1; - dev_err(&spi->dev, "no TX DMA channel for McSPI\n"); + sig = mcspi_dma->dma_tx_sync_dev; + mcspi_dma->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig); + if (!mcspi_dma->dma_tx) { + dev_err(&spi->dev, "no TX DMA engine channel for McSPI\n"); + dma_release_channel(mcspi_dma->dma_rx); + mcspi_dma->dma_rx = NULL; return -EAGAIN; } - init_completion(&mcspi_dma->dma_rx_completion); - init_completion(&mcspi_dma->dma_tx_completion); - return 0; } @@ -814,8 +842,7 @@ static int omap2_mcspi_setup(struct spi_device *spi) list_add_tail(&cs->node, &ctx->cs); } - if (mcspi_dma->dma_rx_channel == -1 - || mcspi_dma->dma_tx_channel == -1) { + if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) { ret = omap2_mcspi_request_dma(spi); if (ret < 0) return ret; @@ -850,13 +877,13 @@ static void omap2_mcspi_cleanup(struct spi_device *spi) if (spi->chip_select < spi->master->num_chipselect) { mcspi_dma = &mcspi->dma_channels[spi->chip_select]; - if (mcspi_dma->dma_rx_channel != -1) { - omap_free_dma(mcspi_dma->dma_rx_channel); - mcspi_dma->dma_rx_channel = -1; + if (mcspi_dma->dma_rx) { + dma_release_channel(mcspi_dma->dma_rx); + mcspi_dma->dma_rx = NULL; } - if (mcspi_dma->dma_tx_channel != -1) { - omap_free_dma(mcspi_dma->dma_tx_channel); - mcspi_dma->dma_tx_channel = -1; + if (mcspi_dma->dma_tx) { + dma_release_channel(mcspi_dma->dma_tx); + mcspi_dma->dma_tx = NULL; } } } @@ -1176,7 +1203,6 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev) break; } - mcspi->dma_channels[i].dma_rx_channel = -1; mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start; sprintf(dma_ch_name, "tx%d", i); dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA, @@ -1187,7 +1213,6 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev) break; } - mcspi->dma_channels[i].dma_tx_channel = -1; mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start; } diff --git a/drivers/staging/bcm/Misc.c b/drivers/staging/bcm/Misc.c index 9a60d4cd218..f545716c666 100644 --- a/drivers/staging/bcm/Misc.c +++ b/drivers/staging/bcm/Misc.c @@ -157,12 +157,7 @@ static int create_worker_threads(struct bcm_mini_adapter *psAdapter) static struct file *open_firmware_file(struct bcm_mini_adapter *Adapter, const char *path) { - struct file *flp = NULL; - mm_segment_t oldfs; - oldfs = get_fs(); - set_fs(get_ds()); - flp = filp_open(path, O_RDONLY, S_IRWXU); - set_fs(oldfs); + struct file *flp = filp_open(path, O_RDONLY, S_IRWXU); if (IS_ERR(flp)) { pr_err(DRV_NAME "Unable To Open File %s, err %ld", path, PTR_ERR(flp)); flp = NULL; @@ -183,14 +178,12 @@ static int BcmFileDownload(struct bcm_mini_adapter *Adapter, const char *path, u { int errorno = 0; struct file *flp = NULL; - mm_segment_t oldfs; struct timeval tv = {0}; flp = open_firmware_file(Adapter, path); if (!flp) { - errorno = -ENOENT; BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Unable to Open %s\n", path); - goto exit_download; + return -ENOENT; } BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Opened file is = %s and length =0x%lx to be downloaded at =0x%x", path, (unsigned long)flp->f_dentry->d_inode->i_size, loc); do_gettimeofday(&tv); @@ -201,10 
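The staging/bcm hunks drop the get_fs()/set_fs(get_ds()) bracket around filp_open() and vfs_llseek(): both operate purely on kernel objects, so widening the address limit there never did anything. The limit only matters for calls that perform user-style buffer accesses on kernel memory. A sketch of the simplified open, mirroring open_firmware_file() above:

#include <linux/fs.h>
#include <linux/err.h>

/* filp_open() takes a kernel-space path string: no set_fs() needed. */
static struct file *example_open_fw(const char *path)
{
	struct file *flp = filp_open(path, O_RDONLY, 0);

	return IS_ERR(flp) ? NULL : flp;	/* caller checks for NULL */
}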
+194,7 @@ static int BcmFileDownload(struct bcm_mini_adapter *Adapter, const char *path, u errorno = -EIO; goto exit_download; } - oldfs = get_fs(); - set_fs(get_ds()); vfs_llseek(flp, 0, 0); - set_fs(oldfs); if (Adapter->bcm_file_readback_from_chip(Adapter->pvInterfaceAdapter, flp, loc)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Failed to read back firmware!"); errorno = -EIO; @@ -212,12 +202,7 @@ static int BcmFileDownload(struct bcm_mini_adapter *Adapter, const char *path, u } exit_download: - oldfs = get_fs(); - set_fs(get_ds()); - if (flp && !(IS_ERR(flp))) - filp_close(flp, current->files); - set_fs(oldfs); - + filp_close(flp, NULL); return errorno; } @@ -1056,10 +1041,8 @@ OUT: static int bcm_parse_target_params(struct bcm_mini_adapter *Adapter) { struct file *flp = NULL; - mm_segment_t oldfs = {0}; char *buff; int len = 0; - loff_t pos = 0; buff = kmalloc(BUFFER_1K, GFP_KERNEL); if (!buff) @@ -1079,20 +1062,16 @@ static int bcm_parse_target_params(struct bcm_mini_adapter *Adapter) Adapter->pstargetparams = NULL; return -ENOENT; } - oldfs = get_fs(); - set_fs(get_ds()); - len = vfs_read(flp, (void __user __force *)buff, BUFFER_1K, &pos); - set_fs(oldfs); + len = kernel_read(flp, 0, buff, BUFFER_1K); + filp_close(flp, NULL); if (len != sizeof(STARGETPARAMS)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Mismatch in Target Param Structure!\n"); kfree(buff); kfree(Adapter->pstargetparams); Adapter->pstargetparams = NULL; - filp_close(flp, current->files); return -ENOENT; } - filp_close(flp, current->files); /* Check for autolink in config params */ /* diff --git a/drivers/staging/gdm72xx/sdio_boot.c b/drivers/staging/gdm72xx/sdio_boot.c index 760efee23d4..65624bca8b3 100644 --- a/drivers/staging/gdm72xx/sdio_boot.c +++ b/drivers/staging/gdm72xx/sdio_boot.c @@ -66,9 +66,8 @@ static int download_image(struct sdio_func *func, char *img_name) return -ENOENT; } - if (filp->f_dentry) - inode = filp->f_dentry->d_inode; - if (!inode || !S_ISREG(inode->i_mode)) { + inode = filp->f_dentry->d_inode; + if (!S_ISREG(inode->i_mode)) { printk(KERN_ERR "Invalid file type: %s\n", img_name); ret = -EINVAL; goto out; @@ -123,7 +122,7 @@ static int download_image(struct sdio_func *func, char *img_name) pno++; } out: - filp_close(filp, current->files); + filp_close(filp, NULL); return ret; } diff --git a/drivers/staging/gdm72xx/usb_boot.c b/drivers/staging/gdm72xx/usb_boot.c index fef290c38db..e3dbd5a552c 100644 --- a/drivers/staging/gdm72xx/usb_boot.c +++ b/drivers/staging/gdm72xx/usb_boot.c @@ -173,14 +173,12 @@ int usb_boot(struct usb_device *usbdev, u16 pid) filp = filp_open(img_name, O_RDONLY | O_LARGEFILE, 0); if (IS_ERR(filp)) { printk(KERN_ERR "Can't find %s.\n", img_name); - set_fs(fs); ret = PTR_ERR(filp); goto restore_fs; } - if (filp->f_dentry) - inode = filp->f_dentry->d_inode; - if (!inode || !S_ISREG(inode->i_mode)) { + inode = filp->f_dentry->d_inode; + if (!S_ISREG(inode->i_mode)) { printk(KERN_ERR "Invalid file type: %s\n", img_name); ret = -EINVAL; goto out; @@ -262,7 +260,7 @@ int usb_boot(struct usb_device *usbdev, u16 pid) ret = -EINVAL; } out: - filp_close(filp, current->files); + filp_close(filp, NULL); restore_fs: set_fs(fs); @@ -322,13 +320,11 @@ static int em_download_image(struct usb_device *usbdev, char *path, goto restore_fs; } - if (filp->f_dentry) { - inode = filp->f_dentry->d_inode; - if (!inode || !S_ISREG(inode->i_mode)) { - printk(KERN_ERR "Invalid file type: %s\n", path); - ret = -EINVAL; - goto out; - } + inode = 
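bcm_parse_target_params() above now reads the config blob with kernel_read() instead of vfs_read() under a raised address limit. In this era kernel_read() has the signature int kernel_read(struct file *, loff_t offset, char *addr, unsigned long count); it does the address-limit juggling internally and returns bytes read or a negative errno. A minimal wrapper sketch:

#include <linux/fs.h>

/* Read exactly `size` bytes from offset 0 of an open file into a
 * kernel buffer; no get_fs()/set_fs() bracket required. */
static int example_read_blob(struct file *filp, char *buf, unsigned long size)
{
	int len = kernel_read(filp, 0, buf, size);

	if (len < 0)
		return len;		/* propagate the errno */
	return ((unsigned long)len == size) ? 0 : -EIO;
}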
filp->f_dentry->d_inode; + if (!S_ISREG(inode->i_mode)) { + printk(KERN_ERR "Invalid file type: %s\n", path); + ret = -EINVAL; + goto out; } buf = kmalloc(DOWNLOAD_CHUCK + pad_size, GFP_KERNEL); @@ -364,7 +360,7 @@ static int em_download_image(struct usb_device *usbdev, char *path, goto out; out: - filp_close(filp, current->files); + filp_close(filp, NULL); restore_fs: set_fs(fs); diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 9e2100551c7..cbb5aaf3e56 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -109,46 +109,29 @@ static struct se_device *fd_create_virtdevice( struct se_subsystem_dev *se_dev, void *p) { - char *dev_p = NULL; struct se_device *dev; struct se_dev_limits dev_limits; struct queue_limits *limits; struct fd_dev *fd_dev = p; struct fd_host *fd_host = hba->hba_ptr; - mm_segment_t old_fs; struct file *file; struct inode *inode = NULL; int dev_flags = 0, flags, ret = -EINVAL; memset(&dev_limits, 0, sizeof(struct se_dev_limits)); - old_fs = get_fs(); - set_fs(get_ds()); - dev_p = getname(fd_dev->fd_dev_name); - set_fs(old_fs); - - if (IS_ERR(dev_p)) { - pr_err("getname(%s) failed: %lu\n", - fd_dev->fd_dev_name, IS_ERR(dev_p)); - ret = PTR_ERR(dev_p); - goto fail; - } /* * Use O_DSYNC by default instead of O_SYNC to forgo syncing * of pure timestamp updates. */ flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC; - file = filp_open(dev_p, flags, 0600); + file = filp_open(fd_dev->fd_dev_name, flags, 0600); if (IS_ERR(file)) { - pr_err("filp_open(%s) failed\n", dev_p); + pr_err("filp_open(%s) failed\n", fd_dev->fd_dev_name); ret = PTR_ERR(file); goto fail; } - if (!file || !file->f_dentry) { - pr_err("filp_open(%s) failed\n", dev_p); - goto fail; - } fd_dev->fd_file = file; /* * If using a block backend with this struct file, we extract @@ -212,14 +195,12 @@ static struct se_device *fd_create_virtdevice( " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id, fd_dev->fd_dev_name, fd_dev->fd_dev_size); - putname(dev_p); return dev; fail: if (fd_dev->fd_file) { filp_close(fd_dev->fd_file, NULL); fd_dev->fd_file = NULL; } - putname(dev_p); return ERR_PTR(ret); } @@ -452,14 +433,11 @@ static ssize_t fd_set_configfs_dev_params( token = match_token(ptr, tokens, args); switch (token) { case Opt_fd_dev_name: - arg_p = match_strdup(&args[0]); - if (!arg_p) { - ret = -ENOMEM; + if (match_strlcpy(fd_dev->fd_dev_name, &args[0], + FD_MAX_DEV_NAME) == 0) { + ret = -EINVAL; break; } - snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME, - "%s", arg_p); - kfree(arg_p); pr_debug("FILEIO: Referencing Path: %s\n", fd_dev->fd_dev_name); fd_dev->fbd_flags |= FBDF_HAS_PATH; diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c index ae8b18869b8..8d9bcd8207c 100644 --- a/drivers/usb/gadget/storage_common.c +++ b/drivers/usb/gadget/storage_common.c @@ -656,9 +656,8 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename) if (!(filp->f_mode & FMODE_WRITE)) ro = 1; - if (filp->f_path.dentry) - inode = filp->f_path.dentry->d_inode; - if (!inode || (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))) { + inode = filp->f_path.dentry->d_inode; + if ((!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))) { LINFO(curlun, "invalid file type: %s\n", filename); goto out; } @@ -667,7 +666,7 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename) * If we can't read the file, it's no good. * If we can't write the file, use it read-only. 
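Two simplifications in the target_core_file hunks are worth calling out: fd_create_virtdevice() stops round-tripping the device name through getname(), which copies from user space and was never right for a kernel string, and the option parser replaces match_strdup() + snprintf() + kfree() with a single match_strlcpy() into the fixed-size name buffer. Like strlcpy(), match_strlcpy() returns the source length, so a zero return flags an empty token. A sketch of the parser idiom:

#include <linux/parser.h>

/* Copy a matched token straight into a fixed buffer, no allocation.
 * Returns -EINVAL on an empty token, as the FILEIO parser above does. */
static int example_copy_token(substring_t *args, char *dst, size_t dstsz)
{
	if (match_strlcpy(dst, &args[0], dstsz) == 0)
		return -EINVAL;
	return 0;
}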
*/ - if (!filp->f_op || !(filp->f_op->read || filp->f_op->aio_read)) { + if (!(filp->f_op->read || filp->f_op->aio_read)) { LINFO(curlun, "file not readable: %s\n", filename); goto out; } @@ -712,7 +711,6 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename) if (fsg_lun_is_open(curlun)) fsg_lun_close(curlun); - get_file(filp); curlun->blksize = blksize; curlun->blkbits = blkbits; curlun->ro = ro; @@ -720,10 +718,10 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename) curlun->file_length = size; curlun->num_sectors = num_sectors; LDBG(curlun, "open backing file: %s\n", filename); - rc = 0; + return 0; out: - filp_close(filp, current->files); + fput(filp); return rc; } diff --git a/drivers/usb/gadget/u_uac1.c b/drivers/usb/gadget/u_uac1.c index af989898205..e0c5e88e03e 100644 --- a/drivers/usb/gadget/u_uac1.c +++ b/drivers/usb/gadget/u_uac1.c @@ -275,17 +275,17 @@ static int gaudio_close_snd_dev(struct gaudio *gau) /* Close control device */ snd = &gau->control; if (snd->filp) - filp_close(snd->filp, current->files); + filp_close(snd->filp, NULL); /* Close PCM playback device and setup substream */ snd = &gau->playback; if (snd->filp) - filp_close(snd->filp, current->files); + filp_close(snd->filp, NULL); /* Close PCM capture device and setup substream */ snd = &gau->capture; if (snd->filp) - filp_close(snd->filp, current->files); + filp_close(snd->filp, NULL); return 0; } diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c index b0b2ac33534..747442d2c0f 100644 --- a/drivers/video/aty/aty128fb.c +++ b/drivers/video/aty/aty128fb.c @@ -90,7 +90,8 @@ #undef DEBUG #ifdef DEBUG -#define DBG(fmt, args...) printk(KERN_DEBUG "aty128fb: %s " fmt, __func__, ##args); +#define DBG(fmt, args...) \ + printk(KERN_DEBUG "aty128fb: %s " fmt, __func__, ##args); #else #define DBG(fmt, args...) #endif @@ -449,8 +450,9 @@ static int aty128_decode_var(struct fb_var_screeninfo *var, struct aty128fb_par *par); #if 0 static void __devinit aty128_get_pllinfo(struct aty128fb_par *par, - void __iomem *bios); -static void __devinit __iomem *aty128_map_ROM(struct pci_dev *pdev, const struct aty128fb_par *par); + void __iomem *bios); +static void __devinit __iomem *aty128_map_ROM(struct pci_dev *pdev, + const struct aty128fb_par *par); #endif static void aty128_timings(struct aty128fb_par *par); static void aty128_init_engine(struct aty128fb_par *par); @@ -779,7 +781,8 @@ static u32 depth_to_dst(u32 depth) #ifndef __sparc__ -static void __iomem * __devinit aty128_map_ROM(const struct aty128fb_par *par, struct pci_dev *dev) +static void __iomem * __devinit aty128_map_ROM(const struct aty128fb_par *par, + struct pci_dev *dev) { u16 dptr; u8 rom_type; @@ -811,13 +814,14 @@ static void __iomem * __devinit aty128_map_ROM(const struct aty128fb_par *par, s /* Look for the PCI data to check the ROM type */ dptr = BIOS_IN16(0x18); - /* Check the PCI data signature. If it's wrong, we still assume a normal x86 ROM - * for now, until I've verified this works everywhere. The goal here is more - * to phase out Open Firmware images. + /* Check the PCI data signature. If it's wrong, we still assume a normal + * x86 ROM for now, until I've verified this works everywhere. + * The goal here is more to phase out Open Firmware images. 
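The fsg_lun_open() hunks fix the file reference accounting: filp_open() already returns a file holding one reference, so the old code took an extra one with get_file() only to drop one again via filp_close(). The rewrite keeps the open's own reference on success and drops it with fput() on failure. A sketch of the ownership pattern, with hypothetical names:

#include <linux/fs.h>
#include <linux/file.h>
#include <linux/err.h>

static int example_adopt_backing_file(struct file **slot, const char *name)
{
	struct file *filp = filp_open(name, O_RDWR | O_LARGEFILE, 0);

	if (IS_ERR(filp))
		return PTR_ERR(filp);

	if (!S_ISREG(filp->f_path.dentry->d_inode->i_mode)) {
		fput(filp);	/* drop the reference filp_open() took */
		return -EINVAL;
	}

	*slot = filp;		/* success: keep that same reference */
	return 0;
}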
* - * Currently, we only look at the first PCI data, we could iteratre and deal with - * them all, and we should use fb_bios_start relative to start of image and not - * relative start of ROM, but so far, I never found a dual-image ATI card + * Currently, we only look at the first PCI data, we could iteratre and + * deal with them all, and we should use fb_bios_start relative to start + * of image and not relative start of ROM, but so far, I never found a + * dual-image ATI card. * * typedef struct { * u32 signature; + 0x00 @@ -852,7 +856,8 @@ static void __iomem * __devinit aty128_map_ROM(const struct aty128fb_par *par, s printk(KERN_INFO "aty128fb: Found HP PA-RISC ROM Image\n"); goto failed; default: - printk(KERN_INFO "aty128fb: Found unknown type %d ROM Image\n", rom_type); + printk(KERN_INFO "aty128fb: Found unknown type %d ROM Image\n", + rom_type); goto failed; } anyway: @@ -863,7 +868,8 @@ static void __iomem * __devinit aty128_map_ROM(const struct aty128fb_par *par, s return NULL; } -static void __devinit aty128_get_pllinfo(struct aty128fb_par *par, unsigned char __iomem *bios) +static void __devinit aty128_get_pllinfo(struct aty128fb_par *par, + unsigned char __iomem *bios) { unsigned int bios_hdr; unsigned int bios_pll; @@ -1247,10 +1253,13 @@ static int aty128_crtc_to_var(const struct aty128_crtc *crtc, static void aty128_set_crt_enable(struct aty128fb_par *par, int on) { if (on) { - aty_st_le32(CRTC_EXT_CNTL, aty_ld_le32(CRTC_EXT_CNTL) | CRT_CRTC_ON); - aty_st_le32(DAC_CNTL, (aty_ld_le32(DAC_CNTL) | DAC_PALETTE2_SNOOP_EN)); + aty_st_le32(CRTC_EXT_CNTL, aty_ld_le32(CRTC_EXT_CNTL) | + CRT_CRTC_ON); + aty_st_le32(DAC_CNTL, (aty_ld_le32(DAC_CNTL) | + DAC_PALETTE2_SNOOP_EN)); } else - aty_st_le32(CRTC_EXT_CNTL, aty_ld_le32(CRTC_EXT_CNTL) & ~CRT_CRTC_ON); + aty_st_le32(CRTC_EXT_CNTL, aty_ld_le32(CRTC_EXT_CNTL) & + ~CRT_CRTC_ON); } static void aty128_set_lcd_enable(struct aty128fb_par *par, int on) @@ -1281,7 +1290,8 @@ static void aty128_set_lcd_enable(struct aty128fb_par *par, int on) } } -static void aty128_set_pll(struct aty128_pll *pll, const struct aty128fb_par *par) +static void aty128_set_pll(struct aty128_pll *pll, + const struct aty128fb_par *par) { u32 div3; @@ -1366,7 +1376,8 @@ static int aty128_var_to_pll(u32 period_in_ps, struct aty128_pll *pll, } -static int aty128_pll_to_var(const struct aty128_pll *pll, struct fb_var_screeninfo *var) +static int aty128_pll_to_var(const struct aty128_pll *pll, + struct fb_var_screeninfo *var) { var->pixclock = 100000000 / pll->vclk; @@ -1512,7 +1523,8 @@ static int aty128fb_set_par(struct fb_info *info) * encode/decode the User Defined Part of the Display */ -static int aty128_decode_var(struct fb_var_screeninfo *var, struct aty128fb_par *par) +static int aty128_decode_var(struct fb_var_screeninfo *var, + struct aty128fb_par *par) { int err; struct aty128_crtc crtc; @@ -1559,7 +1571,8 @@ static int aty128_encode_var(struct fb_var_screeninfo *var, } -static int aty128fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) +static int aty128fb_check_var(struct fb_var_screeninfo *var, + struct fb_info *info) { struct aty128fb_par par; int err; @@ -1575,7 +1588,8 @@ static int aty128fb_check_var(struct fb_var_screeninfo *var, struct fb_info *inf /* * Pan or Wrap the Display */ -static int aty128fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *fb) +static int aty128fb_pan_display(struct fb_var_screeninfo *var, + struct fb_info *fb) { struct aty128fb_par *par = fb->par; u32 xoffset, yoffset; @@ -1594,7 +1608,8 @@ 
static int aty128fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *f par->crtc.xoffset = xoffset; par->crtc.yoffset = yoffset; - offset = ((yoffset * par->crtc.vxres + xoffset)*(par->crtc.bpp >> 3)) & ~7; + offset = ((yoffset * par->crtc.vxres + xoffset) * (par->crtc.bpp >> 3)) + & ~7; if (par->crtc.bpp == 24) offset += 8 * (offset % 3); /* Must be multiple of 8 and 3 */ @@ -1620,11 +1635,13 @@ static void aty128_st_pal(u_int regno, u_int red, u_int green, u_int blue, * do mirroring */ - aty_st_le32(DAC_CNTL, aty_ld_le32(DAC_CNTL) | DAC_PALETTE_ACCESS_CNTL); + aty_st_le32(DAC_CNTL, aty_ld_le32(DAC_CNTL) | + DAC_PALETTE_ACCESS_CNTL); aty_st_8(PALETTE_INDEX, regno); aty_st_le32(PALETTE_DATA, (red<<16)|(green<<8)|blue); #endif - aty_st_le32(DAC_CNTL, aty_ld_le32(DAC_CNTL) & ~DAC_PALETTE_ACCESS_CNTL); + aty_st_le32(DAC_CNTL, aty_ld_le32(DAC_CNTL) & + ~DAC_PALETTE_ACCESS_CNTL); } aty_st_8(PALETTE_INDEX, regno); @@ -1753,7 +1770,8 @@ static int aty128_bl_update_status(struct backlight_device *bd) aty_st_le32(LVDS_GEN_CNTL, reg); } reg &= ~LVDS_BL_MOD_LEVEL_MASK; - reg |= (aty128_bl_get_level_brightness(par, level) << LVDS_BL_MOD_LEVEL_SHIFT); + reg |= (aty128_bl_get_level_brightness(par, level) << + LVDS_BL_MOD_LEVEL_SHIFT); #ifdef BACKLIGHT_LVDS_OFF reg |= LVDS_ON | LVDS_EN; reg &= ~LVDS_DISPLAY_DIS; @@ -1764,7 +1782,8 @@ static int aty128_bl_update_status(struct backlight_device *bd) #endif } else { reg &= ~LVDS_BL_MOD_LEVEL_MASK; - reg |= (aty128_bl_get_level_brightness(par, 0) << LVDS_BL_MOD_LEVEL_SHIFT); + reg |= (aty128_bl_get_level_brightness(par, 0) << + LVDS_BL_MOD_LEVEL_SHIFT); #ifdef BACKLIGHT_LVDS_OFF reg |= LVDS_DISPLAY_DIS; aty_st_le32(LVDS_GEN_CNTL, reg); @@ -1869,7 +1888,8 @@ static void aty128_early_resume(void *data) } #endif /* CONFIG_PPC_PMAC */ -static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_id *ent) +static int __devinit aty128_init(struct pci_dev *pdev, + const struct pci_device_id *ent) { struct fb_info *info = pci_get_drvdata(pdev); struct aty128fb_par *par = info->par; @@ -1887,7 +1907,8 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i /* range check to make sure */ if (ent->driver_data < ARRAY_SIZE(r128_family)) - strlcat(video_card, r128_family[ent->driver_data], sizeof(video_card)); + strlcat(video_card, r128_family[ent->driver_data], + sizeof(video_card)); printk(KERN_INFO "aty128fb: %s [chip rev 0x%x] ", video_card, chip_rev); @@ -1911,11 +1932,11 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i /* Indicate sleep capability */ if (par->chip_gen == rage_M3) { pmac_call_feature(PMAC_FTR_DEVICE_CAN_WAKE, NULL, 0, 1); -#if 0 /* Disable the early video resume hack for now as it's causing problems, among - * others we now rely on the PCI core restoring the config space for us, which - * isn't the case with that hack, and that code path causes various things to - * be called with interrupts off while they shouldn't. I'm leaving the code in - * as it can be useful for debugging purposes +#if 0 /* Disable the early video resume hack for now as it's causing problems, + * among others we now rely on the PCI core restoring the config space + * for us, which isn't the case with that hack, and that code path causes + * various things to be called with interrupts off while they shouldn't. 
+ * I'm leaving the code in as it can be useful for debugging purposes */ pmac_set_early_video_resume(aty128_early_resume, par); #endif @@ -1953,11 +1974,11 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i default_vmode = VMODE_1152_768_60; if (default_cmode > 16) - default_cmode = CMODE_32; + default_cmode = CMODE_32; else if (default_cmode > 8) - default_cmode = CMODE_16; + default_cmode = CMODE_16; else - default_cmode = CMODE_8; + default_cmode = CMODE_8; if (mac_vmode_to_var(default_vmode, default_cmode, &var)) var = default_var; @@ -2018,7 +2039,8 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i #ifdef CONFIG_PCI /* register a card ++ajoshi */ -static int __devinit aty128_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +static int __devinit aty128_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) { unsigned long fb_addr, reg_addr; struct aty128fb_par *par; @@ -2318,39 +2340,39 @@ static inline void aty128_rectcopy(int srcx, int srcy, int dstx, int dsty, u_int width, u_int height, struct fb_info_aty128 *par) { - u32 save_dp_datatype, save_dp_cntl, dstval; - - if (!width || !height) - return; - - dstval = depth_to_dst(par->current_par.crtc.depth); - if (dstval == DST_24BPP) { - srcx *= 3; - dstx *= 3; - width *= 3; - } else if (dstval == -EINVAL) { - printk("aty128fb: invalid depth or RGBA\n"); - return; - } - - wait_for_fifo(2, par); - save_dp_datatype = aty_ld_le32(DP_DATATYPE); - save_dp_cntl = aty_ld_le32(DP_CNTL); - - wait_for_fifo(6, par); - aty_st_le32(SRC_Y_X, (srcy << 16) | srcx); - aty_st_le32(DP_MIX, ROP3_SRCCOPY | DP_SRC_RECT); - aty_st_le32(DP_CNTL, DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM); - aty_st_le32(DP_DATATYPE, save_dp_datatype | dstval | SRC_DSTCOLOR); - - aty_st_le32(DST_Y_X, (dsty << 16) | dstx); - aty_st_le32(DST_HEIGHT_WIDTH, (height << 16) | width); - - par->blitter_may_be_busy = 1; - - wait_for_fifo(2, par); - aty_st_le32(DP_DATATYPE, save_dp_datatype); - aty_st_le32(DP_CNTL, save_dp_cntl); + u32 save_dp_datatype, save_dp_cntl, dstval; + + if (!width || !height) + return; + + dstval = depth_to_dst(par->current_par.crtc.depth); + if (dstval == DST_24BPP) { + srcx *= 3; + dstx *= 3; + width *= 3; + } else if (dstval == -EINVAL) { + printk("aty128fb: invalid depth or RGBA\n"); + return; + } + + wait_for_fifo(2, par); + save_dp_datatype = aty_ld_le32(DP_DATATYPE); + save_dp_cntl = aty_ld_le32(DP_CNTL); + + wait_for_fifo(6, par); + aty_st_le32(SRC_Y_X, (srcy << 16) | srcx); + aty_st_le32(DP_MIX, ROP3_SRCCOPY | DP_SRC_RECT); + aty_st_le32(DP_CNTL, DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM); + aty_st_le32(DP_DATATYPE, save_dp_datatype | dstval | SRC_DSTCOLOR); + + aty_st_le32(DST_Y_X, (dsty << 16) | dstx); + aty_st_le32(DST_HEIGHT_WIDTH, (height << 16) | width); + + par->blitter_may_be_busy = 1; + + wait_for_fifo(2, par); + aty_st_le32(DP_DATATYPE, save_dp_datatype); + aty_st_le32(DP_CNTL, save_dp_cntl); } @@ -2358,17 +2380,17 @@ static inline void aty128_rectcopy(int srcx, int srcy, int dstx, int dsty, * Text mode accelerated functions */ -static void fbcon_aty128_bmove(struct display *p, int sy, int sx, int dy, int dx, - int height, int width) +static void fbcon_aty128_bmove(struct display *p, int sy, int sx, int dy, + int dx, int height, int width) { - sx *= fontwidth(p); - sy *= fontheight(p); - dx *= fontwidth(p); - dy *= fontheight(p); - width *= fontwidth(p); - height *= fontheight(p); - - aty128_rectcopy(sx, sy, dx, dy, width, height, + sx *= fontwidth(p); + sy 
*= fontheight(p); + dx *= fontwidth(p); + dy *= fontheight(p); + width *= fontwidth(p); + height *= fontheight(p); + + aty128_rectcopy(sx, sy, dx, dy, width, height, (struct fb_info_aty128 *)p->fb_info); } #endif /* 0 */ diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c index 47118c75a4c..7ae9d53f2bf 100644 --- a/drivers/video/da8xx-fb.c +++ b/drivers/video/da8xx-fb.c @@ -30,7 +30,10 @@ #include <linux/clk.h> #include <linux/cpufreq.h> #include <linux/console.h> +#include <linux/spinlock.h> #include <linux/slab.h> +#include <linux/delay.h> +#include <linux/lcm.h> #include <video/da8xx-fb.h> #include <asm/div64.h> @@ -160,6 +163,13 @@ struct da8xx_fb_par { wait_queue_head_t vsync_wait; int vsync_flag; int vsync_timeout; + spinlock_t lock_for_chan_update; + + /* + * LCDC has 2 ping pong DMA channels, channel 0 + * and channel 1. + */ + unsigned int which_dma_channel_done; #ifdef CONFIG_CPU_FREQ struct notifier_block freq_transition; unsigned int lcd_fck_rate; @@ -260,10 +270,18 @@ static inline void lcd_enable_raster(void) { u32 reg; + /* Put LCDC in reset for several cycles */ + if (lcd_revision == LCD_VERSION_2) + /* Write 1 to reset LCDC */ + lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG); + mdelay(1); + /* Bring LCDC out of reset */ if (lcd_revision == LCD_VERSION_2) lcdc_write(0, LCD_CLK_RESET_REG); + mdelay(1); + /* Above reset sequence doesnot reset register context */ reg = lcdc_read(LCD_RASTER_CTRL_REG); if (!(reg & LCD_RASTER_ENABLE)) lcdc_write(reg | LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG); @@ -277,10 +295,6 @@ static inline void lcd_disable_raster(void) reg = lcdc_read(LCD_RASTER_CTRL_REG); if (reg & LCD_RASTER_ENABLE) lcdc_write(reg & ~LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG); - - if (lcd_revision == LCD_VERSION_2) - /* Write 1 to reset LCDC */ - lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG); } static void lcd_blit(int load_mode, struct da8xx_fb_par *par) @@ -344,8 +358,8 @@ static void lcd_blit(int load_mode, struct da8xx_fb_par *par) lcd_enable_raster(); } -/* Configure the Burst Size of DMA */ -static int lcd_cfg_dma(int burst_size) +/* Configure the Burst Size and fifo threhold of DMA */ +static int lcd_cfg_dma(int burst_size, int fifo_th) { u32 reg; @@ -369,6 +383,9 @@ static int lcd_cfg_dma(int burst_size) default: return -EINVAL; } + + reg |= (fifo_th << 8); + lcdc_write(reg, LCD_DMA_CTRL_REG); return 0; @@ -670,8 +687,8 @@ static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg, lcdc_write((lcdc_read(LCD_RASTER_TIMING_2_REG) & ~LCD_INVERT_PIXEL_CLOCK), LCD_RASTER_TIMING_2_REG); - /* Configure the DMA burst size. */ - ret = lcd_cfg_dma(cfg->dma_burst_sz); + /* Configure the DMA burst size and fifo threshold. 
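The da8xx-fb change moves the LCDC version-2 module reset out of lcd_disable_raster() and turns it into an explicit assert/hold/release pulse at enable time; as the added comment notes, register context survives the reset. Condensed from the hunk above, using the driver's own helpers:

#include <linux/delay.h>

static void example_lcdc_v2_reset_pulse(void)
{
	lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG);	/* assert */
	mdelay(1);						/* hold */
	lcdc_write(0, LCD_CLK_RESET_REG);			/* release */
	mdelay(1);						/* settle */
}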
*/ + ret = lcd_cfg_dma(cfg->dma_burst_sz, cfg->fifo_th); if (ret < 0) return ret; @@ -715,7 +732,6 @@ static irqreturn_t lcdc_irq_handler_rev02(int irq, void *arg) { struct da8xx_fb_par *par = arg; u32 stat = lcdc_read(LCD_MASKED_STAT_REG); - u32 reg_int; if ((stat & LCD_SYNC_LOST) && (stat & LCD_FIFO_UNDERFLOW)) { lcd_disable_raster(); @@ -732,10 +748,8 @@ static irqreturn_t lcdc_irq_handler_rev02(int irq, void *arg) lcdc_write(stat, LCD_MASKED_STAT_REG); - /* Disable PL completion inerrupt */ - reg_int = lcdc_read(LCD_INT_ENABLE_CLR_REG) | - (LCD_V2_PL_INT_ENA); - lcdc_write(reg_int, LCD_INT_ENABLE_CLR_REG); + /* Disable PL completion interrupt */ + lcdc_write(LCD_V2_PL_INT_ENA, LCD_INT_ENABLE_CLR_REG); /* Setup and start data loading mode */ lcd_blit(LOAD_DATA, par); @@ -743,6 +757,7 @@ static irqreturn_t lcdc_irq_handler_rev02(int irq, void *arg) lcdc_write(stat, LCD_MASKED_STAT_REG); if (stat & LCD_END_OF_FRAME0) { + par->which_dma_channel_done = 0; lcdc_write(par->dma_start, LCD_DMA_FRM_BUF_BASE_ADDR_0_REG); lcdc_write(par->dma_end, @@ -752,6 +767,7 @@ static irqreturn_t lcdc_irq_handler_rev02(int irq, void *arg) } if (stat & LCD_END_OF_FRAME1) { + par->which_dma_channel_done = 1; lcdc_write(par->dma_start, LCD_DMA_FRM_BUF_BASE_ADDR_1_REG); lcdc_write(par->dma_end, @@ -798,6 +814,7 @@ static irqreturn_t lcdc_irq_handler_rev01(int irq, void *arg) lcdc_write(stat, LCD_STAT_REG); if (stat & LCD_END_OF_FRAME0) { + par->which_dma_channel_done = 0; lcdc_write(par->dma_start, LCD_DMA_FRM_BUF_BASE_ADDR_0_REG); lcdc_write(par->dma_end, @@ -807,6 +824,7 @@ static irqreturn_t lcdc_irq_handler_rev01(int irq, void *arg) } if (stat & LCD_END_OF_FRAME1) { + par->which_dma_channel_done = 1; lcdc_write(par->dma_start, LCD_DMA_FRM_BUF_BASE_ADDR_1_REG); lcdc_write(par->dma_end, @@ -1021,11 +1039,14 @@ static int cfb_blank(int blank, struct fb_info *info) par->blank = blank; switch (blank) { case FB_BLANK_UNBLANK: + lcd_enable_raster(); + if (par->panel_power_ctrl) par->panel_power_ctrl(1); - - lcd_enable_raster(); break; + case FB_BLANK_NORMAL: + case FB_BLANK_VSYNC_SUSPEND: + case FB_BLANK_HSYNC_SUSPEND: case FB_BLANK_POWERDOWN: if (par->panel_power_ctrl) par->panel_power_ctrl(0); @@ -1052,6 +1073,7 @@ static int da8xx_pan_display(struct fb_var_screeninfo *var, struct fb_fix_screeninfo *fix = &fbi->fix; unsigned int end; unsigned int start; + unsigned long irq_flags; if (var->xoffset != fbi->var.xoffset || var->yoffset != fbi->var.yoffset) { @@ -1069,6 +1091,21 @@ static int da8xx_pan_display(struct fb_var_screeninfo *var, end = start + fbi->var.yres * fix->line_length - 1; par->dma_start = start; par->dma_end = end; + spin_lock_irqsave(&par->lock_for_chan_update, + irq_flags); + if (par->which_dma_channel_done == 0) { + lcdc_write(par->dma_start, + LCD_DMA_FRM_BUF_BASE_ADDR_0_REG); + lcdc_write(par->dma_end, + LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG); + } else if (par->which_dma_channel_done == 1) { + lcdc_write(par->dma_start, + LCD_DMA_FRM_BUF_BASE_ADDR_1_REG); + lcdc_write(par->dma_end, + LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG); + } + spin_unlock_irqrestore(&par->lock_for_chan_update, + irq_flags); } } @@ -1114,6 +1151,7 @@ static int __devinit fb_probe(struct platform_device *device) struct da8xx_fb_par *par; resource_size_t len; int ret, i; + unsigned long ulcm; if (fb_pdata == NULL) { dev_err(&device->dev, "Can not get platform data\n"); @@ -1209,7 +1247,8 @@ static int __devinit fb_probe(struct platform_device *device) /* allocate frame buffer */ par->vram_size = lcdc_info->width * lcdc_info->height * 
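The which_dma_channel_done bookkeeping above lets pan_display reprogram the ping-pong channel that has just completed (and is therefore idle) instead of waiting a frame; the spinlock is shared with the IRQ handlers that set the flag. A factored restatement of that update, using the driver's own types and register names:

#include <linux/spinlock.h>

static void example_repoint_idle_channel(struct da8xx_fb_par *par,
					 unsigned int start, unsigned int end)
{
	unsigned long flags;

	spin_lock_irqsave(&par->lock_for_chan_update, flags);
	if (par->which_dma_channel_done == 0) {
		lcdc_write(start, LCD_DMA_FRM_BUF_BASE_ADDR_0_REG);
		lcdc_write(end, LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG);
	} else if (par->which_dma_channel_done == 1) {
		lcdc_write(start, LCD_DMA_FRM_BUF_BASE_ADDR_1_REG);
		lcdc_write(end, LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG);
	}
	spin_unlock_irqrestore(&par->lock_for_chan_update, flags);
}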
lcd_cfg->bpp; - par->vram_size = PAGE_ALIGN(par->vram_size/8); + ulcm = lcm((lcdc_info->width * lcd_cfg->bpp)/8, PAGE_SIZE); + par->vram_size = roundup(par->vram_size/8, ulcm); par->vram_size = par->vram_size * LCD_NUM_BUFFERS; par->vram_virt = dma_alloc_coherent(NULL, @@ -1296,6 +1335,8 @@ static int __devinit fb_probe(struct platform_device *device) /* initialize the vsync wait queue */ init_waitqueue_head(&par->vsync_wait); par->vsync_timeout = HZ / 5; + par->which_dma_channel_done = -1; + spin_lock_init(&par->lock_for_chan_update); /* Register the Frame Buffer */ if (register_framebuffer(da8xx_fb_info) < 0) { @@ -1382,11 +1423,12 @@ static int fb_resume(struct platform_device *dev) struct da8xx_fb_par *par = info->par; console_lock(); + clk_enable(par->lcdc_clk); + lcd_enable_raster(); + if (par->panel_power_ctrl) par->panel_power_ctrl(1); - clk_enable(par->lcdc_clk); - lcd_enable_raster(); fb_set_suspend(info, 0); console_unlock(); diff --git a/drivers/video/epson1355fb.c b/drivers/video/epson1355fb.c index a268cbf1cbe..68b9b511ce8 100644 --- a/drivers/video/epson1355fb.c +++ b/drivers/video/epson1355fb.c @@ -477,11 +477,11 @@ static __init unsigned int get_fb_size(struct fb_info *info) return size; } -static int epson1355_width_tab[2][4] __initdata = +static int epson1355_width_tab[2][4] __devinitdata = { {4, 8, 16, -1}, {9, 12, 16, -1} }; -static int epson1355_bpp_tab[8] __initdata = { 1, 2, 4, 8, 15, 16 }; +static int epson1355_bpp_tab[8] __devinitdata = { 1, 2, 4, 8, 15, 16 }; -static void __init fetch_hw_state(struct fb_info *info, struct epson1355_par *par) +static void __devinit fetch_hw_state(struct fb_info *info, struct epson1355_par *par) { struct fb_var_screeninfo *var = &info->var; struct fb_fix_screeninfo *fix = &info->fix; @@ -601,7 +601,7 @@ static int epson1355fb_remove(struct platform_device *dev) return 0; } -int __devinit epson1355fb_probe(struct platform_device *dev) +static int __devinit epson1355fb_probe(struct platform_device *dev) { struct epson1355_par *default_par; struct fb_info *info; diff --git a/drivers/video/exynos/exynos_dp_core.c b/drivers/video/exynos/exynos_dp_core.c index a36b2d28280..c6c016a506c 100644 --- a/drivers/video/exynos/exynos_dp_core.c +++ b/drivers/video/exynos/exynos_dp_core.c @@ -47,7 +47,7 @@ static int exynos_dp_detect_hpd(struct exynos_dp_device *dp) exynos_dp_init_hpd(dp); - udelay(200); + usleep_range(200, 210); while (exynos_dp_get_plug_in_status(dp) != 0) { timeout_loop++; @@ -55,7 +55,7 @@ static int exynos_dp_detect_hpd(struct exynos_dp_device *dp) dev_err(dp->dev, "failed to get hpd plug status\n"); return -ETIMEDOUT; } - udelay(10); + usleep_range(10, 11); } return 0; @@ -304,7 +304,7 @@ static void exynos_dp_link_start(struct exynos_dp_device *dp) buf[lane] = DPCD_PRE_EMPHASIS_PATTERN2_LEVEL0 | DPCD_VOLTAGE_SWING_PATTERN1_LEVEL0; exynos_dp_write_bytes_to_dpcd(dp, - DPCD_ADDR_TRAINING_PATTERN_SET, + DPCD_ADDR_TRAINING_LANE0_SET, lane_count, buf); } @@ -336,7 +336,7 @@ static int exynos_dp_channel_eq_ok(u8 link_status[6], int lane_count) u8 lane_status; lane_align = link_status[2]; - if ((lane_align == DPCD_INTERLANE_ALIGN_DONE) == 0) + if ((lane_align & DPCD_INTERLANE_ALIGN_DONE) == 0) return -EINVAL; for (lane = 0; lane < lane_count; lane++) { @@ -407,6 +407,9 @@ static unsigned int exynos_dp_get_lane_link_training( case 3: reg = exynos_dp_get_lane3_link_training(dp); break; + default: + WARN_ON(1); + return 0; } return reg; @@ -483,7 +486,7 @@ static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp) u8 
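The vram sizing hunk replaces a plain PAGE_ALIGN with roundup to the least common multiple of the line length and PAGE_SIZE, so that each of the LCD_NUM_BUFFERS buffers starts both page-aligned and on a scanline boundary. Worked numbers (illustrative, not from the patch):

/* 800x480 @ 16 bpp:
 *   line   = 800 * 16 / 8              = 1600 bytes
 *   ulcm   = lcm(1600, PAGE_SIZE=4096) = 102400 bytes
 *   frame  = 1600 * 480                = 768000 bytes
 *   buffer = roundup(768000, 102400)   = 819200 bytes
 * Every buffer therefore begins page- and line-aligned. */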
pre_emphasis; u8 training_lane; - udelay(100); + usleep_range(100, 101); exynos_dp_read_bytes_from_dpcd(dp, DPCD_ADDR_LANE0_1_STATUS, 6, link_status); @@ -501,7 +504,7 @@ static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp) buf[0] = DPCD_SCRAMBLING_DISABLED | DPCD_TRAINING_PATTERN_2; exynos_dp_write_byte_to_dpcd(dp, - DPCD_ADDR_TRAINING_LANE0_SET, + DPCD_ADDR_TRAINING_PATTERN_SET, buf[0]); for (lane = 0; lane < lane_count; lane++) { @@ -568,7 +571,7 @@ static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp) u8 adjust_request[2]; - udelay(400); + usleep_range(400, 401); exynos_dp_read_bytes_from_dpcd(dp, DPCD_ADDR_LANE0_1_STATUS, 6, link_status); @@ -736,7 +739,7 @@ static int exynos_dp_set_link_train(struct exynos_dp_device *dp, if (retval == 0) break; - udelay(100); + usleep_range(100, 110); } return retval; @@ -770,7 +773,7 @@ static int exynos_dp_config_video(struct exynos_dp_device *dp, return -ETIMEDOUT; } - udelay(1); + usleep_range(1, 2); } /* Set to use the register calculated M/N video */ @@ -804,7 +807,7 @@ static int exynos_dp_config_video(struct exynos_dp_device *dp, return -ETIMEDOUT; } - mdelay(1); + usleep_range(1000, 1001); } if (retval != 0) diff --git a/drivers/video/exynos/exynos_dp_core.h b/drivers/video/exynos/exynos_dp_core.h index 1e0f998e0c9..8526e548c38 100644 --- a/drivers/video/exynos/exynos_dp_core.h +++ b/drivers/video/exynos/exynos_dp_core.h @@ -85,10 +85,6 @@ void exynos_dp_set_link_bandwidth(struct exynos_dp_device *dp, u32 bwtype); void exynos_dp_get_link_bandwidth(struct exynos_dp_device *dp, u32 *bwtype); void exynos_dp_set_lane_count(struct exynos_dp_device *dp, u32 count); void exynos_dp_get_lane_count(struct exynos_dp_device *dp, u32 *count); -void exynos_dp_set_link_bandwidth(struct exynos_dp_device *dp, u32 bwtype); -void exynos_dp_get_link_bandwidth(struct exynos_dp_device *dp, u32 *bwtype); -void exynos_dp_set_lane_count(struct exynos_dp_device *dp, u32 count); -void exynos_dp_get_lane_count(struct exynos_dp_device *dp, u32 *count); void exynos_dp_enable_enhanced_mode(struct exynos_dp_device *dp, bool enable); void exynos_dp_set_training_pattern(struct exynos_dp_device *dp, enum pattern_set pattern); diff --git a/drivers/video/exynos/exynos_dp_reg.c b/drivers/video/exynos/exynos_dp_reg.c index bcb0e3ae1e9..2db5b9aa250 100644 --- a/drivers/video/exynos/exynos_dp_reg.c +++ b/drivers/video/exynos/exynos_dp_reg.c @@ -122,7 +122,7 @@ void exynos_dp_reset(struct exynos_dp_device *dp) LS_CLK_DOMAIN_FUNC_EN_N; writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2); - udelay(20); + usleep_range(20, 30); exynos_dp_lane_swap(dp, 0); @@ -988,7 +988,7 @@ void exynos_dp_reset_macro(struct exynos_dp_device *dp) writel(reg, dp->reg_base + EXYNOS_DP_PHY_TEST); /* 10 us is the minimum reset time. */ - udelay(10); + usleep_range(10, 20); reg &= ~MACRO_RST; writel(reg, dp->reg_base + EXYNOS_DP_PHY_TEST); diff --git a/drivers/video/exynos/exynos_mipi_dsi.c b/drivers/video/exynos/exynos_mipi_dsi.c index 9908e75ae76..4bc2b8a5dd8 100644 --- a/drivers/video/exynos/exynos_mipi_dsi.c +++ b/drivers/video/exynos/exynos_mipi_dsi.c @@ -154,7 +154,7 @@ static int exynos_mipi_dsi_blank_mode(struct mipi_dsim_device *dsim, int power) if (client_drv && client_drv->power_on) client_drv->power_on(client_dev, 1); - exynos_mipi_regulator_disable(dsim); + exynos_mipi_regulator_enable(dsim); /* enable MIPI-DSI PHY. 
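The exynos_dp hunks systematically swap udelay()/mdelay() for usleep_range() in these sleepable paths: a busy-wait burns the CPU for the whole interval, while a ranged sleep yields it and gives the hrtimer core slack to coalesce nearby wakeups. The same files also fix two real bugs visible above: the interlane-align status test now masks with & instead of comparing with ==, and the training writes go to the DPCD registers their names actually denote. The delay idiom in isolation:

#include <linux/delay.h>

/* Non-atomic context: sleep at least 200us, at most 210us, instead of
 * spinning for the full interval with udelay(200). */
static void example_settle_hpd(void)
{
	usleep_range(200, 210);
}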
*/ if (dsim->pd->phy_enable) diff --git a/drivers/video/exynos/s6e8ax0.h b/drivers/video/exynos/s6e8ax0.h deleted file mode 100644 index 1f1b270484b..00000000000 --- a/drivers/video/exynos/s6e8ax0.h +++ /dev/null @@ -1,21 +0,0 @@ -/* linux/drivers/video/backlight/s6e8ax0.h - * - * MIPI-DSI based s6e8ax0 AMOLED LCD Panel definitions. - * - * Copyright (c) 2011 Samsung Electronics - * - * Inki Dae, <inki.dae@samsung.com> - * Donghwa Lee <dh09.lee@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. -*/ - -#ifndef _S6E8AX0_H -#define _S6E8AX0_H - -extern void s6e8ax0_init(void); - -#endif - diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c index 1ddeb11659d..64cda560c48 100644 --- a/drivers/video/fb_defio.c +++ b/drivers/video/fb_defio.c @@ -104,6 +104,8 @@ static int fb_deferred_io_mkwrite(struct vm_area_struct *vma, deferred framebuffer IO. then if userspace touches a page again, we repeat the same scheme */ + file_update_time(vma->vm_file); + /* protect against the workqueue changing the page list */ mutex_lock(&fbdefio->lock); diff --git a/drivers/video/fb_draw.h b/drivers/video/fb_draw.h index 04c01faaf77..624ee115f12 100644 --- a/drivers/video/fb_draw.h +++ b/drivers/video/fb_draw.h @@ -3,6 +3,7 @@ #include <asm/types.h> #include <linux/fb.h> +#include <linux/bug.h> /* * Compose two values, using a bitmask as decision value @@ -41,7 +42,8 @@ pixel_to_pat( u32 bpp, u32 pixel) case 32: return 0x0000000100000001ul*pixel; default: - panic("pixel_to_pat(): unsupported pixelformat\n"); + WARN(1, "pixel_to_pat(): unsupported pixelformat %d\n", bpp); + return 0; } } #else @@ -66,7 +68,8 @@ pixel_to_pat( u32 bpp, u32 pixel) case 32: return 0x00000001ul*pixel; default: - panic("pixel_to_pat(): unsupported pixelformat\n"); + WARN(1, "pixel_to_pat(): unsupported pixelformat %d\n", bpp); + return 0; } } #endif diff --git a/drivers/video/grvga.c b/drivers/video/grvga.c index da066c21092..5245f9a7189 100644 --- a/drivers/video/grvga.c +++ b/drivers/video/grvga.c @@ -354,7 +354,7 @@ static int __devinit grvga_probe(struct platform_device *dev) */ if (fb_get_options("grvga", &options)) { retval = -ENODEV; - goto err; + goto free_fb; } if (!options || !*options) @@ -370,7 +370,7 @@ static int __devinit grvga_probe(struct platform_device *dev) if (grvga_parse_custom(this_opt, &info->var) < 0) { dev_err(&dev->dev, "Failed to parse custom mode (%s).\n", this_opt); retval = -EINVAL; - goto err1; + goto free_fb; } } else if (!strncmp(this_opt, "addr", 4)) grvga_fix_addr = simple_strtoul(this_opt + 5, NULL, 16); @@ -387,10 +387,11 @@ static int __devinit grvga_probe(struct platform_device *dev) info->flags = FBINFO_DEFAULT | FBINFO_PARTIAL_PAN_OK | FBINFO_HWACCEL_YPAN; info->fix.smem_len = grvga_mem_size; - if (!request_mem_region(dev->resource[0].start, resource_size(&dev->resource[0]), "grlib-svgactrl regs")) { + if (!devm_request_mem_region(&dev->dev, dev->resource[0].start, + resource_size(&dev->resource[0]), "grlib-svgactrl regs")) { dev_err(&dev->dev, "registers already mapped\n"); retval = -EBUSY; - goto err; + goto free_fb; } par->regs = of_ioremap(&dev->resource[0], 0, @@ -400,14 +401,14 @@ static int __devinit grvga_probe(struct platform_device *dev) if (!par->regs) { dev_err(&dev->dev, "failed to map registers\n"); retval = -ENOMEM; - goto err1; + goto free_fb; } retval = fb_alloc_cmap(&info->cmap, 256, 0); if (retval < 0) { 
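fb_draw.h now WARNs and returns an all-zero pattern for an unsupported depth instead of panicking, so a bad pixel format draws black rather than killing the machine. The replication trick the function is built on: multiplying the pixel by a constant with a one in each pixel slot broadcasts it across the fill word, e.g. for 8 bpp:

#include <linux/types.h>

/* 0x01 repeated in every byte: multiplying replicates the pixel.
 * example_pat8(0xAB) == 0xABABABABABABABABULL. */
static u64 example_pat8(u8 pixel)
{
	return 0x0101010101010101ULL * pixel;
}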
dev_err(&dev->dev, "failed to allocate mem with fb_alloc_cmap\n"); retval = -ENOMEM; - goto err2; + goto unmap_regs; } if (mode_opt) { @@ -415,7 +416,7 @@ static int __devinit grvga_probe(struct platform_device *dev) grvga_modedb, sizeof(grvga_modedb), &grvga_modedb[0], 8); if (!retval || retval == 4) { retval = -EINVAL; - goto err3; + goto dealloc_cmap; } } @@ -427,10 +428,11 @@ static int __devinit grvga_probe(struct platform_device *dev) physical_start = grvga_fix_addr; - if (!request_mem_region(physical_start, grvga_mem_size, dev->name)) { + if (!devm_request_mem_region(&dev->dev, physical_start, + grvga_mem_size, dev->name)) { dev_err(&dev->dev, "failed to request memory region\n"); retval = -ENOMEM; - goto err3; + goto dealloc_cmap; } virtual_start = (unsigned long) ioremap(physical_start, grvga_mem_size); @@ -438,7 +440,7 @@ static int __devinit grvga_probe(struct platform_device *dev) if (!virtual_start) { dev_err(&dev->dev, "error mapping framebuffer memory\n"); retval = -ENOMEM; - goto err4; + goto dealloc_cmap; } } else { /* Allocate frambuffer memory */ @@ -451,7 +453,7 @@ static int __devinit grvga_probe(struct platform_device *dev) "unable to allocate framebuffer memory (%lu bytes)\n", grvga_mem_size); retval = -ENOMEM; - goto err3; + goto dealloc_cmap; } physical_start = dma_map_single(&dev->dev, (void *)virtual_start, grvga_mem_size, DMA_TO_DEVICE); @@ -484,7 +486,7 @@ static int __devinit grvga_probe(struct platform_device *dev) retval = register_framebuffer(info); if (retval < 0) { dev_err(&dev->dev, "failed to register framebuffer\n"); - goto err4; + goto free_mem; } __raw_writel(physical_start, &par->regs->fb_pos); @@ -493,21 +495,18 @@ static int __devinit grvga_probe(struct platform_device *dev) return 0; -err4: +free_mem: dev_set_drvdata(&dev->dev, NULL); - if (grvga_fix_addr) { - release_mem_region(physical_start, grvga_mem_size); + if (grvga_fix_addr) iounmap((void *)virtual_start); - } else + else kfree((void *)virtual_start); -err3: +dealloc_cmap: fb_dealloc_cmap(&info->cmap); -err2: +unmap_regs: of_iounmap(&dev->resource[0], par->regs, resource_size(&dev->resource[0])); -err1: - release_mem_region(dev->resource[0].start, resource_size(&dev->resource[0])); -err: +free_fb: framebuffer_release(info); return retval; @@ -524,12 +523,10 @@ static int __devexit grvga_remove(struct platform_device *device) of_iounmap(&device->resource[0], par->regs, resource_size(&device->resource[0])); - release_mem_region(device->resource[0].start, resource_size(&device->resource[0])); - if (!par->fb_alloced) { - release_mem_region(info->fix.smem_start, info->fix.smem_len); + if (!par->fb_alloced) iounmap(info->screen_base); - } else + else kfree((void *)info->screen_base); framebuffer_release(info); diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c index eec0d7b748e..c89f8a8d36d 100644 --- a/drivers/video/mx3fb.c +++ b/drivers/video/mx3fb.c @@ -269,7 +269,7 @@ struct mx3fb_info { dma_cookie_t cookie; struct scatterlist sg[2]; - u32 sync; /* preserve var->sync flags */ + struct fb_var_screeninfo cur_var; /* current var info */ }; static void mx3fb_dma_done(void *); @@ -698,9 +698,29 @@ static void mx3fb_dma_done(void *arg) complete(&mx3_fbi->flip_cmpl); } +static bool mx3fb_must_set_par(struct fb_info *fbi) +{ + struct mx3fb_info *mx3_fbi = fbi->par; + struct fb_var_screeninfo old_var = mx3_fbi->cur_var; + struct fb_var_screeninfo new_var = fbi->var; + + if ((fbi->var.activate & FB_ACTIVATE_FORCE) && + (fbi->var.activate & FB_ACTIVATE_MASK) == FB_ACTIVATE_NOW) + return true; 
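The grvga error paths are relabelled from err1..err4 to names describing the cleanup each performs (free_fb, unmap_regs, dealloc_cmap, free_mem), and devm_request_mem_region() lets the regions drop out of the unwind entirely. The underlying idiom, sketched with hypothetical acquire/release helpers: every failure jumps to the label that undoes exactly what has been acquired so far, in reverse order.

int acquire_a(void), acquire_b(void), acquire_c(void);	/* hypothetical */
void release_a(void), release_b(void);

static int example_probe(void)
{
	int ret = acquire_a();
	if (ret)
		return ret;

	ret = acquire_b();
	if (ret)
		goto undo_a;

	ret = acquire_c();
	if (ret)
		goto undo_b;

	return 0;

undo_b:
	release_b();
undo_a:
	release_a();
	return ret;
}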
+ + /* + * Ignore xoffset and yoffset update, + * because pan display handles this case. + */ + old_var.xoffset = new_var.xoffset; + old_var.yoffset = new_var.yoffset; + + return !!memcmp(&old_var, &new_var, sizeof(struct fb_var_screeninfo)); +} + static int __set_par(struct fb_info *fbi, bool lock) { - u32 mem_len; + u32 mem_len, cur_xoffset, cur_yoffset; struct ipu_di_signal_cfg sig_cfg; enum ipu_panel mode = IPU_PANEL_TFT; struct mx3fb_info *mx3_fbi = fbi->par; @@ -780,8 +800,25 @@ static int __set_par(struct fb_info *fbi, bool lock) video->out_height = fbi->var.yres; video->out_stride = fbi->var.xres_virtual; - if (mx3_fbi->blank == FB_BLANK_UNBLANK) + if (mx3_fbi->blank == FB_BLANK_UNBLANK) { sdc_enable_channel(mx3_fbi); + /* + * sg[0] points to fb smem_start address + * and is actually active in controller. + */ + mx3_fbi->cur_var.xoffset = 0; + mx3_fbi->cur_var.yoffset = 0; + } + + /* + * Preserve xoffset and yoffest in case they are + * inactive in controller as fb is blanked. + */ + cur_xoffset = mx3_fbi->cur_var.xoffset; + cur_yoffset = mx3_fbi->cur_var.yoffset; + mx3_fbi->cur_var = fbi->var; + mx3_fbi->cur_var.xoffset = cur_xoffset; + mx3_fbi->cur_var.yoffset = cur_yoffset; return 0; } @@ -802,7 +839,7 @@ static int mx3fb_set_par(struct fb_info *fbi) mutex_lock(&mx3_fbi->mutex); - ret = __set_par(fbi, true); + ret = mx3fb_must_set_par(fbi) ? __set_par(fbi, true) : 0; mutex_unlock(&mx3_fbi->mutex); @@ -901,8 +938,8 @@ static int mx3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *fbi) var->grayscale = 0; /* Preserve sync flags */ - var->sync |= mx3_fbi->sync; - mx3_fbi->sync |= var->sync; + var->sync |= mx3_fbi->cur_var.sync; + mx3_fbi->cur_var.sync |= var->sync; return 0; } @@ -1043,8 +1080,8 @@ static int mx3fb_pan_display(struct fb_var_screeninfo *var, return -EINVAL; } - if (fbi->var.xoffset == var->xoffset && - fbi->var.yoffset == var->yoffset) + if (mx3_fbi->cur_var.xoffset == var->xoffset && + mx3_fbi->cur_var.yoffset == var->yoffset) return 0; /* No change, do nothing */ y_bottom = var->yoffset; @@ -1127,6 +1164,8 @@ static int mx3fb_pan_display(struct fb_var_screeninfo *var, else fbi->var.vmode &= ~FB_VMODE_YWRAP; + mx3_fbi->cur_var = fbi->var; + mutex_unlock(&mx3_fbi->mutex); dev_dbg(fbi->device, "Update complete\n"); diff --git a/drivers/video/omap2/displays/panel-acx565akm.c b/drivers/video/omap2/displays/panel-acx565akm.c index ad741c3d1ae..eaeed4340e0 100644 --- a/drivers/video/omap2/displays/panel-acx565akm.c +++ b/drivers/video/omap2/displays/panel-acx565akm.c @@ -487,6 +487,13 @@ static struct omap_video_timings acx_panel_timings = { .vfp = 3, .vsw = 3, .vbp = 4, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }; static int acx_panel_probe(struct omap_dss_device *dssdev) @@ -498,8 +505,7 @@ static int acx_panel_probe(struct omap_dss_device *dssdev) struct backlight_properties props; dev_dbg(&dssdev->dev, "%s\n", __func__); - dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS; + /* FIXME AC bias ? 
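mx3fb now caches the full fb_var_screeninfo it last programmed (cur_var), and mx3fb_must_set_par() skips the expensive mode set when nothing but the pan offsets changed, since pan_display handles those; FB_ACTIVATE_FORCE still forces a reprogram. The comparison in isolation:

#include <linux/fb.h>
#include <linux/string.h>

/* True if anything other than the pan offsets differs. */
static bool example_var_changed(const struct fb_var_screeninfo *cur,
				const struct fb_var_screeninfo *want)
{
	struct fb_var_screeninfo a = *cur, b = *want;

	a.xoffset = b.xoffset;	/* pan_display owns these */
	a.yoffset = b.yoffset;

	return memcmp(&a, &b, sizeof(a)) != 0;
}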
*/ dssdev->panel.timings = acx_panel_timings; diff --git a/drivers/video/omap2/displays/panel-generic-dpi.c b/drivers/video/omap2/displays/panel-generic-dpi.c index e42f9dc2212..bc5af2500eb 100644 --- a/drivers/video/omap2/displays/panel-generic-dpi.c +++ b/drivers/video/omap2/displays/panel-generic-dpi.c @@ -40,12 +40,6 @@ struct panel_config { struct omap_video_timings timings; - int acbi; /* ac-bias pin transitions per interrupt */ - /* Unit: line clocks */ - int acb; /* ac-bias pin frequency */ - - enum omap_panel_config config; - int power_on_delay; int power_off_delay; @@ -73,11 +67,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 11, .vfp = 3, .vbp = 2, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_LOW, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .acbi = 0x0, - .acb = 0x0, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IEO, .power_on_delay = 50, .power_off_delay = 100, .name = "sharp_lq", @@ -98,11 +94,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 1, .vfp = 1, .vbp = 1, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .acbi = 0x0, - .acb = 0x28, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS, .power_on_delay = 50, .power_off_delay = 100, .name = "sharp_ls", @@ -123,12 +121,13 @@ static struct panel_config generic_dpi_panels[] = { .vfp = 4, .vsw = 2, .vbp = 2, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, }, - .acbi = 0x0, - .acb = 0x0, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC | - OMAP_DSS_LCD_ONOFF, .power_on_delay = 0, .power_off_delay = 0, .name = "toppoly_tdo35s", @@ -149,11 +148,13 @@ static struct panel_config generic_dpi_panels[] = { .vfp = 4, .vsw = 10, .vbp = 12 - 10, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .acbi = 0x0, - .acb = 0x0, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS, .power_on_delay = 0, .power_off_delay = 0, .name = "samsung_lte430wq_f0c", @@ -174,11 +175,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 2, .vfp = 4, .vbp = 11, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .acbi = 0x0, - .acb = 0x0, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS, .power_on_delay = 0, .power_off_delay = 0, .name = "seiko_70wvw1tz3", @@ -199,11 +202,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 10, .vfp = 2, .vbp = 2, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_LOW, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .acbi = 0x0, - .acb = 0x0, - .config = 
OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IEO, .power_on_delay = 0, .power_off_delay = 0, .name = "powertip_ph480272t", @@ -224,11 +229,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 3, .vfp = 12, .vbp = 25, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .acbi = 0x0, - .acb = 0x28, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS, .power_on_delay = 0, .power_off_delay = 0, .name = "innolux_at070tn83", @@ -249,9 +256,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 1, .vfp = 2, .vbp = 7, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS, .name = "nec_nl2432dr22-11b", }, @@ -270,9 +281,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 1, .vfp = 1, .vbp = 1, - }, - .config = OMAP_DSS_LCD_TFT, + .vsync_level = OMAPDSS_SIG_ACTIVE_HIGH, + .hsync_level = OMAPDSS_SIG_ACTIVE_HIGH, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, + }, .name = "h4", }, @@ -291,10 +306,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 10, .vfp = 2, .vbp = 2, - }, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS, + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, + }, .name = "apollon", }, /* FocalTech ETM070003DH6 */ @@ -312,9 +330,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 3, .vfp = 13, .vbp = 29, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS, .name = "focaltech_etm070003dh6", }, @@ -333,11 +355,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 23, .vfp = 1, .vbp = 1, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .acbi = 0x0, - .acb = 0x0, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC, .power_on_delay = 0, .power_off_delay = 0, .name = "microtips_umsh_8173md", @@ -358,9 +382,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 10, .vfp = 4, .vbp = 2, - }, - .config = OMAP_DSS_LCD_TFT, + .vsync_level = OMAPDSS_SIG_ACTIVE_HIGH, + .hsync_level = OMAPDSS_SIG_ACTIVE_HIGH, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, + }, .name = "ortustech_com43h4m10xtc", }, @@ -379,11 +407,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 10, .vfp = 12, .vbp = 23, - }, - .acb = 0x0, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IEO, + 
.vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_LOW, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, + }, .name = "innolux_at080tn52", }, @@ -401,8 +431,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 1, .vfp = 26, .vbp = 1, + + .vsync_level = OMAPDSS_SIG_ACTIVE_HIGH, + .hsync_level = OMAPDSS_SIG_ACTIVE_HIGH, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .config = OMAP_DSS_LCD_TFT, .name = "mitsubishi_aa084sb01", }, /* EDT ET0500G0DH6 */ @@ -419,8 +454,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 2, .vfp = 35, .vbp = 10, + + .vsync_level = OMAPDSS_SIG_ACTIVE_HIGH, + .hsync_level = OMAPDSS_SIG_ACTIVE_HIGH, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .config = OMAP_DSS_LCD_TFT, .name = "edt_et0500g0dh6", }, @@ -439,9 +479,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 2, .vfp = 10, .vbp = 33, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC, .name = "primeview_pd050vl1", }, @@ -460,9 +504,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 2, .vfp = 10, .vbp = 33, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC, .name = "primeview_pm070wl4", }, @@ -481,9 +529,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 4, .vfp = 1, .vbp = 23, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC, .name = "primeview_pd104slf", }, }; @@ -573,10 +625,7 @@ static int generic_dpi_panel_probe(struct omap_dss_device *dssdev) if (!panel_config) return -EINVAL; - dssdev->panel.config = panel_config->config; dssdev->panel.timings = panel_config->timings; - dssdev->panel.acb = panel_config->acb; - dssdev->panel.acbi = panel_config->acbi; drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL); if (!drv_data) diff --git a/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c b/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c index 0841cc2b3f7..80280779884 100644 --- a/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c +++ b/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c @@ -40,6 +40,12 @@ static struct omap_video_timings lb035q02_timings = { .vsw = 2, .vfp = 4, .vbp = 18, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }; static int lb035q02_panel_power_on(struct omap_dss_device *dssdev) @@ -82,8 +88,6 @@ static int 
lb035q02_panel_probe(struct omap_dss_device *dssdev) struct lb035q02_data *ld; int r; - dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS; dssdev->panel.timings = lb035q02_timings; ld = kzalloc(sizeof(*ld), GFP_KERNEL); diff --git a/drivers/video/omap2/displays/panel-n8x0.c b/drivers/video/omap2/displays/panel-n8x0.c index 4a34cdc1371..e6c115373c0 100644 --- a/drivers/video/omap2/displays/panel-n8x0.c +++ b/drivers/video/omap2/displays/panel-n8x0.c @@ -473,7 +473,6 @@ static int n8x0_panel_probe(struct omap_dss_device *dssdev) mutex_init(&ddata->lock); - dssdev->panel.config = OMAP_DSS_LCD_TFT; dssdev->panel.timings.x_res = 800; dssdev->panel.timings.y_res = 480; dssdev->ctrl.pixel_size = 16; diff --git a/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c index 8b38b39213f..b122b0f31c4 100644 --- a/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c +++ b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c @@ -76,6 +76,12 @@ static struct omap_video_timings nec_8048_panel_timings = { .vfp = 3, .vsw = 1, .vbp = 4, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, }; static int nec_8048_bl_update_status(struct backlight_device *bl) @@ -116,9 +122,6 @@ static int nec_8048_panel_probe(struct omap_dss_device *dssdev) struct backlight_properties props; int r; - dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_RF | - OMAP_DSS_LCD_ONOFF; dssdev->panel.timings = nec_8048_panel_timings; necd = kzalloc(sizeof(*necd), GFP_KERNEL); diff --git a/drivers/video/omap2/displays/panel-picodlp.c b/drivers/video/omap2/displays/panel-picodlp.c index 98ebdaddab5..2d35bd38886 100644 --- a/drivers/video/omap2/displays/panel-picodlp.c +++ b/drivers/video/omap2/displays/panel-picodlp.c @@ -69,6 +69,12 @@ static struct omap_video_timings pico_ls_timings = { .vsw = 2, .vfp = 3, .vbp = 14, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, }; static inline struct picodlp_panel_data @@ -414,9 +420,6 @@ static int picodlp_panel_probe(struct omap_dss_device *dssdev) struct i2c_client *picodlp_i2c_client; int r = 0, picodlp_adapter_id; - dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_ONOFF | - OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IVS; - dssdev->panel.acb = 0x0; dssdev->panel.timings = pico_ls_timings; picod = kzalloc(sizeof(struct picodlp_data), GFP_KERNEL); diff --git a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c index ba38b3ad17d..bd86ba9ccf7 100644 --- a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c @@ -44,6 +44,12 @@ static struct omap_video_timings sharp_ls_timings = { .vsw = 1, .vfp = 1, .vbp = 1, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }; static int sharp_ls_bl_update_status(struct backlight_device *bl) @@ -86,9 +92,6 @@ static int sharp_ls_panel_probe(struct omap_dss_device *dssdev) struct sharp_data *sd; 
int r; - dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS; - dssdev->panel.acb = 0x28; dssdev->panel.timings = sharp_ls_timings; sd = kzalloc(sizeof(*sd), GFP_KERNEL); diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c index 901576eb5a8..3f5acc7771d 100644 --- a/drivers/video/omap2/displays/panel-taal.c +++ b/drivers/video/omap2/displays/panel-taal.c @@ -882,7 +882,6 @@ static int taal_probe(struct omap_dss_device *dssdev) goto err; } - dssdev->panel.config = OMAP_DSS_LCD_TFT; dssdev->panel.timings = panel_config->timings; dssdev->panel.dsi_pix_fmt = OMAP_DSS_DSI_FMT_RGB888; diff --git a/drivers/video/omap2/displays/panel-tfp410.c b/drivers/video/omap2/displays/panel-tfp410.c index bff306e041c..40cc0cfa5d1 100644 --- a/drivers/video/omap2/displays/panel-tfp410.c +++ b/drivers/video/omap2/displays/panel-tfp410.c @@ -39,6 +39,12 @@ static const struct omap_video_timings tfp410_default_timings = { .vfp = 3, .vsw = 4, .vbp = 7, + + .vsync_level = OMAPDSS_SIG_ACTIVE_HIGH, + .hsync_level = OMAPDSS_SIG_ACTIVE_HIGH, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }; struct panel_drv_data { @@ -95,7 +101,6 @@ static int tfp410_probe(struct omap_dss_device *dssdev) return -ENOMEM; dssdev->panel.timings = tfp410_default_timings; - dssdev->panel.config = OMAP_DSS_LCD_TFT; ddata->dssdev = dssdev; mutex_init(&ddata->lock); diff --git a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c index 4b6448b3c31..fa7baa650ae 100644 --- a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c +++ b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c @@ -267,6 +267,12 @@ static const struct omap_video_timings tpo_td043_timings = { .vsw = 1, .vfp = 39, .vbp = 34, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }; static int tpo_td043_power_on(struct tpo_td043_device *tpo_td043) @@ -423,8 +429,6 @@ static int tpo_td043_probe(struct omap_dss_device *dssdev) return -ENODEV; } - dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IHS | - OMAP_DSS_LCD_IVS | OMAP_DSS_LCD_IPC; dssdev->panel.timings = tpo_td043_timings; dssdev->ctrl.pixel_size = 24; diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig index 43324e5ed25..b337a8469fd 100644 --- a/drivers/video/omap2/dss/Kconfig +++ b/drivers/video/omap2/dss/Kconfig @@ -52,7 +52,7 @@ config OMAP2_DSS_RFBI DBI is a bus between the host processor and a peripheral, such as a display or a framebuffer chip. - See http://www.mipi.org/ for DBI spesifications. + See http://www.mipi.org/ for DBI specifications. config OMAP2_DSS_VENC bool "VENC support" @@ -92,7 +92,7 @@ config OMAP2_DSS_DSI DSI is a high speed half-duplex serial interface between the host processor and a peripheral, such as a display or a framebuffer chip. - See http://www.mipi.org/ for DSI spesifications. + See http://www.mipi.org/ for DSI specifications. 
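The panel conversions above all follow one pattern: each removed OMAP_DSS_LCD_* flag in dssdev->panel.config maps one-to-one onto a new explicit field of struct omap_video_timings. A minimal sketch of that mapping, inferred from the hunks above — the helper name panel_config_to_timings is hypothetical, while the flag and enum names are the ones visible in the diffs:

static void panel_config_to_timings(enum omap_panel_config config,
				    struct omap_video_timings *t)
{
	/* IVS/IHS meant "inverted (active-low) vsync/hsync" */
	t->vsync_level = (config & OMAP_DSS_LCD_IVS) ?
		OMAPDSS_SIG_ACTIVE_LOW : OMAPDSS_SIG_ACTIVE_HIGH;
	t->hsync_level = (config & OMAP_DSS_LCD_IHS) ?
		OMAPDSS_SIG_ACTIVE_LOW : OMAPDSS_SIG_ACTIVE_HIGH;

	/* IEO meant "inverted (active-low) data-enable" */
	t->de_level = (config & OMAP_DSS_LCD_IEO) ?
		OMAPDSS_SIG_ACTIVE_LOW : OMAPDSS_SIG_ACTIVE_HIGH;

	/* IPC meant "drive data on the falling pixel-clock edge" */
	t->data_pclk_edge = (config & OMAP_DSS_LCD_IPC) ?
		OMAPDSS_DRIVE_SIG_FALLING_EDGE :
		OMAPDSS_DRIVE_SIG_RISING_EDGE;

	/* ONOFF and RF together selected the sync drive edge */
	if (!(config & OMAP_DSS_LCD_ONOFF))
		t->sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES;
	else if (config & OMAP_DSS_LCD_RF)
		t->sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
	else
		t->sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE;
}

Reading the generic_dpi_panels entries against their deleted .config values checks out against this table: e.g. sharp_lq (IVS|IHS|IEO) becomes active-low syncs and DE with rising-edge data, and nec_8048 (ONOFF|RF) becomes sync_pclk_edge = RISING_EDGE — the same onoff/rf decoding that _dispc_mgr_set_lcd_timings gains further below.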
config OMAP2_DSS_MIN_FCK_PER_PCK int "Minimum FCK/PCK ratio (for scaling)" diff --git a/drivers/video/omap2/dss/apply.c b/drivers/video/omap2/dss/apply.c index ab22cc224f3..0fefc68372b 100644 --- a/drivers/video/omap2/dss/apply.c +++ b/drivers/video/omap2/dss/apply.c @@ -104,6 +104,7 @@ struct mgr_priv_data { bool shadow_extra_info_dirty; struct omap_video_timings timings; + struct dss_lcd_mgr_config lcd_config; }; static struct { @@ -137,6 +138,7 @@ static struct mgr_priv_data *get_mgr_priv(struct omap_overlay_manager *mgr) void dss_apply_init(void) { const int num_ovls = dss_feat_get_num_ovls(); + struct mgr_priv_data *mp; int i; spin_lock_init(&data_lock); @@ -168,16 +170,35 @@ void dss_apply_init(void) op->user_info = op->info; } + + /* + * Initialize some of the lcd_config fields for the TV manager; this + * lets us avoid checking whether the manager is LCD or TV in several places + */ + mp = &dss_data.mgr_priv_data_array[OMAP_DSS_CHANNEL_DIGIT]; + + mp->lcd_config.video_port_width = 24; + mp->lcd_config.clock_info.lck_div = 1; + mp->lcd_config.clock_info.pck_div = 1; } +/* + * An LCD manager's stallmode decides whether it is in manual or auto update + * mode. The TV manager is always in auto update mode; its stallmode field is + * false by default. + */ static bool ovl_manual_update(struct omap_overlay *ovl) { - return ovl->manager->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE; + struct mgr_priv_data *mp = get_mgr_priv(ovl->manager); + + return mp->lcd_config.stallmode; } static bool mgr_manual_update(struct omap_overlay_manager *mgr) { - return mgr->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE; + struct mgr_priv_data *mp = get_mgr_priv(mgr); + + return mp->lcd_config.stallmode; } static int dss_check_settings_low(struct omap_overlay_manager *mgr, @@ -214,7 +235,7 @@ static int dss_check_settings_low(struct omap_overlay_manager *mgr, ois[ovl->id] = oi; } - return dss_mgr_check(mgr, mi, &mp->timings, ois); + return dss_mgr_check(mgr, mi, &mp->timings, &mp->lcd_config, ois); } /* @@ -537,7 +558,7 @@ static void dss_ovl_write_regs(struct omap_overlay *ovl) { struct ovl_priv_data *op = get_ovl_priv(ovl); struct omap_overlay_info *oi; - bool ilace, replication; + bool replication; struct mgr_priv_data *mp; int r; @@ -550,11 +571,9 @@ static void dss_ovl_write_regs(struct omap_overlay *ovl) mp = get_mgr_priv(ovl->manager); - replication = dss_use_replication(ovl->manager->device, oi->color_mode); - - ilace = ovl->manager->device->type == OMAP_DISPLAY_TYPE_VENC; + replication = dss_ovl_use_replication(mp->lcd_config, oi->color_mode); - r = dispc_ovl_setup(ovl->id, oi, ilace, replication, &mp->timings); + r = dispc_ovl_setup(ovl->id, oi, replication, &mp->timings); if (r) { /* * We can't do much here, as this function can be called from @@ -635,6 +654,24 @@ static void dss_mgr_write_regs_extra(struct omap_overlay_manager *mgr) dispc_mgr_set_timings(mgr->id, &mp->timings); + /* lcd_config parameters */ + if (dss_mgr_is_lcd(mgr->id)) { + dispc_mgr_set_io_pad_mode(mp->lcd_config.io_pad_mode); + + dispc_mgr_enable_stallmode(mgr->id, mp->lcd_config.stallmode); + dispc_mgr_enable_fifohandcheck(mgr->id, + mp->lcd_config.fifohandcheck); + + dispc_mgr_set_clock_div(mgr->id, &mp->lcd_config.clock_info); + + dispc_mgr_set_tft_data_lines(mgr->id, + mp->lcd_config.video_port_width); + + dispc_lcd_enable_signal_polarity(mp->lcd_config.lcden_sig_polarity); + + dispc_mgr_set_lcd_type_tft(mgr->id); + } + mp->extra_info_dirty = false; if (mp->updating) mp->shadow_extra_info_dirty = true; @@ -1294,6 +1331,44 @@ void
dss_mgr_set_timings(struct omap_overlay_manager *mgr, mutex_unlock(&apply_lock); } +static void dss_apply_mgr_lcd_config(struct omap_overlay_manager *mgr, + const struct dss_lcd_mgr_config *config) +{ + struct mgr_priv_data *mp = get_mgr_priv(mgr); + + mp->lcd_config = *config; + mp->extra_info_dirty = true; +} + +void dss_mgr_set_lcd_config(struct omap_overlay_manager *mgr, + const struct dss_lcd_mgr_config *config) +{ + unsigned long flags; + struct mgr_priv_data *mp = get_mgr_priv(mgr); + + mutex_lock(&apply_lock); + + if (mp->enabled) { + DSSERR("cannot apply lcd config for %s: manager needs to be disabled\n", + mgr->name); + goto out; + } + + spin_lock_irqsave(&data_lock, flags); + + dss_apply_mgr_lcd_config(mgr, config); + + dss_write_regs(); + dss_set_go_bits(); + + spin_unlock_irqrestore(&data_lock, flags); + + wait_pending_extra_info_updates(); + +out: + mutex_unlock(&apply_lock); +} + int dss_ovl_set_info(struct omap_overlay *ovl, struct omap_overlay_info *info) { diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c index 397d4eee11b..5b289c5f695 100644 --- a/drivers/video/omap2/dss/dispc.c +++ b/drivers/video/omap2/dss/dispc.c @@ -119,6 +119,97 @@ enum omap_color_component { DISPC_COLOR_COMPONENT_UV = 1 << 1, }; +enum mgr_reg_fields { + DISPC_MGR_FLD_ENABLE, + DISPC_MGR_FLD_STNTFT, + DISPC_MGR_FLD_GO, + DISPC_MGR_FLD_TFTDATALINES, + DISPC_MGR_FLD_STALLMODE, + DISPC_MGR_FLD_TCKENABLE, + DISPC_MGR_FLD_TCKSELECTION, + DISPC_MGR_FLD_CPR, + DISPC_MGR_FLD_FIFOHANDCHECK, + /* used to maintain a count of the above fields */ + DISPC_MGR_FLD_NUM, +}; + +static const struct { + const char *name; + u32 vsync_irq; + u32 framedone_irq; + u32 sync_lost_irq; + struct reg_field reg_desc[DISPC_MGR_FLD_NUM]; +} mgr_desc[] = { + [OMAP_DSS_CHANNEL_LCD] = { + .name = "LCD", + .vsync_irq = DISPC_IRQ_VSYNC, + .framedone_irq = DISPC_IRQ_FRAMEDONE, + .sync_lost_irq = DISPC_IRQ_SYNC_LOST, + .reg_desc = { + [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 0, 0 }, + [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL, 3, 3 }, + [DISPC_MGR_FLD_GO] = { DISPC_CONTROL, 5, 5 }, + [DISPC_MGR_FLD_TFTDATALINES] = { DISPC_CONTROL, 9, 8 }, + [DISPC_MGR_FLD_STALLMODE] = { DISPC_CONTROL, 11, 11 }, + [DISPC_MGR_FLD_TCKENABLE] = { DISPC_CONFIG, 10, 10 }, + [DISPC_MGR_FLD_TCKSELECTION] = { DISPC_CONFIG, 11, 11 }, + [DISPC_MGR_FLD_CPR] = { DISPC_CONFIG, 15, 15 }, + [DISPC_MGR_FLD_FIFOHANDCHECK] = { DISPC_CONFIG, 16, 16 }, + }, + }, + [OMAP_DSS_CHANNEL_DIGIT] = { + .name = "DIGIT", + .vsync_irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN, + .framedone_irq = 0, + .sync_lost_irq = DISPC_IRQ_SYNC_LOST_DIGIT, + .reg_desc = { + [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 1, 1 }, + [DISPC_MGR_FLD_STNTFT] = { }, + [DISPC_MGR_FLD_GO] = { DISPC_CONTROL, 6, 6 }, + [DISPC_MGR_FLD_TFTDATALINES] = { }, + [DISPC_MGR_FLD_STALLMODE] = { }, + [DISPC_MGR_FLD_TCKENABLE] = { DISPC_CONFIG, 12, 12 }, + [DISPC_MGR_FLD_TCKSELECTION] = { DISPC_CONFIG, 13, 13 }, + [DISPC_MGR_FLD_CPR] = { }, + [DISPC_MGR_FLD_FIFOHANDCHECK] = { DISPC_CONFIG, 16, 16 }, + }, + }, + [OMAP_DSS_CHANNEL_LCD2] = { + .name = "LCD2", + .vsync_irq = DISPC_IRQ_VSYNC2, + .framedone_irq = DISPC_IRQ_FRAMEDONE2, + .sync_lost_irq = DISPC_IRQ_SYNC_LOST2, + .reg_desc = { + [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL2, 0, 0 }, + [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL2, 3, 3 }, + [DISPC_MGR_FLD_GO] = { DISPC_CONTROL2, 5, 5 }, + [DISPC_MGR_FLD_TFTDATALINES] = { DISPC_CONTROL2, 9, 8 }, + [DISPC_MGR_FLD_STALLMODE] = { DISPC_CONTROL2, 11, 11 }, + [DISPC_MGR_FLD_TCKENABLE] = { 
DISPC_CONFIG2, 10, 10 }, + [DISPC_MGR_FLD_TCKSELECTION] = { DISPC_CONFIG2, 11, 11 }, + [DISPC_MGR_FLD_CPR] = { DISPC_CONFIG2, 15, 15 }, + [DISPC_MGR_FLD_FIFOHANDCHECK] = { DISPC_CONFIG2, 16, 16 }, + }, + }, + [OMAP_DSS_CHANNEL_LCD3] = { + .name = "LCD3", + .vsync_irq = DISPC_IRQ_VSYNC3, + .framedone_irq = DISPC_IRQ_FRAMEDONE3, + .sync_lost_irq = DISPC_IRQ_SYNC_LOST3, + .reg_desc = { + [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL3, 0, 0 }, + [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL3, 3, 3 }, + [DISPC_MGR_FLD_GO] = { DISPC_CONTROL3, 5, 5 }, + [DISPC_MGR_FLD_TFTDATALINES] = { DISPC_CONTROL3, 9, 8 }, + [DISPC_MGR_FLD_STALLMODE] = { DISPC_CONTROL3, 11, 11 }, + [DISPC_MGR_FLD_TCKENABLE] = { DISPC_CONFIG3, 10, 10 }, + [DISPC_MGR_FLD_TCKSELECTION] = { DISPC_CONFIG3, 11, 11 }, + [DISPC_MGR_FLD_CPR] = { DISPC_CONFIG3, 15, 15 }, + [DISPC_MGR_FLD_FIFOHANDCHECK] = { DISPC_CONFIG3, 16, 16 }, + }, + }, +}; + static void _omap_dispc_set_irqs(void); static inline void dispc_write_reg(const u16 idx, u32 val) @@ -131,6 +222,18 @@ static inline u32 dispc_read_reg(const u16 idx) return __raw_readl(dispc.base + idx); } +static u32 mgr_fld_read(enum omap_channel channel, enum mgr_reg_fields regfld) +{ + const struct reg_field rfld = mgr_desc[channel].reg_desc[regfld]; + return REG_GET(rfld.reg, rfld.high, rfld.low); +} + +static void mgr_fld_write(enum omap_channel channel, + enum mgr_reg_fields regfld, int val) { + const struct reg_field rfld = mgr_desc[channel].reg_desc[regfld]; + REG_FLD_MOD(rfld.reg, val, rfld.high, rfld.low); +} + #define SR(reg) \ dispc.ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(DISPC_##reg) #define RR(reg) \ @@ -153,6 +256,10 @@ static void dispc_save_context(void) SR(CONTROL2); SR(CONFIG2); } + if (dss_has_feature(FEAT_MGR_LCD3)) { + SR(CONTROL3); + SR(CONFIG3); + } for (i = 0; i < dss_feat_get_num_mgrs(); i++) { SR(DEFAULT_COLOR(i)); @@ -266,6 +373,8 @@ static void dispc_restore_context(void) RR(GLOBAL_ALPHA); if (dss_has_feature(FEAT_MGR_LCD2)) RR(CONFIG2); + if (dss_has_feature(FEAT_MGR_LCD3)) + RR(CONFIG3); for (i = 0; i < dss_feat_get_num_mgrs(); i++) { RR(DEFAULT_COLOR(i)); @@ -351,6 +460,8 @@ static void dispc_restore_context(void) RR(CONTROL); if (dss_has_feature(FEAT_MGR_LCD2)) RR(CONTROL2); + if (dss_has_feature(FEAT_MGR_LCD3)) + RR(CONTROL3); /* clear spurious SYNC_LOST_DIGIT interrupts */ dispc_write_reg(DISPC_IRQSTATUS, DISPC_IRQ_SYNC_LOST_DIGIT); @@ -387,101 +498,41 @@ void dispc_runtime_put(void) WARN_ON(r < 0 && r != -ENOSYS); } -static inline bool dispc_mgr_is_lcd(enum omap_channel channel) -{ - if (channel == OMAP_DSS_CHANNEL_LCD || - channel == OMAP_DSS_CHANNEL_LCD2) - return true; - else - return false; -} - u32 dispc_mgr_get_vsync_irq(enum omap_channel channel) { - switch (channel) { - case OMAP_DSS_CHANNEL_LCD: - return DISPC_IRQ_VSYNC; - case OMAP_DSS_CHANNEL_LCD2: - return DISPC_IRQ_VSYNC2; - case OMAP_DSS_CHANNEL_DIGIT: - return DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN; - default: - BUG(); - return 0; - } + return mgr_desc[channel].vsync_irq; } u32 dispc_mgr_get_framedone_irq(enum omap_channel channel) { - switch (channel) { - case OMAP_DSS_CHANNEL_LCD: - return DISPC_IRQ_FRAMEDONE; - case OMAP_DSS_CHANNEL_LCD2: - return DISPC_IRQ_FRAMEDONE2; - case OMAP_DSS_CHANNEL_DIGIT: - return 0; - default: - BUG(); - return 0; - } + return mgr_desc[channel].framedone_irq; } bool dispc_mgr_go_busy(enum omap_channel channel) { - int bit; - - if (dispc_mgr_is_lcd(channel)) - bit = 5; /* GOLCD */ - else - bit = 6; /* GODIGIT */ - - if (channel == OMAP_DSS_CHANNEL_LCD2) - 
return REG_GET(DISPC_CONTROL2, bit, bit) == 1; - else - return REG_GET(DISPC_CONTROL, bit, bit) == 1; + return mgr_fld_read(channel, DISPC_MGR_FLD_GO) == 1; } void dispc_mgr_go(enum omap_channel channel) { - int bit; bool enable_bit, go_bit; - if (dispc_mgr_is_lcd(channel)) - bit = 0; /* LCDENABLE */ - else - bit = 1; /* DIGITALENABLE */ - /* if the channel is not enabled, we don't need GO */ - if (channel == OMAP_DSS_CHANNEL_LCD2) - enable_bit = REG_GET(DISPC_CONTROL2, bit, bit) == 1; - else - enable_bit = REG_GET(DISPC_CONTROL, bit, bit) == 1; + enable_bit = mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE) == 1; if (!enable_bit) return; - if (dispc_mgr_is_lcd(channel)) - bit = 5; /* GOLCD */ - else - bit = 6; /* GODIGIT */ - - if (channel == OMAP_DSS_CHANNEL_LCD2) - go_bit = REG_GET(DISPC_CONTROL2, bit, bit) == 1; - else - go_bit = REG_GET(DISPC_CONTROL, bit, bit) == 1; + go_bit = mgr_fld_read(channel, DISPC_MGR_FLD_GO) == 1; if (go_bit) { DSSERR("GO bit not down for channel %d\n", channel); return; } - DSSDBG("GO %s\n", channel == OMAP_DSS_CHANNEL_LCD ? "LCD" : - (channel == OMAP_DSS_CHANNEL_LCD2 ? "LCD2" : "DIGIT")); + DSSDBG("GO %s\n", mgr_desc[channel].name); - if (channel == OMAP_DSS_CHANNEL_LCD2) - REG_FLD_MOD(DISPC_CONTROL2, 1, bit, bit); - else - REG_FLD_MOD(DISPC_CONTROL, 1, bit, bit); + mgr_fld_write(channel, DISPC_MGR_FLD_GO, 1); } static void dispc_ovl_write_firh_reg(enum omap_plane plane, int reg, u32 value) @@ -832,6 +883,15 @@ void dispc_ovl_set_channel_out(enum omap_plane plane, enum omap_channel channel) chan = 0; chan2 = 1; break; + case OMAP_DSS_CHANNEL_LCD3: + if (dss_has_feature(FEAT_MGR_LCD3)) { + chan = 0; + chan2 = 2; + } else { + BUG(); + return; + } + break; default: BUG(); return; @@ -867,7 +927,14 @@ static enum omap_channel dispc_ovl_get_channel_out(enum omap_plane plane) val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane)); - if (dss_has_feature(FEAT_MGR_LCD2)) { + if (dss_has_feature(FEAT_MGR_LCD3)) { + if (FLD_GET(val, 31, 30) == 0) + channel = FLD_GET(val, shift, shift); + else if (FLD_GET(val, 31, 30) == 1) + channel = OMAP_DSS_CHANNEL_LCD2; + else + channel = OMAP_DSS_CHANNEL_LCD3; + } else if (dss_has_feature(FEAT_MGR_LCD2)) { if (FLD_GET(val, 31, 30) == 0) channel = FLD_GET(val, shift, shift); else @@ -922,16 +989,10 @@ void dispc_enable_gamma_table(bool enable) static void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable) { - u16 reg; - - if (channel == OMAP_DSS_CHANNEL_LCD) - reg = DISPC_CONFIG; - else if (channel == OMAP_DSS_CHANNEL_LCD2) - reg = DISPC_CONFIG2; - else + if (channel == OMAP_DSS_CHANNEL_DIGIT) return; - REG_FLD_MOD(reg, enable, 15, 15); + mgr_fld_write(channel, DISPC_MGR_FLD_CPR, enable); } static void dispc_mgr_set_cpr_coef(enum omap_channel channel, @@ -939,7 +1000,7 @@ static void dispc_mgr_set_cpr_coef(enum omap_channel channel, { u32 coef_r, coef_g, coef_b; - if (!dispc_mgr_is_lcd(channel)) + if (!dss_mgr_is_lcd(channel)) return; coef_r = FLD_VAL(coefs->rr, 31, 22) | FLD_VAL(coefs->rg, 20, 11) | @@ -1798,7 +1859,7 @@ static int check_horiz_timing_omap3(enum omap_channel channel, nonactive = t->x_res + t->hfp + t->hsw + t->hbp - out_width; pclk = dispc_mgr_pclk_rate(channel); - if (dispc_mgr_is_lcd(channel)) + if (dss_mgr_is_lcd(channel)) lclk = dispc_mgr_lclk_rate(channel); else lclk = dispc_fclk_rate(); @@ -2086,8 +2147,7 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane, } int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi, - bool ilace, bool replication, - const struct omap_video_timings 
*mgr_timings) + bool replication, const struct omap_video_timings *mgr_timings) { struct omap_overlay *ovl = omap_dss_get_overlay(plane); bool five_taps = true; @@ -2103,6 +2163,7 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi, u16 out_width, out_height; enum omap_channel channel; int x_predecim = 1, y_predecim = 1; + bool ilace = mgr_timings->interlace; channel = dispc_ovl_get_channel_out(plane); @@ -2254,14 +2315,9 @@ static void dispc_disable_isr(void *data, u32 mask) static void _enable_lcd_out(enum omap_channel channel, bool enable) { - if (channel == OMAP_DSS_CHANNEL_LCD2) { - REG_FLD_MOD(DISPC_CONTROL2, enable ? 1 : 0, 0, 0); - /* flush posted write */ - dispc_read_reg(DISPC_CONTROL2); - } else { - REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 0, 0); - dispc_read_reg(DISPC_CONTROL); - } + mgr_fld_write(channel, DISPC_MGR_FLD_ENABLE, enable); + /* flush posted write */ + mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE); } static void dispc_mgr_enable_lcd_out(enum omap_channel channel, bool enable) @@ -2274,12 +2330,9 @@ static void dispc_mgr_enable_lcd_out(enum omap_channel channel, bool enable) /* When we disable LCD output, we need to wait until frame is done. * Otherwise the DSS is still working, and turning off the clocks * prevents DSS from going to OFF mode */ - is_on = channel == OMAP_DSS_CHANNEL_LCD2 ? - REG_GET(DISPC_CONTROL2, 0, 0) : - REG_GET(DISPC_CONTROL, 0, 0); + is_on = mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE); - irq = channel == OMAP_DSS_CHANNEL_LCD2 ? DISPC_IRQ_FRAMEDONE2 : - DISPC_IRQ_FRAMEDONE; + irq = mgr_desc[channel].framedone_irq; if (!enable && is_on) { init_completion(&frame_done_completion); @@ -2384,21 +2437,12 @@ static void dispc_mgr_enable_digit_out(bool enable) bool dispc_mgr_is_enabled(enum omap_channel channel) { - if (channel == OMAP_DSS_CHANNEL_LCD) - return !!REG_GET(DISPC_CONTROL, 0, 0); - else if (channel == OMAP_DSS_CHANNEL_DIGIT) - return !!REG_GET(DISPC_CONTROL, 1, 1); - else if (channel == OMAP_DSS_CHANNEL_LCD2) - return !!REG_GET(DISPC_CONTROL2, 0, 0); - else { - BUG(); - return false; - } + return !!mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE); } void dispc_mgr_enable(enum omap_channel channel, bool enable) { - if (dispc_mgr_is_lcd(channel)) + if (dss_mgr_is_lcd(channel)) dispc_mgr_enable_lcd_out(channel, enable); else if (channel == OMAP_DSS_CHANNEL_DIGIT) dispc_mgr_enable_digit_out(enable); @@ -2432,36 +2476,13 @@ void dispc_pck_free_enable(bool enable) void dispc_mgr_enable_fifohandcheck(enum omap_channel channel, bool enable) { - if (channel == OMAP_DSS_CHANNEL_LCD2) - REG_FLD_MOD(DISPC_CONFIG2, enable ? 1 : 0, 16, 16); - else - REG_FLD_MOD(DISPC_CONFIG, enable ? 
1 : 0, 16, 16); + mgr_fld_write(channel, DISPC_MGR_FLD_FIFOHANDCHECK, enable); } -void dispc_mgr_set_lcd_display_type(enum omap_channel channel, - enum omap_lcd_display_type type) +void dispc_mgr_set_lcd_type_tft(enum omap_channel channel) { - int mode; - - switch (type) { - case OMAP_DSS_LCD_DISPLAY_STN: - mode = 0; - break; - - case OMAP_DSS_LCD_DISPLAY_TFT: - mode = 1; - break; - - default: - BUG(); - return; - } - - if (channel == OMAP_DSS_CHANNEL_LCD2) - REG_FLD_MOD(DISPC_CONTROL2, mode, 3, 3); - else - REG_FLD_MOD(DISPC_CONTROL, mode, 3, 3); + mgr_fld_write(channel, DISPC_MGR_FLD_STNTFT, 1); } void dispc_set_loadmode(enum omap_dss_load_mode mode) @@ -2479,24 +2500,14 @@ static void dispc_mgr_set_trans_key(enum omap_channel ch, enum omap_dss_trans_key_type type, u32 trans_key) { - if (ch == OMAP_DSS_CHANNEL_LCD) - REG_FLD_MOD(DISPC_CONFIG, type, 11, 11); - else if (ch == OMAP_DSS_CHANNEL_DIGIT) - REG_FLD_MOD(DISPC_CONFIG, type, 13, 13); - else /* OMAP_DSS_CHANNEL_LCD2 */ - REG_FLD_MOD(DISPC_CONFIG2, type, 11, 11); + mgr_fld_write(ch, DISPC_MGR_FLD_TCKSELECTION, type); dispc_write_reg(DISPC_TRANS_COLOR(ch), trans_key); } static void dispc_mgr_enable_trans_key(enum omap_channel ch, bool enable) { - if (ch == OMAP_DSS_CHANNEL_LCD) - REG_FLD_MOD(DISPC_CONFIG, enable, 10, 10); - else if (ch == OMAP_DSS_CHANNEL_DIGIT) - REG_FLD_MOD(DISPC_CONFIG, enable, 12, 12); - else /* OMAP_DSS_CHANNEL_LCD2 */ - REG_FLD_MOD(DISPC_CONFIG2, enable, 10, 10); + mgr_fld_write(ch, DISPC_MGR_FLD_TCKENABLE, enable); } static void dispc_mgr_enable_alpha_fixed_zorder(enum omap_channel ch, @@ -2547,10 +2558,7 @@ void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines) return; } - if (channel == OMAP_DSS_CHANNEL_LCD2) - REG_FLD_MOD(DISPC_CONTROL2, code, 9, 8); - else - REG_FLD_MOD(DISPC_CONTROL, code, 9, 8); + mgr_fld_write(channel, DISPC_MGR_FLD_TFTDATALINES, code); } void dispc_mgr_set_io_pad_mode(enum dss_io_pad_mode mode) @@ -2584,10 +2592,7 @@ void dispc_mgr_set_io_pad_mode(enum dss_io_pad_mode mode) void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable) { - if (channel == OMAP_DSS_CHANNEL_LCD2) - REG_FLD_MOD(DISPC_CONTROL2, enable, 11, 11); - else - REG_FLD_MOD(DISPC_CONTROL, enable, 11, 11); + mgr_fld_write(channel, DISPC_MGR_FLD_STALLMODE, enable); } static bool _dispc_mgr_size_ok(u16 width, u16 height) @@ -2627,7 +2632,7 @@ bool dispc_mgr_timings_ok(enum omap_channel channel, timings_ok = _dispc_mgr_size_ok(timings->x_res, timings->y_res); - if (dispc_mgr_is_lcd(channel)) + if (dss_mgr_is_lcd(channel)) timings_ok = timings_ok && _dispc_lcd_timings_ok(timings->hsw, timings->hfp, timings->hbp, timings->vsw, timings->vfp, @@ -2637,9 +2642,16 @@ bool dispc_mgr_timings_ok(enum omap_channel channel, } static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw, - int hfp, int hbp, int vsw, int vfp, int vbp) + int hfp, int hbp, int vsw, int vfp, int vbp, + enum omap_dss_signal_level vsync_level, + enum omap_dss_signal_level hsync_level, + enum omap_dss_signal_edge data_pclk_edge, + enum omap_dss_signal_level de_level, + enum omap_dss_signal_edge sync_pclk_edge) + { - u32 timing_h, timing_v; + u32 timing_h, timing_v, l; + bool onoff, rf, ipc; if (cpu_is_omap24xx() || omap_rev() < OMAP3430_REV_ES3_0) { timing_h = FLD_VAL(hsw-1, 5, 0) | FLD_VAL(hfp-1, 15, 8) | @@ -2657,6 +2669,44 @@ static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw, dispc_write_reg(DISPC_TIMING_H(channel), timing_h); dispc_write_reg(DISPC_TIMING_V(channel), timing_v); + + 
switch (data_pclk_edge) { + case OMAPDSS_DRIVE_SIG_RISING_EDGE: + ipc = false; + break; + case OMAPDSS_DRIVE_SIG_FALLING_EDGE: + ipc = true; + break; + case OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES: + default: + BUG(); + } + + switch (sync_pclk_edge) { + case OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES: + onoff = false; + rf = false; + break; + case OMAPDSS_DRIVE_SIG_FALLING_EDGE: + onoff = true; + rf = false; + break; + case OMAPDSS_DRIVE_SIG_RISING_EDGE: + onoff = true; + rf = true; + break; + default: + BUG(); + } + + l = dispc_read_reg(DISPC_POL_FREQ(channel)); + l |= FLD_VAL(onoff, 17, 17); + l |= FLD_VAL(rf, 16, 16); + l |= FLD_VAL(de_level, 15, 15); + l |= FLD_VAL(ipc, 14, 14); + l |= FLD_VAL(hsync_level, 13, 13); + l |= FLD_VAL(vsync_level, 12, 12); + dispc_write_reg(DISPC_POL_FREQ(channel), l); } /* change name to mode? */ @@ -2674,9 +2724,10 @@ void dispc_mgr_set_timings(enum omap_channel channel, return; } - if (dispc_mgr_is_lcd(channel)) { + if (dss_mgr_is_lcd(channel)) { _dispc_mgr_set_lcd_timings(channel, t.hsw, t.hfp, t.hbp, t.vsw, - t.vfp, t.vbp); + t.vfp, t.vbp, t.vsync_level, t.hsync_level, + t.data_pclk_edge, t.de_level, t.sync_pclk_edge); xtot = t.x_res + t.hfp + t.hsw + t.hbp; ytot = t.y_res + t.vfp + t.vsw + t.vbp; @@ -2687,14 +2738,13 @@ void dispc_mgr_set_timings(enum omap_channel channel, DSSDBG("pck %u\n", timings->pixel_clock); DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n", t.hsw, t.hfp, t.hbp, t.vsw, t.vfp, t.vbp); + DSSDBG("vsync_level %d hsync_level %d data_pclk_edge %d de_level %d sync_pclk_edge %d\n", + t.vsync_level, t.hsync_level, t.data_pclk_edge, + t.de_level, t.sync_pclk_edge); DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt); } else { - enum dss_hdmi_venc_clk_source_select source; - - source = dss_get_hdmi_venc_clk_source(); - - if (source == DSS_VENC_TV_CLK) + if (t.interlace) t.y_res /= 2; } @@ -2780,7 +2830,7 @@ unsigned long dispc_mgr_pclk_rate(enum omap_channel channel) { unsigned long r; - if (dispc_mgr_is_lcd(channel)) { + if (dss_mgr_is_lcd(channel)) { int pcd; u32 l; @@ -2821,12 +2871,32 @@ unsigned long dispc_core_clk_rate(void) return fclk / lcd; } -void dispc_dump_clocks(struct seq_file *s) +static void dispc_dump_clocks_channel(struct seq_file *s, enum omap_channel channel) { int lcd, pcd; + enum omap_dss_clk_source lcd_clk_src; + + seq_printf(s, "- %s -\n", mgr_desc[channel].name); + + lcd_clk_src = dss_get_lcd_clk_source(channel); + + seq_printf(s, "%s clk source = %s (%s)\n", mgr_desc[channel].name, + dss_get_generic_clk_source_name(lcd_clk_src), + dss_feat_get_clk_source_name(lcd_clk_src)); + + dispc_mgr_get_lcd_divisor(channel, &lcd, &pcd); + + seq_printf(s, "lck\t\t%-16lulck div\t%u\n", + dispc_mgr_lclk_rate(channel), lcd); + seq_printf(s, "pck\t\t%-16lupck div\t%u\n", + dispc_mgr_pclk_rate(channel), pcd); +} + +void dispc_dump_clocks(struct seq_file *s) +{ + int lcd; u32 l; enum omap_dss_clk_source dispc_clk_src = dss_get_dispc_clk_source(); - enum omap_dss_clk_source lcd_clk_src; if (dispc_runtime_get()) return; @@ -2847,36 +2917,13 @@ void dispc_dump_clocks(struct seq_file *s) seq_printf(s, "lck\t\t%-16lulck div\t%u\n", (dispc_fclk_rate()/lcd), lcd); } - seq_printf(s, "- LCD1 -\n"); - - lcd_clk_src = dss_get_lcd_clk_source(OMAP_DSS_CHANNEL_LCD); - - seq_printf(s, "lcd1_clk source = %s (%s)\n", - dss_get_generic_clk_source_name(lcd_clk_src), - dss_feat_get_clk_source_name(lcd_clk_src)); - - dispc_mgr_get_lcd_divisor(OMAP_DSS_CHANNEL_LCD, &lcd, &pcd); - - seq_printf(s, "lck\t\t%-16lulck div\t%u\n", - 
dispc_mgr_lclk_rate(OMAP_DSS_CHANNEL_LCD), lcd); - seq_printf(s, "pck\t\t%-16lupck div\t%u\n", - dispc_mgr_pclk_rate(OMAP_DSS_CHANNEL_LCD), pcd); - if (dss_has_feature(FEAT_MGR_LCD2)) { - seq_printf(s, "- LCD2 -\n"); - - lcd_clk_src = dss_get_lcd_clk_source(OMAP_DSS_CHANNEL_LCD2); - seq_printf(s, "lcd2_clk source = %s (%s)\n", - dss_get_generic_clk_source_name(lcd_clk_src), - dss_feat_get_clk_source_name(lcd_clk_src)); + dispc_dump_clocks_channel(s, OMAP_DSS_CHANNEL_LCD); - dispc_mgr_get_lcd_divisor(OMAP_DSS_CHANNEL_LCD2, &lcd, &pcd); - - seq_printf(s, "lck\t\t%-16lulck div\t%u\n", - dispc_mgr_lclk_rate(OMAP_DSS_CHANNEL_LCD2), lcd); - seq_printf(s, "pck\t\t%-16lupck div\t%u\n", - dispc_mgr_pclk_rate(OMAP_DSS_CHANNEL_LCD2), pcd); - } + if (dss_has_feature(FEAT_MGR_LCD2)) + dispc_dump_clocks_channel(s, OMAP_DSS_CHANNEL_LCD2); + if (dss_has_feature(FEAT_MGR_LCD3)) + dispc_dump_clocks_channel(s, OMAP_DSS_CHANNEL_LCD3); dispc_runtime_put(); } @@ -2929,6 +2976,12 @@ void dispc_dump_irqs(struct seq_file *s) PIS(ACBIAS_COUNT_STAT2); PIS(SYNC_LOST2); } + if (dss_has_feature(FEAT_MGR_LCD3)) { + PIS(FRAMEDONE3); + PIS(VSYNC3); + PIS(ACBIAS_COUNT_STAT3); + PIS(SYNC_LOST3); + } #undef PIS } #endif @@ -2940,6 +2993,7 @@ static void dispc_dump_regs(struct seq_file *s) [OMAP_DSS_CHANNEL_LCD] = "LCD", [OMAP_DSS_CHANNEL_DIGIT] = "TV", [OMAP_DSS_CHANNEL_LCD2] = "LCD2", + [OMAP_DSS_CHANNEL_LCD3] = "LCD3", }; const char *ovl_names[] = { [OMAP_DSS_GFX] = "GFX", @@ -2972,6 +3026,10 @@ static void dispc_dump_regs(struct seq_file *s) DUMPREG(DISPC_CONTROL2); DUMPREG(DISPC_CONFIG2); } + if (dss_has_feature(FEAT_MGR_LCD3)) { + DUMPREG(DISPC_CONTROL3); + DUMPREG(DISPC_CONFIG3); + } #undef DUMPREG @@ -3093,41 +3151,8 @@ static void dispc_dump_regs(struct seq_file *s) #undef DUMPREG } -static void _dispc_mgr_set_pol_freq(enum omap_channel channel, bool onoff, - bool rf, bool ieo, bool ipc, bool ihs, bool ivs, u8 acbi, - u8 acb) -{ - u32 l = 0; - - DSSDBG("onoff %d rf %d ieo %d ipc %d ihs %d ivs %d acbi %d acb %d\n", - onoff, rf, ieo, ipc, ihs, ivs, acbi, acb); - - l |= FLD_VAL(onoff, 17, 17); - l |= FLD_VAL(rf, 16, 16); - l |= FLD_VAL(ieo, 15, 15); - l |= FLD_VAL(ipc, 14, 14); - l |= FLD_VAL(ihs, 13, 13); - l |= FLD_VAL(ivs, 12, 12); - l |= FLD_VAL(acbi, 11, 8); - l |= FLD_VAL(acb, 7, 0); - - dispc_write_reg(DISPC_POL_FREQ(channel), l); -} - -void dispc_mgr_set_pol_freq(enum omap_channel channel, - enum omap_panel_config config, u8 acbi, u8 acb) -{ - _dispc_mgr_set_pol_freq(channel, (config & OMAP_DSS_LCD_ONOFF) != 0, - (config & OMAP_DSS_LCD_RF) != 0, - (config & OMAP_DSS_LCD_IEO) != 0, - (config & OMAP_DSS_LCD_IPC) != 0, - (config & OMAP_DSS_LCD_IHS) != 0, - (config & OMAP_DSS_LCD_IVS) != 0, - acbi, acb); -} - /* with fck as input clock rate, find dispc dividers that produce req_pck */ -void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck, +void dispc_find_clk_divs(unsigned long req_pck, unsigned long fck, struct dispc_clock_info *cinfo) { u16 pcd_min, pcd_max; @@ -3138,9 +3163,6 @@ void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck, pcd_min = dss_feat_get_param_min(FEAT_PARAM_DSS_PCD); pcd_max = dss_feat_get_param_max(FEAT_PARAM_DSS_PCD); - if (!is_tft) - pcd_min = 3; - best_pck = 0; best_ld = 0; best_pd = 0; @@ -3192,15 +3214,13 @@ int dispc_calc_clock_rates(unsigned long dispc_fclk_rate, return 0; } -int dispc_mgr_set_clock_div(enum omap_channel channel, +void dispc_mgr_set_clock_div(enum omap_channel channel, struct dispc_clock_info *cinfo) { DSSDBG("lck = %lu 
(%u)\n", cinfo->lck, cinfo->lck_div); DSSDBG("pck = %lu (%u)\n", cinfo->pck, cinfo->pck_div); dispc_mgr_set_lcd_divisor(channel, cinfo->lck_div, cinfo->pck_div); - - return 0; } int dispc_mgr_get_clock_div(enum omap_channel channel, @@ -3354,6 +3374,8 @@ static void print_irq_status(u32 status) PIS(SYNC_LOST_DIGIT); if (dss_has_feature(FEAT_MGR_LCD2)) PIS(SYNC_LOST2); + if (dss_has_feature(FEAT_MGR_LCD3)) + PIS(SYNC_LOST3); #undef PIS printk("\n"); @@ -3450,12 +3472,6 @@ static void dispc_error_worker(struct work_struct *work) DISPC_IRQ_VID3_FIFO_UNDERFLOW, }; - static const unsigned sync_lost_bits[] = { - DISPC_IRQ_SYNC_LOST, - DISPC_IRQ_SYNC_LOST_DIGIT, - DISPC_IRQ_SYNC_LOST2, - }; - spin_lock_irqsave(&dispc.irq_lock, flags); errors = dispc.error_irqs; dispc.error_irqs = 0; @@ -3484,7 +3500,7 @@ static void dispc_error_worker(struct work_struct *work) unsigned bit; mgr = omap_dss_get_overlay_manager(i); - bit = sync_lost_bits[i]; + bit = mgr_desc[i].sync_lost_irq; if (bit & errors) { struct omap_dss_device *dssdev = mgr->device; @@ -3603,6 +3619,8 @@ static void _omap_dispc_initialize_irq(void) dispc.irq_error_mask = DISPC_IRQ_MASK_ERROR; if (dss_has_feature(FEAT_MGR_LCD2)) dispc.irq_error_mask |= DISPC_IRQ_SYNC_LOST2; + if (dss_has_feature(FEAT_MGR_LCD3)) + dispc.irq_error_mask |= DISPC_IRQ_SYNC_LOST3; if (dss_feat_get_num_ovls() > 3) dispc.irq_error_mask |= DISPC_IRQ_VID3_FIFO_UNDERFLOW; diff --git a/drivers/video/omap2/dss/dispc.h b/drivers/video/omap2/dss/dispc.h index f278080e106..92d8a9be86f 100644 --- a/drivers/video/omap2/dss/dispc.h +++ b/drivers/video/omap2/dss/dispc.h @@ -36,6 +36,8 @@ #define DISPC_CONTROL2 0x0238 #define DISPC_CONFIG2 0x0620 #define DISPC_DIVISOR 0x0804 +#define DISPC_CONTROL3 0x0848 +#define DISPC_CONFIG3 0x084C /* DISPC overlay registers */ #define DISPC_OVL_BA0(n) (DISPC_OVL_BASE(n) + \ @@ -118,6 +120,8 @@ static inline u16 DISPC_DEFAULT_COLOR(enum omap_channel channel) return 0x0050; case OMAP_DSS_CHANNEL_LCD2: return 0x03AC; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0814; default: BUG(); return 0; @@ -133,6 +137,8 @@ static inline u16 DISPC_TRANS_COLOR(enum omap_channel channel) return 0x0058; case OMAP_DSS_CHANNEL_LCD2: return 0x03B0; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0818; default: BUG(); return 0; @@ -149,6 +155,8 @@ static inline u16 DISPC_TIMING_H(enum omap_channel channel) return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x0400; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0840; default: BUG(); return 0; @@ -165,6 +173,8 @@ static inline u16 DISPC_TIMING_V(enum omap_channel channel) return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x0404; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0844; default: BUG(); return 0; @@ -181,6 +191,8 @@ static inline u16 DISPC_POL_FREQ(enum omap_channel channel) return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x0408; + case OMAP_DSS_CHANNEL_LCD3: + return 0x083C; default: BUG(); return 0; @@ -197,6 +209,8 @@ static inline u16 DISPC_DIVISORo(enum omap_channel channel) return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x040C; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0838; default: BUG(); return 0; @@ -213,6 +227,8 @@ static inline u16 DISPC_SIZE_MGR(enum omap_channel channel) return 0x0078; case OMAP_DSS_CHANNEL_LCD2: return 0x03CC; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0834; default: BUG(); return 0; @@ -229,6 +245,8 @@ static inline u16 DISPC_DATA_CYCLE1(enum omap_channel channel) return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x03C0; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0828; default: BUG(); return 0; @@ -245,6 +263,8 @@ static 
inline u16 DISPC_DATA_CYCLE2(enum omap_channel channel) return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x03C4; + case OMAP_DSS_CHANNEL_LCD3: + return 0x082C; default: BUG(); return 0; @@ -261,6 +281,8 @@ static inline u16 DISPC_DATA_CYCLE3(enum omap_channel channel) return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x03C8; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0830; default: BUG(); return 0; @@ -277,6 +299,8 @@ static inline u16 DISPC_CPR_COEF_R(enum omap_channel channel) return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x03BC; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0824; default: BUG(); return 0; @@ -293,6 +317,8 @@ static inline u16 DISPC_CPR_COEF_G(enum omap_channel channel) return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x03B8; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0820; default: BUG(); return 0; @@ -309,6 +335,8 @@ static inline u16 DISPC_CPR_COEF_B(enum omap_channel channel) return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x03B4; + case OMAP_DSS_CHANNEL_LCD3: + return 0x081C; default: BUG(); return 0; diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c index 24901063037..5bd957e8550 100644 --- a/drivers/video/omap2/dss/display.c +++ b/drivers/video/omap2/dss/display.c @@ -116,7 +116,7 @@ static ssize_t display_timings_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct omap_dss_device *dssdev = to_dss_device(dev); - struct omap_video_timings t; + struct omap_video_timings t = dssdev->panel.timings; int r, found; if (!dssdev->driver->set_timings || !dssdev->driver->check_timings) @@ -316,44 +316,6 @@ void omapdss_default_get_timings(struct omap_dss_device *dssdev, } EXPORT_SYMBOL(omapdss_default_get_timings); -/* Checks if replication logic should be used. Only use for active matrix, - * when overlay is in RGB12U or RGB16 mode, and LCD interface is - * 18bpp or 24bpp */ -bool dss_use_replication(struct omap_dss_device *dssdev, - enum omap_color_mode mode) -{ - int bpp; - - if (mode != OMAP_DSS_COLOR_RGB12U && mode != OMAP_DSS_COLOR_RGB16) - return false; - - if (dssdev->type == OMAP_DISPLAY_TYPE_DPI && - (dssdev->panel.config & OMAP_DSS_LCD_TFT) == 0) - return false; - - switch (dssdev->type) { - case OMAP_DISPLAY_TYPE_DPI: - bpp = dssdev->phy.dpi.data_lines; - break; - case OMAP_DISPLAY_TYPE_HDMI: - case OMAP_DISPLAY_TYPE_VENC: - case OMAP_DISPLAY_TYPE_SDI: - bpp = 24; - break; - case OMAP_DISPLAY_TYPE_DBI: - bpp = dssdev->ctrl.pixel_size; - break; - case OMAP_DISPLAY_TYPE_DSI: - bpp = dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt); - break; - default: - BUG(); - return false; - } - - return bpp > 16; -} - void dss_init_device(struct platform_device *pdev, struct omap_dss_device *dssdev) { diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c index 8c2056c9537..3266be23fc0 100644 --- a/drivers/video/omap2/dss/dpi.c +++ b/drivers/video/omap2/dss/dpi.c @@ -38,6 +38,8 @@ static struct { struct regulator *vdds_dsi_reg; struct platform_device *dsidev; + + struct dss_lcd_mgr_config mgr_config; } dpi; static struct platform_device *dpi_get_dsidev(enum omap_dss_clk_source clk) @@ -64,7 +66,7 @@ static bool dpi_use_dsi_pll(struct omap_dss_device *dssdev) return false; } -static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft, +static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, unsigned long pck_req, unsigned long *fck, int *lck_div, int *pck_div) { @@ -72,8 +74,8 @@ static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft, struct dispc_clock_info dispc_cinfo; int r; - r 
= dsi_pll_calc_clock_div_pck(dpi.dsidev, is_tft, pck_req, - &dsi_cinfo, &dispc_cinfo); + r = dsi_pll_calc_clock_div_pck(dpi.dsidev, pck_req, &dsi_cinfo, + &dispc_cinfo); if (r) return r; @@ -83,11 +85,7 @@ static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft, dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src); - r = dispc_mgr_set_clock_div(dssdev->manager->id, &dispc_cinfo); - if (r) { - dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK); - return r; - } + dpi.mgr_config.clock_info = dispc_cinfo; *fck = dsi_cinfo.dsi_pll_hsdiv_dispc_clk; *lck_div = dispc_cinfo.lck_div; @@ -96,7 +94,7 @@ static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft, return 0; } -static int dpi_set_dispc_clk(struct omap_dss_device *dssdev, bool is_tft, +static int dpi_set_dispc_clk(struct omap_dss_device *dssdev, unsigned long pck_req, unsigned long *fck, int *lck_div, int *pck_div) { @@ -104,7 +102,7 @@ static int dpi_set_dispc_clk(struct omap_dss_device *dssdev, bool is_tft, struct dispc_clock_info dispc_cinfo; int r; - r = dss_calc_clock_div(is_tft, pck_req, &dss_cinfo, &dispc_cinfo); + r = dss_calc_clock_div(pck_req, &dss_cinfo, &dispc_cinfo); if (r) return r; @@ -112,9 +110,7 @@ static int dpi_set_dispc_clk(struct omap_dss_device *dssdev, bool is_tft, if (r) return r; - r = dispc_mgr_set_clock_div(dssdev->manager->id, &dispc_cinfo); - if (r) - return r; + dpi.mgr_config.clock_info = dispc_cinfo; *fck = dss_cinfo.fck; *lck_div = dispc_cinfo.lck_div; @@ -129,20 +125,14 @@ static int dpi_set_mode(struct omap_dss_device *dssdev) int lck_div = 0, pck_div = 0; unsigned long fck = 0; unsigned long pck; - bool is_tft; int r = 0; - dispc_mgr_set_pol_freq(dssdev->manager->id, dssdev->panel.config, - dssdev->panel.acbi, dssdev->panel.acb); - - is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0; - if (dpi_use_dsi_pll(dssdev)) - r = dpi_set_dsi_clk(dssdev, is_tft, t->pixel_clock * 1000, - &fck, &lck_div, &pck_div); + r = dpi_set_dsi_clk(dssdev, t->pixel_clock * 1000, &fck, + &lck_div, &pck_div); else - r = dpi_set_dispc_clk(dssdev, is_tft, t->pixel_clock * 1000, - &fck, &lck_div, &pck_div); + r = dpi_set_dispc_clk(dssdev, t->pixel_clock * 1000, &fck, + &lck_div, &pck_div); if (r) return r; @@ -161,19 +151,18 @@ static int dpi_set_mode(struct omap_dss_device *dssdev) return 0; } -static void dpi_basic_init(struct omap_dss_device *dssdev) +static void dpi_config_lcd_manager(struct omap_dss_device *dssdev) { - bool is_tft; + dpi.mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS; + + dpi.mgr_config.stallmode = false; + dpi.mgr_config.fifohandcheck = false; - is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0; + dpi.mgr_config.video_port_width = dssdev->phy.dpi.data_lines; - dispc_mgr_set_io_pad_mode(DSS_IO_PAD_MODE_BYPASS); - dispc_mgr_enable_stallmode(dssdev->manager->id, false); + dpi.mgr_config.lcden_sig_polarity = 0; - dispc_mgr_set_lcd_display_type(dssdev->manager->id, is_tft ? 
- OMAP_DSS_LCD_DISPLAY_TFT : OMAP_DSS_LCD_DISPLAY_STN); - dispc_mgr_set_tft_data_lines(dssdev->manager->id, - dssdev->phy.dpi.data_lines); + dss_mgr_set_lcd_config(dssdev->manager, &dpi.mgr_config); } int omapdss_dpi_display_enable(struct omap_dss_device *dssdev) @@ -206,8 +195,6 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev) if (r) goto err_get_dispc; - dpi_basic_init(dssdev); - if (dpi_use_dsi_pll(dssdev)) { r = dsi_runtime_get(dpi.dsidev); if (r) @@ -222,6 +209,8 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev) if (r) goto err_set_mode; + dpi_config_lcd_manager(dssdev); + mdelay(2); r = dss_mgr_enable(dssdev->manager); @@ -292,7 +281,6 @@ EXPORT_SYMBOL(dpi_set_timings); int dpi_check_timings(struct omap_dss_device *dssdev, struct omap_video_timings *timings) { - bool is_tft; int r; int lck_div, pck_div; unsigned long fck; @@ -305,11 +293,9 @@ int dpi_check_timings(struct omap_dss_device *dssdev, if (timings->pixel_clock == 0) return -EINVAL; - is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0; - if (dpi_use_dsi_pll(dssdev)) { struct dsi_clock_info dsi_cinfo; - r = dsi_pll_calc_clock_div_pck(dpi.dsidev, is_tft, + r = dsi_pll_calc_clock_div_pck(dpi.dsidev, timings->pixel_clock * 1000, &dsi_cinfo, &dispc_cinfo); @@ -319,7 +305,7 @@ int dpi_check_timings(struct omap_dss_device *dssdev, fck = dsi_cinfo.dsi_pll_hsdiv_dispc_clk; } else { struct dss_clock_info dss_cinfo; - r = dss_calc_clock_div(is_tft, timings->pixel_clock * 1000, + r = dss_calc_clock_div(timings->pixel_clock * 1000, &dss_cinfo, &dispc_cinfo); if (r) diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c index 14ce8cc079e..b07e8864f82 100644 --- a/drivers/video/omap2/dss/dsi.c +++ b/drivers/video/omap2/dss/dsi.c @@ -331,6 +331,8 @@ struct dsi_data { unsigned num_lanes_used; unsigned scp_clk_refcount; + + struct dss_lcd_mgr_config mgr_config; }; struct dsi_packet_sent_handler_data { @@ -1085,9 +1087,9 @@ static inline void dsi_enable_pll_clock(struct platform_device *dsidev, struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); if (enable) - clk_enable(dsi->sys_clk); + clk_prepare_enable(dsi->sys_clk); else - clk_disable(dsi->sys_clk); + clk_disable_unprepare(dsi->sys_clk); if (enable && dsi->pll_locked) { if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1) @@ -1316,7 +1318,7 @@ static int dsi_calc_clock_rates(struct platform_device *dsidev, return 0; } -int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, bool is_tft, +int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, unsigned long req_pck, struct dsi_clock_info *dsi_cinfo, struct dispc_clock_info *dispc_cinfo) { @@ -1335,8 +1337,8 @@ int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, bool is_tft, dsi->cache_cinfo.clkin == dss_sys_clk) { DSSDBG("DSI clock info found from cache\n"); *dsi_cinfo = dsi->cache_cinfo; - dispc_find_clk_divs(is_tft, req_pck, - dsi_cinfo->dsi_pll_hsdiv_dispc_clk, dispc_cinfo); + dispc_find_clk_divs(req_pck, dsi_cinfo->dsi_pll_hsdiv_dispc_clk, + dispc_cinfo); return 0; } @@ -1402,7 +1404,7 @@ retry: match = 1; - dispc_find_clk_divs(is_tft, req_pck, + dispc_find_clk_divs(req_pck, cur.dsi_pll_hsdiv_dispc_clk, &cur_dispc); @@ -3631,17 +3633,14 @@ static void dsi_config_vp_num_line_buffers(struct omap_dss_device *dssdev) static void dsi_config_vp_sync_events(struct omap_dss_device *dssdev) { struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); - int de_pol = dssdev->panel.dsi_vm_data.vp_de_pol; - int hsync_pol = 
dssdev->panel.dsi_vm_data.vp_hsync_pol; - int vsync_pol = dssdev->panel.dsi_vm_data.vp_vsync_pol; bool vsync_end = dssdev->panel.dsi_vm_data.vp_vsync_end; bool hsync_end = dssdev->panel.dsi_vm_data.vp_hsync_end; u32 r; r = dsi_read_reg(dsidev, DSI_CTRL); - r = FLD_MOD(r, de_pol, 9, 9); /* VP_DE_POL */ - r = FLD_MOD(r, hsync_pol, 10, 10); /* VP_HSYNC_POL */ - r = FLD_MOD(r, vsync_pol, 11, 11); /* VP_VSYNC_POL */ + r = FLD_MOD(r, 1, 9, 9); /* VP_DE_POL */ + r = FLD_MOD(r, 1, 10, 10); /* VP_HSYNC_POL */ + r = FLD_MOD(r, 1, 11, 11); /* VP_VSYNC_POL */ r = FLD_MOD(r, 1, 15, 15); /* VP_VSYNC_START */ r = FLD_MOD(r, vsync_end, 16, 16); /* VP_VSYNC_END */ r = FLD_MOD(r, 1, 17, 17); /* VP_HSYNC_START */ @@ -4340,52 +4339,101 @@ EXPORT_SYMBOL(omap_dsi_update); /* Display funcs */ +static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + struct dispc_clock_info dispc_cinfo; + int r; + unsigned long long fck; + + fck = dsi_get_pll_hsdiv_dispc_rate(dsidev); + + dispc_cinfo.lck_div = dssdev->clocks.dispc.channel.lck_div; + dispc_cinfo.pck_div = dssdev->clocks.dispc.channel.pck_div; + + r = dispc_calc_clock_rates(fck, &dispc_cinfo); + if (r) { + DSSERR("Failed to calc dispc clocks\n"); + return r; + } + + dsi->mgr_config.clock_info = dispc_cinfo; + + return 0; +} + static int dsi_display_init_dispc(struct omap_dss_device *dssdev) { + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + struct omap_video_timings timings; int r; + u32 irq = 0; if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_CMD_MODE) { u16 dw, dh; - u32 irq; - struct omap_video_timings timings = { - .hsw = 1, - .hfp = 1, - .hbp = 1, - .vsw = 1, - .vfp = 0, - .vbp = 0, - }; dssdev->driver->get_resolution(dssdev, &dw, &dh); + timings.x_res = dw; timings.y_res = dh; + timings.hsw = 1; + timings.hfp = 1; + timings.hbp = 1; + timings.vsw = 1; + timings.vfp = 0; + timings.vbp = 0; - irq = dssdev->manager->id == OMAP_DSS_CHANNEL_LCD ? 
- DISPC_IRQ_FRAMEDONE : DISPC_IRQ_FRAMEDONE2; + irq = dispc_mgr_get_framedone_irq(dssdev->manager->id); r = omap_dispc_register_isr(dsi_framedone_irq_callback, (void *) dssdev, irq); if (r) { DSSERR("can't get FRAMEDONE irq\n"); - return r; + goto err; } - dispc_mgr_enable_stallmode(dssdev->manager->id, true); - dispc_mgr_enable_fifohandcheck(dssdev->manager->id, 1); - - dss_mgr_set_timings(dssdev->manager, &timings); + dsi->mgr_config.stallmode = true; + dsi->mgr_config.fifohandcheck = true; } else { - dispc_mgr_enable_stallmode(dssdev->manager->id, false); - dispc_mgr_enable_fifohandcheck(dssdev->manager->id, 0); + timings = dssdev->panel.timings; - dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings); + dsi->mgr_config.stallmode = false; + dsi->mgr_config.fifohandcheck = false; } - dispc_mgr_set_lcd_display_type(dssdev->manager->id, - OMAP_DSS_LCD_DISPLAY_TFT); - dispc_mgr_set_tft_data_lines(dssdev->manager->id, - dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt)); + /* + * override interlace, logic level and edge related parameters in + * omap_video_timings with default values + */ + timings.interlace = false; + timings.hsync_level = OMAPDSS_SIG_ACTIVE_HIGH; + timings.vsync_level = OMAPDSS_SIG_ACTIVE_HIGH; + timings.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE; + timings.de_level = OMAPDSS_SIG_ACTIVE_HIGH; + timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES; + + dss_mgr_set_timings(dssdev->manager, &timings); + + r = dsi_configure_dispc_clocks(dssdev); + if (r) + goto err1; + + dsi->mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS; + dsi->mgr_config.video_port_width = + dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt); + dsi->mgr_config.lcden_sig_polarity = 0; + + dss_mgr_set_lcd_config(dssdev->manager, &dsi->mgr_config); + return 0; +err1: + if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_CMD_MODE) + omap_dispc_unregister_isr(dsi_framedone_irq_callback, + (void *) dssdev, irq); +err: + return r; } static void dsi_display_uninit_dispc(struct omap_dss_device *dssdev) @@ -4393,8 +4441,7 @@ static void dsi_display_uninit_dispc(struct omap_dss_device *dssdev) if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_CMD_MODE) { u32 irq; - irq = dssdev->manager->id == OMAP_DSS_CHANNEL_LCD ? 
- DISPC_IRQ_FRAMEDONE : DISPC_IRQ_FRAMEDONE2; + irq = dispc_mgr_get_framedone_irq(dssdev->manager->id); omap_dispc_unregister_isr(dsi_framedone_irq_callback, (void *) dssdev, irq); @@ -4426,33 +4473,6 @@ static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev) return 0; } -static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev) -{ - struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); - struct dispc_clock_info dispc_cinfo; - int r; - unsigned long long fck; - - fck = dsi_get_pll_hsdiv_dispc_rate(dsidev); - - dispc_cinfo.lck_div = dssdev->clocks.dispc.channel.lck_div; - dispc_cinfo.pck_div = dssdev->clocks.dispc.channel.pck_div; - - r = dispc_calc_clock_rates(fck, &dispc_cinfo); - if (r) { - DSSERR("Failed to calc dispc clocks\n"); - return r; - } - - r = dispc_mgr_set_clock_div(dssdev->manager->id, &dispc_cinfo); - if (r) { - DSSERR("Failed to set dispc clocks\n"); - return r; - } - - return 0; -} - static int dsi_display_init_dsi(struct omap_dss_device *dssdev) { struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); @@ -4474,10 +4494,6 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev) DSSDBG("PLL OK\n"); - r = dsi_configure_dispc_clocks(dssdev); - if (r) - goto err2; - r = dsi_cio_init(dssdev); if (r) goto err2; diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c index d2b57197b29..04b4586113e 100644 --- a/drivers/video/omap2/dss/dss.c +++ b/drivers/video/omap2/dss/dss.c @@ -388,7 +388,8 @@ void dss_select_lcd_clk_source(enum omap_channel channel, dsi_wait_pll_hsdiv_dispc_active(dsidev); break; case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC: - BUG_ON(channel != OMAP_DSS_CHANNEL_LCD2); + BUG_ON(channel != OMAP_DSS_CHANNEL_LCD2 && + channel != OMAP_DSS_CHANNEL_LCD3); b = 1; dsidev = dsi_get_dsidev_from_id(1); dsi_wait_pll_hsdiv_dispc_active(dsidev); @@ -398,10 +399,12 @@ void dss_select_lcd_clk_source(enum omap_channel channel, return; } - pos = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 12; + pos = channel == OMAP_DSS_CHANNEL_LCD ? 0 : + (channel == OMAP_DSS_CHANNEL_LCD2 ? 12 : 19); REG_FLD_MOD(DSS_CONTROL, b, pos, pos); /* LCDx_CLK_SWITCH */ - ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 1; + ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : + (channel == OMAP_DSS_CHANNEL_LCD2 ? 1 : 2); dss.lcd_clk_source[ix] = clk_src; } @@ -418,7 +421,8 @@ enum omap_dss_clk_source dss_get_dsi_clk_source(int dsi_module) enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel) { if (dss_has_feature(FEAT_LCD_CLK_SRC)) { - int ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 1; + int ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : + (channel == OMAP_DSS_CHANNEL_LCD2 ? 
1 : 2); return dss.lcd_clk_source[ix]; } else { /* LCD_CLK source is the same as DISPC_FCLK source for @@ -502,8 +506,7 @@ unsigned long dss_get_dpll4_rate(void) return 0; } -int dss_calc_clock_div(bool is_tft, unsigned long req_pck, - struct dss_clock_info *dss_cinfo, +int dss_calc_clock_div(unsigned long req_pck, struct dss_clock_info *dss_cinfo, struct dispc_clock_info *dispc_cinfo) { unsigned long prate; @@ -551,7 +554,7 @@ retry: fck = clk_get_rate(dss.dss_clk); fck_div = 1; - dispc_find_clk_divs(is_tft, req_pck, fck, &cur_dispc); + dispc_find_clk_divs(req_pck, fck, &cur_dispc); match = 1; best_dss.fck = fck; @@ -581,7 +584,7 @@ retry: match = 1; - dispc_find_clk_divs(is_tft, req_pck, fck, &cur_dispc); + dispc_find_clk_divs(req_pck, fck, &cur_dispc); if (abs(cur_dispc.pck - req_pck) < abs(best_dispc.pck - req_pck)) { diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h index dd1092ceaee..f67afe76f21 100644 --- a/drivers/video/omap2/dss/dss.h +++ b/drivers/video/omap2/dss/dss.h @@ -152,6 +152,25 @@ struct dsi_clock_info { u16 lp_clk_div; }; +struct reg_field { + u16 reg; + u8 high; + u8 low; +}; + +struct dss_lcd_mgr_config { + enum dss_io_pad_mode io_pad_mode; + + bool stallmode; + bool fifohandcheck; + + struct dispc_clock_info clock_info; + + int video_port_width; + + int lcden_sig_polarity; +}; + struct seq_file; struct platform_device; @@ -188,6 +207,8 @@ int dss_mgr_set_device(struct omap_overlay_manager *mgr, int dss_mgr_unset_device(struct omap_overlay_manager *mgr); void dss_mgr_set_timings(struct omap_overlay_manager *mgr, struct omap_video_timings *timings); +void dss_mgr_set_lcd_config(struct omap_overlay_manager *mgr, + const struct dss_lcd_mgr_config *config); const struct omap_video_timings *dss_mgr_get_timings(struct omap_overlay_manager *mgr); bool dss_ovl_is_enabled(struct omap_overlay *ovl); @@ -210,8 +231,6 @@ void dss_init_device(struct platform_device *pdev, struct omap_dss_device *dssdev); void dss_uninit_device(struct platform_device *pdev, struct omap_dss_device *dssdev); -bool dss_use_replication(struct omap_dss_device *dssdev, - enum omap_color_mode mode); /* manager */ int dss_init_overlay_managers(struct platform_device *pdev); @@ -223,8 +242,18 @@ int dss_mgr_check_timings(struct omap_overlay_manager *mgr, int dss_mgr_check(struct omap_overlay_manager *mgr, struct omap_overlay_manager_info *info, const struct omap_video_timings *mgr_timings, + const struct dss_lcd_mgr_config *config, struct omap_overlay_info **overlay_infos); +static inline bool dss_mgr_is_lcd(enum omap_channel id) +{ + if (id == OMAP_DSS_CHANNEL_LCD || id == OMAP_DSS_CHANNEL_LCD2 || + id == OMAP_DSS_CHANNEL_LCD3) + return true; + else + return false; +} + /* overlay */ void dss_init_overlays(struct platform_device *pdev); void dss_uninit_overlays(struct platform_device *pdev); @@ -234,6 +263,8 @@ int dss_ovl_simple_check(struct omap_overlay *ovl, const struct omap_overlay_info *info); int dss_ovl_check(struct omap_overlay *ovl, struct omap_overlay_info *info, const struct omap_video_timings *mgr_timings); +bool dss_ovl_use_replication(struct dss_lcd_mgr_config config, + enum omap_color_mode mode); /* DSS */ int dss_init_platform_driver(void) __init; @@ -268,8 +299,7 @@ unsigned long dss_get_dpll4_rate(void); int dss_calc_clock_rates(struct dss_clock_info *cinfo); int dss_set_clock_div(struct dss_clock_info *cinfo); int dss_get_clock_div(struct dss_clock_info *cinfo); -int dss_calc_clock_div(bool is_tft, unsigned long req_pck, - struct dss_clock_info *dss_cinfo, +int 
dss_calc_clock_div(unsigned long req_pck, struct dss_clock_info *dss_cinfo, struct dispc_clock_info *dispc_cinfo); /* SDI */ @@ -296,7 +326,7 @@ u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt); unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev); int dsi_pll_set_clock_div(struct platform_device *dsidev, struct dsi_clock_info *cinfo); -int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, bool is_tft, +int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, unsigned long req_pck, struct dsi_clock_info *cinfo, struct dispc_clock_info *dispc_cinfo); int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk, @@ -330,7 +360,7 @@ static inline int dsi_pll_set_clock_div(struct platform_device *dsidev, return -ENODEV; } static inline int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, - bool is_tft, unsigned long req_pck, + unsigned long req_pck, struct dsi_clock_info *dsi_cinfo, struct dispc_clock_info *dispc_cinfo) { @@ -387,7 +417,7 @@ void dispc_set_loadmode(enum omap_dss_load_mode mode); bool dispc_mgr_timings_ok(enum omap_channel channel, const struct omap_video_timings *timings); unsigned long dispc_fclk_rate(void); -void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck, +void dispc_find_clk_divs(unsigned long req_pck, unsigned long fck, struct dispc_clock_info *cinfo); int dispc_calc_clock_rates(unsigned long dispc_fclk_rate, struct dispc_clock_info *cinfo); @@ -398,8 +428,7 @@ void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane, u32 *fifo_low, u32 *fifo_high, bool use_fifomerge, bool manual_update); int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi, - bool ilace, bool replication, - const struct omap_video_timings *mgr_timings); + bool replication, const struct omap_video_timings *mgr_timings); int dispc_ovl_enable(enum omap_plane plane, bool enable); void dispc_ovl_set_channel_out(enum omap_plane plane, enum omap_channel channel); @@ -415,16 +444,13 @@ bool dispc_mgr_is_channel_enabled(enum omap_channel channel); void dispc_mgr_set_io_pad_mode(enum dss_io_pad_mode mode); void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable); void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines); -void dispc_mgr_set_lcd_display_type(enum omap_channel channel, - enum omap_lcd_display_type type); +void dispc_mgr_set_lcd_type_tft(enum omap_channel channel); void dispc_mgr_set_timings(enum omap_channel channel, struct omap_video_timings *timings); -void dispc_mgr_set_pol_freq(enum omap_channel channel, - enum omap_panel_config config, u8 acbi, u8 acb); unsigned long dispc_mgr_lclk_rate(enum omap_channel channel); unsigned long dispc_mgr_pclk_rate(enum omap_channel channel); unsigned long dispc_core_clk_rate(void); -int dispc_mgr_set_clock_div(enum omap_channel channel, +void dispc_mgr_set_clock_div(enum omap_channel channel, struct dispc_clock_info *cinfo); int dispc_mgr_get_clock_div(enum omap_channel channel, struct dispc_clock_info *cinfo); diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h index bdf469f080e..996ffcbfed5 100644 --- a/drivers/video/omap2/dss/dss_features.h +++ b/drivers/video/omap2/dss/dss_features.h @@ -24,9 +24,9 @@ #include "ti_hdmi.h" #endif -#define MAX_DSS_MANAGERS 3 +#define MAX_DSS_MANAGERS 4 #define MAX_DSS_OVERLAYS 4 -#define MAX_DSS_LCD_MANAGERS 2 +#define MAX_DSS_LCD_MANAGERS 3 #define MAX_NUM_DSI 2 /* DSS has feature id */ @@ -36,6 +36,7 @@ enum dss_feat_id { 
FEAT_PCKFREEENABLE, FEAT_FUNCGATED, FEAT_MGR_LCD2, + FEAT_MGR_LCD3, FEAT_LINEBUFFERSPLIT, FEAT_ROWREPEATENABLE, FEAT_RESIZECONF, diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c index 26a2430a702..060216fdc57 100644 --- a/drivers/video/omap2/dss/hdmi.c +++ b/drivers/video/omap2/dss/hdmi.c @@ -78,43 +78,214 @@ static struct { */ static const struct hdmi_config cea_timings[] = { -{ {640, 480, 25200, 96, 16, 48, 2, 10, 33, 0, 0, 0}, {1, HDMI_HDMI} }, -{ {720, 480, 27027, 62, 16, 60, 6, 9, 30, 0, 0, 0}, {2, HDMI_HDMI} }, -{ {1280, 720, 74250, 40, 110, 220, 5, 5, 20, 1, 1, 0}, {4, HDMI_HDMI} }, -{ {1920, 540, 74250, 44, 88, 148, 5, 2, 15, 1, 1, 1}, {5, HDMI_HDMI} }, -{ {1440, 240, 27027, 124, 38, 114, 3, 4, 15, 0, 0, 1}, {6, HDMI_HDMI} }, -{ {1920, 1080, 148500, 44, 88, 148, 5, 4, 36, 1, 1, 0}, {16, HDMI_HDMI} }, -{ {720, 576, 27000, 64, 12, 68, 5, 5, 39, 0, 0, 0}, {17, HDMI_HDMI} }, -{ {1280, 720, 74250, 40, 440, 220, 5, 5, 20, 1, 1, 0}, {19, HDMI_HDMI} }, -{ {1920, 540, 74250, 44, 528, 148, 5, 2, 15, 1, 1, 1}, {20, HDMI_HDMI} }, -{ {1440, 288, 27000, 126, 24, 138, 3, 2, 19, 0, 0, 1}, {21, HDMI_HDMI} }, -{ {1440, 576, 54000, 128, 24, 136, 5, 5, 39, 0, 0, 0}, {29, HDMI_HDMI} }, -{ {1920, 1080, 148500, 44, 528, 148, 5, 4, 36, 1, 1, 0}, {31, HDMI_HDMI} }, -{ {1920, 1080, 74250, 44, 638, 148, 5, 4, 36, 1, 1, 0}, {32, HDMI_HDMI} }, -{ {2880, 480, 108108, 248, 64, 240, 6, 9, 30, 0, 0, 0}, {35, HDMI_HDMI} }, -{ {2880, 576, 108000, 256, 48, 272, 5, 5, 39, 0, 0, 0}, {37, HDMI_HDMI} }, + { + { 640, 480, 25200, 96, 16, 48, 2, 10, 33, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 1, HDMI_HDMI }, + }, + { + { 720, 480, 27027, 62, 16, 60, 6, 9, 30, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 2, HDMI_HDMI }, + }, + { + { 1280, 720, 74250, 40, 110, 220, 5, 5, 20, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 4, HDMI_HDMI }, + }, + { + { 1920, 540, 74250, 44, 88, 148, 5, 2, 15, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + true, }, + { 5, HDMI_HDMI }, + }, + { + { 1440, 240, 27027, 124, 38, 114, 3, 4, 15, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, + true, }, + { 6, HDMI_HDMI }, + }, + { + { 1920, 1080, 148500, 44, 88, 148, 5, 4, 36, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 16, HDMI_HDMI }, + }, + { + { 720, 576, 27000, 64, 12, 68, 5, 5, 39, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 17, HDMI_HDMI }, + }, + { + { 1280, 720, 74250, 40, 440, 220, 5, 5, 20, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 19, HDMI_HDMI }, + }, + { + { 1920, 540, 74250, 44, 528, 148, 5, 2, 15, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + true, }, + { 20, HDMI_HDMI }, + }, + { + { 1440, 288, 27000, 126, 24, 138, 3, 2, 19, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, + true, }, + { 21, HDMI_HDMI }, + }, + { + { 1440, 576, 54000, 128, 24, 136, 5, 5, 39, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 29, HDMI_HDMI }, + }, + { + { 1920, 1080, 148500, 44, 528, 148, 5, 4, 36, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 31, HDMI_HDMI }, + }, + { + { 1920, 1080, 74250, 44, 638, 148, 5, 4, 36, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 32, HDMI_HDMI }, + }, + { + { 2880, 480, 108108, 248, 64, 240, 6, 9, 30, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 35, HDMI_HDMI }, + }, + { + { 2880, 576, 108000, 256, 48, 272, 5, 5, 39, + OMAPDSS_SIG_ACTIVE_LOW, 
OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 37, HDMI_HDMI }, + }, }; + static const struct hdmi_config vesa_timings[] = { /* VESA From Here */ -{ {640, 480, 25175, 96, 16, 48, 2 , 11, 31, 0, 0, 0}, {4, HDMI_DVI} }, -{ {800, 600, 40000, 128, 40, 88, 4 , 1, 23, 1, 1, 0}, {9, HDMI_DVI} }, -{ {848, 480, 33750, 112, 16, 112, 8 , 6, 23, 1, 1, 0}, {0xE, HDMI_DVI} }, -{ {1280, 768, 79500, 128, 64, 192, 7 , 3, 20, 1, 0, 0}, {0x17, HDMI_DVI} }, -{ {1280, 800, 83500, 128, 72, 200, 6 , 3, 22, 1, 0, 0}, {0x1C, HDMI_DVI} }, -{ {1360, 768, 85500, 112, 64, 256, 6 , 3, 18, 1, 1, 0}, {0x27, HDMI_DVI} }, -{ {1280, 960, 108000, 112, 96, 312, 3 , 1, 36, 1, 1, 0}, {0x20, HDMI_DVI} }, -{ {1280, 1024, 108000, 112, 48, 248, 3 , 1, 38, 1, 1, 0}, {0x23, HDMI_DVI} }, -{ {1024, 768, 65000, 136, 24, 160, 6, 3, 29, 0, 0, 0}, {0x10, HDMI_DVI} }, -{ {1400, 1050, 121750, 144, 88, 232, 4, 3, 32, 1, 0, 0}, {0x2A, HDMI_DVI} }, -{ {1440, 900, 106500, 152, 80, 232, 6, 3, 25, 1, 0, 0}, {0x2F, HDMI_DVI} }, -{ {1680, 1050, 146250, 176 , 104, 280, 6, 3, 30, 1, 0, 0}, {0x3A, HDMI_DVI} }, -{ {1366, 768, 85500, 143, 70, 213, 3, 3, 24, 1, 1, 0}, {0x51, HDMI_DVI} }, -{ {1920, 1080, 148500, 44, 148, 80, 5, 4, 36, 1, 1, 0}, {0x52, HDMI_DVI} }, -{ {1280, 768, 68250, 32, 48, 80, 7, 3, 12, 0, 1, 0}, {0x16, HDMI_DVI} }, -{ {1400, 1050, 101000, 32, 48, 80, 4, 3, 23, 0, 1, 0}, {0x29, HDMI_DVI} }, -{ {1680, 1050, 119000, 32, 48, 80, 6, 3, 21, 0, 1, 0}, {0x39, HDMI_DVI} }, -{ {1280, 800, 79500, 32, 48, 80, 6, 3, 14, 0, 1, 0}, {0x1B, HDMI_DVI} }, -{ {1280, 720, 74250, 40, 110, 220, 5, 5, 20, 1, 1, 0}, {0x55, HDMI_DVI} } + { + { 640, 480, 25175, 96, 16, 48, 2, 11, 31, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 4, HDMI_DVI }, + }, + { + { 800, 600, 40000, 128, 40, 88, 4, 1, 23, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 9, HDMI_DVI }, + }, + { + { 848, 480, 33750, 112, 16, 112, 8, 6, 23, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 0xE, HDMI_DVI }, + }, + { + { 1280, 768, 79500, 128, 64, 192, 7, 3, 20, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 0x17, HDMI_DVI }, + }, + { + { 1280, 800, 83500, 128, 72, 200, 6, 3, 22, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 0x1C, HDMI_DVI }, + }, + { + { 1360, 768, 85500, 112, 64, 256, 6, 3, 18, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 0x27, HDMI_DVI }, + }, + { + { 1280, 960, 108000, 112, 96, 312, 3, 1, 36, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 0x20, HDMI_DVI }, + }, + { + { 1280, 1024, 108000, 112, 48, 248, 3, 1, 38, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 0x23, HDMI_DVI }, + }, + { + { 1024, 768, 65000, 136, 24, 160, 6, 3, 29, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 0x10, HDMI_DVI }, + }, + { + { 1400, 1050, 121750, 144, 88, 232, 4, 3, 32, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 0x2A, HDMI_DVI }, + }, + { + { 1440, 900, 106500, 152, 80, 232, 6, 3, 25, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 0x2F, HDMI_DVI }, + }, + { + { 1680, 1050, 146250, 176 , 104, 280, 6, 3, 30, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 0x3A, HDMI_DVI }, + }, + { + { 1366, 768, 85500, 143, 70, 213, 3, 3, 24, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 0x51, HDMI_DVI }, + }, + { + { 1920, 1080, 148500, 44, 148, 80, 5, 4, 36, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 0x52, HDMI_DVI }, + }, + { + 
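+ /* The next four DVI entries are CVT reduced-blanking modes: the + * horizontal blanking is the fixed 160-pixel 48/32/80 pattern, which + * is why their pixel clocks are lower than the full-blanking modes + * above. */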
{ 1280, 768, 68250, 32, 48, 80, 7, 3, 12, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 0x16, HDMI_DVI }, + }, + { + { 1400, 1050, 101000, 32, 48, 80, 4, 3, 23, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 0x29, HDMI_DVI }, + }, + { + { 1680, 1050, 119000, 32, 48, 80, 6, 3, 21, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 0x39, HDMI_DVI }, + }, + { + { 1280, 800, 79500, 32, 48, 80, 6, 3, 14, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 0x1B, HDMI_DVI }, + }, + { + { 1280, 720, 74250, 40, 110, 220, 5, 5, 20, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 0x55, HDMI_DVI }, + }, }; static int hdmi_runtime_get(void) @@ -179,7 +350,7 @@ static const struct hdmi_config *hdmi_get_timings(void) } static bool hdmi_timings_compare(struct omap_video_timings *timing1, - const struct hdmi_video_timings *timing2) + const struct omap_video_timings *timing2) { int timing1_vsync, timing1_hsync, timing2_vsync, timing2_hsync; @@ -758,6 +929,7 @@ static int __init omapdss_hdmihw_probe(struct platform_device *pdev) hdmi.ip_data.core_av_offset = HDMI_CORE_AV; hdmi.ip_data.pll_offset = HDMI_PLLCTRL; hdmi.ip_data.phy_offset = HDMI_PHY; + mutex_init(&hdmi.ip_data.lock); hdmi_panel_init(); @@ -785,7 +957,7 @@ static int __exit omapdss_hdmihw_remove(struct platform_device *pdev) static int hdmi_runtime_suspend(struct device *dev) { - clk_disable(hdmi.sys_clk); + clk_disable_unprepare(hdmi.sys_clk); dispc_runtime_put(); @@ -800,7 +972,7 @@ static int hdmi_runtime_resume(struct device *dev) if (r < 0) return r; - clk_enable(hdmi.sys_clk); + clk_prepare_enable(hdmi.sys_clk); return 0; } diff --git a/drivers/video/omap2/dss/hdmi_panel.c b/drivers/video/omap2/dss/hdmi_panel.c index 1179e3c4b1c..e10844faadf 100644 --- a/drivers/video/omap2/dss/hdmi_panel.c +++ b/drivers/video/omap2/dss/hdmi_panel.c @@ -43,10 +43,11 @@ static int hdmi_panel_probe(struct omap_dss_device *dssdev) { DSSDBG("ENTER hdmi_panel_probe\n"); - dssdev->panel.config = OMAP_DSS_LCD_TFT | - OMAP_DSS_LCD_IVS | OMAP_DSS_LCD_IHS; - - dssdev->panel.timings = (struct omap_video_timings){640, 480, 25175, 96, 16, 48, 2 , 11, 31}; + dssdev->panel.timings = (struct omap_video_timings) + { 640, 480, 25175, 96, 16, 48, 2, 11, 31, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, + false, + }; DSSDBG("hdmi_panel_probe x_res= %d y_res = %d\n", dssdev->panel.timings.x_res, diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c index 0cbcde4c688..53710fadc82 100644 --- a/drivers/video/omap2/dss/manager.c +++ b/drivers/video/omap2/dss/manager.c @@ -500,16 +500,12 @@ static int dss_mgr_wait_for_vsync(struct omap_overlay_manager *mgr) if (r) return r; - if (mgr->device->type == OMAP_DISPLAY_TYPE_VENC) { + if (mgr->device->type == OMAP_DISPLAY_TYPE_VENC) irq = DISPC_IRQ_EVSYNC_ODD; - } else if (mgr->device->type == OMAP_DISPLAY_TYPE_HDMI) { + else if (mgr->device->type == OMAP_DISPLAY_TYPE_HDMI) irq = DISPC_IRQ_EVSYNC_EVEN; - } else { - if (mgr->id == OMAP_DSS_CHANNEL_LCD) - irq = DISPC_IRQ_VSYNC; - else - irq = DISPC_IRQ_VSYNC2; - } + else + irq = dispc_mgr_get_vsync_irq(mgr->id); r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout); @@ -545,6 +541,10 @@ int dss_init_overlay_managers(struct platform_device *pdev) mgr->name = "lcd2"; mgr->id = OMAP_DSS_CHANNEL_LCD2; break; + case 3: + mgr->name = "lcd3"; + mgr->id = OMAP_DSS_CHANNEL_LCD3; + break; } mgr->set_device = &dss_mgr_set_device; @@ -665,9 +665,40 @@ int 
dss_mgr_check_timings(struct omap_overlay_manager *mgr, return 0; } +static int dss_mgr_check_lcd_config(struct omap_overlay_manager *mgr, + const struct dss_lcd_mgr_config *config) +{ + struct dispc_clock_info cinfo = config->clock_info; + int dl = config->video_port_width; + bool stallmode = config->stallmode; + bool fifohandcheck = config->fifohandcheck; + + if (cinfo.lck_div < 1 || cinfo.lck_div > 255) + return -EINVAL; + + if (cinfo.pck_div < 1 || cinfo.pck_div > 255) + return -EINVAL; + + if (dl != 12 && dl != 16 && dl != 18 && dl != 24) + return -EINVAL; + + /* fifohandcheck should be used only with stallmode */ + if (stallmode == false && fifohandcheck == true) + return -EINVAL; + + /* + * io pad mode can only be checked through a dssdev connected to the + * manager. Skip these checks for now; add them once the manager can + * hold information about the connected interface. + */ + + return 0; +} + int dss_mgr_check(struct omap_overlay_manager *mgr, struct omap_overlay_manager_info *info, const struct omap_video_timings *mgr_timings, + const struct dss_lcd_mgr_config *lcd_config, struct omap_overlay_info **overlay_infos) { struct omap_overlay *ovl; @@ -683,6 +714,10 @@ int dss_mgr_check(struct omap_overlay_manager *mgr, if (r) return r; + r = dss_mgr_check_lcd_config(mgr, lcd_config); + if (r) + return r; + list_for_each_entry(ovl, &mgr->overlays, list) { struct omap_overlay_info *oi; int r; diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c index b0ba60f88dd..952c6fad9a8 100644 --- a/drivers/video/omap2/dss/overlay.c +++ b/drivers/video/omap2/dss/overlay.c @@ -528,14 +528,24 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force) struct omap_overlay_manager *lcd_mgr; struct omap_overlay_manager *tv_mgr; struct omap_overlay_manager *lcd2_mgr = NULL; + struct omap_overlay_manager *lcd3_mgr = NULL; struct omap_overlay_manager *mgr = NULL; - lcd_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_LCD); - tv_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_TV); + lcd_mgr = omap_dss_get_overlay_manager(OMAP_DSS_CHANNEL_LCD); + tv_mgr = omap_dss_get_overlay_manager(OMAP_DSS_CHANNEL_DIGIT); + if (dss_has_feature(FEAT_MGR_LCD3)) + lcd3_mgr = omap_dss_get_overlay_manager(OMAP_DSS_CHANNEL_LCD3); if (dss_has_feature(FEAT_MGR_LCD2)) - lcd2_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_LCD2); - - if (dssdev->channel == OMAP_DSS_CHANNEL_LCD2) { + lcd2_mgr = omap_dss_get_overlay_manager(OMAP_DSS_CHANNEL_LCD2); + + if (dssdev->channel == OMAP_DSS_CHANNEL_LCD3) { + if (!lcd3_mgr->device || force) { + if (lcd3_mgr->device) + lcd3_mgr->unset_device(lcd3_mgr); + lcd3_mgr->set_device(lcd3_mgr, dssdev); + mgr = lcd3_mgr; + } + } else if (dssdev->channel == OMAP_DSS_CHANNEL_LCD2) { if (!lcd2_mgr->device || force) { if (lcd2_mgr->device) lcd2_mgr->unset_device(lcd2_mgr); @@ -677,3 +687,16 @@ int dss_ovl_check(struct omap_overlay *ovl, struct omap_overlay_info *info, return 0; } + +/* + * Checks if replication logic should be used. 
It is only used when the overlay is in + * RGB12U or RGB16 mode and the video port width is 18 or 24 bits. + */ +bool dss_ovl_use_replication(struct dss_lcd_mgr_config config, + enum omap_color_mode mode) +{ + if (mode != OMAP_DSS_COLOR_RGB12U && mode != OMAP_DSS_COLOR_RGB16) + return false; + + return config.video_port_width > 16; +} diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c index 7985fa12b9b..7c087424b63 100644 --- a/drivers/video/omap2/dss/rfbi.c +++ b/drivers/video/omap2/dss/rfbi.c @@ -300,10 +300,11 @@ void omap_rfbi_write_pixels(const void __iomem *buf, int scr_width, } EXPORT_SYMBOL(omap_rfbi_write_pixels); -static void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width, +static int rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width, u16 height, void (*callback)(void *data), void *data) { u32 l; + int r; struct omap_video_timings timings = { .hsw = 1, .hfp = 1, @@ -322,7 +323,9 @@ static void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width, dss_mgr_set_timings(dssdev->manager, &timings); - dispc_mgr_enable(dssdev->manager->id, true); + r = dss_mgr_enable(dssdev->manager); + if (r) + return r; rfbi.framedone_callback = callback; rfbi.framedone_callback_data = data; @@ -335,6 +338,8 @@ static void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width, l = FLD_MOD(l, 1, 4, 4); /* ITE */ rfbi_write_reg(RFBI_CONTROL, l); + + return 0; } static void framedone_callback(void *data, u32 mask) @@ -814,8 +819,11 @@ int omap_rfbi_update(struct omap_dss_device *dssdev, u16 x, u16 y, u16 w, u16 h, void (*callback)(void *), void *data) { - rfbi_transfer_area(dssdev, w, h, callback, data); - return 0; + int r; + + r = rfbi_transfer_area(dssdev, w, h, callback, data); + + return r; } EXPORT_SYMBOL(omap_rfbi_update); @@ -859,6 +867,22 @@ static void rfbi_dump_regs(struct seq_file *s) #undef DUMPREG } +static void rfbi_config_lcd_manager(struct omap_dss_device *dssdev) +{ + struct dss_lcd_mgr_config mgr_config; + + mgr_config.io_pad_mode = DSS_IO_PAD_MODE_RFBI; + + mgr_config.stallmode = true; + /* Do we need fifohandcheck for RFBI? 
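+ * It is only valid together with stallmode (see + * dss_mgr_check_lcd_config()), and RFBI does stall, so enabling it + * here would be accepted; DSI command mode, the other stalled + * interface, does enable it. 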
*/ + mgr_config.fifohandcheck = false; + + mgr_config.video_port_width = dssdev->ctrl.pixel_size; + mgr_config.lcden_sig_polarity = 0; + + dss_mgr_set_lcd_config(dssdev->manager, &mgr_config); +} + int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev) { int r; @@ -885,13 +909,7 @@ int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev) goto err1; } - dispc_mgr_set_lcd_display_type(dssdev->manager->id, - OMAP_DSS_LCD_DISPLAY_TFT); - - dispc_mgr_set_io_pad_mode(DSS_IO_PAD_MODE_RFBI); - dispc_mgr_enable_stallmode(dssdev->manager->id, true); - - dispc_mgr_set_tft_data_lines(dssdev->manager->id, dssdev->ctrl.pixel_size); + rfbi_config_lcd_manager(dssdev); rfbi_configure(dssdev->phy.rfbi.channel, dssdev->ctrl.pixel_size, diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c index 3a43dc2a9b4..5d31699fbd3 100644 --- a/drivers/video/omap2/dss/sdi.c +++ b/drivers/video/omap2/dss/sdi.c @@ -32,19 +32,21 @@ static struct { bool update_enabled; struct regulator *vdds_sdi_reg; -} sdi; -static void sdi_basic_init(struct omap_dss_device *dssdev) + struct dss_lcd_mgr_config mgr_config; +} sdi; +static void sdi_config_lcd_manager(struct omap_dss_device *dssdev) { - dispc_mgr_set_io_pad_mode(DSS_IO_PAD_MODE_BYPASS); - dispc_mgr_enable_stallmode(dssdev->manager->id, false); + sdi.mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS; + + sdi.mgr_config.stallmode = false; + sdi.mgr_config.fifohandcheck = false; - dispc_mgr_set_lcd_display_type(dssdev->manager->id, - OMAP_DSS_LCD_DISPLAY_TFT); + sdi.mgr_config.video_port_width = 24; + sdi.mgr_config.lcden_sig_polarity = 1; - dispc_mgr_set_tft_data_lines(dssdev->manager->id, 24); - dispc_lcd_enable_signal_polarity(1); + dss_mgr_set_lcd_config(dssdev->manager, &sdi.mgr_config); } int omapdss_sdi_display_enable(struct omap_dss_device *dssdev) @@ -52,8 +54,6 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev) struct omap_video_timings *t = &dssdev->panel.timings; struct dss_clock_info dss_cinfo; struct dispc_clock_info dispc_cinfo; - u16 lck_div, pck_div; - unsigned long fck; unsigned long pck; int r; @@ -76,24 +76,17 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev) if (r) goto err_get_dispc; - sdi_basic_init(dssdev); - /* 15.5.9.1.2 */ - dssdev->panel.config |= OMAP_DSS_LCD_RF | OMAP_DSS_LCD_ONOFF; - - dispc_mgr_set_pol_freq(dssdev->manager->id, dssdev->panel.config, - dssdev->panel.acbi, dssdev->panel.acb); + dssdev->panel.timings.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE; + dssdev->panel.timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE; - r = dss_calc_clock_div(1, t->pixel_clock * 1000, - &dss_cinfo, &dispc_cinfo); + r = dss_calc_clock_div(t->pixel_clock * 1000, &dss_cinfo, &dispc_cinfo); if (r) goto err_calc_clock_div; - fck = dss_cinfo.fck; - lck_div = dispc_cinfo.lck_div; - pck_div = dispc_cinfo.pck_div; + sdi.mgr_config.clock_info = dispc_cinfo; - pck = fck / lck_div / pck_div / 1000; + pck = dss_cinfo.fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div / 1000; if (pck != t->pixel_clock) { DSSWARN("Could not find exact pixel clock. 
Requested %d kHz, " @@ -110,9 +103,7 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev) if (r) goto err_set_dss_clock_div; - r = dispc_mgr_set_clock_div(dssdev->manager->id, &dispc_cinfo); - if (r) - goto err_set_dispc_clock_div; + sdi_config_lcd_manager(dssdev); dss_sdi_init(dssdev->phy.sdi.datapairs); r = dss_sdi_enable(); @@ -129,7 +120,6 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev) err_mgr_enable: dss_sdi_disable(); err_sdi_enable: -err_set_dispc_clock_div: err_set_dss_clock_div: err_calc_clock_div: dispc_runtime_put(); diff --git a/drivers/video/omap2/dss/ti_hdmi.h b/drivers/video/omap2/dss/ti_hdmi.h index e734cb444bc..b046c208cb9 100644 --- a/drivers/video/omap2/dss/ti_hdmi.h +++ b/drivers/video/omap2/dss/ti_hdmi.h @@ -42,30 +42,13 @@ enum hdmi_clk_refsel { HDMI_REFSEL_SYSCLK = 3 }; -/* HDMI timing structure */ -struct hdmi_video_timings { - u16 x_res; - u16 y_res; - /* Unit: KHz */ - u32 pixel_clock; - u16 hsw; - u16 hfp; - u16 hbp; - u16 vsw; - u16 vfp; - u16 vbp; - bool vsync_pol; - bool hsync_pol; - bool interlace; -}; - struct hdmi_cm { int code; int mode; }; struct hdmi_config { - struct hdmi_video_timings timings; + struct omap_video_timings timings; struct hdmi_cm cm; }; @@ -177,7 +160,7 @@ struct hdmi_ip_data { /* ti_hdmi_4xxx_ip private data. These should be in a separate struct */ int hpd_gpio; - bool phy_tx_enabled; + struct mutex lock; }; int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data); void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data); diff --git a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c index 4dae1b29107..c23b85a20cd 100644 --- a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c +++ b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c @@ -157,6 +157,10 @@ static int hdmi_pll_init(struct hdmi_ip_data *ip_data) /* PHY_PWR_CMD */ static int hdmi_set_phy_pwr(struct hdmi_ip_data *ip_data, enum hdmi_phy_pwr val) { + /* Return early if the PHY is already in the requested power state */ + if (REG_GET(hdmi_wp_base(ip_data), HDMI_WP_PWR_CTRL, 5, 4) == val) + return 0; + /* Command for power control of HDMI PHY */ REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_PWR_CTRL, val, 7, 6); @@ -231,21 +235,13 @@ void ti_hdmi_4xxx_pll_disable(struct hdmi_ip_data *ip_data) static int hdmi_check_hpd_state(struct hdmi_ip_data *ip_data) { - unsigned long flags; bool hpd; int r; - /* this should be in ti_hdmi_4xxx_ip private data */ - static DEFINE_SPINLOCK(phy_tx_lock); - spin_lock_irqsave(&phy_tx_lock, flags); + mutex_lock(&ip_data->lock); hpd = gpio_get_value(ip_data->hpd_gpio); - if (hpd == ip_data->phy_tx_enabled) { - spin_unlock_irqrestore(&phy_tx_lock, flags); - return 0; - } - if (hpd) r = hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_TXON); else @@ -257,9 +253,8 @@ static int hdmi_check_hpd_state(struct hdmi_ip_data *ip_data) goto err; } - ip_data->phy_tx_enabled = hpd; err: - spin_unlock_irqrestore(&phy_tx_lock, flags); + mutex_unlock(&ip_data->lock); return r; } @@ -327,7 +322,6 @@ void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data) free_irq(gpio_to_irq(ip_data->hpd_gpio), ip_data); hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF); - ip_data->phy_tx_enabled = false; } static int hdmi_core_ddc_init(struct hdmi_ip_data *ip_data) @@ -747,11 +741,15 @@ static void hdmi_wp_video_config_format(struct hdmi_ip_data *ip_data, static void hdmi_wp_video_config_interface(struct hdmi_ip_data *ip_data) { u32 r; + bool vsync_pol, hsync_pol; pr_debug("Enter hdmi_wp_video_config_interface\n"); + vsync_pol = ip_data->cfg.timings.vsync_level == 
OMAPDSS_SIG_ACTIVE_HIGH; + hsync_pol = ip_data->cfg.timings.hsync_level == OMAPDSS_SIG_ACTIVE_HIGH; + r = hdmi_read_reg(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG); - r = FLD_MOD(r, ip_data->cfg.timings.vsync_pol, 7, 7); - r = FLD_MOD(r, ip_data->cfg.timings.hsync_pol, 6, 6); + r = FLD_MOD(r, vsync_pol, 7, 7); + r = FLD_MOD(r, hsync_pol, 6, 6); r = FLD_MOD(r, ip_data->cfg.timings.interlace, 3, 3); r = FLD_MOD(r, 1, 1, 0); /* HDMI_TIMING_MASTER_24BIT */ hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, r); diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c index 3907c8b6ecb..3a220877461 100644 --- a/drivers/video/omap2/dss/venc.c +++ b/drivers/video/omap2/dss/venc.c @@ -272,6 +272,8 @@ const struct omap_video_timings omap_dss_pal_timings = { .vsw = 5, .vfp = 5, .vbp = 41, + + .interlace = true, }; EXPORT_SYMBOL(omap_dss_pal_timings); @@ -285,6 +287,8 @@ const struct omap_video_timings omap_dss_ntsc_timings = { .vsw = 6, .vfp = 6, .vbp = 31, + + .interlace = true, }; EXPORT_SYMBOL(omap_dss_ntsc_timings); @@ -930,7 +934,7 @@ static int __exit omap_venchw_remove(struct platform_device *pdev) static int venc_runtime_suspend(struct device *dev) { if (venc.tv_dac_clk) - clk_disable(venc.tv_dac_clk); + clk_disable_unprepare(venc.tv_dac_clk); dispc_runtime_put(); @@ -946,7 +950,7 @@ static int venc_runtime_resume(struct device *dev) return r; if (venc.tv_dac_clk) - clk_enable(venc.tv_dac_clk); + clk_prepare_enable(venc.tv_dac_clk); return 0; } diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c index 3450ea0966c..08ec1a7103f 100644 --- a/drivers/video/omap2/omapfb/omapfb-main.c +++ b/drivers/video/omap2/omapfb/omapfb-main.c @@ -733,6 +733,12 @@ int check_fb_var(struct fb_info *fbi, struct fb_var_screeninfo *var) var->lower_margin = timings.vfp; var->hsync_len = timings.hsw; var->vsync_len = timings.vsw; + var->sync |= timings.hsync_level == OMAPDSS_SIG_ACTIVE_HIGH ? + FB_SYNC_HOR_HIGH_ACT : 0; + var->sync |= timings.vsync_level == OMAPDSS_SIG_ACTIVE_HIGH ? + FB_SYNC_VERT_HIGH_ACT : 0; + var->vmode = timings.interlace ? + FB_VMODE_INTERLACED : FB_VMODE_NONINTERLACED; } else { var->pixclock = 0; var->left_margin = 0; @@ -741,12 +747,10 @@ int check_fb_var(struct fb_info *fbi, struct fb_var_screeninfo *var) var->lower_margin = 0; var->hsync_len = 0; var->vsync_len = 0; + var->sync = 0; + var->vmode = FB_VMODE_NONINTERLACED; } - /* TODO: get these from panel->config */ - var->vmode = FB_VMODE_NONINTERLACED; - var->sync = 0; - return 0; } @@ -1993,6 +1997,7 @@ static int omapfb_create_framebuffers(struct omapfb2_device *fbdev) } static int omapfb_mode_to_timings(const char *mode_str, + struct omap_dss_device *display, struct omap_video_timings *timings, u8 *bpp) { struct fb_info *fbi; @@ -2046,6 +2051,14 @@ static int omapfb_mode_to_timings(const char *mode_str, goto err; } + if (display->driver->get_timings) { + display->driver->get_timings(display, timings); + } else { + timings->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE; + timings->de_level = OMAPDSS_SIG_ACTIVE_HIGH; + timings->sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES; + } + timings->pixel_clock = PICOS2KHZ(var->pixclock); timings->hbp = var->left_margin; timings->hfp = var->right_margin; @@ -2055,6 +2068,13 @@ static int omapfb_mode_to_timings(const char *mode_str, timings->vsw = var->vsync_len; timings->x_res = var->xres; timings->y_res = var->yres; + timings->hsync_level = var->sync & FB_SYNC_HOR_HIGH_ACT ? 
+ OMAPDSS_SIG_ACTIVE_HIGH : + OMAPDSS_SIG_ACTIVE_LOW; + timings->vsync_level = var->sync & FB_SYNC_VERT_HIGH_ACT ? + OMAPDSS_SIG_ACTIVE_HIGH : + OMAPDSS_SIG_ACTIVE_LOW; + timings->interlace = var->vmode & FB_VMODE_INTERLACED; switch (var->bits_per_pixel) { case 16: @@ -2085,7 +2105,7 @@ static int omapfb_set_def_mode(struct omapfb2_device *fbdev, struct omap_video_timings timings, temp_timings; struct omapfb_display_data *d; - r = omapfb_mode_to_timings(mode_str, &timings, &bpp); + r = omapfb_mode_to_timings(mode_str, display, &timings, &bpp); if (r) return r; @@ -2178,8 +2198,17 @@ static int omapfb_parse_def_modes(struct omapfb2_device *fbdev) } static void fb_videomode_to_omap_timings(struct fb_videomode *m, + struct omap_dss_device *display, struct omap_video_timings *t) { + if (display->driver->get_timings) { + display->driver->get_timings(display, t); + } else { + t->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE; + t->de_level = OMAPDSS_SIG_ACTIVE_HIGH; + t->sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES; + } + t->x_res = m->xres; t->y_res = m->yres; t->pixel_clock = PICOS2KHZ(m->pixclock); @@ -2189,6 +2218,13 @@ static void fb_videomode_to_omap_timings(struct fb_videomode *m, t->vsw = m->vsync_len; t->vfp = m->lower_margin; t->vbp = m->upper_margin; + t->hsync_level = m->sync & FB_SYNC_HOR_HIGH_ACT ? + OMAPDSS_SIG_ACTIVE_HIGH : + OMAPDSS_SIG_ACTIVE_LOW; + t->vsync_level = m->sync & FB_SYNC_VERT_HIGH_ACT ? + OMAPDSS_SIG_ACTIVE_HIGH : + OMAPDSS_SIG_ACTIVE_LOW; + t->interlace = m->vmode & FB_VMODE_INTERLACED; } static int omapfb_find_best_mode(struct omap_dss_device *display, @@ -2231,7 +2267,7 @@ static int omapfb_find_best_mode(struct omap_dss_device *display, if (m->xres == 2880 || m->xres == 1440) continue; - fb_videomode_to_omap_timings(m, &t); + fb_videomode_to_omap_timings(m, display, &t); r = display->driver->check_timings(display, &t); if (r == 0 && best_xres < m->xres) { @@ -2245,7 +2281,8 @@ static int omapfb_find_best_mode(struct omap_dss_device *display, goto err2; } - fb_videomode_to_omap_timings(&specs->modedb[best_idx], timings); + fb_videomode_to_omap_timings(&specs->modedb[best_idx], display, + timings); r = 0; diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c index 2c80246b18b..1d007366b91 100644 --- a/drivers/video/s3fb.c +++ b/drivers/video/s3fb.c @@ -84,7 +84,7 @@ static const char * const s3_names[] = {"S3 Unknown", "S3 Trio32", "S3 Trio64", "S3 Virge/VX", "S3 Virge/DX", "S3 Virge/GX", "S3 Virge/GX2", "S3 Virge/GX2+", "", "S3 Trio3D/1X", "S3 Trio3D/2X", "S3 Trio3D/2X", - "S3 Trio3D"}; + "S3 Trio3D", "S3 Virge/MX"}; #define CHIP_UNKNOWN 0x00 #define CHIP_732_TRIO32 0x01 @@ -105,6 +105,7 @@ static const char * const s3_names[] = {"S3 Unknown", "S3 Trio32", "S3 Trio64", #define CHIP_362_TRIO3D_2X 0x11 #define CHIP_368_TRIO3D_2X 0x12 #define CHIP_365_TRIO3D 0x13 +#define CHIP_260_VIRGE_MX 0x14 #define CHIP_XXX_TRIO 0x80 #define CHIP_XXX_TRIO64V2_DXGX 0x81 @@ -280,7 +281,8 @@ static int __devinit s3fb_setup_ddc_bus(struct fb_info *info) */ /* vga_wseq(par->state.vgabase, 0x08, 0x06); - not needed, already unlocked */ if (par->chip == CHIP_357_VIRGE_GX2 || - par->chip == CHIP_359_VIRGE_GX2P) + par->chip == CHIP_359_VIRGE_GX2P || + par->chip == CHIP_260_VIRGE_MX) svga_wseq_mask(par->state.vgabase, 0x0d, 0x01, 0x03); else svga_wseq_mask(par->state.vgabase, 0x0d, 0x00, 0x03); @@ -487,7 +489,8 @@ static void s3_set_pixclock(struct fb_info *info, u32 pixclock) par->chip == CHIP_359_VIRGE_GX2P || par->chip == CHIP_360_TRIO3D_1X || par->chip == CHIP_362_TRIO3D_2X 
|| - par->chip == CHIP_368_TRIO3D_2X) { + par->chip == CHIP_368_TRIO3D_2X || + par->chip == CHIP_260_VIRGE_MX) { vga_wseq(par->state.vgabase, 0x12, (n - 2) | ((r & 3) << 6)); /* n and two bits of r */ vga_wseq(par->state.vgabase, 0x29, r >> 2); /* remaining highest bit of r */ } else @@ -690,7 +693,8 @@ static int s3fb_set_par(struct fb_info *info) par->chip != CHIP_359_VIRGE_GX2P && par->chip != CHIP_360_TRIO3D_1X && par->chip != CHIP_362_TRIO3D_2X && - par->chip != CHIP_368_TRIO3D_2X) { + par->chip != CHIP_368_TRIO3D_2X && + par->chip != CHIP_260_VIRGE_MX) { vga_wcrt(par->state.vgabase, 0x54, 0x18); /* M parameter */ vga_wcrt(par->state.vgabase, 0x60, 0xff); /* N parameter */ vga_wcrt(par->state.vgabase, 0x61, 0xff); /* L parameter */ @@ -739,7 +743,8 @@ static int s3fb_set_par(struct fb_info *info) par->chip == CHIP_368_TRIO3D_2X || par->chip == CHIP_365_TRIO3D || par->chip == CHIP_375_VIRGE_DX || - par->chip == CHIP_385_VIRGE_GX) { + par->chip == CHIP_385_VIRGE_GX || + par->chip == CHIP_260_VIRGE_MX) { dbytes = info->var.xres * ((bpp+7)/8); vga_wcrt(par->state.vgabase, 0x91, (dbytes + 7) / 8); vga_wcrt(par->state.vgabase, 0x90, (((dbytes + 7) / 8) >> 8) | 0x80); @@ -751,7 +756,8 @@ static int s3fb_set_par(struct fb_info *info) par->chip == CHIP_359_VIRGE_GX2P || par->chip == CHIP_360_TRIO3D_1X || par->chip == CHIP_362_TRIO3D_2X || - par->chip == CHIP_368_TRIO3D_2X) + par->chip == CHIP_368_TRIO3D_2X || + par->chip == CHIP_260_VIRGE_MX) vga_wcrt(par->state.vgabase, 0x34, 0x00); else /* enable Data Transfer Position Control (DTPC) */ vga_wcrt(par->state.vgabase, 0x34, 0x10); @@ -807,7 +813,8 @@ static int s3fb_set_par(struct fb_info *info) par->chip == CHIP_359_VIRGE_GX2P || par->chip == CHIP_360_TRIO3D_1X || par->chip == CHIP_362_TRIO3D_2X || - par->chip == CHIP_368_TRIO3D_2X) + par->chip == CHIP_368_TRIO3D_2X || + par->chip == CHIP_260_VIRGE_MX) svga_wcrt_mask(par->state.vgabase, 0x67, 0x00, 0xF0); else { svga_wcrt_mask(par->state.vgabase, 0x67, 0x10, 0xF0); @@ -837,7 +844,8 @@ static int s3fb_set_par(struct fb_info *info) par->chip != CHIP_359_VIRGE_GX2P && par->chip != CHIP_360_TRIO3D_1X && par->chip != CHIP_362_TRIO3D_2X && - par->chip != CHIP_368_TRIO3D_2X) + par->chip != CHIP_368_TRIO3D_2X && + par->chip != CHIP_260_VIRGE_MX) hmul = 2; } break; @@ -864,7 +872,8 @@ static int s3fb_set_par(struct fb_info *info) par->chip != CHIP_359_VIRGE_GX2P && par->chip != CHIP_360_TRIO3D_1X && par->chip != CHIP_362_TRIO3D_2X && - par->chip != CHIP_368_TRIO3D_2X) + par->chip != CHIP_368_TRIO3D_2X && + par->chip != CHIP_260_VIRGE_MX) hmul = 2; } break; @@ -1208,7 +1217,8 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i break; } } else if (par->chip == CHIP_357_VIRGE_GX2 || - par->chip == CHIP_359_VIRGE_GX2P) { + par->chip == CHIP_359_VIRGE_GX2P || + par->chip == CHIP_260_VIRGE_MX) { switch ((regval & 0xC0) >> 6) { case 1: /* 4MB */ info->screen_size = 4 << 20; @@ -1515,6 +1525,7 @@ static struct pci_device_id s3_devices[] __devinitdata = { {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A12), .driver_data = CHIP_359_VIRGE_GX2P}, {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A13), .driver_data = CHIP_36X_TRIO3D_1X_2X}, {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8904), .driver_data = CHIP_365_TRIO3D}, + {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8C01), .driver_data = CHIP_260_VIRGE_MX}, {0, 0, 0, 0, 0, 0, 0} }; diff --git a/drivers/video/sh_mipi_dsi.c b/drivers/video/sh_mipi_dsi.c index 4c6b8448856..3951fdae5f6 100644 --- a/drivers/video/sh_mipi_dsi.c +++ b/drivers/video/sh_mipi_dsi.c @@ -127,8 +127,7 @@ static 
void sh_mipi_shutdown(struct platform_device *pdev) sh_mipi_dsi_enable(mipi, false); } -static int __init sh_mipi_setup(struct sh_mipi *mipi, - struct sh_mipi_dsi_info *pdata) +static int sh_mipi_setup(struct sh_mipi *mipi, struct sh_mipi_dsi_info *pdata) { void __iomem *base = mipi->base; struct sh_mobile_lcdc_chan_cfg *ch = pdata->lcd_chan; @@ -551,7 +550,7 @@ efindslot: return ret; } -static int __exit sh_mipi_remove(struct platform_device *pdev) +static int __devexit sh_mipi_remove(struct platform_device *pdev) { struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); struct resource *res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1); @@ -592,7 +591,7 @@ static int __exit sh_mipi_remove(struct platform_device *pdev) } static struct platform_driver sh_mipi_driver = { - .remove = __exit_p(sh_mipi_remove), + .remove = __devexit_p(sh_mipi_remove), .shutdown = sh_mipi_shutdown, .driver = { .name = "sh-mipi-dsi", diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index e672698bd82..699487c287b 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c @@ -12,6 +12,7 @@ #include <linux/backlight.h> #include <linux/clk.h> #include <linux/console.h> +#include <linux/ctype.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/gpio.h> @@ -32,12 +33,176 @@ #include "sh_mobile_lcdcfb.h" +/* ---------------------------------------------------------------------------- + * Overlay register definitions + */ + +#define LDBCR 0xb00 +#define LDBCR_UPC(n) (1 << ((n) + 16)) +#define LDBCR_UPF(n) (1 << ((n) + 8)) +#define LDBCR_UPD(n) (1 << ((n) + 0)) +#define LDBnBSIFR(n) (0xb20 + (n) * 0x20 + 0x00) +#define LDBBSIFR_EN (1 << 31) +#define LDBBSIFR_VS (1 << 29) +#define LDBBSIFR_BRSEL (1 << 28) +#define LDBBSIFR_MX (1 << 27) +#define LDBBSIFR_MY (1 << 26) +#define LDBBSIFR_CV3 (3 << 24) +#define LDBBSIFR_CV2 (2 << 24) +#define LDBBSIFR_CV1 (1 << 24) +#define LDBBSIFR_CV0 (0 << 24) +#define LDBBSIFR_CV_MASK (3 << 24) +#define LDBBSIFR_LAY_MASK (0xff << 16) +#define LDBBSIFR_LAY_SHIFT 16 +#define LDBBSIFR_ROP3_MASK (0xff << 16) +#define LDBBSIFR_ROP3_SHIFT 16 +#define LDBBSIFR_AL_PL8 (3 << 14) +#define LDBBSIFR_AL_PL1 (2 << 14) +#define LDBBSIFR_AL_PK (1 << 14) +#define LDBBSIFR_AL_1 (0 << 14) +#define LDBBSIFR_AL_MASK (3 << 14) +#define LDBBSIFR_SWPL (1 << 10) +#define LDBBSIFR_SWPW (1 << 9) +#define LDBBSIFR_SWPB (1 << 8) +#define LDBBSIFR_RY (1 << 7) +#define LDBBSIFR_CHRR_420 (2 << 0) +#define LDBBSIFR_CHRR_422 (1 << 0) +#define LDBBSIFR_CHRR_444 (0 << 0) +#define LDBBSIFR_RPKF_ARGB32 (0x00 << 0) +#define LDBBSIFR_RPKF_RGB16 (0x03 << 0) +#define LDBBSIFR_RPKF_RGB24 (0x0b << 0) +#define LDBBSIFR_RPKF_MASK (0x1f << 0) +#define LDBnBSSZR(n) (0xb20 + (n) * 0x20 + 0x04) +#define LDBBSSZR_BVSS_MASK (0xfff << 16) +#define LDBBSSZR_BVSS_SHIFT 16 +#define LDBBSSZR_BHSS_MASK (0xfff << 0) +#define LDBBSSZR_BHSS_SHIFT 0 +#define LDBnBLOCR(n) (0xb20 + (n) * 0x20 + 0x08) +#define LDBBLOCR_CVLC_MASK (0xfff << 16) +#define LDBBLOCR_CVLC_SHIFT 16 +#define LDBBLOCR_CHLC_MASK (0xfff << 0) +#define LDBBLOCR_CHLC_SHIFT 0 +#define LDBnBSMWR(n) (0xb20 + (n) * 0x20 + 0x0c) +#define LDBBSMWR_BSMWA_MASK (0xffff << 16) +#define LDBBSMWR_BSMWA_SHIFT 16 +#define LDBBSMWR_BSMW_MASK (0xffff << 0) +#define LDBBSMWR_BSMW_SHIFT 0 +#define LDBnBSAYR(n) (0xb20 + (n) * 0x20 + 0x10) +#define LDBBSAYR_FG1A_MASK (0xff << 24) +#define LDBBSAYR_FG1A_SHIFT 24 +#define LDBBSAYR_FG1R_MASK (0xff << 16) +#define LDBBSAYR_FG1R_SHIFT 16 +#define 
LDBBSAYR_FG1G_MASK (0xff << 8) +#define LDBBSAYR_FG1G_SHIFT 8 +#define LDBBSAYR_FG1B_MASK (0xff << 0) +#define LDBBSAYR_FG1B_SHIFT 0 +#define LDBnBSACR(n) (0xb20 + (n) * 0x20 + 0x14) +#define LDBBSACR_FG2A_MASK (0xff << 24) +#define LDBBSACR_FG2A_SHIFT 24 +#define LDBBSACR_FG2R_MASK (0xff << 16) +#define LDBBSACR_FG2R_SHIFT 16 +#define LDBBSACR_FG2G_MASK (0xff << 8) +#define LDBBSACR_FG2G_SHIFT 8 +#define LDBBSACR_FG2B_MASK (0xff << 0) +#define LDBBSACR_FG2B_SHIFT 0 +#define LDBnBSAAR(n) (0xb20 + (n) * 0x20 + 0x18) +#define LDBBSAAR_AP_MASK (0xff << 24) +#define LDBBSAAR_AP_SHIFT 24 +#define LDBBSAAR_R_MASK (0xff << 16) +#define LDBBSAAR_R_SHIFT 16 +#define LDBBSAAR_GY_MASK (0xff << 8) +#define LDBBSAAR_GY_SHIFT 8 +#define LDBBSAAR_B_MASK (0xff << 0) +#define LDBBSAAR_B_SHIFT 0 +#define LDBnBPPCR(n) (0xb20 + (n) * 0x20 + 0x1c) +#define LDBBPPCR_AP_MASK (0xff << 24) +#define LDBBPPCR_AP_SHIFT 24 +#define LDBBPPCR_R_MASK (0xff << 16) +#define LDBBPPCR_R_SHIFT 16 +#define LDBBPPCR_GY_MASK (0xff << 8) +#define LDBBPPCR_GY_SHIFT 8 +#define LDBBPPCR_B_MASK (0xff << 0) +#define LDBBPPCR_B_SHIFT 0 +#define LDBnBBGCL(n) (0xb10 + (n) * 0x04) +#define LDBBBGCL_BGA_MASK (0xff << 24) +#define LDBBBGCL_BGA_SHIFT 24 +#define LDBBBGCL_BGR_MASK (0xff << 16) +#define LDBBBGCL_BGR_SHIFT 16 +#define LDBBBGCL_BGG_MASK (0xff << 8) +#define LDBBBGCL_BGG_SHIFT 8 +#define LDBBBGCL_BGB_MASK (0xff << 0) +#define LDBBBGCL_BGB_SHIFT 0 + #define SIDE_B_OFFSET 0x1000 #define MIRROR_OFFSET 0x2000 #define MAX_XRES 1920 #define MAX_YRES 1080 +enum sh_mobile_lcdc_overlay_mode { + LCDC_OVERLAY_BLEND, + LCDC_OVERLAY_ROP3, +}; + +/* + * struct sh_mobile_lcdc_overlay - LCDC display overlay + * + * @channel: LCDC channel this overlay belongs to + * @cfg: Overlay configuration + * @info: Frame buffer device + * @index: Overlay index (0-3) + * @base: Overlay registers base address + * @enabled: True if the overlay is enabled + * @mode: Overlay blending mode (alpha blend or ROP3) + * @alpha: Global alpha blending value (0-255, for alpha blending mode) + * @rop3: Raster operation (for ROP3 mode) + * @fb_mem: Frame buffer virtual memory address + * @fb_size: Frame buffer size in bytes + * @dma_handle: Frame buffer DMA address + * @base_addr_y: Overlay base address (RGB or luma component) + * @base_addr_c: Overlay base address (chroma component) + * @pan_y_offset: Panning linear offset in bytes (luma component) + * @format: Current pixel format + * @xres: Horizontal visible resolution + * @xres_virtual: Horizontal total resolution + * @yres: Vertical visible resolution + * @yres_virtual: Vertical total resolution + * @pitch: Overlay line pitch + * @pos_x: Horizontal overlay position + * @pos_y: Vertical overlay position + */ +struct sh_mobile_lcdc_overlay { + struct sh_mobile_lcdc_chan *channel; + + const struct sh_mobile_lcdc_overlay_cfg *cfg; + struct fb_info *info; + + unsigned int index; + unsigned long base; + + bool enabled; + enum sh_mobile_lcdc_overlay_mode mode; + unsigned int alpha; + unsigned int rop3; + + void *fb_mem; + unsigned long fb_size; + + dma_addr_t dma_handle; + unsigned long base_addr_y; + unsigned long base_addr_c; + unsigned long pan_y_offset; + + const struct sh_mobile_lcdc_format_info *format; + unsigned int xres; + unsigned int xres_virtual; + unsigned int yres; + unsigned int yres_virtual; + unsigned int pitch; + int pos_x; + int pos_y; +}; + struct sh_mobile_lcdc_priv { void __iomem *base; int irq; @@ -45,7 +210,10 @@ struct sh_mobile_lcdc_priv { struct device *dev; struct clk *dot_clk; unsigned long 
lddckr; + struct sh_mobile_lcdc_chan ch[2]; + struct sh_mobile_lcdc_overlay overlays[4]; + struct notifier_block notifier; int started; int forced_fourcc; /* 2 channel LCDC must share fourcc setting */ @@ -141,6 +309,13 @@ static unsigned long lcdc_read_chan(struct sh_mobile_lcdc_chan *chan, return ioread32(chan->lcdc->base + chan->reg_offs[reg_nr]); } +static void lcdc_write_overlay(struct sh_mobile_lcdc_overlay *ovl, + int reg, unsigned long data) +{ + iowrite32(data, ovl->channel->lcdc->base + reg); + iowrite32(data, ovl->channel->lcdc->base + reg + SIDE_B_OFFSET); +} + static void lcdc_write(struct sh_mobile_lcdc_priv *priv, unsigned long reg_offs, unsigned long data) { @@ -384,8 +559,8 @@ sh_mobile_lcdc_must_reconfigure(struct sh_mobile_lcdc_chan *ch, return true; } -static int sh_mobile_check_var(struct fb_var_screeninfo *var, - struct fb_info *info); +static int sh_mobile_lcdc_check_var(struct fb_var_screeninfo *var, + struct fb_info *info); static int sh_mobile_lcdc_display_notify(struct sh_mobile_lcdc_chan *ch, enum sh_mobile_lcdc_entity_event event, @@ -439,7 +614,7 @@ static int sh_mobile_lcdc_display_notify(struct sh_mobile_lcdc_chan *ch, fb_videomode_to_var(&var, mode); var.bits_per_pixel = info->var.bits_per_pixel; var.grayscale = info->var.grayscale; - ret = sh_mobile_check_var(&var, info); + ret = sh_mobile_lcdc_check_var(&var, info); break; } @@ -585,7 +760,7 @@ static irqreturn_t sh_mobile_lcdc_irq(int irq, void *data) return IRQ_HANDLED; } -static int sh_mobile_wait_for_vsync(struct sh_mobile_lcdc_chan *ch) +static int sh_mobile_lcdc_wait_for_vsync(struct sh_mobile_lcdc_chan *ch) { unsigned long ldintr; int ret; @@ -685,8 +860,98 @@ static void sh_mobile_lcdc_geometry(struct sh_mobile_lcdc_chan *ch) lcdc_write_chan(ch, LDHAJR, tmp); } +static void sh_mobile_lcdc_overlay_setup(struct sh_mobile_lcdc_overlay *ovl) +{ + u32 format = 0; + + if (!ovl->enabled) { + lcdc_write(ovl->channel->lcdc, LDBCR, LDBCR_UPC(ovl->index)); + lcdc_write_overlay(ovl, LDBnBSIFR(ovl->index), 0); + lcdc_write(ovl->channel->lcdc, LDBCR, + LDBCR_UPF(ovl->index) | LDBCR_UPD(ovl->index)); + return; + } + + ovl->base_addr_y = ovl->dma_handle; + ovl->base_addr_c = ovl->dma_handle + + ovl->xres_virtual * ovl->yres_virtual; + + switch (ovl->mode) { + case LCDC_OVERLAY_BLEND: + format = LDBBSIFR_EN | (ovl->alpha << LDBBSIFR_LAY_SHIFT); + break; + + case LCDC_OVERLAY_ROP3: + format = LDBBSIFR_EN | LDBBSIFR_BRSEL + | (ovl->rop3 << LDBBSIFR_ROP3_SHIFT); + break; + } + + switch (ovl->format->fourcc) { + case V4L2_PIX_FMT_RGB565: + case V4L2_PIX_FMT_NV21: + case V4L2_PIX_FMT_NV61: + case V4L2_PIX_FMT_NV42: + format |= LDBBSIFR_SWPL | LDBBSIFR_SWPW; + break; + case V4L2_PIX_FMT_BGR24: + case V4L2_PIX_FMT_NV12: + case V4L2_PIX_FMT_NV16: + case V4L2_PIX_FMT_NV24: + format |= LDBBSIFR_SWPL | LDBBSIFR_SWPW | LDBBSIFR_SWPB; + break; + case V4L2_PIX_FMT_BGR32: + default: + format |= LDBBSIFR_SWPL; + break; + } + + switch (ovl->format->fourcc) { + case V4L2_PIX_FMT_RGB565: + format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB16; + break; + case V4L2_PIX_FMT_BGR24: + format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB24; + break; + case V4L2_PIX_FMT_BGR32: + format |= LDBBSIFR_AL_PK | LDBBSIFR_RY | LDDFR_PKF_ARGB32; + break; + case V4L2_PIX_FMT_NV12: + case V4L2_PIX_FMT_NV21: + format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_420; + break; + case V4L2_PIX_FMT_NV16: + case V4L2_PIX_FMT_NV61: + format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_422; + break; + case V4L2_PIX_FMT_NV24: + case V4L2_PIX_FMT_NV42: + format |= 
LDBBSIFR_AL_1 | LDBBSIFR_CHRR_444; + break; + } + + lcdc_write(ovl->channel->lcdc, LDBCR, LDBCR_UPC(ovl->index)); + + lcdc_write_overlay(ovl, LDBnBSIFR(ovl->index), format); + + lcdc_write_overlay(ovl, LDBnBSSZR(ovl->index), + (ovl->yres << LDBBSSZR_BVSS_SHIFT) | + (ovl->xres << LDBBSSZR_BHSS_SHIFT)); + lcdc_write_overlay(ovl, LDBnBLOCR(ovl->index), + (ovl->pos_y << LDBBLOCR_CVLC_SHIFT) | + (ovl->pos_x << LDBBLOCR_CHLC_SHIFT)); + lcdc_write_overlay(ovl, LDBnBSMWR(ovl->index), + ovl->pitch << LDBBSMWR_BSMW_SHIFT); + + lcdc_write_overlay(ovl, LDBnBSAYR(ovl->index), ovl->base_addr_y); + lcdc_write_overlay(ovl, LDBnBSACR(ovl->index), ovl->base_addr_c); + + lcdc_write(ovl->channel->lcdc, LDBCR, + LDBCR_UPF(ovl->index) | LDBCR_UPD(ovl->index)); +} + /* - * __sh_mobile_lcdc_start - Configure and tart the LCDC + * __sh_mobile_lcdc_start - Configure and start the LCDC * @priv: LCDC device * * Configure all enabled channels and start the LCDC device. All external @@ -839,27 +1104,25 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv) /* Compute frame buffer base address and pitch for each channel. */ for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { int pixelformat; - void *meram; + void *cache; ch = &priv->ch[k]; if (!ch->enabled) continue; ch->base_addr_y = ch->dma_handle; - ch->base_addr_c = ch->base_addr_y + ch->xres * ch->yres_virtual; + ch->base_addr_c = ch->dma_handle + + ch->xres_virtual * ch->yres_virtual; ch->line_size = ch->pitch; /* Enable MERAM if possible. */ - if (mdev == NULL || mdev->ops == NULL || - ch->cfg->meram_cfg == NULL) + if (mdev == NULL || ch->cfg->meram_cfg == NULL) continue; - /* we need to de-init configured ICBs before we can - * re-initialize them. - */ - if (ch->meram) { - mdev->ops->meram_unregister(mdev, ch->meram); - ch->meram = NULL; + /* Free the allocated MERAM cache. */ + if (ch->cache) { + sh_mobile_meram_cache_free(mdev, ch->cache); + ch->cache = NULL; } switch (ch->format->fourcc) { @@ -881,17 +1144,22 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv) break; } - meram = mdev->ops->meram_register(mdev, ch->cfg->meram_cfg, + cache = sh_mobile_meram_cache_alloc(mdev, ch->cfg->meram_cfg, ch->pitch, ch->yres, pixelformat, &ch->line_size); - if (!IS_ERR(meram)) { - mdev->ops->meram_update(mdev, meram, + if (!IS_ERR(cache)) { + sh_mobile_meram_cache_update(mdev, cache, ch->base_addr_y, ch->base_addr_c, &ch->base_addr_y, &ch->base_addr_c); - ch->meram = meram; + ch->cache = cache; } } + for (k = 0; k < ARRAY_SIZE(priv->overlays); ++k) { + struct sh_mobile_lcdc_overlay *ovl = &priv->overlays[k]; + sh_mobile_lcdc_overlay_setup(ovl); + } + /* Start the LCDC. */ __sh_mobile_lcdc_start(priv); @@ -953,12 +1221,10 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv) sh_mobile_lcdc_display_off(ch); - /* disable the meram */ - if (ch->meram) { - struct sh_mobile_meram_info *mdev; - mdev = priv->meram_dev; - mdev->ops->meram_unregister(mdev, ch->meram); - ch->meram = 0; + /* Free the MERAM cache. */ + if (ch->cache) { + sh_mobile_meram_cache_free(priv->meram_dev, ch->cache); + ch->cache = 0; } } @@ -975,8 +1241,511 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv) sh_mobile_lcdc_clk_off(priv); } +static int __sh_mobile_lcdc_check_var(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + if (var->xres > MAX_XRES || var->yres > MAX_YRES) + return -EINVAL; + + /* Make sure the virtual resolution is at least as big as the visible + * resolution. 
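+ * The surplus virtual area is what fb panning uses, for example to + * double-buffer. 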
+ */ + if (var->xres_virtual < var->xres) + var->xres_virtual = var->xres; + if (var->yres_virtual < var->yres) + var->yres_virtual = var->yres; + + if (sh_mobile_format_is_fourcc(var)) { + const struct sh_mobile_lcdc_format_info *format; + + format = sh_mobile_format_info(var->grayscale); + if (format == NULL) + return -EINVAL; + var->bits_per_pixel = format->bpp; + + /* Default to RGB and JPEG color-spaces for RGB and YUV formats + * respectively. + */ + if (!format->yuv) + var->colorspace = V4L2_COLORSPACE_SRGB; + else if (var->colorspace != V4L2_COLORSPACE_REC709) + var->colorspace = V4L2_COLORSPACE_JPEG; + } else { + if (var->bits_per_pixel <= 16) { /* RGB 565 */ + var->bits_per_pixel = 16; + var->red.offset = 11; + var->red.length = 5; + var->green.offset = 5; + var->green.length = 6; + var->blue.offset = 0; + var->blue.length = 5; + var->transp.offset = 0; + var->transp.length = 0; + } else if (var->bits_per_pixel <= 24) { /* RGB 888 */ + var->bits_per_pixel = 24; + var->red.offset = 16; + var->red.length = 8; + var->green.offset = 8; + var->green.length = 8; + var->blue.offset = 0; + var->blue.length = 8; + var->transp.offset = 0; + var->transp.length = 0; + } else if (var->bits_per_pixel <= 32) { /* RGBA 888 */ + var->bits_per_pixel = 32; + var->red.offset = 16; + var->red.length = 8; + var->green.offset = 8; + var->green.length = 8; + var->blue.offset = 0; + var->blue.length = 8; + var->transp.offset = 24; + var->transp.length = 8; + } else + return -EINVAL; + + var->red.msb_right = 0; + var->green.msb_right = 0; + var->blue.msb_right = 0; + var->transp.msb_right = 0; + } + + /* Make sure we don't exceed our allocated memory. */ + if (var->xres_virtual * var->yres_virtual * var->bits_per_pixel / 8 > + info->fix.smem_len) + return -EINVAL; + + return 0; +} + /* ----------------------------------------------------------------------------- - * Frame buffer operations + * Frame buffer operations - Overlays + */ + +static ssize_t +overlay_alpha_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct fb_info *info = dev_get_drvdata(dev); + struct sh_mobile_lcdc_overlay *ovl = info->par; + + return scnprintf(buf, PAGE_SIZE, "%u\n", ovl->alpha); +} + +static ssize_t +overlay_alpha_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fb_info *info = dev_get_drvdata(dev); + struct sh_mobile_lcdc_overlay *ovl = info->par; + unsigned int alpha; + char *endp; + + alpha = simple_strtoul(buf, &endp, 10); + if (isspace(*endp)) + endp++; + + if (endp - buf != count) + return -EINVAL; + + if (alpha > 255) + return -EINVAL; + + if (ovl->alpha != alpha) { + ovl->alpha = alpha; + + if (ovl->mode == LCDC_OVERLAY_BLEND && ovl->enabled) + sh_mobile_lcdc_overlay_setup(ovl); + } + + return count; +} + +static ssize_t +overlay_mode_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct fb_info *info = dev_get_drvdata(dev); + struct sh_mobile_lcdc_overlay *ovl = info->par; + + return scnprintf(buf, PAGE_SIZE, "%u\n", ovl->mode); +} + +static ssize_t +overlay_mode_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fb_info *info = dev_get_drvdata(dev); + struct sh_mobile_lcdc_overlay *ovl = info->par; + unsigned int mode; + char *endp; + + mode = simple_strtoul(buf, &endp, 10); + if (isspace(*endp)) + endp++; + + if (endp - buf != count) + return -EINVAL; + + if (mode != LCDC_OVERLAY_BLEND && mode != LCDC_OVERLAY_ROP3) + return -EINVAL; + + if (ovl->mode != mode) { 
+ ovl->mode = mode; + + if (ovl->enabled) + sh_mobile_lcdc_overlay_setup(ovl); + } + + return count; +} + +static ssize_t +overlay_position_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct fb_info *info = dev_get_drvdata(dev); + struct sh_mobile_lcdc_overlay *ovl = info->par; + + return scnprintf(buf, PAGE_SIZE, "%d,%d\n", ovl->pos_x, ovl->pos_y); +} + +static ssize_t +overlay_position_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fb_info *info = dev_get_drvdata(dev); + struct sh_mobile_lcdc_overlay *ovl = info->par; + char *endp; + int pos_x; + int pos_y; + + pos_x = simple_strtol(buf, &endp, 10); + if (*endp != ',') + return -EINVAL; + + pos_y = simple_strtol(endp + 1, &endp, 10); + if (isspace(*endp)) + endp++; + + if (endp - buf != count) + return -EINVAL; + + if (ovl->pos_x != pos_x || ovl->pos_y != pos_y) { + ovl->pos_x = pos_x; + ovl->pos_y = pos_y; + + if (ovl->enabled) + sh_mobile_lcdc_overlay_setup(ovl); + } + + return count; +} + +static ssize_t +overlay_rop3_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct fb_info *info = dev_get_drvdata(dev); + struct sh_mobile_lcdc_overlay *ovl = info->par; + + return scnprintf(buf, PAGE_SIZE, "%u\n", ovl->rop3); +} + +static ssize_t +overlay_rop3_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fb_info *info = dev_get_drvdata(dev); + struct sh_mobile_lcdc_overlay *ovl = info->par; + unsigned int rop3; + char *endp; + + rop3 = simple_strtoul(buf, &endp, 10); + if (isspace(*endp)) + endp++; + + if (endp - buf != count) + return -EINVAL; + + if (rop3 > 255) + return -EINVAL; + + if (ovl->rop3 != rop3) { + ovl->rop3 = rop3; + + if (ovl->mode == LCDC_OVERLAY_ROP3 && ovl->enabled) + sh_mobile_lcdc_overlay_setup(ovl); + } + + return count; +} + +static const struct device_attribute overlay_sysfs_attrs[] = { + __ATTR(ovl_alpha, S_IRUGO|S_IWUSR, + overlay_alpha_show, overlay_alpha_store), + __ATTR(ovl_mode, S_IRUGO|S_IWUSR, + overlay_mode_show, overlay_mode_store), + __ATTR(ovl_position, S_IRUGO|S_IWUSR, + overlay_position_show, overlay_position_store), + __ATTR(ovl_rop3, S_IRUGO|S_IWUSR, + overlay_rop3_show, overlay_rop3_store), +}; + +static const struct fb_fix_screeninfo sh_mobile_lcdc_overlay_fix = { + .id = "SH Mobile LCDC", + .type = FB_TYPE_PACKED_PIXELS, + .visual = FB_VISUAL_TRUECOLOR, + .accel = FB_ACCEL_NONE, + .xpanstep = 1, + .ypanstep = 1, + .ywrapstep = 0, + .capabilities = FB_CAP_FOURCC, +}; + +static int sh_mobile_lcdc_overlay_pan(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + struct sh_mobile_lcdc_overlay *ovl = info->par; + unsigned long base_addr_y; + unsigned long base_addr_c; + unsigned long y_offset; + unsigned long c_offset; + + if (!ovl->format->yuv) { + y_offset = (var->yoffset * ovl->xres_virtual + var->xoffset) + * ovl->format->bpp / 8; + c_offset = 0; + } else { + unsigned int xsub = ovl->format->bpp < 24 ? 2 : 1; + unsigned int ysub = ovl->format->bpp < 16 ? 2 : 1; + + y_offset = var->yoffset * ovl->xres_virtual + var->xoffset; + c_offset = var->yoffset / ysub * ovl->xres_virtual * 2 / xsub + + var->xoffset * 2 / xsub; + } + + /* If the Y offset hasn't changed, the C offset hasn't either. There's + * nothing to do in that case. 
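The four ovl_* attribute handlers above accept plain decimal input (with at most one trailing whitespace character) and only reprogram the hardware when the overlay is enabled in the affected mode. For illustration, a minimal userspace sketch that drives these controls; the fb1 class device for the first overlay and the value 0 for alpha blending are assumptions for this sketch, not guarantees of the patch:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Write one decimal value to an overlay sysfs attribute. */
static int ovl_set(const char *attr, const char *value)
{
	char path[128];
	ssize_t len = strlen(value);
	int fd, ret;

	snprintf(path, sizeof(path), "/sys/class/graphics/fb1/%s", attr);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	ret = write(fd, value, len) == len ? 0 : -1;
	close(fd);
	return ret;
}

int main(void)
{
	ovl_set("ovl_mode", "0");		/* assumed: 0 selects blending */
	ovl_set("ovl_alpha", "128");		/* half opacity */
	ovl_set("ovl_position", "100,50");	/* x,y in pixels */
	return 0;
}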
+ */ + if (y_offset == ovl->pan_y_offset) + return 0; + + /* Set the source address for the next refresh */ + base_addr_y = ovl->dma_handle + y_offset; + base_addr_c = ovl->dma_handle + ovl->xres_virtual * ovl->yres_virtual + + c_offset; + + ovl->base_addr_y = base_addr_y; + ovl->base_addr_c = base_addr_c; + ovl->pan_y_offset = y_offset; + + lcdc_write(ovl->channel->lcdc, LDBCR, LDBCR_UPC(ovl->index)); + + lcdc_write_overlay(ovl, LDBnBSAYR(ovl->index), ovl->base_addr_y); + lcdc_write_overlay(ovl, LDBnBSACR(ovl->index), ovl->base_addr_c); + + lcdc_write(ovl->channel->lcdc, LDBCR, + LDBCR_UPF(ovl->index) | LDBCR_UPD(ovl->index)); + + return 0; +} + +static int sh_mobile_lcdc_overlay_ioctl(struct fb_info *info, unsigned int cmd, + unsigned long arg) +{ + struct sh_mobile_lcdc_overlay *ovl = info->par; + + switch (cmd) { + case FBIO_WAITFORVSYNC: + return sh_mobile_lcdc_wait_for_vsync(ovl->channel); + + default: + return -ENOIOCTLCMD; + } +} + +static int sh_mobile_lcdc_overlay_check_var(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + return __sh_mobile_lcdc_check_var(var, info); +} + +static int sh_mobile_lcdc_overlay_set_par(struct fb_info *info) +{ + struct sh_mobile_lcdc_overlay *ovl = info->par; + + ovl->format = + sh_mobile_format_info(sh_mobile_format_fourcc(&info->var)); + + ovl->xres = info->var.xres; + ovl->xres_virtual = info->var.xres_virtual; + ovl->yres = info->var.yres; + ovl->yres_virtual = info->var.yres_virtual; + + if (ovl->format->yuv) + ovl->pitch = info->var.xres_virtual; + else + ovl->pitch = info->var.xres_virtual * ovl->format->bpp / 8; + + sh_mobile_lcdc_overlay_setup(ovl); + + info->fix.line_length = ovl->pitch; + + if (sh_mobile_format_is_fourcc(&info->var)) { + info->fix.type = FB_TYPE_FOURCC; + info->fix.visual = FB_VISUAL_FOURCC; + } else { + info->fix.type = FB_TYPE_PACKED_PIXELS; + info->fix.visual = FB_VISUAL_TRUECOLOR; + } + + return 0; +} + +/* Overlay blanking. Disable the overlay when blanked. */ +static int sh_mobile_lcdc_overlay_blank(int blank, struct fb_info *info) +{ + struct sh_mobile_lcdc_overlay *ovl = info->par; + + ovl->enabled = !blank; + sh_mobile_lcdc_overlay_setup(ovl); + + /* Prevent the backlight from receiving a blanking event by returning + * a non-zero value. 
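The overlay ioctl handler above makes FBIO_WAITFORVSYNC work on overlay nodes by forwarding it to the owning channel, so pan flips can be synchronized to vertical sync. A short userspace sketch; the /dev/fb1 node for the overlay is an assumption:

#include <fcntl.h>
#include <linux/fb.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	__u32 screen = 0;
	int fd = open("/dev/fb1", O_RDWR);

	if (fd < 0)
		return 1;
	/* Blocks until the next vertical sync of the LCDC channel that
	 * owns this overlay. */
	ioctl(fd, FBIO_WAITFORVSYNC, &screen);
	close(fd);
	return 0;
}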
+ */ + return 1; +} + +static struct fb_ops sh_mobile_lcdc_overlay_ops = { + .owner = THIS_MODULE, + .fb_read = fb_sys_read, + .fb_write = fb_sys_write, + .fb_fillrect = sys_fillrect, + .fb_copyarea = sys_copyarea, + .fb_imageblit = sys_imageblit, + .fb_blank = sh_mobile_lcdc_overlay_blank, + .fb_pan_display = sh_mobile_lcdc_overlay_pan, + .fb_ioctl = sh_mobile_lcdc_overlay_ioctl, + .fb_check_var = sh_mobile_lcdc_overlay_check_var, + .fb_set_par = sh_mobile_lcdc_overlay_set_par, +}; + +static void +sh_mobile_lcdc_overlay_fb_unregister(struct sh_mobile_lcdc_overlay *ovl) +{ + struct fb_info *info = ovl->info; + + if (info == NULL || info->dev == NULL) + return; + + unregister_framebuffer(ovl->info); +} + +static int __devinit +sh_mobile_lcdc_overlay_fb_register(struct sh_mobile_lcdc_overlay *ovl) +{ + struct sh_mobile_lcdc_priv *lcdc = ovl->channel->lcdc; + struct fb_info *info = ovl->info; + unsigned int i; + int ret; + + if (info == NULL) + return 0; + + ret = register_framebuffer(info); + if (ret < 0) + return ret; + + dev_info(lcdc->dev, "registered %s/overlay %u as %dx%d %dbpp.\n", + dev_name(lcdc->dev), ovl->index, info->var.xres, + info->var.yres, info->var.bits_per_pixel); + + for (i = 0; i < ARRAY_SIZE(overlay_sysfs_attrs); ++i) { + ret = device_create_file(info->dev, &overlay_sysfs_attrs[i]); + if (ret < 0) + return ret; + } + + return 0; +} + +static void +sh_mobile_lcdc_overlay_fb_cleanup(struct sh_mobile_lcdc_overlay *ovl) +{ + struct fb_info *info = ovl->info; + + if (info == NULL || info->device == NULL) + return; + + framebuffer_release(info); +} + +static int __devinit +sh_mobile_lcdc_overlay_fb_init(struct sh_mobile_lcdc_overlay *ovl) +{ + struct sh_mobile_lcdc_priv *priv = ovl->channel->lcdc; + struct fb_var_screeninfo *var; + struct fb_info *info; + + /* Allocate and initialize the frame buffer device. */ + info = framebuffer_alloc(0, priv->dev); + if (info == NULL) { + dev_err(priv->dev, "unable to allocate fb_info\n"); + return -ENOMEM; + } + + ovl->info = info; + + info->flags = FBINFO_FLAG_DEFAULT; + info->fbops = &sh_mobile_lcdc_overlay_ops; + info->device = priv->dev; + info->screen_base = ovl->fb_mem; + info->par = ovl; + + /* Initialize fixed screen information. Restrict vertical pan to + * 2-line steps for NV12 and NV21, and horizontal pan to 2-pixel steps + * for the NV formats with subsampled chroma (the switch below falls + * through intentionally). + */ + info->fix = sh_mobile_lcdc_overlay_fix; + snprintf(info->fix.id, sizeof(info->fix.id), + "SH Mobile LCDC Overlay %u", ovl->index); + info->fix.smem_start = ovl->dma_handle; + info->fix.smem_len = ovl->fb_size; + info->fix.line_length = ovl->pitch; + + if (ovl->format->yuv) + info->fix.visual = FB_VISUAL_FOURCC; + else + info->fix.visual = FB_VISUAL_TRUECOLOR; + + switch (ovl->format->fourcc) { + case V4L2_PIX_FMT_NV12: + case V4L2_PIX_FMT_NV21: + info->fix.ypanstep = 2; + case V4L2_PIX_FMT_NV16: + case V4L2_PIX_FMT_NV61: + info->fix.xpanstep = 2; + } + + /* Initialize variable screen information. */ + var = &info->var; + memset(var, 0, sizeof(*var)); + var->xres = ovl->xres; + var->yres = ovl->yres; + var->xres_virtual = ovl->xres_virtual; + var->yres_virtual = ovl->yres_virtual; + var->activate = FB_ACTIVATE_NOW; + + /* Use the legacy API by default for RGB formats, and the FOURCC API + * for YUV formats. 
+ */ + if (!ovl->format->yuv) + var->bits_per_pixel = ovl->format->bpp; + else + var->grayscale = ovl->format->fourcc; + + return sh_mobile_lcdc_overlay_check_var(var, info); +} + +/* ----------------------------------------------------------------------------- + * Frame buffer operations - main frame buffer */ static int sh_mobile_lcdc_setcolreg(u_int regno, @@ -1003,12 +1772,12 @@ static int sh_mobile_lcdc_setcolreg(u_int regno, return 0; } -static struct fb_fix_screeninfo sh_mobile_lcdc_fix = { +static const struct fb_fix_screeninfo sh_mobile_lcdc_fix = { .id = "SH Mobile LCDC", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_TRUECOLOR, .accel = FB_ACCEL_NONE, - .xpanstep = 0, + .xpanstep = 1, .ypanstep = 1, .ywrapstep = 0, .capabilities = FB_CAP_FOURCC, @@ -1035,78 +1804,74 @@ static void sh_mobile_lcdc_imageblit(struct fb_info *info, sh_mobile_lcdc_deferred_io_touch(info); } -static int sh_mobile_fb_pan_display(struct fb_var_screeninfo *var, - struct fb_info *info) +static int sh_mobile_lcdc_pan(struct fb_var_screeninfo *var, + struct fb_info *info) { struct sh_mobile_lcdc_chan *ch = info->par; struct sh_mobile_lcdc_priv *priv = ch->lcdc; unsigned long ldrcntr; - unsigned long new_pan_offset; unsigned long base_addr_y, base_addr_c; + unsigned long y_offset; unsigned long c_offset; - if (!ch->format->yuv) - new_pan_offset = var->yoffset * ch->pitch - + var->xoffset * (ch->format->bpp / 8); - else - new_pan_offset = var->yoffset * ch->pitch + var->xoffset; + if (!ch->format->yuv) { + y_offset = (var->yoffset * ch->xres_virtual + var->xoffset) + * ch->format->bpp / 8; + c_offset = 0; + } else { + unsigned int xsub = ch->format->bpp < 24 ? 2 : 1; + unsigned int ysub = ch->format->bpp < 16 ? 2 : 1; - if (new_pan_offset == ch->pan_offset) - return 0; /* No change, do nothing */ + y_offset = var->yoffset * ch->xres_virtual + var->xoffset; + c_offset = var->yoffset / ysub * ch->xres_virtual * 2 / xsub + + var->xoffset * 2 / xsub; + } - ldrcntr = lcdc_read(priv, _LDRCNTR); + /* If the Y offset hasn't changed, the C offset hasn't either. There's + * nothing to do in that case. 
+ */ + if (y_offset == ch->pan_y_offset) + return 0; /* Set the source address for the next refresh */ - base_addr_y = ch->dma_handle + new_pan_offset; - if (ch->format->yuv) { - /* Set y offset */ - c_offset = var->yoffset * ch->pitch - * (ch->format->bpp - 8) / 8; - base_addr_c = ch->dma_handle + ch->xres * ch->yres_virtual - + c_offset; - /* Set x offset */ - if (ch->format->fourcc == V4L2_PIX_FMT_NV24) - base_addr_c += 2 * var->xoffset; - else - base_addr_c += var->xoffset; - } + base_addr_y = ch->dma_handle + y_offset; + base_addr_c = ch->dma_handle + ch->xres_virtual * ch->yres_virtual + + c_offset; - if (ch->meram) { - struct sh_mobile_meram_info *mdev; - - mdev = priv->meram_dev; - mdev->ops->meram_update(mdev, ch->meram, - base_addr_y, base_addr_c, - &base_addr_y, &base_addr_c); - } + if (ch->cache) + sh_mobile_meram_cache_update(priv->meram_dev, ch->cache, + base_addr_y, base_addr_c, + &base_addr_y, &base_addr_c); ch->base_addr_y = base_addr_y; ch->base_addr_c = base_addr_c; + ch->pan_y_offset = y_offset; lcdc_write_chan_mirror(ch, LDSA1R, base_addr_y); if (ch->format->yuv) lcdc_write_chan_mirror(ch, LDSA2R, base_addr_c); + ldrcntr = lcdc_read(priv, _LDRCNTR); if (lcdc_chan_is_sublcd(ch)) lcdc_write(ch->lcdc, _LDRCNTR, ldrcntr ^ LDRCNTR_SRS); else lcdc_write(ch->lcdc, _LDRCNTR, ldrcntr ^ LDRCNTR_MRS); - ch->pan_offset = new_pan_offset; sh_mobile_lcdc_deferred_io_touch(info); return 0; } -static int sh_mobile_ioctl(struct fb_info *info, unsigned int cmd, - unsigned long arg) +static int sh_mobile_lcdc_ioctl(struct fb_info *info, unsigned int cmd, + unsigned long arg) { + struct sh_mobile_lcdc_chan *ch = info->par; int retval; switch (cmd) { case FBIO_WAITFORVSYNC: - retval = sh_mobile_wait_for_vsync(info->par); + retval = sh_mobile_lcdc_wait_for_vsync(ch); break; default: @@ -1158,7 +1923,7 @@ static void sh_mobile_fb_reconfig(struct fb_info *info) * Locking: both .fb_release() and .fb_open() are called with info->lock held if * user == 1, or with console sem held, if user == 0. */ -static int sh_mobile_release(struct fb_info *info, int user) +static int sh_mobile_lcdc_release(struct fb_info *info, int user) { struct sh_mobile_lcdc_chan *ch = info->par; @@ -1179,7 +1944,7 @@ static int sh_mobile_release(struct fb_info *info, int user) return 0; } -static int sh_mobile_open(struct fb_info *info, int user) +static int sh_mobile_lcdc_open(struct fb_info *info, int user) { struct sh_mobile_lcdc_chan *ch = info->par; @@ -1192,7 +1957,8 @@ static int sh_mobile_open(struct fb_info *info, int user) return 0; } -static int sh_mobile_check_var(struct fb_var_screeninfo *var, struct fb_info *info) +static int sh_mobile_lcdc_check_var(struct fb_var_screeninfo *var, + struct fb_info *info) { struct sh_mobile_lcdc_chan *ch = info->par; struct sh_mobile_lcdc_priv *p = ch->lcdc; @@ -1200,9 +1966,7 @@ static int sh_mobile_check_var(struct fb_var_screeninfo *var, struct fb_info *in unsigned int best_xres = 0; unsigned int best_yres = 0; unsigned int i; - - if (var->xres > MAX_XRES || var->yres > MAX_YRES) - return -EINVAL; + int ret; /* If board code provides us with a list of available modes, make sure * we use one of them. Find the mode closest to the requested one. The @@ -1237,73 +2001,9 @@ static int sh_mobile_check_var(struct fb_var_screeninfo *var, struct fb_info *in var->yres = best_yres; } - /* Make sure the virtual resolution is at least as big as the visible - * resolution. 
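Both the overlay and main frame buffer pan implementations now share the same Y/C offset arithmetic, computed against the virtual X resolution. A standalone recreation of that arithmetic with one worked case may help; this is an illustrative sketch, not driver code. For NV12 (12 bpp) both subsampling divisors are 2, so with xres_virtual = 720, xoffset = 0 and yoffset = 480 the luma offset is 480 * 720 = 345600 bytes and the chroma offset is (480 / 2) * 720 * 2 / 2 = 172800 bytes:

#include <assert.h>

/* Recreation of the driver's YUV pan offset math: one byte per luma
 * sample, with one interleaved CbCr plane that starts after
 * xres_virtual * yres_virtual bytes of luma. */
static void pan_offsets(unsigned int bpp, unsigned int xres_virtual,
			unsigned int xoffset, unsigned int yoffset,
			unsigned long *y_off, unsigned long *c_off)
{
	unsigned int xsub = bpp < 24 ? 2 : 1;	/* horizontal subsampling */
	unsigned int ysub = bpp < 16 ? 2 : 1;	/* vertical subsampling */

	*y_off = yoffset * xres_virtual + xoffset;
	*c_off = yoffset / ysub * xres_virtual * 2 / xsub
	       + xoffset * 2 / xsub;
}

int main(void)
{
	unsigned long y, c;

	pan_offsets(12, 720, 0, 480, &y, &c);	/* NV12 */
	assert(y == 345600 && c == 172800);
	return 0;
}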
- */ - if (var->xres_virtual < var->xres) - var->xres_virtual = var->xres; - if (var->yres_virtual < var->yres) - var->yres_virtual = var->yres; - - if (sh_mobile_format_is_fourcc(var)) { - const struct sh_mobile_lcdc_format_info *format; - - format = sh_mobile_format_info(var->grayscale); - if (format == NULL) - return -EINVAL; - var->bits_per_pixel = format->bpp; - - /* Default to RGB and JPEG color-spaces for RGB and YUV formats - * respectively. - */ - if (!format->yuv) - var->colorspace = V4L2_COLORSPACE_SRGB; - else if (var->colorspace != V4L2_COLORSPACE_REC709) - var->colorspace = V4L2_COLORSPACE_JPEG; - } else { - if (var->bits_per_pixel <= 16) { /* RGB 565 */ - var->bits_per_pixel = 16; - var->red.offset = 11; - var->red.length = 5; - var->green.offset = 5; - var->green.length = 6; - var->blue.offset = 0; - var->blue.length = 5; - var->transp.offset = 0; - var->transp.length = 0; - } else if (var->bits_per_pixel <= 24) { /* RGB 888 */ - var->bits_per_pixel = 24; - var->red.offset = 16; - var->red.length = 8; - var->green.offset = 8; - var->green.length = 8; - var->blue.offset = 0; - var->blue.length = 8; - var->transp.offset = 0; - var->transp.length = 0; - } else if (var->bits_per_pixel <= 32) { /* RGBA 888 */ - var->bits_per_pixel = 32; - var->red.offset = 16; - var->red.length = 8; - var->green.offset = 8; - var->green.length = 8; - var->blue.offset = 0; - var->blue.length = 8; - var->transp.offset = 24; - var->transp.length = 8; - } else - return -EINVAL; - - var->red.msb_right = 0; - var->green.msb_right = 0; - var->blue.msb_right = 0; - var->transp.msb_right = 0; - } - - /* Make sure we don't exceed our allocated memory. */ - if (var->xres_virtual * var->yres_virtual * var->bits_per_pixel / 8 > - info->fix.smem_len) - return -EINVAL; + ret = __sh_mobile_lcdc_check_var(var, info); + if (ret < 0) + return ret; /* only accept the forced_fourcc for dual channel configurations */ if (p->forced_fourcc && @@ -1313,7 +2013,7 @@ static int sh_mobile_check_var(struct fb_var_screeninfo *var, struct fb_info *in return 0; } -static int sh_mobile_set_par(struct fb_info *info) +static int sh_mobile_lcdc_set_par(struct fb_info *info) { struct sh_mobile_lcdc_chan *ch = info->par; int ret; @@ -1329,9 +2029,9 @@ static int sh_mobile_set_par(struct fb_info *info) ch->yres_virtual = info->var.yres_virtual; if (ch->format->yuv) - ch->pitch = info->var.xres; + ch->pitch = info->var.xres_virtual; else - ch->pitch = info->var.xres * ch->format->bpp / 8; + ch->pitch = info->var.xres_virtual * ch->format->bpp / 8; ret = sh_mobile_lcdc_start(ch->lcdc); if (ret < 0) @@ -1383,8 +2083,8 @@ static int sh_mobile_lcdc_blank(int blank, struct fb_info *info) * mode will reenable the clocks and update the screen in time, * so it does not need this. 
*/ if (!info->fbdefio) { - sh_mobile_wait_for_vsync(ch); - sh_mobile_wait_for_vsync(ch); + sh_mobile_lcdc_wait_for_vsync(ch); + sh_mobile_lcdc_wait_for_vsync(ch); } sh_mobile_lcdc_clk_off(p); } @@ -1402,12 +2102,12 @@ static struct fb_ops sh_mobile_lcdc_ops = { .fb_copyarea = sh_mobile_lcdc_copyarea, .fb_imageblit = sh_mobile_lcdc_imageblit, .fb_blank = sh_mobile_lcdc_blank, - .fb_pan_display = sh_mobile_fb_pan_display, - .fb_ioctl = sh_mobile_ioctl, - .fb_open = sh_mobile_open, - .fb_release = sh_mobile_release, - .fb_check_var = sh_mobile_check_var, - .fb_set_par = sh_mobile_set_par, + .fb_pan_display = sh_mobile_lcdc_pan, + .fb_ioctl = sh_mobile_lcdc_ioctl, + .fb_open = sh_mobile_lcdc_open, + .fb_release = sh_mobile_lcdc_release, + .fb_check_var = sh_mobile_lcdc_check_var, + .fb_set_par = sh_mobile_lcdc_set_par, }; static void @@ -1514,19 +2214,24 @@ sh_mobile_lcdc_channel_fb_init(struct sh_mobile_lcdc_chan *ch, else info->fix.visual = FB_VISUAL_TRUECOLOR; - if (ch->format->fourcc == V4L2_PIX_FMT_NV12 || - ch->format->fourcc == V4L2_PIX_FMT_NV21) + switch (ch->format->fourcc) { + case V4L2_PIX_FMT_NV12: + case V4L2_PIX_FMT_NV21: info->fix.ypanstep = 2; + case V4L2_PIX_FMT_NV16: + case V4L2_PIX_FMT_NV61: + info->fix.xpanstep = 2; + } /* Initialize variable screen information using the first mode as - * default. The default Y virtual resolution is twice the panel size to - * allow for double-buffering. + * default. */ var = &info->var; fb_videomode_to_var(var, mode); var->width = ch->cfg->panel_cfg.width; var->height = ch->cfg->panel_cfg.height; - var->yres_virtual = var->yres * 2; + var->xres_virtual = ch->xres_virtual; + var->yres_virtual = ch->yres_virtual; var->activate = FB_ACTIVATE_NOW; /* Use the legacy API by default for RGB formats, and the FOURCC API @@ -1537,7 +2242,7 @@ sh_mobile_lcdc_channel_fb_init(struct sh_mobile_lcdc_chan *ch, else var->grayscale = ch->format->fourcc; - ret = sh_mobile_check_var(var, info); + ret = sh_mobile_lcdc_check_var(var, info); if (ret) return ret; @@ -1712,15 +2417,27 @@ static const struct fb_videomode default_720p __devinitconst = { static int sh_mobile_lcdc_remove(struct platform_device *pdev) { struct sh_mobile_lcdc_priv *priv = platform_get_drvdata(pdev); - int i; + unsigned int i; fb_unregister_client(&priv->notifier); + for (i = 0; i < ARRAY_SIZE(priv->overlays); i++) + sh_mobile_lcdc_overlay_fb_unregister(&priv->overlays[i]); for (i = 0; i < ARRAY_SIZE(priv->ch); i++) sh_mobile_lcdc_channel_fb_unregister(&priv->ch[i]); sh_mobile_lcdc_stop(priv); + for (i = 0; i < ARRAY_SIZE(priv->overlays); i++) { + struct sh_mobile_lcdc_overlay *ovl = &priv->overlays[i]; + + sh_mobile_lcdc_overlay_fb_cleanup(ovl); + + if (ovl->fb_mem) + dma_free_coherent(&pdev->dev, ovl->fb_size, + ovl->fb_mem, ovl->dma_handle); + } + for (i = 0; i < ARRAY_SIZE(priv->ch); i++) { struct sh_mobile_lcdc_chan *ch = &priv->ch[i]; @@ -1737,8 +2454,11 @@ static int sh_mobile_lcdc_remove(struct platform_device *pdev) } for (i = 0; i < ARRAY_SIZE(priv->ch); i++) { - if (priv->ch[i].bl) - sh_mobile_lcdc_bl_remove(priv->ch[i].bl); + struct sh_mobile_lcdc_chan *ch = &priv->ch[i]; + + if (ch->bl) + sh_mobile_lcdc_bl_remove(ch->bl); + mutex_destroy(&ch->open_lock); } if (priv->dot_clk) { @@ -1796,6 +2516,61 @@ static int __devinit sh_mobile_lcdc_check_interface(struct sh_mobile_lcdc_chan * } static int __devinit +sh_mobile_lcdc_overlay_init(struct sh_mobile_lcdc_priv *priv, + struct sh_mobile_lcdc_overlay *ovl) +{ + const struct sh_mobile_lcdc_format_info *format; + int ret; + + if 
(ovl->cfg->fourcc == 0) + return 0; + + /* Validate the format. */ + format = sh_mobile_format_info(ovl->cfg->fourcc); + if (format == NULL) { + dev_err(priv->dev, "Invalid FOURCC %08x\n", ovl->cfg->fourcc); + return -EINVAL; + } + + ovl->enabled = false; + ovl->mode = LCDC_OVERLAY_BLEND; + ovl->alpha = 255; + ovl->rop3 = 0; + ovl->pos_x = 0; + ovl->pos_y = 0; + + /* The default Y virtual resolution is twice the panel size to allow for + * double-buffering. + */ + ovl->format = format; + ovl->xres = ovl->cfg->max_xres; + ovl->xres_virtual = ovl->xres; + ovl->yres = ovl->cfg->max_yres; + ovl->yres_virtual = ovl->yres * 2; + + if (!format->yuv) + ovl->pitch = ovl->xres_virtual * format->bpp / 8; + else + ovl->pitch = ovl->xres_virtual; + + /* Allocate frame buffer memory. */ + ovl->fb_size = ovl->cfg->max_xres * ovl->cfg->max_yres + * format->bpp / 8 * 2; + ovl->fb_mem = dma_alloc_coherent(priv->dev, ovl->fb_size, + &ovl->dma_handle, GFP_KERNEL); + if (!ovl->fb_mem) { + dev_err(priv->dev, "unable to allocate buffer\n"); + return -ENOMEM; + } + + ret = sh_mobile_lcdc_overlay_fb_init(ovl); + if (ret < 0) + return ret; + + return 0; +} + +static int __devinit sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_priv *priv, struct sh_mobile_lcdc_chan *ch) { @@ -1854,7 +2629,9 @@ sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_priv *priv, num_modes = cfg->num_modes; } - /* Use the first mode as default. */ + /* Use the first mode as default. The default Y virtual resolution is + * twice the panel size to allow for double-buffering. + */ ch->format = format; ch->xres = mode->xres; ch->xres_virtual = mode->xres; @@ -1863,10 +2640,10 @@ sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_priv *priv, if (!format->yuv) { ch->colorspace = V4L2_COLORSPACE_SRGB; - ch->pitch = ch->xres * format->bpp / 8; + ch->pitch = ch->xres_virtual * format->bpp / 8; } else { ch->colorspace = V4L2_COLORSPACE_REC709; - ch->pitch = ch->xres; + ch->pitch = ch->xres_virtual; } ch->display.width = cfg->panel_cfg.width; @@ -1952,7 +2729,6 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev) } init_waitqueue_head(&ch->frame_end_wait); init_completion(&ch->vsync_completion); - ch->pan_offset = 0; /* probe the backlight is there is one defined */ if (ch->cfg->bl_info.max_brightness) @@ -2003,6 +2779,17 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev) goto err1; } + for (i = 0; i < ARRAY_SIZE(pdata->overlays); i++) { + struct sh_mobile_lcdc_overlay *ovl = &priv->overlays[i]; + + ovl->cfg = &pdata->overlays[i]; + ovl->channel = &priv->ch[0]; + + error = sh_mobile_lcdc_overlay_init(priv, ovl); + if (error) + goto err1; + } + error = sh_mobile_lcdc_start(priv); if (error) { dev_err(&pdev->dev, "unable to start hardware\n"); @@ -2017,6 +2804,14 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev) goto err1; } + for (i = 0; i < ARRAY_SIZE(pdata->overlays); i++) { + struct sh_mobile_lcdc_overlay *ovl = &priv->overlays[i]; + + error = sh_mobile_lcdc_overlay_fb_register(ovl); + if (error) + goto err1; + } + /* Failure ignored */ priv->notifier.notifier_call = sh_mobile_lcdc_notify; fb_register_client(&priv->notifier); diff --git a/drivers/video/sh_mobile_lcdcfb.h b/drivers/video/sh_mobile_lcdcfb.h index 5c3bddd2cb7..0f92f6544b9 100644 --- a/drivers/video/sh_mobile_lcdcfb.h +++ b/drivers/video/sh_mobile_lcdcfb.h @@ -47,6 +47,7 @@ struct sh_mobile_lcdc_entity { /* * struct sh_mobile_lcdc_chan - LCDC display channel * + * @pan_y_offset: Panning linear offset in bytes (luma 
component) * @base_addr_y: Frame buffer viewport base address (luma component) * @base_addr_c: Frame buffer viewport base address (chroma component) * @pitch: Frame buffer line pitch @@ -59,7 +60,7 @@ struct sh_mobile_lcdc_chan { unsigned long *reg_offs; unsigned long ldmt1r_value; unsigned long enabled; /* ME and SE in LDCNT2R */ - void *meram; + void *cache; struct mutex open_lock; /* protects the use counter */ int use_count; @@ -68,7 +69,7 @@ struct sh_mobile_lcdc_chan { unsigned long fb_size; dma_addr_t dma_handle; - unsigned long pan_offset; + unsigned long pan_y_offset; unsigned long frame_end; wait_queue_head_t frame_end_wait; diff --git a/drivers/video/sh_mobile_meram.c b/drivers/video/sh_mobile_meram.c index 82ba830bf95..7a0ba8bb3fb 100644 --- a/drivers/video/sh_mobile_meram.c +++ b/drivers/video/sh_mobile_meram.c @@ -11,6 +11,7 @@ #include <linux/device.h> #include <linux/err.h> +#include <linux/export.h> #include <linux/genalloc.h> #include <linux/io.h> #include <linux/kernel.h> @@ -194,13 +195,28 @@ static inline unsigned long meram_read_reg(void __iomem *base, unsigned int off) } /* ----------------------------------------------------------------------------- - * Allocation + * MERAM allocation and free + */ + +static unsigned long meram_alloc(struct sh_mobile_meram_priv *priv, size_t size) +{ + return gen_pool_alloc(priv->pool, size); +} + +static void meram_free(struct sh_mobile_meram_priv *priv, unsigned long mem, + size_t size) +{ + gen_pool_free(priv->pool, mem, size); +} + +/* ----------------------------------------------------------------------------- + * LCDC cache planes allocation, init, cleanup and free */ /* Allocate ICBs and MERAM for a plane. */ -static int __meram_alloc(struct sh_mobile_meram_priv *priv, - struct sh_mobile_meram_fb_plane *plane, - size_t size) +static int meram_plane_alloc(struct sh_mobile_meram_priv *priv, + struct sh_mobile_meram_fb_plane *plane, + size_t size) { unsigned long mem; unsigned long idx; @@ -215,7 +231,7 @@ static int __meram_alloc(struct sh_mobile_meram_priv *priv, return -ENOMEM; plane->marker = &priv->icbs[idx]; - mem = gen_pool_alloc(priv->pool, size * 1024); + mem = meram_alloc(priv, size * 1024); if (mem == 0) return -ENOMEM; @@ -229,11 +245,11 @@ static int __meram_alloc(struct sh_mobile_meram_priv *priv, } /* Free ICBs and MERAM for a plane. */ -static void __meram_free(struct sh_mobile_meram_priv *priv, - struct sh_mobile_meram_fb_plane *plane) +static void meram_plane_free(struct sh_mobile_meram_priv *priv, + struct sh_mobile_meram_fb_plane *plane) { - gen_pool_free(priv->pool, priv->meram + plane->marker->offset, - plane->marker->size * 1024); + meram_free(priv, priv->meram + plane->marker->offset, + plane->marker->size * 1024); __clear_bit(plane->marker->index, &priv->used_icb); __clear_bit(plane->cache->index, &priv->used_icb); @@ -248,62 +264,6 @@ static int is_nvcolor(int cspace) return 0; } -/* Allocate memory for the ICBs and mark them as used. */ -static struct sh_mobile_meram_fb_cache * -meram_alloc(struct sh_mobile_meram_priv *priv, - const struct sh_mobile_meram_cfg *cfg, - int pixelformat) -{ - struct sh_mobile_meram_fb_cache *cache; - unsigned int nplanes = is_nvcolor(pixelformat) ? 
2 : 1; - int ret; - - if (cfg->icb[0].meram_size == 0) - return ERR_PTR(-EINVAL); - - if (nplanes == 2 && cfg->icb[1].meram_size == 0) - return ERR_PTR(-EINVAL); - - cache = kzalloc(sizeof(*cache), GFP_KERNEL); - if (cache == NULL) - return ERR_PTR(-ENOMEM); - - cache->nplanes = nplanes; - - ret = __meram_alloc(priv, &cache->planes[0], cfg->icb[0].meram_size); - if (ret < 0) - goto error; - - cache->planes[0].marker->current_reg = 1; - cache->planes[0].marker->pixelformat = pixelformat; - - if (cache->nplanes == 1) - return cache; - - ret = __meram_alloc(priv, &cache->planes[1], cfg->icb[1].meram_size); - if (ret < 0) { - __meram_free(priv, &cache->planes[0]); - goto error; - } - - return cache; - -error: - kfree(cache); - return ERR_PTR(-ENOMEM); -} - -/* Unmark the specified ICB as used. */ -static void meram_free(struct sh_mobile_meram_priv *priv, - struct sh_mobile_meram_fb_cache *cache) -{ - __meram_free(priv, &cache->planes[0]); - if (cache->nplanes == 2) - __meram_free(priv, &cache->planes[1]); - - kfree(cache); -} - /* Set the next address to fetch. */ static void meram_set_next_addr(struct sh_mobile_meram_priv *priv, struct sh_mobile_meram_fb_cache *cache, @@ -355,10 +315,10 @@ meram_get_next_icb_addr(struct sh_mobile_meram_info *pdata, (((x) * (y) + (MERAM_LINE_WIDTH - 1)) & ~(MERAM_LINE_WIDTH - 1)) /* Initialize MERAM. */ -static int meram_init(struct sh_mobile_meram_priv *priv, - struct sh_mobile_meram_fb_plane *plane, - unsigned int xres, unsigned int yres, - unsigned int *out_pitch) +static int meram_plane_init(struct sh_mobile_meram_priv *priv, + struct sh_mobile_meram_fb_plane *plane, + unsigned int xres, unsigned int yres, + unsigned int *out_pitch) { struct sh_mobile_meram_icb *marker = plane->marker; unsigned long total_byte_count = MERAM_CALC_BYTECOUNT(xres, yres); @@ -427,8 +387,8 @@ static int meram_init(struct sh_mobile_meram_priv *priv, return 0; } -static void meram_deinit(struct sh_mobile_meram_priv *priv, - struct sh_mobile_meram_fb_plane *plane) +static void meram_plane_cleanup(struct sh_mobile_meram_priv *priv, + struct sh_mobile_meram_fb_plane *plane) { /* disable ICB */ meram_write_icb(priv->base, plane->cache->index, MExxCTL, @@ -441,20 +401,82 @@ static void meram_deinit(struct sh_mobile_meram_priv *priv, } /* ----------------------------------------------------------------------------- - * Registration/unregistration + * MERAM operations */ -static void *sh_mobile_meram_register(struct sh_mobile_meram_info *pdata, - const struct sh_mobile_meram_cfg *cfg, - unsigned int xres, unsigned int yres, - unsigned int pixelformat, - unsigned int *pitch) +unsigned long sh_mobile_meram_alloc(struct sh_mobile_meram_info *pdata, + size_t size) +{ + struct sh_mobile_meram_priv *priv = pdata->priv; + + return meram_alloc(priv, size); +} +EXPORT_SYMBOL_GPL(sh_mobile_meram_alloc); + +void sh_mobile_meram_free(struct sh_mobile_meram_info *pdata, unsigned long mem, + size_t size) +{ + struct sh_mobile_meram_priv *priv = pdata->priv; + + meram_free(priv, mem, size); +} +EXPORT_SYMBOL_GPL(sh_mobile_meram_free); + +/* Allocate memory for the ICBs and mark them as used. */ +static struct sh_mobile_meram_fb_cache * +meram_cache_alloc(struct sh_mobile_meram_priv *priv, + const struct sh_mobile_meram_cfg *cfg, + int pixelformat) +{ + unsigned int nplanes = is_nvcolor(pixelformat) ? 
2 : 1; + struct sh_mobile_meram_fb_cache *cache; + int ret; + + cache = kzalloc(sizeof(*cache), GFP_KERNEL); + if (cache == NULL) + return ERR_PTR(-ENOMEM); + + cache->nplanes = nplanes; + + ret = meram_plane_alloc(priv, &cache->planes[0], + cfg->icb[0].meram_size); + if (ret < 0) + goto error; + + cache->planes[0].marker->current_reg = 1; + cache->planes[0].marker->pixelformat = pixelformat; + + if (cache->nplanes == 1) + return cache; + + ret = meram_plane_alloc(priv, &cache->planes[1], + cfg->icb[1].meram_size); + if (ret < 0) { + meram_plane_free(priv, &cache->planes[0]); + goto error; + } + + return cache; + +error: + kfree(cache); + return ERR_PTR(-ENOMEM); +} + +void *sh_mobile_meram_cache_alloc(struct sh_mobile_meram_info *pdata, + const struct sh_mobile_meram_cfg *cfg, + unsigned int xres, unsigned int yres, + unsigned int pixelformat, unsigned int *pitch) { struct sh_mobile_meram_fb_cache *cache; struct sh_mobile_meram_priv *priv = pdata->priv; struct platform_device *pdev = pdata->pdev; + unsigned int nplanes = is_nvcolor(pixelformat) ? 2 : 1; unsigned int out_pitch; + if (priv == NULL) + return ERR_PTR(-ENODEV); + if (pixelformat != SH_MOBILE_MERAM_PF_NV && pixelformat != SH_MOBILE_MERAM_PF_NV24 && pixelformat != SH_MOBILE_MERAM_PF_RGB) @@ -469,10 +491,16 @@ static void *sh_mobile_meram_register(struct sh_mobile_meram_info *pdata, return ERR_PTR(-EINVAL); } + if (cfg->icb[0].meram_size == 0) + return ERR_PTR(-EINVAL); + + if (nplanes == 2 && cfg->icb[1].meram_size == 0) + return ERR_PTR(-EINVAL); + mutex_lock(&priv->lock); /* We now register the ICBs and allocate the MERAM regions. */ - cache = meram_alloc(priv, cfg, pixelformat); + cache = meram_cache_alloc(priv, cfg, pixelformat); if (IS_ERR(cache)) { dev_err(&pdev->dev, "MERAM allocation failed (%ld).", PTR_ERR(cache)); @@ -480,42 +508,50 @@ static void *sh_mobile_meram_register(struct sh_mobile_meram_info *pdata, } /* initialize MERAM */ - meram_init(priv, &cache->planes[0], xres, yres, &out_pitch); + meram_plane_init(priv, &cache->planes[0], xres, yres, &out_pitch); *pitch = out_pitch; if (pixelformat == SH_MOBILE_MERAM_PF_NV) - meram_init(priv, &cache->planes[1], xres, (yres + 1) / 2, - &out_pitch); + meram_plane_init(priv, &cache->planes[1], + xres, (yres + 1) / 2, &out_pitch); else if (pixelformat == SH_MOBILE_MERAM_PF_NV24) - meram_init(priv, &cache->planes[1], 2 * xres, (yres + 1) / 2, - &out_pitch); + meram_plane_init(priv, &cache->planes[1], + 2 * xres, (yres + 1) / 2, &out_pitch); err: mutex_unlock(&priv->lock); return cache; } +EXPORT_SYMBOL_GPL(sh_mobile_meram_cache_alloc); -static void -sh_mobile_meram_unregister(struct sh_mobile_meram_info *pdata, void *data) +void +sh_mobile_meram_cache_free(struct sh_mobile_meram_info *pdata, void *data) { struct sh_mobile_meram_fb_cache *cache = data; struct sh_mobile_meram_priv *priv = pdata->priv; mutex_lock(&priv->lock); - /* deinit & free */ - meram_deinit(priv, &cache->planes[0]); - if (cache->nplanes == 2) - meram_deinit(priv, &cache->planes[1]); + /* Cleanup and free. 
*/ + meram_plane_cleanup(priv, &cache->planes[0]); + meram_plane_free(priv, &cache->planes[0]); + + if (cache->nplanes == 2) { + meram_plane_cleanup(priv, &cache->planes[1]); + meram_plane_free(priv, &cache->planes[1]); + } - meram_free(priv, cache); + kfree(cache); mutex_unlock(&priv->lock); } - -static void -sh_mobile_meram_update(struct sh_mobile_meram_info *pdata, void *data, - unsigned long base_addr_y, unsigned long base_addr_c, - unsigned long *icb_addr_y, unsigned long *icb_addr_c) +EXPORT_SYMBOL_GPL(sh_mobile_meram_cache_free); + +void +sh_mobile_meram_cache_update(struct sh_mobile_meram_info *pdata, void *data, + unsigned long base_addr_y, + unsigned long base_addr_c, + unsigned long *icb_addr_y, + unsigned long *icb_addr_c) { struct sh_mobile_meram_fb_cache *cache = data; struct sh_mobile_meram_priv *priv = pdata->priv; @@ -527,13 +563,7 @@ sh_mobile_meram_update(struct sh_mobile_meram_info *pdata, void *data, mutex_unlock(&priv->lock); } - -static struct sh_mobile_meram_ops sh_mobile_meram_ops = { - .module = THIS_MODULE, - .meram_register = sh_mobile_meram_register, - .meram_unregister = sh_mobile_meram_unregister, - .meram_update = sh_mobile_meram_update, -}; +EXPORT_SYMBOL_GPL(sh_mobile_meram_cache_update); /* ----------------------------------------------------------------------------- * Power management @@ -624,7 +654,6 @@ static int __devinit sh_mobile_meram_probe(struct platform_device *pdev) for (i = 0; i < MERAM_ICB_NUM; ++i) priv->icbs[i].index = i; - pdata->ops = &sh_mobile_meram_ops; pdata->priv = priv; pdata->pdev = pdev; diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c index 26f86428949..5533a32c6ca 100644 --- a/drivers/video/smscufx.c +++ b/drivers/video/smscufx.c @@ -904,7 +904,7 @@ static ssize_t ufx_ops_write(struct fb_info *info, const char __user *buf, result = fb_sys_write(info, buf, count, ppos); if (result > 0) { - int start = max((int)(offset / info->fix.line_length) - 1, 0); + int start = max((int)(offset / info->fix.line_length), 0); int lines = min((u32)((result / info->fix.line_length) + 1), (u32)info->var.yres); diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c index 90a2e30272a..2f6b2b835f8 100644 --- a/drivers/video/w100fb.c +++ b/drivers/video/w100fb.c @@ -1567,6 +1567,18 @@ static void w100_suspend(u32 mode) val = readl(remapped_regs + mmPLL_CNTL); val |= 0x00000004; /* bit2=1 */ writel(val, remapped_regs + mmPLL_CNTL); + + writel(0x00000000, remapped_regs + mmLCDD_CNTL1); + writel(0x00000000, remapped_regs + mmLCDD_CNTL2); + writel(0x00000000, remapped_regs + mmGENLCD_CNTL1); + writel(0x00000000, remapped_regs + mmGENLCD_CNTL2); + writel(0x00000000, remapped_regs + mmGENLCD_CNTL3); + + val = readl(remapped_regs + mmMEM_EXT_CNTL); + val |= 0xF0000000; + val &= ~(0x00000001); + writel(val, remapped_regs + mmMEM_EXT_CNTL); + writel(0x0000001d, remapped_regs + mmPWRMGT_CNTL); } } diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c index fc06fd27065..dd6f7ee1e31 100644 --- a/fs/9p/vfs_file.c +++ b/fs/9p/vfs_file.c @@ -610,6 +610,9 @@ v9fs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) p9_debug(P9_DEBUG_VFS, "page %p fid %lx\n", page, (unsigned long)filp->private_data); + /* Update file times before taking page lock */ + file_update_time(filp); + v9inode = V9FS_I(inode); /* make sure the cache has finished storing the page */ v9fs_fscache_wait_on_page_write(inode, page); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index fadeba6a5db..62e0cafd6e2 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c 
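Before the btrfs hunks, one note on the fault-path ordering: the v9fs hunk above moves file_update_time() ahead of any page locking in the fault path, and the fs/buffer.c and ceph hunks below apply the same ordering as part of the freeze-protection rework. A condensed sketch of the resulting page_mkwrite shape, using hypothetical my_fs_* names rather than code from any of these filesystems:

/* Sketch only; error handling and truncation checks elided. */
static int my_fs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	/* Update file times before taking the page lock. */
	file_update_time(vma->vm_file);

	lock_page(page);
	/* ... check the page is still mapped and within i_size ... */
	set_page_dirty(page);
	wait_on_page_writeback(page);
	return VM_FAULT_LOCKED;
}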
@@ -1614,8 +1614,6 @@ static int cleaner_kthread(void *arg) struct btrfs_root *root = arg; do { - vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE); - if (!(root->fs_info->sb->s_flags & MS_RDONLY) && mutex_trylock(&root->fs_info->cleaner_mutex)) { btrfs_run_delayed_iputs(root); @@ -1647,7 +1645,6 @@ static int transaction_kthread(void *arg) do { cannot_commit = false; delay = HZ * 30; - vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE); mutex_lock(&root->fs_info->transaction_kthread_mutex); spin_lock(&root->fs_info->trans_lock); diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 9aa01ec2138..5caf285c6e4 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1379,7 +1379,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, ssize_t err = 0; size_t count, ocount; - vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); + sb_start_write(inode->i_sb); mutex_lock(&inode->i_mutex); @@ -1469,6 +1469,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, num_written = err; } out: + sb_end_write(inode->i_sb); current->backing_dev_info = NULL; return num_written ? num_written : err; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 48bdfd2591c..83baec24946 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6629,6 +6629,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) u64 page_start; u64 page_end; + sb_start_pagefault(inode->i_sb); ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); if (!ret) { ret = file_update_time(vma->vm_file); @@ -6718,12 +6719,15 @@ again: unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS); out_unlock: - if (!ret) + if (!ret) { + sb_end_pagefault(inode->i_sb); return VM_FAULT_LOCKED; + } unlock_page(page); out: btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); out_noreserve: + sb_end_pagefault(inode->i_sb); return ret; } diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 43f0012016e..bc2f6ffff3c 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -195,6 +195,10 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) if (!inode_owner_or_capable(inode)) return -EACCES; + ret = mnt_want_write_file(file); + if (ret) + return ret; + mutex_lock(&inode->i_mutex); ip_oldflags = ip->flags; @@ -209,10 +213,6 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) } } - ret = mnt_want_write_file(file); - if (ret) - goto out_unlock; - if (flags & FS_SYNC_FL) ip->flags |= BTRFS_INODE_SYNC; else @@ -275,9 +275,9 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) inode->i_flags = i_oldflags; } - mnt_drop_write_file(file); out_unlock: mutex_unlock(&inode->i_mutex); + mnt_drop_write_file(file); return ret; } @@ -664,6 +664,10 @@ static noinline int btrfs_mksubvol(struct path *parent, struct dentry *dentry; int error; + error = mnt_want_write(parent->mnt); + if (error) + return error; + mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); dentry = lookup_one_len(name, parent->dentry, namelen); @@ -699,6 +703,7 @@ out_dput: dput(dentry); out_unlock: mutex_unlock(&dir->i_mutex); + mnt_drop_write(parent->mnt); return error; } diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 7ac7cdcc294..17be3dedacb 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -335,6 +335,8 @@ again: if (!h) return ERR_PTR(-ENOMEM); + sb_start_intwrite(root->fs_info->sb); + if (may_wait_transaction(root, type)) wait_current_trans(root); @@ -345,6 +347,7 @@ again: } while (ret == -EBUSY); if (ret < 0) { + sb_end_intwrite(root->fs_info->sb); 
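The annotations introduced throughout these btrfs hunks come in matched pairs: sb_start_write()/sb_end_write() around the write path, sb_start_pagefault()/sb_end_pagefault() around page faults, and sb_start_intwrite()/sb_end_intwrite() around internal transactions. Each level must be balanced on every exit path, which is why the error return above gains an sb_end_intwrite() before freeing the handle. A condensed, illustrative sketch of the bracketing (my_fs_write and do_the_write are hypothetical names, not btrfs code):

static ssize_t do_the_write(struct inode *inode, const char *buf, size_t count);

static ssize_t my_fs_write(struct inode *inode, const char *buf, size_t count)
{
	ssize_t ret;

	sb_start_write(inode->i_sb);	/* waits here while the fs is frozen */
	ret = do_the_write(inode, buf, count);
	sb_end_write(inode->i_sb);	/* must pair on every return path */
	return ret;
}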
kmem_cache_free(btrfs_trans_handle_cachep, h); return ERR_PTR(ret); } @@ -548,6 +551,8 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, btrfs_trans_release_metadata(trans, root); trans->block_rsv = NULL; + sb_end_intwrite(root->fs_info->sb); + if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) && should_end_transaction(trans, root)) { trans->transaction->blocked = 1; @@ -1578,6 +1583,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, put_transaction(cur_trans); put_transaction(cur_trans); + sb_end_intwrite(root->fs_info->sb); + trace_btrfs_transaction_commit(root); btrfs_scrub_continue(root); diff --git a/fs/buffer.c b/fs/buffer.c index c7062c896d7..9f6d2e41281 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -2306,8 +2306,8 @@ EXPORT_SYMBOL(block_commit_write); * beyond EOF, then the page is guaranteed safe against truncation until we * unlock the page. * - * Direct callers of this function should call vfs_check_frozen() so that page - * fault does not busyloop until the fs is thawed. + * Direct callers of this function should protect against filesystem freezing + * using sb_start_write() - sb_end_write() functions. */ int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, get_block_t get_block) @@ -2318,6 +2318,12 @@ int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, loff_t size; int ret; + /* + * Update file times before taking page lock. We may end up failing the + * fault so this update may be superfluous but who really cares... + */ + file_update_time(vma->vm_file); + lock_page(page); size = i_size_read(inode); if ((page->mapping != inode->i_mapping) || @@ -2339,18 +2345,7 @@ int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, if (unlikely(ret < 0)) goto out_unlock; - /* - * Freezing in progress? We check after the page is marked dirty and - * with page lock held so if the test here fails, we are sure freezing - * code will wait during syncing until the page fault is done - at that - * point page will be dirty and unlocked so freezing code will write it - * and writeprotect it again. - */ set_page_dirty(page); - if (inode->i_sb->s_frozen != SB_UNFROZEN) { - ret = -EAGAIN; - goto out_unlock; - } wait_on_page_writeback(page); return 0; out_unlock: @@ -2365,12 +2360,9 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, int ret; struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb; - /* - * This check is racy but catches the common case. The check in - * __block_page_mkwrite() is reliable. 
- */ - vfs_check_frozen(sb, SB_FREEZE_WRITE); + sb_start_pagefault(sb); ret = __block_page_mkwrite(vma, vmf, get_block); + sb_end_pagefault(sb); return block_page_mkwrite_return(ret); } EXPORT_SYMBOL(block_page_mkwrite); diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 8b67304e4b8..452e71a1b75 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -1184,6 +1184,9 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) loff_t size, len; int ret; + /* Update time before taking page lock */ + file_update_time(vma->vm_file); + size = i_size_read(inode); if (off + PAGE_CACHE_SIZE <= size) len = PAGE_CACHE_SIZE; diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index f391f1e7541..e5b77319c97 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -633,44 +633,6 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry, return dentry; } -int ceph_atomic_open(struct inode *dir, struct dentry *dentry, - struct file *file, unsigned flags, umode_t mode, - int *opened) -{ - int err; - struct dentry *res = NULL; - - if (!(flags & O_CREAT)) { - if (dentry->d_name.len > NAME_MAX) - return -ENAMETOOLONG; - - err = ceph_init_dentry(dentry); - if (err < 0) - return err; - - return ceph_lookup_open(dir, dentry, file, flags, mode, opened); - } - - if (d_unhashed(dentry)) { - res = ceph_lookup(dir, dentry, 0); - if (IS_ERR(res)) - return PTR_ERR(res); - - if (res) - dentry = res; - } - - /* We don't deal with positive dentries here */ - if (dentry->d_inode) - return finish_no_open(file, res); - - *opened |= FILE_CREATED; - err = ceph_lookup_open(dir, dentry, file, flags, mode, opened); - dput(res); - - return err; -} - /* * If we do a create but get no trace back from the MDS, follow up with * a lookup (the VFS expects us to link up the provided dentry). diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 1b81d6c3187..ecebbc09bfc 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -4,6 +4,7 @@ #include <linux/sched.h> #include <linux/slab.h> #include <linux/file.h> +#include <linux/mount.h> #include <linux/namei.h> #include <linux/writeback.h> @@ -106,9 +107,6 @@ static int ceph_init_file(struct inode *inode, struct file *file, int fmode) } /* - * If the filp already has private_data, that means the file was - * already opened by intent during lookup, and we do nothing. - * * If we already have the requisite capabilities, we can satisfy * the open request locally (no need to request new caps from the * MDS). We do, however, need to inform the MDS (asynchronously) @@ -207,24 +205,29 @@ out: /* - * Do a lookup + open with a single request. - * - * If this succeeds, but some subsequent check in the vfs - * may_open() fails, the struct *file gets cleaned up (i.e. - * ceph_release gets called). So fear not! + * Do a lookup + open with a single request. If we get a non-existent + * file or symlink, return 1 so the VFS can retry. */ -int ceph_lookup_open(struct inode *dir, struct dentry *dentry, +int ceph_atomic_open(struct inode *dir, struct dentry *dentry, struct file *file, unsigned flags, umode_t mode, int *opened) { struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb); struct ceph_mds_client *mdsc = fsc->mdsc; struct ceph_mds_request *req; - struct dentry *ret; + struct dentry *dn; int err; - dout("ceph_lookup_open dentry %p '%.*s' flags %d mode 0%o\n", - dentry, dentry->d_name.len, dentry->d_name.name, flags, mode); + dout("atomic_open %p dentry %p '%.*s' %s flags %d mode 0%o\n", + dir, dentry, dentry->d_name.len, dentry->d_name.name, + d_unhashed(dentry) ? 
"unhashed" : "hashed", flags, mode); + + if (dentry->d_name.len > NAME_MAX) + return -ENAMETOOLONG; + + err = ceph_init_dentry(dentry); + if (err < 0) + return err; /* do the open */ req = prepare_open_request(dir->i_sb, flags, mode); @@ -241,22 +244,31 @@ int ceph_lookup_open(struct inode *dir, struct dentry *dentry, (flags & (O_CREAT|O_TRUNC)) ? dir : NULL, req); err = ceph_handle_snapdir(req, dentry, err); - if (err) - goto out; - if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry) + if (err == 0 && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry) err = ceph_handle_notrace_create(dir, dentry); - if (err) - goto out; - err = finish_open(file, req->r_dentry, ceph_open, opened); -out: - ret = ceph_finish_lookup(req, dentry, err); - ceph_mdsc_put_request(req); - dout("ceph_lookup_open result=%p\n", ret); - if (IS_ERR(ret)) - return PTR_ERR(ret); + if (d_unhashed(dentry)) { + dn = ceph_finish_lookup(req, dentry, err); + if (IS_ERR(dn)) + err = PTR_ERR(dn); + } else { + /* we were given a hashed negative dentry */ + dn = NULL; + } + if (err) + goto out_err; + if (dn || dentry->d_inode == NULL || S_ISLNK(dentry->d_inode->i_mode)) { + /* make vfs retry on splice, ENOENT, or symlink */ + dout("atomic_open finish_no_open on dn %p\n", dn); + err = finish_no_open(file, dn); + } else { + dout("atomic_open finish_open on dn %p\n", dn); + err = finish_open(file, dentry, ceph_open, opened); + } - dput(ret); +out_err: + ceph_mdsc_put_request(req); + dout("atomic_open result=%d\n", err); return err; } diff --git a/fs/ceph/super.h b/fs/ceph/super.h index ebc95cc652b..66ebe720e40 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -806,9 +806,9 @@ extern int ceph_copy_from_page_vector(struct page **pages, loff_t off, size_t len); extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags); extern int ceph_open(struct inode *inode, struct file *file); -extern int ceph_lookup_open(struct inode *dir, struct dentry *dentry, - struct file *od, unsigned flags, - umode_t mode, int *opened); +extern int ceph_atomic_open(struct inode *dir, struct dentry *dentry, + struct file *file, unsigned flags, umode_t mode, + int *opened); extern int ceph_release(struct inode *inode, struct file *filp); /* dir.c */ diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 497da5ce704..977dc0e85cc 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -246,6 +246,16 @@ struct smb_version_operations { bool (*can_echo)(struct TCP_Server_Info *); /* send echo request */ int (*echo)(struct TCP_Server_Info *); + /* create directory */ + int (*mkdir)(const unsigned int, struct cifs_tcon *, const char *, + struct cifs_sb_info *); + /* set info on created directory */ + void (*mkdir_setinfo)(struct inode *, const char *, + struct cifs_sb_info *, struct cifs_tcon *, + const unsigned int); + /* remove directory */ + int (*rmdir)(const unsigned int, struct cifs_tcon *, const char *, + struct cifs_sb_info *); }; struct smb_version_values { diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index cf7fb185103..f1bbf8305d3 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -289,18 +289,15 @@ extern int CIFSSMBUnixSetFileInfo(const unsigned int xid, u16 fid, u32 pid_of_opener); extern int CIFSSMBUnixSetPathInfo(const unsigned int xid, - struct cifs_tcon *tcon, char *file_name, + struct cifs_tcon *tcon, const char *file_name, const struct cifs_unix_set_info_args *args, const struct nls_table *nls_codepage, - int remap_special_chars); + int remap); extern int CIFSSMBMkDir(const unsigned int 
xid, struct cifs_tcon *tcon, - const char *newName, - const struct nls_table *nls_codepage, - int remap_special_chars); + const char *name, struct cifs_sb_info *cifs_sb); extern int CIFSSMBRmDir(const unsigned int xid, struct cifs_tcon *tcon, - const char *name, const struct nls_table *nls_codepage, - int remap_special_chars); + const char *name, struct cifs_sb_info *cifs_sb); extern int CIFSPOSIXDelFile(const unsigned int xid, struct cifs_tcon *tcon, const char *name, __u16 type, const struct nls_table *nls_codepage, diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index cabc7a01f5d..074923ce593 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -948,15 +948,15 @@ DelFileRetry: } int -CIFSSMBRmDir(const unsigned int xid, struct cifs_tcon *tcon, - const char *dirName, const struct nls_table *nls_codepage, - int remap) +CIFSSMBRmDir(const unsigned int xid, struct cifs_tcon *tcon, const char *name, + struct cifs_sb_info *cifs_sb) { DELETE_DIRECTORY_REQ *pSMB = NULL; DELETE_DIRECTORY_RSP *pSMBr = NULL; int rc = 0; int bytes_returned; int name_len; + int remap = cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR; cFYI(1, "In CIFSSMBRmDir"); RmDirRetry: @@ -966,14 +966,15 @@ RmDirRetry: return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { - name_len = cifsConvertToUTF16((__le16 *) pSMB->DirName, dirName, - PATH_MAX, nls_codepage, remap); + name_len = cifsConvertToUTF16((__le16 *) pSMB->DirName, name, + PATH_MAX, cifs_sb->local_nls, + remap); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve check for buffer overruns BB */ - name_len = strnlen(dirName, PATH_MAX); + name_len = strnlen(name, PATH_MAX); name_len++; /* trailing null */ - strncpy(pSMB->DirName, dirName, name_len); + strncpy(pSMB->DirName, name, name_len); } pSMB->BufferFormat = 0x04; @@ -992,14 +993,15 @@ RmDirRetry: } int -CIFSSMBMkDir(const unsigned int xid, struct cifs_tcon *tcon, - const char *name, const struct nls_table *nls_codepage, int remap) +CIFSSMBMkDir(const unsigned int xid, struct cifs_tcon *tcon, const char *name, + struct cifs_sb_info *cifs_sb) { int rc = 0; CREATE_DIRECTORY_REQ *pSMB = NULL; CREATE_DIRECTORY_RSP *pSMBr = NULL; int bytes_returned; int name_len; + int remap = cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR; cFYI(1, "In CIFSSMBMkDir"); MkDirRetry: @@ -1010,7 +1012,8 @@ MkDirRetry: if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUTF16((__le16 *) pSMB->DirName, name, - PATH_MAX, nls_codepage, remap); + PATH_MAX, cifs_sb->local_nls, + remap); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve check for buffer overruns BB */ @@ -5943,7 +5946,7 @@ CIFSSMBUnixSetFileInfo(const unsigned int xid, struct cifs_tcon *tcon, int CIFSSMBUnixSetPathInfo(const unsigned int xid, struct cifs_tcon *tcon, - char *fileName, + const char *file_name, const struct cifs_unix_set_info_args *args, const struct nls_table *nls_codepage, int remap) { @@ -5964,14 +5967,14 @@ setPermsRetry: if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = - cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName, + cifsConvertToUTF16((__le16 *) pSMB->FileName, file_name, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve the check for buffer overruns BB */ - name_len = strnlen(fileName, PATH_MAX); + name_len = strnlen(file_name, PATH_MAX); name_len++; /* trailing null */ - strncpy(pSMB->FileName, fileName, name_len); + strncpy(pSMB->FileName, file_name, name_len); } params = 6 + name_len; diff --git a/fs/cifs/inode.c 
b/fs/cifs/inode.c index 35cb6a374a4..7354877fa3b 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -1219,16 +1219,153 @@ unlink_out: return rc; } +static int +cifs_mkdir_qinfo(struct inode *inode, struct dentry *dentry, umode_t mode, + const char *full_path, struct cifs_sb_info *cifs_sb, + struct cifs_tcon *tcon, const unsigned int xid) +{ + int rc = 0; + struct inode *newinode = NULL; + + if (tcon->unix_ext) + rc = cifs_get_inode_info_unix(&newinode, full_path, inode->i_sb, + xid); + else + rc = cifs_get_inode_info(&newinode, full_path, NULL, + inode->i_sb, xid, NULL); + if (rc) + return rc; + + d_instantiate(dentry, newinode); + /* + * setting nlink not necessary except in cases where we failed to get it + * from the server or was set bogus + */ + if ((dentry->d_inode) && (dentry->d_inode->i_nlink < 2)) + set_nlink(dentry->d_inode, 2); + + mode &= ~current_umask(); + /* must turn on setgid bit if parent dir has it */ + if (inode->i_mode & S_ISGID) + mode |= S_ISGID; + + if (tcon->unix_ext) { + struct cifs_unix_set_info_args args = { + .mode = mode, + .ctime = NO_CHANGE_64, + .atime = NO_CHANGE_64, + .mtime = NO_CHANGE_64, + .device = 0, + }; + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { + args.uid = (__u64)current_fsuid(); + if (inode->i_mode & S_ISGID) + args.gid = (__u64)inode->i_gid; + else + args.gid = (__u64)current_fsgid(); + } else { + args.uid = NO_CHANGE_64; + args.gid = NO_CHANGE_64; + } + CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args, + cifs_sb->local_nls, + cifs_sb->mnt_cifs_flags & + CIFS_MOUNT_MAP_SPECIAL_CHR); + } else { + struct TCP_Server_Info *server = tcon->ses->server; + if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) && + (mode & S_IWUGO) == 0 && server->ops->mkdir_setinfo) + server->ops->mkdir_setinfo(newinode, full_path, cifs_sb, + tcon, xid); + if (dentry->d_inode) { + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) + dentry->d_inode->i_mode = (mode | S_IFDIR); + + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { + dentry->d_inode->i_uid = current_fsuid(); + if (inode->i_mode & S_ISGID) + dentry->d_inode->i_gid = inode->i_gid; + else + dentry->d_inode->i_gid = + current_fsgid(); + } + } + } + return rc; +} + +static int +cifs_posix_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode, + const char *full_path, struct cifs_sb_info *cifs_sb, + struct cifs_tcon *tcon, const unsigned int xid) +{ + int rc = 0; + u32 oplock = 0; + FILE_UNIX_BASIC_INFO *info = NULL; + struct inode *newinode = NULL; + struct cifs_fattr fattr; + + info = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL); + if (info == NULL) { + rc = -ENOMEM; + goto posix_mkdir_out; + } + + mode &= ~current_umask(); + rc = CIFSPOSIXCreate(xid, tcon, SMB_O_DIRECTORY | SMB_O_CREAT, mode, + NULL /* netfid */, info, &oplock, full_path, + cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & + CIFS_MOUNT_MAP_SPECIAL_CHR); + if (rc == -EOPNOTSUPP) + goto posix_mkdir_out; + else if (rc) { + cFYI(1, "posix mkdir returned 0x%x", rc); + d_drop(dentry); + goto posix_mkdir_out; + } + + if (info->Type == cpu_to_le32(-1)) + /* no return info, go query for it */ + goto posix_mkdir_get_info; + /* + * BB check (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID ) to see if + * need to set uid/gid. 
+ */ + + cifs_unix_basic_to_fattr(&fattr, info, cifs_sb); + cifs_fill_uniqueid(inode->i_sb, &fattr); + newinode = cifs_iget(inode->i_sb, &fattr); + if (!newinode) + goto posix_mkdir_get_info; + + d_instantiate(dentry, newinode); + +#ifdef CONFIG_CIFS_DEBUG2 + cFYI(1, "instantiated dentry %p %s to inode %p", dentry, + dentry->d_name.name, newinode); + + if (newinode->i_nlink != 2) + cFYI(1, "unexpected number of links %d", newinode->i_nlink); +#endif + +posix_mkdir_out: + kfree(info); + return rc; +posix_mkdir_get_info: + rc = cifs_mkdir_qinfo(inode, dentry, mode, full_path, cifs_sb, tcon, + xid); + goto posix_mkdir_out; +} + int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode) { - int rc = 0, tmprc; + int rc = 0; unsigned int xid; struct cifs_sb_info *cifs_sb; struct tcon_link *tlink; struct cifs_tcon *tcon; - char *full_path = NULL; - struct inode *newinode = NULL; - struct cifs_fattr fattr; + struct TCP_Server_Info *server; + char *full_path; cFYI(1, "In cifs_mkdir, mode = 0x%hx inode = 0x%p", mode, inode); @@ -1248,145 +1385,29 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode) if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))) { - u32 oplock = 0; - FILE_UNIX_BASIC_INFO *pInfo = - kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL); - if (pInfo == NULL) { - rc = -ENOMEM; + rc = cifs_posix_mkdir(inode, direntry, mode, full_path, cifs_sb, + tcon, xid); + if (rc != -EOPNOTSUPP) goto mkdir_out; - } - - mode &= ~current_umask(); - rc = CIFSPOSIXCreate(xid, tcon, SMB_O_DIRECTORY | SMB_O_CREAT, - mode, NULL /* netfid */, pInfo, &oplock, - full_path, cifs_sb->local_nls, - cifs_sb->mnt_cifs_flags & - CIFS_MOUNT_MAP_SPECIAL_CHR); - if (rc == -EOPNOTSUPP) { - kfree(pInfo); - goto mkdir_retry_old; - } else if (rc) { - cFYI(1, "posix mkdir returned 0x%x", rc); - d_drop(direntry); - } else { - if (pInfo->Type == cpu_to_le32(-1)) { - /* no return info, go query for it */ - kfree(pInfo); - goto mkdir_get_info; - } -/*BB check (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID ) to see if need - to set uid/gid */ - - cifs_unix_basic_to_fattr(&fattr, pInfo, cifs_sb); - cifs_fill_uniqueid(inode->i_sb, &fattr); - newinode = cifs_iget(inode->i_sb, &fattr); - if (!newinode) { - kfree(pInfo); - goto mkdir_get_info; - } - - d_instantiate(direntry, newinode); + } -#ifdef CONFIG_CIFS_DEBUG2 - cFYI(1, "instantiated dentry %p %s to inode %p", - direntry, direntry->d_name.name, newinode); + server = tcon->ses->server; - if (newinode->i_nlink != 2) - cFYI(1, "unexpected number of links %d", - newinode->i_nlink); -#endif - } - kfree(pInfo); + if (!server->ops->mkdir) { + rc = -ENOSYS; goto mkdir_out; } -mkdir_retry_old: + /* BB add setting the equivalent of mode via CreateX w/ACLs */ - rc = CIFSSMBMkDir(xid, tcon, full_path, cifs_sb->local_nls, - cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); + rc = server->ops->mkdir(xid, tcon, full_path, cifs_sb); if (rc) { cFYI(1, "cifs_mkdir returned 0x%x", rc); d_drop(direntry); - } else { -mkdir_get_info: - if (tcon->unix_ext) - rc = cifs_get_inode_info_unix(&newinode, full_path, - inode->i_sb, xid); - else - rc = cifs_get_inode_info(&newinode, full_path, NULL, - inode->i_sb, xid, NULL); - - d_instantiate(direntry, newinode); - /* setting nlink not necessary except in cases where we - * failed to get it from the server or was set bogus */ - if ((direntry->d_inode) && (direntry->d_inode->i_nlink < 2)) - set_nlink(direntry->d_inode, 2); - - mode &= ~current_umask(); - /* must turn 
on setgid bit if parent dir has it */ - if (inode->i_mode & S_ISGID) - mode |= S_ISGID; - - if (tcon->unix_ext) { - struct cifs_unix_set_info_args args = { - .mode = mode, - .ctime = NO_CHANGE_64, - .atime = NO_CHANGE_64, - .mtime = NO_CHANGE_64, - .device = 0, - }; - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { - args.uid = (__u64)current_fsuid(); - if (inode->i_mode & S_ISGID) - args.gid = (__u64)inode->i_gid; - else - args.gid = (__u64)current_fsgid(); - } else { - args.uid = NO_CHANGE_64; - args.gid = NO_CHANGE_64; - } - CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args, - cifs_sb->local_nls, - cifs_sb->mnt_cifs_flags & - CIFS_MOUNT_MAP_SPECIAL_CHR); - } else { - if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) && - (mode & S_IWUGO) == 0) { - FILE_BASIC_INFO pInfo; - struct cifsInodeInfo *cifsInode; - u32 dosattrs; - - memset(&pInfo, 0, sizeof(pInfo)); - cifsInode = CIFS_I(newinode); - dosattrs = cifsInode->cifsAttrs|ATTR_READONLY; - pInfo.Attributes = cpu_to_le32(dosattrs); - tmprc = CIFSSMBSetPathInfo(xid, tcon, - full_path, &pInfo, - cifs_sb->local_nls, - cifs_sb->mnt_cifs_flags & - CIFS_MOUNT_MAP_SPECIAL_CHR); - if (tmprc == 0) - cifsInode->cifsAttrs = dosattrs; - } - if (direntry->d_inode) { - if (cifs_sb->mnt_cifs_flags & - CIFS_MOUNT_DYNPERM) - direntry->d_inode->i_mode = - (mode | S_IFDIR); - - if (cifs_sb->mnt_cifs_flags & - CIFS_MOUNT_SET_UID) { - direntry->d_inode->i_uid = - current_fsuid(); - if (inode->i_mode & S_ISGID) - direntry->d_inode->i_gid = - inode->i_gid; - else - direntry->d_inode->i_gid = - current_fsgid(); - } - } - } + goto mkdir_out; } + + rc = cifs_mkdir_qinfo(inode, direntry, mode, full_path, cifs_sb, tcon, + xid); mkdir_out: /* * Force revalidate to get parent dir info when needed since cached @@ -1405,7 +1426,8 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry) unsigned int xid; struct cifs_sb_info *cifs_sb; struct tcon_link *tlink; - struct cifs_tcon *pTcon; + struct cifs_tcon *tcon; + struct TCP_Server_Info *server; char *full_path = NULL; struct cifsInodeInfo *cifsInode; @@ -1425,10 +1447,16 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry) rc = PTR_ERR(tlink); goto rmdir_exit; } - pTcon = tlink_tcon(tlink); + tcon = tlink_tcon(tlink); + server = tcon->ses->server; + + if (!server->ops->rmdir) { + rc = -ENOSYS; + cifs_put_tlink(tlink); + goto rmdir_exit; + } - rc = CIFSSMBRmDir(xid, pTcon, full_path, cifs_sb->local_nls, - cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); + rc = server->ops->rmdir(xid, tcon, full_path, cifs_sb); cifs_put_tlink(tlink); if (!rc) { diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index c40356d24c5..3129ac74b81 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c @@ -586,6 +586,27 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon) #endif } +static void +cifs_mkdir_setinfo(struct inode *inode, const char *full_path, + struct cifs_sb_info *cifs_sb, struct cifs_tcon *tcon, + const unsigned int xid) +{ + FILE_BASIC_INFO info; + struct cifsInodeInfo *cifsInode; + u32 dosattrs; + int rc; + + memset(&info, 0, sizeof(info)); + cifsInode = CIFS_I(inode); + dosattrs = cifsInode->cifsAttrs|ATTR_READONLY; + info.Attributes = cpu_to_le32(dosattrs); + rc = CIFSSMBSetPathInfo(xid, tcon, full_path, &info, cifs_sb->local_nls, + cifs_sb->mnt_cifs_flags & + CIFS_MOUNT_MAP_SPECIAL_CHR); + if (rc == 0) + cifsInode->cifsAttrs = dosattrs; +} + struct smb_version_operations smb1_operations = { .send_cancel = send_nt_cancel, .compare_fids = cifs_compare_fids, @@ -620,6 +641,9 @@ struct 
smb_version_operations smb1_operations = { .get_srv_inum = cifs_get_srv_inum, .build_path_to_root = cifs_build_path_to_root, .echo = CIFSSMBEcho, + .mkdir = CIFSSMBMkDir, + .mkdir_setinfo = cifs_mkdir_setinfo, + .rmdir = CIFSSMBRmDir, }; struct smb_version_values smb1_values = { diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c index 1ba5c405315..2aa5cb08c52 100644 --- a/fs/cifs/smb2inode.c +++ b/fs/cifs/smb2inode.c @@ -122,3 +122,42 @@ out: kfree(smb2_data); return rc; } + +int +smb2_mkdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name, + struct cifs_sb_info *cifs_sb) +{ + return smb2_open_op_close(xid, tcon, cifs_sb, name, + FILE_WRITE_ATTRIBUTES, FILE_CREATE, 0, + CREATE_NOT_FILE, NULL, SMB2_OP_MKDIR); +} + +void +smb2_mkdir_setinfo(struct inode *inode, const char *name, + struct cifs_sb_info *cifs_sb, struct cifs_tcon *tcon, + const unsigned int xid) +{ + FILE_BASIC_INFO data; + struct cifsInodeInfo *cifs_i; + u32 dosattrs; + int tmprc; + + memset(&data, 0, sizeof(data)); + cifs_i = CIFS_I(inode); + dosattrs = cifs_i->cifsAttrs | ATTR_READONLY; + data.Attributes = cpu_to_le32(dosattrs); + tmprc = smb2_open_op_close(xid, tcon, cifs_sb, name, + FILE_WRITE_ATTRIBUTES, FILE_CREATE, 0, + CREATE_NOT_FILE, &data, SMB2_OP_SET_INFO); + if (tmprc == 0) + cifs_i->cifsAttrs = dosattrs; +} + +int +smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name, + struct cifs_sb_info *cifs_sb) +{ + return smb2_open_op_close(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN, + 0, CREATE_NOT_FILE | CREATE_DELETE_ON_CLOSE, + NULL, SMB2_OP_DELETE); +} diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 410cf925ea2..826209bf368 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -318,6 +318,9 @@ struct smb_version_operations smb21_operations = { .query_path_info = smb2_query_path_info, .get_srv_inum = smb2_get_srv_inum, .build_path_to_root = smb2_build_path_to_root, + .mkdir = smb2_mkdir, + .mkdir_setinfo = smb2_mkdir_setinfo, + .rmdir = smb2_rmdir, }; struct smb_version_values smb21_values = { diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h index 902bbe2b5ad..bfaa7b148af 100644 --- a/fs/cifs/smb2proto.h +++ b/fs/cifs/smb2proto.h @@ -52,6 +52,14 @@ extern int smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, const char *full_path, FILE_ALL_INFO *data, bool *adjust_tz); +extern int smb2_mkdir(const unsigned int xid, struct cifs_tcon *tcon, + const char *name, struct cifs_sb_info *cifs_sb); +extern void smb2_mkdir_setinfo(struct inode *inode, const char *full_path, + struct cifs_sb_info *cifs_sb, + struct cifs_tcon *tcon, const unsigned int xid); +extern int smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, + const char *name, struct cifs_sb_info *cifs_sb); + /* * SMB2 Worker functions - most of protocol specific implementation details * are contained within these calls. 
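The cifs hunks above converge on one pattern: mkdir/rmdir are no longer called directly but dispatched through the per-dialect smb_version_operations table, so smb1_operations keeps CIFSSMBMkDir/CIFSSMBRmDir while smb21_operations routes smb2_mkdir/smb2_rmdir through smb2_open_op_close. The stand-alone sketch below — plain user-space C with hypothetical demo_* names, not the kernel code itself — shows the shape of that dispatch, including the -ENOSYS fallback that cifs_mkdir and cifs_rmdir now use when a dialect leaves an operation unset:

/*
 * Stand-alone sketch of the ops-table dispatch; the demo_* names are
 * hypothetical and only mirror the shape of smb_version_operations.
 */
#include <errno.h>
#include <stdio.h>

struct demo_server_ops {
	int (*mkdir)(const char *path);	/* NULL => dialect lacks the op */
	int (*rmdir)(const char *path);
};

static int demo_smb1_mkdir(const char *path)
{
	printf("SMB1: CREATE_DIRECTORY %s\n", path);
	return 0;
}

static int demo_smb2_mkdir(const char *path)
{
	printf("SMB2: create(FILE_CREATE | CREATE_NOT_FILE) %s\n", path);
	return 0;
}

static const struct demo_server_ops demo_smb1_ops = {
	.mkdir	= demo_smb1_mkdir,
	/* .rmdir deliberately left NULL to exercise the fallback */
};

static const struct demo_server_ops demo_smb2_ops = {
	.mkdir	= demo_smb2_mkdir,
};

/* Same shape as cifs_mkdir()/cifs_rmdir(): probe the pointer first */
static int demo_mkdir(const struct demo_server_ops *ops, const char *path)
{
	if (!ops->mkdir)
		return -ENOSYS;
	return ops->mkdir(path);
}

static int demo_rmdir(const struct demo_server_ops *ops, const char *path)
{
	if (!ops->rmdir)
		return -ENOSYS;
	return ops->rmdir(path);
}

int main(void)
{
	demo_mkdir(&demo_smb1_ops, "/share/dir1");
	demo_mkdir(&demo_smb2_ops, "/share/dir2");
	printf("rmdir without a handler -> %d\n",
	       demo_rmdir(&demo_smb1_ops, "/share/dir1"));
	return 0;
}

The payoff of the table is visible in the smb2ops.c hunk above: supporting a new dialect means filling in function pointers, while the callers in fs/cifs/inode.c stay protocol-neutral.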
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index 989e034f02b..cfb4b9fed52 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h @@ -385,8 +385,6 @@ struct ecryptfs_msg_ctx { struct mutex mux; }; -struct ecryptfs_daemon; - struct ecryptfs_daemon { #define ECRYPTFS_DAEMON_IN_READ 0x00000001 #define ECRYPTFS_DAEMON_IN_POLL 0x00000002 @@ -394,10 +392,7 @@ struct ecryptfs_daemon { #define ECRYPTFS_DAEMON_MISCDEV_OPEN 0x00000008 u32 flags; u32 num_queued_msg_ctx; - struct pid *pid; - uid_t euid; - struct user_namespace *user_ns; - struct task_struct *task; + struct file *file; struct mutex mux; struct list_head msg_ctx_out_queue; wait_queue_head_t wait; @@ -554,6 +549,8 @@ extern struct kmem_cache *ecryptfs_key_tfm_cache; struct inode *ecryptfs_get_inode(struct inode *lower_inode, struct super_block *sb); void ecryptfs_i_size_init(const char *page_virt, struct inode *inode); +int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry, + struct inode *ecryptfs_inode); int ecryptfs_decode_and_decrypt_filename(char **decrypted_name, size_t *decrypted_name_size, struct dentry *ecryptfs_dentry, @@ -607,13 +604,8 @@ int ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags); int ecryptfs_read_xattr_region(char *page_virt, struct inode *ecryptfs_inode); -int ecryptfs_process_helo(uid_t euid, struct user_namespace *user_ns, - struct pid *pid); -int ecryptfs_process_quit(uid_t euid, struct user_namespace *user_ns, - struct pid *pid); -int ecryptfs_process_response(struct ecryptfs_message *msg, uid_t euid, - struct user_namespace *user_ns, struct pid *pid, - u32 seq); +int ecryptfs_process_response(struct ecryptfs_daemon *daemon, + struct ecryptfs_message *msg, u32 seq); int ecryptfs_send_message(char *data, int data_len, struct ecryptfs_msg_ctx **msg_ctx); int ecryptfs_wait_for_response(struct ecryptfs_msg_ctx *msg_ctx, @@ -658,8 +650,7 @@ int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs, struct inode *ecryptfs_inode); struct page *ecryptfs_get_locked_page(struct inode *inode, loff_t index); int ecryptfs_exorcise_daemon(struct ecryptfs_daemon *daemon); -int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon, uid_t euid, - struct user_namespace *user_ns); +int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon); int ecryptfs_parse_packet_length(unsigned char *data, size_t *size, size_t *length_size); int ecryptfs_write_packet_length(char *dest, size_t size, @@ -671,8 +662,7 @@ int ecryptfs_send_miscdev(char *data, size_t data_size, u16 msg_flags, struct ecryptfs_daemon *daemon); void ecryptfs_msg_ctx_alloc_to_free(struct ecryptfs_msg_ctx *msg_ctx); int -ecryptfs_spawn_daemon(struct ecryptfs_daemon **daemon, uid_t euid, - struct user_namespace *user_ns, struct pid *pid); +ecryptfs_spawn_daemon(struct ecryptfs_daemon **daemon, struct file *file); int ecryptfs_init_kthread(void); void ecryptfs_destroy_kthread(void); int ecryptfs_privileged_open(struct file **lower_file, diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index 2b17f2f9b12..44ce5c6a541 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c @@ -138,29 +138,50 @@ out: return rc; } -static void ecryptfs_vma_close(struct vm_area_struct *vma) -{ - filemap_write_and_wait(vma->vm_file->f_mapping); -} - -static const struct vm_operations_struct ecryptfs_file_vm_ops = { - .close = ecryptfs_vma_close, - .fault = filemap_fault, -}; +struct kmem_cache *ecryptfs_file_info_cache; -static int 
ecryptfs_file_mmap(struct file *file, struct vm_area_struct *vma) +static int read_or_initialize_metadata(struct dentry *dentry) { + struct inode *inode = dentry->d_inode; + struct ecryptfs_mount_crypt_stat *mount_crypt_stat; + struct ecryptfs_crypt_stat *crypt_stat; int rc; - rc = generic_file_mmap(file, vma); + crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat; + mount_crypt_stat = &ecryptfs_superblock_to_private( + inode->i_sb)->mount_crypt_stat; + mutex_lock(&crypt_stat->cs_mutex); + + if (crypt_stat->flags & ECRYPTFS_POLICY_APPLIED && + crypt_stat->flags & ECRYPTFS_KEY_VALID) { + rc = 0; + goto out; + } + + rc = ecryptfs_read_metadata(dentry); if (!rc) - vma->vm_ops = &ecryptfs_file_vm_ops; + goto out; + + if (mount_crypt_stat->flags & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED) { + crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED + | ECRYPTFS_ENCRYPTED); + rc = 0; + goto out; + } + if (!(mount_crypt_stat->flags & ECRYPTFS_XATTR_METADATA_ENABLED) && + !i_size_read(ecryptfs_inode_to_lower(inode))) { + rc = ecryptfs_initialize_file(dentry, inode); + if (!rc) + goto out; + } + + rc = -EIO; +out: + mutex_unlock(&crypt_stat->cs_mutex); return rc; } -struct kmem_cache *ecryptfs_file_info_cache; - /** * ecryptfs_open * @inode: inode speciying file to open @@ -236,32 +257,9 @@ static int ecryptfs_open(struct inode *inode, struct file *file) rc = 0; goto out; } - mutex_lock(&crypt_stat->cs_mutex); - if (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED) - || !(crypt_stat->flags & ECRYPTFS_KEY_VALID)) { - rc = ecryptfs_read_metadata(ecryptfs_dentry); - if (rc) { - ecryptfs_printk(KERN_DEBUG, - "Valid headers not found\n"); - if (!(mount_crypt_stat->flags - & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED)) { - rc = -EIO; - printk(KERN_WARNING "Either the lower file " - "is not in a valid eCryptfs format, " - "or the key could not be retrieved. 
" - "Plaintext passthrough mode is not " - "enabled; returning -EIO\n"); - mutex_unlock(&crypt_stat->cs_mutex); - goto out_put; - } - rc = 0; - crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED - | ECRYPTFS_ENCRYPTED); - mutex_unlock(&crypt_stat->cs_mutex); - goto out; - } - } - mutex_unlock(&crypt_stat->cs_mutex); + rc = read_or_initialize_metadata(ecryptfs_dentry); + if (rc) + goto out_put; ecryptfs_printk(KERN_DEBUG, "inode w/ addr = [0x%p], i_ino = " "[0x%.16lx] size: [0x%.16llx]\n", inode, inode->i_ino, (unsigned long long)i_size_read(inode)); @@ -292,15 +290,7 @@ static int ecryptfs_release(struct inode *inode, struct file *file) static int ecryptfs_fsync(struct file *file, loff_t start, loff_t end, int datasync) { - int rc = 0; - - rc = generic_file_fsync(file, start, end, datasync); - if (rc) - goto out; - rc = vfs_fsync_range(ecryptfs_file_to_lower(file), start, end, - datasync); -out: - return rc; + return vfs_fsync(ecryptfs_file_to_lower(file), datasync); } static int ecryptfs_fasync(int fd, struct file *file, int flag) @@ -369,7 +359,7 @@ const struct file_operations ecryptfs_main_fops = { #ifdef CONFIG_COMPAT .compat_ioctl = ecryptfs_compat_ioctl, #endif - .mmap = ecryptfs_file_mmap, + .mmap = generic_file_mmap, .open = ecryptfs_open, .flush = ecryptfs_flush, .release = ecryptfs_release, diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index ffa2be57804..534b129ea67 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -143,6 +143,31 @@ static int ecryptfs_interpose(struct dentry *lower_dentry, return 0; } +static int ecryptfs_do_unlink(struct inode *dir, struct dentry *dentry, + struct inode *inode) +{ + struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); + struct inode *lower_dir_inode = ecryptfs_inode_to_lower(dir); + struct dentry *lower_dir_dentry; + int rc; + + dget(lower_dentry); + lower_dir_dentry = lock_parent(lower_dentry); + rc = vfs_unlink(lower_dir_inode, lower_dentry); + if (rc) { + printk(KERN_ERR "Error in vfs_unlink; rc = [%d]\n", rc); + goto out_unlock; + } + fsstack_copy_attr_times(dir, lower_dir_inode); + set_nlink(inode, ecryptfs_inode_to_lower(inode)->i_nlink); + inode->i_ctime = dir->i_ctime; + d_drop(dentry); +out_unlock: + unlock_dir(lower_dir_dentry); + dput(lower_dentry); + return rc; +} + /** * ecryptfs_do_create * @directory_inode: inode of the new file's dentry's parent in ecryptfs @@ -182,8 +207,10 @@ ecryptfs_do_create(struct inode *directory_inode, } inode = __ecryptfs_get_inode(lower_dentry->d_inode, directory_inode->i_sb); - if (IS_ERR(inode)) + if (IS_ERR(inode)) { + vfs_unlink(lower_dir_dentry->d_inode, lower_dentry); goto out_lock; + } fsstack_copy_attr_times(directory_inode, lower_dir_dentry->d_inode); fsstack_copy_inode_size(directory_inode, lower_dir_dentry->d_inode); out_lock: @@ -200,8 +227,8 @@ out: * * Returns zero on success */ -static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry, - struct inode *ecryptfs_inode) +int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry, + struct inode *ecryptfs_inode) { struct ecryptfs_crypt_stat *crypt_stat = &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat; @@ -264,7 +291,9 @@ ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry, * that this on disk file is prepared to be an ecryptfs file */ rc = ecryptfs_initialize_file(ecryptfs_dentry, ecryptfs_inode); if (rc) { - drop_nlink(ecryptfs_inode); + ecryptfs_do_unlink(directory_inode, ecryptfs_dentry, + ecryptfs_inode); + make_bad_inode(ecryptfs_inode); 
unlock_new_inode(ecryptfs_inode); iput(ecryptfs_inode); goto out; @@ -318,21 +347,20 @@ static int ecryptfs_lookup_interpose(struct dentry *dentry, struct vfsmount *lower_mnt; int rc = 0; - lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(dentry->d_parent)); - fsstack_copy_attr_atime(dir_inode, lower_dentry->d_parent->d_inode); - BUG_ON(!lower_dentry->d_count); - dentry_info = kmem_cache_alloc(ecryptfs_dentry_info_cache, GFP_KERNEL); - ecryptfs_set_dentry_private(dentry, dentry_info); if (!dentry_info) { printk(KERN_ERR "%s: Out of memory whilst attempting " "to allocate ecryptfs_dentry_info struct\n", __func__); dput(lower_dentry); - mntput(lower_mnt); - d_drop(dentry); return -ENOMEM; } + + lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(dentry->d_parent)); + fsstack_copy_attr_atime(dir_inode, lower_dentry->d_parent->d_inode); + BUG_ON(!lower_dentry->d_count); + + ecryptfs_set_dentry_private(dentry, dentry_info); ecryptfs_set_dentry_lower(dentry, lower_dentry); ecryptfs_set_dentry_lower_mnt(dentry, lower_mnt); @@ -381,12 +409,6 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, struct dentry *lower_dir_dentry, *lower_dentry; int rc = 0; - if ((ecryptfs_dentry->d_name.len == 1 - && !strcmp(ecryptfs_dentry->d_name.name, ".")) - || (ecryptfs_dentry->d_name.len == 2 - && !strcmp(ecryptfs_dentry->d_name.name, ".."))) { - goto out_d_drop; - } lower_dir_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry->d_parent); mutex_lock(&lower_dir_dentry->d_inode->i_mutex); lower_dentry = lookup_one_len(ecryptfs_dentry->d_name.name, @@ -397,8 +419,8 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, rc = PTR_ERR(lower_dentry); ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned " "[%d] on lower_dentry = [%s]\n", __func__, rc, - encrypted_and_encoded_name); - goto out_d_drop; + ecryptfs_dentry->d_name.name); + goto out; } if (lower_dentry->d_inode) goto interpose; @@ -415,7 +437,7 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, if (rc) { printk(KERN_ERR "%s: Error attempting to encrypt and encode " "filename; rc = [%d]\n", __func__, rc); - goto out_d_drop; + goto out; } mutex_lock(&lower_dir_dentry->d_inode->i_mutex); lower_dentry = lookup_one_len(encrypted_and_encoded_name, @@ -427,14 +449,11 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned " "[%d] on lower_dentry = [%s]\n", __func__, rc, encrypted_and_encoded_name); - goto out_d_drop; + goto out; } interpose: rc = ecryptfs_lookup_interpose(ecryptfs_dentry, lower_dentry, ecryptfs_dir_inode); - goto out; -out_d_drop: - d_drop(ecryptfs_dentry); out: kfree(encrypted_and_encoded_name); return ERR_PTR(rc); @@ -476,27 +495,7 @@ out_lock: static int ecryptfs_unlink(struct inode *dir, struct dentry *dentry) { - int rc = 0; - struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); - struct inode *lower_dir_inode = ecryptfs_inode_to_lower(dir); - struct dentry *lower_dir_dentry; - - dget(lower_dentry); - lower_dir_dentry = lock_parent(lower_dentry); - rc = vfs_unlink(lower_dir_inode, lower_dentry); - if (rc) { - printk(KERN_ERR "Error in vfs_unlink; rc = [%d]\n", rc); - goto out_unlock; - } - fsstack_copy_attr_times(dir, lower_dir_inode); - set_nlink(dentry->d_inode, - ecryptfs_inode_to_lower(dentry->d_inode)->i_nlink); - dentry->d_inode->i_ctime = dir->i_ctime; - d_drop(dentry); -out_unlock: - unlock_dir(lower_dir_dentry); - dput(lower_dentry); - return rc; + return ecryptfs_do_unlink(dir, 
dentry, dentry->d_inode); } static int ecryptfs_symlink(struct inode *dir, struct dentry *dentry, @@ -971,12 +970,6 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia) goto out; } - if (S_ISREG(inode->i_mode)) { - rc = filemap_write_and_wait(inode->i_mapping); - if (rc) - goto out; - fsstack_copy_attr_all(inode, lower_inode); - } memcpy(&lower_ia, ia, sizeof(lower_ia)); if (ia->ia_valid & ATTR_FILE) lower_ia.ia_file = ecryptfs_file_to_lower(ia->ia_file); diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index 1c0b3b6b75c..2768138eefe 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c @@ -279,6 +279,7 @@ static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options, char *fnek_src; char *cipher_key_bytes_src; char *fn_cipher_key_bytes_src; + u8 cipher_code; *check_ruid = 0; @@ -420,6 +421,18 @@ static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options, && !fn_cipher_key_bytes_set) mount_crypt_stat->global_default_fn_cipher_key_bytes = mount_crypt_stat->global_default_cipher_key_size; + + cipher_code = ecryptfs_code_for_cipher_string( + mount_crypt_stat->global_default_cipher_name, + mount_crypt_stat->global_default_cipher_key_size); + if (!cipher_code) { + ecryptfs_printk(KERN_ERR, + "eCryptfs doesn't support cipher: %s", + mount_crypt_stat->global_default_cipher_name); + rc = -EINVAL; + goto out; + } + mutex_lock(&key_tfm_list_mutex); if (!ecryptfs_tfm_exists(mount_crypt_stat->global_default_cipher_name, NULL)) { @@ -540,6 +553,15 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags } ecryptfs_set_superblock_lower(s, path.dentry->d_sb); + + /** + * Set the POSIX ACL flag based on whether they're enabled in the lower + * mount. Force a read-only eCryptfs mount if the lower mount is ro. + * Allow a ro eCryptfs mount even when the lower mount is rw. + */ + s->s_flags = flags & ~MS_POSIXACL; + s->s_flags |= path.dentry->d_sb->s_flags & (MS_RDONLY | MS_POSIXACL); + s->s_maxbytes = path.dentry->d_sb->s_maxbytes; s->s_blocksize = path.dentry->d_sb->s_blocksize; s->s_magic = ECRYPTFS_SUPER_MAGIC; diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c index a750f957b14..b29bb8bfa8d 100644 --- a/fs/ecryptfs/messaging.c +++ b/fs/ecryptfs/messaging.c @@ -32,8 +32,8 @@ static struct mutex ecryptfs_msg_ctx_lists_mux; static struct hlist_head *ecryptfs_daemon_hash; struct mutex ecryptfs_daemon_hash_mux; static int ecryptfs_hash_bits; -#define ecryptfs_uid_hash(uid) \ - hash_long((unsigned long)uid, ecryptfs_hash_bits) +#define ecryptfs_current_euid_hash(uid) \ + hash_long((unsigned long)current_euid(), ecryptfs_hash_bits) static u32 ecryptfs_msg_counter; static struct ecryptfs_msg_ctx *ecryptfs_msg_ctx_arr; @@ -105,26 +105,24 @@ void ecryptfs_msg_ctx_alloc_to_free(struct ecryptfs_msg_ctx *msg_ctx) /** * ecryptfs_find_daemon_by_euid - * @euid: The effective user id which maps to the desired daemon id - * @user_ns: The namespace in which @euid applies * @daemon: If return value is zero, points to the desired daemon pointer * * Must be called with ecryptfs_daemon_hash_mux held. * - * Search the hash list for the given user id. + * Search the hash list for the current effective user id. * * Returns zero if the user id exists in the list; non-zero otherwise. 
*/ -int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon, uid_t euid, - struct user_namespace *user_ns) +int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon) { struct hlist_node *elem; int rc; hlist_for_each_entry(*daemon, elem, - &ecryptfs_daemon_hash[ecryptfs_uid_hash(euid)], - euid_chain) { - if ((*daemon)->euid == euid && (*daemon)->user_ns == user_ns) { + &ecryptfs_daemon_hash[ecryptfs_current_euid_hash()], + euid_chain) { + if ((*daemon)->file->f_cred->euid == current_euid() && + (*daemon)->file->f_cred->user_ns == current_user_ns()) { rc = 0; goto out; } @@ -137,9 +135,7 @@ out: /** * ecryptfs_spawn_daemon - Create and initialize a new daemon struct * @daemon: Pointer to set to newly allocated daemon struct - * @euid: Effective user id for the daemon - * @user_ns: The namespace in which @euid applies - * @pid: Process id for the daemon + * @file: File used when opening /dev/ecryptfs * * Must be called ceremoniously while in possession of * ecryptfs_sacred_daemon_hash_mux @@ -147,8 +143,7 @@ out: * Returns zero on success; non-zero otherwise */ int -ecryptfs_spawn_daemon(struct ecryptfs_daemon **daemon, uid_t euid, - struct user_namespace *user_ns, struct pid *pid) +ecryptfs_spawn_daemon(struct ecryptfs_daemon **daemon, struct file *file) { int rc = 0; @@ -159,16 +154,13 @@ ecryptfs_spawn_daemon(struct ecryptfs_daemon **daemon, uid_t euid, "GFP_KERNEL memory\n", __func__, sizeof(**daemon)); goto out; } - (*daemon)->euid = euid; - (*daemon)->user_ns = get_user_ns(user_ns); - (*daemon)->pid = get_pid(pid); - (*daemon)->task = current; + (*daemon)->file = file; mutex_init(&(*daemon)->mux); INIT_LIST_HEAD(&(*daemon)->msg_ctx_out_queue); init_waitqueue_head(&(*daemon)->wait); (*daemon)->num_queued_msg_ctx = 0; hlist_add_head(&(*daemon)->euid_chain, - &ecryptfs_daemon_hash[ecryptfs_uid_hash(euid)]); + &ecryptfs_daemon_hash[ecryptfs_current_euid_hash()]); out: return rc; } @@ -188,9 +180,6 @@ int ecryptfs_exorcise_daemon(struct ecryptfs_daemon *daemon) if ((daemon->flags & ECRYPTFS_DAEMON_IN_READ) || (daemon->flags & ECRYPTFS_DAEMON_IN_POLL)) { rc = -EBUSY; - printk(KERN_WARNING "%s: Attempt to destroy daemon with pid " - "[0x%p], but it is in the midst of a read or a poll\n", - __func__, daemon->pid); mutex_unlock(&daemon->mux); goto out; } @@ -203,12 +192,6 @@ int ecryptfs_exorcise_daemon(struct ecryptfs_daemon *daemon) ecryptfs_msg_ctx_alloc_to_free(msg_ctx); } hlist_del(&daemon->euid_chain); - if (daemon->task) - wake_up_process(daemon->task); - if (daemon->pid) - put_pid(daemon->pid); - if (daemon->user_ns) - put_user_ns(daemon->user_ns); mutex_unlock(&daemon->mux); kzfree(daemon); out: @@ -216,42 +199,9 @@ out: } /** - * ecryptfs_process_quit - * @euid: The user ID owner of the message - * @user_ns: The namespace in which @euid applies - * @pid: The process ID for the userspace program that sent the - * message - * - * Deletes the corresponding daemon for the given euid and pid, if - * it is the registered that is requesting the deletion. Returns zero - * after deleting the desired daemon; non-zero otherwise. 
- */ -int ecryptfs_process_quit(uid_t euid, struct user_namespace *user_ns, - struct pid *pid) -{ - struct ecryptfs_daemon *daemon; - int rc; - - mutex_lock(&ecryptfs_daemon_hash_mux); - rc = ecryptfs_find_daemon_by_euid(&daemon, euid, user_ns); - if (rc || !daemon) { - rc = -EINVAL; - printk(KERN_ERR "Received request from user [%d] to " - "unregister unrecognized daemon [0x%p]\n", euid, pid); - goto out_unlock; - } - rc = ecryptfs_exorcise_daemon(daemon); -out_unlock: - mutex_unlock(&ecryptfs_daemon_hash_mux); - return rc; -} - -/** * ecryptfs_process_reponse * @msg: The ecryptfs message received; the caller should sanity check * msg->data_len and free the memory - * @pid: The process ID of the userspace application that sent the - * message * @seq: The sequence number of the message; must match the sequence * number for the existing message context waiting for this * response @@ -270,16 +220,11 @@ out_unlock: * * Returns zero on success; non-zero otherwise */ -int ecryptfs_process_response(struct ecryptfs_message *msg, uid_t euid, - struct user_namespace *user_ns, struct pid *pid, - u32 seq) +int ecryptfs_process_response(struct ecryptfs_daemon *daemon, + struct ecryptfs_message *msg, u32 seq) { - struct ecryptfs_daemon *uninitialized_var(daemon); struct ecryptfs_msg_ctx *msg_ctx; size_t msg_size; - struct nsproxy *nsproxy; - struct user_namespace *tsk_user_ns; - uid_t ctx_euid; int rc; if (msg->index >= ecryptfs_message_buf_len) { @@ -292,51 +237,6 @@ int ecryptfs_process_response(struct ecryptfs_message *msg, uid_t euid, } msg_ctx = &ecryptfs_msg_ctx_arr[msg->index]; mutex_lock(&msg_ctx->mux); - mutex_lock(&ecryptfs_daemon_hash_mux); - rcu_read_lock(); - nsproxy = task_nsproxy(msg_ctx->task); - if (nsproxy == NULL) { - rc = -EBADMSG; - printk(KERN_ERR "%s: Receiving process is a zombie. 
Dropping " - "message.\n", __func__); - rcu_read_unlock(); - mutex_unlock(&ecryptfs_daemon_hash_mux); - goto wake_up; - } - tsk_user_ns = __task_cred(msg_ctx->task)->user_ns; - ctx_euid = task_euid(msg_ctx->task); - rc = ecryptfs_find_daemon_by_euid(&daemon, ctx_euid, tsk_user_ns); - rcu_read_unlock(); - mutex_unlock(&ecryptfs_daemon_hash_mux); - if (rc) { - rc = -EBADMSG; - printk(KERN_WARNING "%s: User [%d] received a " - "message response from process [0x%p] but does " - "not have a registered daemon\n", __func__, - ctx_euid, pid); - goto wake_up; - } - if (ctx_euid != euid) { - rc = -EBADMSG; - printk(KERN_WARNING "%s: Received message from user " - "[%d]; expected message from user [%d]\n", __func__, - euid, ctx_euid); - goto unlock; - } - if (tsk_user_ns != user_ns) { - rc = -EBADMSG; - printk(KERN_WARNING "%s: Received message from user_ns " - "[0x%p]; expected message from user_ns [0x%p]\n", - __func__, user_ns, tsk_user_ns); - goto unlock; - } - if (daemon->pid != pid) { - rc = -EBADMSG; - printk(KERN_ERR "%s: User [%d] sent a message response " - "from an unrecognized process [0x%p]\n", - __func__, ctx_euid, pid); - goto unlock; - } if (msg_ctx->state != ECRYPTFS_MSG_CTX_STATE_PENDING) { rc = -EINVAL; printk(KERN_WARNING "%s: Desired context element is not " @@ -359,9 +259,8 @@ int ecryptfs_process_response(struct ecryptfs_message *msg, uid_t euid, } memcpy(msg_ctx->msg, msg, msg_size); msg_ctx->state = ECRYPTFS_MSG_CTX_STATE_DONE; - rc = 0; -wake_up: wake_up_process(msg_ctx->task); + rc = 0; unlock: mutex_unlock(&msg_ctx->mux); out: @@ -383,14 +282,11 @@ ecryptfs_send_message_locked(char *data, int data_len, u8 msg_type, struct ecryptfs_msg_ctx **msg_ctx) { struct ecryptfs_daemon *daemon; - uid_t euid = current_euid(); int rc; - rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns()); + rc = ecryptfs_find_daemon_by_euid(&daemon); if (rc || !daemon) { rc = -ENOTCONN; - printk(KERN_ERR "%s: User [%d] does not have a daemon " - "registered\n", __func__, euid); goto out; } mutex_lock(&ecryptfs_msg_ctx_lists_mux); diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c index c0038f6566d..412e6eda25f 100644 --- a/fs/ecryptfs/miscdev.c +++ b/fs/ecryptfs/miscdev.c @@ -33,7 +33,7 @@ static atomic_t ecryptfs_num_miscdev_opens; /** * ecryptfs_miscdev_poll - * @file: dev file (ignored) + * @file: dev file * @pt: dev poll table (ignored) * * Returns the poll mask @@ -41,20 +41,10 @@ static atomic_t ecryptfs_num_miscdev_opens; static unsigned int ecryptfs_miscdev_poll(struct file *file, poll_table *pt) { - struct ecryptfs_daemon *daemon; + struct ecryptfs_daemon *daemon = file->private_data; unsigned int mask = 0; - uid_t euid = current_euid(); - int rc; - mutex_lock(&ecryptfs_daemon_hash_mux); - /* TODO: Just use file->private_data? 
*/ - rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns()); - if (rc || !daemon) { - mutex_unlock(&ecryptfs_daemon_hash_mux); - return -EINVAL; - } mutex_lock(&daemon->mux); - mutex_unlock(&ecryptfs_daemon_hash_mux); if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) { printk(KERN_WARNING "%s: Attempt to poll on zombified " "daemon\n", __func__); @@ -79,7 +69,7 @@ out_unlock_daemon: /** * ecryptfs_miscdev_open * @inode: inode of miscdev handle (ignored) - * @file: file for miscdev handle (ignored) + * @file: file for miscdev handle * * Returns zero on success; non-zero otherwise */ @@ -87,7 +77,6 @@ static int ecryptfs_miscdev_open(struct inode *inode, struct file *file) { struct ecryptfs_daemon *daemon = NULL; - uid_t euid = current_euid(); int rc; mutex_lock(&ecryptfs_daemon_hash_mux); @@ -98,30 +87,20 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file) "count; rc = [%d]\n", __func__, rc); goto out_unlock_daemon_list; } - rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns()); - if (rc || !daemon) { - rc = ecryptfs_spawn_daemon(&daemon, euid, current_user_ns(), - task_pid(current)); - if (rc) { - printk(KERN_ERR "%s: Error attempting to spawn daemon; " - "rc = [%d]\n", __func__, rc); - goto out_module_put_unlock_daemon_list; - } - } - mutex_lock(&daemon->mux); - if (daemon->pid != task_pid(current)) { + rc = ecryptfs_find_daemon_by_euid(&daemon); + if (!rc) { rc = -EINVAL; - printk(KERN_ERR "%s: pid [0x%p] has registered with euid [%d], " - "but pid [0x%p] has attempted to open the handle " - "instead\n", __func__, daemon->pid, daemon->euid, - task_pid(current)); - goto out_unlock_daemon; + goto out_unlock_daemon_list; + } + rc = ecryptfs_spawn_daemon(&daemon, file); + if (rc) { + printk(KERN_ERR "%s: Error attempting to spawn daemon; " + "rc = [%d]\n", __func__, rc); + goto out_module_put_unlock_daemon_list; } + mutex_lock(&daemon->mux); if (daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN) { rc = -EBUSY; - printk(KERN_ERR "%s: Miscellaneous device handle may only be " - "opened once per daemon; pid [0x%p] already has this " - "handle open\n", __func__, daemon->pid); goto out_unlock_daemon; } daemon->flags |= ECRYPTFS_DAEMON_MISCDEV_OPEN; @@ -140,7 +119,7 @@ out_unlock_daemon_list: /** * ecryptfs_miscdev_release * @inode: inode of fs/ecryptfs/euid handle (ignored) - * @file: file for fs/ecryptfs/euid handle (ignored) + * @file: file for fs/ecryptfs/euid handle * * This keeps the daemon registered until the daemon sends another * ioctl to fs/ecryptfs/ctl or until the kernel module unregisters. @@ -150,20 +129,18 @@ out_unlock_daemon_list: static int ecryptfs_miscdev_release(struct inode *inode, struct file *file) { - struct ecryptfs_daemon *daemon = NULL; - uid_t euid = current_euid(); + struct ecryptfs_daemon *daemon = file->private_data; int rc; - mutex_lock(&ecryptfs_daemon_hash_mux); - rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns()); - if (rc || !daemon) - daemon = file->private_data; mutex_lock(&daemon->mux); BUG_ON(!(daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN)); daemon->flags &= ~ECRYPTFS_DAEMON_MISCDEV_OPEN; atomic_dec(&ecryptfs_num_miscdev_opens); mutex_unlock(&daemon->mux); + + mutex_lock(&ecryptfs_daemon_hash_mux); rc = ecryptfs_exorcise_daemon(daemon); + mutex_unlock(&ecryptfs_daemon_hash_mux); if (rc) { printk(KERN_CRIT "%s: Fatal error whilst attempting to " "shut down daemon; rc = [%d]. 
Please report this " @@ -171,7 +148,6 @@ ecryptfs_miscdev_release(struct inode *inode, struct file *file) BUG(); } module_put(THIS_MODULE); - mutex_unlock(&ecryptfs_daemon_hash_mux); return rc; } @@ -248,7 +224,7 @@ int ecryptfs_send_miscdev(char *data, size_t data_size, /** * ecryptfs_miscdev_read - format and send message from queue - * @file: fs/ecryptfs/euid miscdevfs handle (ignored) + * @file: miscdevfs handle * @buf: User buffer into which to copy the next message on the daemon queue * @count: Amount of space available in @buf * @ppos: Offset in file (ignored) @@ -262,43 +238,27 @@ static ssize_t ecryptfs_miscdev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { - struct ecryptfs_daemon *daemon; + struct ecryptfs_daemon *daemon = file->private_data; struct ecryptfs_msg_ctx *msg_ctx; size_t packet_length_size; char packet_length[ECRYPTFS_MAX_PKT_LEN_SIZE]; size_t i; size_t total_length; - uid_t euid = current_euid(); int rc; - mutex_lock(&ecryptfs_daemon_hash_mux); - /* TODO: Just use file->private_data? */ - rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns()); - if (rc || !daemon) { - mutex_unlock(&ecryptfs_daemon_hash_mux); - return -EINVAL; - } mutex_lock(&daemon->mux); - if (task_pid(current) != daemon->pid) { - mutex_unlock(&daemon->mux); - mutex_unlock(&ecryptfs_daemon_hash_mux); - return -EPERM; - } if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) { rc = 0; - mutex_unlock(&ecryptfs_daemon_hash_mux); printk(KERN_WARNING "%s: Attempt to read from zombified " "daemon\n", __func__); goto out_unlock_daemon; } if (daemon->flags & ECRYPTFS_DAEMON_IN_READ) { rc = 0; - mutex_unlock(&ecryptfs_daemon_hash_mux); goto out_unlock_daemon; } /* This daemon will not go away so long as this flag is set */ daemon->flags |= ECRYPTFS_DAEMON_IN_READ; - mutex_unlock(&ecryptfs_daemon_hash_mux); check_list: if (list_empty(&daemon->msg_ctx_out_queue)) { mutex_unlock(&daemon->mux); @@ -382,16 +342,12 @@ out_unlock_daemon: * ecryptfs_miscdev_response - miscdevess response to message previously sent to daemon * @data: Bytes comprising struct ecryptfs_message * @data_size: sizeof(struct ecryptfs_message) + data len - * @euid: Effective user id of miscdevess sending the miscdev response - * @user_ns: The namespace in which @euid applies - * @pid: Miscdevess id of miscdevess sending the miscdev response * @seq: Sequence number for miscdev response packet * * Returns zero on success; non-zero otherwise */ -static int ecryptfs_miscdev_response(char *data, size_t data_size, - uid_t euid, struct user_namespace *user_ns, - struct pid *pid, u32 seq) +static int ecryptfs_miscdev_response(struct ecryptfs_daemon *daemon, char *data, + size_t data_size, u32 seq) { struct ecryptfs_message *msg = (struct ecryptfs_message *)data; int rc; @@ -403,7 +359,7 @@ static int ecryptfs_miscdev_response(char *data, size_t data_size, rc = -EINVAL; goto out; } - rc = ecryptfs_process_response(msg, euid, user_ns, pid, seq); + rc = ecryptfs_process_response(daemon, msg, seq); if (rc) printk(KERN_ERR "Error processing response message; rc = [%d]\n", rc); @@ -413,7 +369,7 @@ out: /** * ecryptfs_miscdev_write - handle write to daemon miscdev handle - * @file: File for misc dev handle (ignored) + * @file: File for misc dev handle * @buf: Buffer containing user data * @count: Amount of data in @buf * @ppos: Pointer to offset in file (ignored) @@ -428,7 +384,6 @@ ecryptfs_miscdev_write(struct file *file, const char __user *buf, u32 seq; size_t packet_size, packet_size_length; char *data; - uid_t euid = 
current_euid(); unsigned char packet_size_peek[ECRYPTFS_MAX_PKT_LEN_SIZE]; ssize_t rc; @@ -488,10 +443,9 @@ memdup: } memcpy(&counter_nbo, &data[PKT_CTR_OFFSET], PKT_CTR_SIZE); seq = be32_to_cpu(counter_nbo); - rc = ecryptfs_miscdev_response( + rc = ecryptfs_miscdev_response(file->private_data, &data[PKT_LEN_OFFSET + packet_size_length], - packet_size, euid, current_user_ns(), - task_pid(current), seq); + packet_size, seq); if (rc) { printk(KERN_WARNING "%s: Failed to deliver miscdev " "response to requesting operation; rc = [%zd]\n", diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index a46b3a8fee1..bd1d57f98f7 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c @@ -66,18 +66,6 @@ static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc) { int rc; - /* - * Refuse to write the page out if we are called from reclaim context - * since our writepage() path may potentially allocate memory when - * calling into the lower fs vfs_write() which may in turn invoke - * us again. - */ - if (current->flags & PF_MEMALLOC) { - redirty_page_for_writepage(wbc, page); - rc = 0; - goto out; - } - rc = ecryptfs_encrypt_page(page); if (rc) { ecryptfs_printk(KERN_WARNING, "Error encrypting " @@ -498,7 +486,6 @@ static int ecryptfs_write_end(struct file *file, struct ecryptfs_crypt_stat *crypt_stat = &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat; int rc; - int need_unlock_page = 1; ecryptfs_printk(KERN_DEBUG, "Calling fill_zeros_to_end_of_page" "(page w/ index = [0x%.16lx], to = [%d])\n", index, to); @@ -519,26 +506,26 @@ static int ecryptfs_write_end(struct file *file, "zeros in page with index = [0x%.16lx]\n", index); goto out; } - set_page_dirty(page); - unlock_page(page); - need_unlock_page = 0; + rc = ecryptfs_encrypt_page(page); + if (rc) { + ecryptfs_printk(KERN_WARNING, "Error encrypting page (upper " + "index [0x%.16lx])\n", index); + goto out; + } if (pos + copied > i_size_read(ecryptfs_inode)) { i_size_write(ecryptfs_inode, pos + copied); ecryptfs_printk(KERN_DEBUG, "Expanded file size to " "[0x%.16llx]\n", (unsigned long long)i_size_read(ecryptfs_inode)); - balance_dirty_pages_ratelimited(mapping); - rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode); - if (rc) { - printk(KERN_ERR "Error writing inode size to metadata; " - "rc = [%d]\n", rc); - goto out; - } } - rc = copied; + rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode); + if (rc) + printk(KERN_ERR "Error writing inode size to metadata; " + "rc = [%d]\n", rc); + else + rc = copied; out: - if (need_unlock_page) - unlock_page(page); + unlock_page(page); page_cache_release(page); return rc; } diff --git a/fs/exec.c b/fs/exec.c index 3684353ebd5..574cf4de4ec 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -2069,25 +2069,18 @@ static void wait_for_dump_helpers(struct file *file) */ static int umh_pipe_setup(struct subprocess_info *info, struct cred *new) { - struct file *rp, *wp; + struct file *files[2]; struct fdtable *fdt; struct coredump_params *cp = (struct coredump_params *)info->data; struct files_struct *cf = current->files; + int err = create_pipe_files(files, 0); + if (err) + return err; - wp = create_write_pipe(0); - if (IS_ERR(wp)) - return PTR_ERR(wp); - - rp = create_read_pipe(wp, 0); - if (IS_ERR(rp)) { - free_write_pipe(wp); - return PTR_ERR(rp); - } - - cp->file = wp; + cp->file = files[1]; sys_close(0); - fd_install(0, rp); + fd_install(0, files[0]); spin_lock(&cf->file_lock); fdt = files_fdtable(cf); __set_open_fd(0, fdt); diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 
264d315f6c4..6363ac66faf 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -79,6 +79,7 @@ void ext2_evict_inode(struct inode * inode) truncate_inode_pages(&inode->i_data, 0); if (want_delete) { + sb_start_intwrite(inode->i_sb); /* set dtime */ EXT2_I(inode)->i_dtime = get_seconds(); mark_inode_dirty(inode); @@ -98,8 +99,10 @@ void ext2_evict_inode(struct inode * inode) if (unlikely(rsv)) kfree(rsv); - if (want_delete) + if (want_delete) { ext2_free_inode(inode); + sb_end_intwrite(inode->i_sb); + } } typedef struct { diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 9f311d27b16..af74d9e27b7 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -42,6 +42,8 @@ static void ext2_sync_super(struct super_block *sb, static int ext2_remount (struct super_block * sb, int * flags, char * data); static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf); static int ext2_sync_fs(struct super_block *sb, int wait); +static int ext2_freeze(struct super_block *sb); +static int ext2_unfreeze(struct super_block *sb); void ext2_error(struct super_block *sb, const char *function, const char *fmt, ...) @@ -305,6 +307,8 @@ static const struct super_operations ext2_sops = { .evict_inode = ext2_evict_inode, .put_super = ext2_put_super, .sync_fs = ext2_sync_fs, + .freeze_fs = ext2_freeze, + .unfreeze_fs = ext2_unfreeze, .statfs = ext2_statfs, .remount_fs = ext2_remount, .show_options = ext2_show_options, @@ -1200,6 +1204,35 @@ static int ext2_sync_fs(struct super_block *sb, int wait) return 0; } +static int ext2_freeze(struct super_block *sb) +{ + struct ext2_sb_info *sbi = EXT2_SB(sb); + + /* + * Open but unlinked files present? Keep EXT2_VALID_FS flag cleared + * because we have unattached inodes and thus filesystem is not fully + * consistent. + */ + if (atomic_long_read(&sb->s_remove_count)) { + ext2_sync_fs(sb, 1); + return 0; + } + /* Set EXT2_FS_VALID flag */ + spin_lock(&sbi->s_lock); + sbi->s_es->s_state = cpu_to_le16(sbi->s_mount_state); + spin_unlock(&sbi->s_lock); + ext2_sync_super(sb, sbi->s_es, 1); + + return 0; +} + +static int ext2_unfreeze(struct super_block *sb) +{ + /* Just write sb to clear EXT2_VALID_FS flag */ + ext2_write_super(sb); + + return 0; +} void ext2_write_super(struct super_block *sb) { diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 89b59cb7f9b..6324f74e034 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -233,6 +233,11 @@ void ext4_evict_inode(struct inode *inode) if (is_bad_inode(inode)) goto no_delete; + /* + * Protect us against freezing - iput() caller didn't have to have any + * protection against it + */ + sb_start_intwrite(inode->i_sb); handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)+3); if (IS_ERR(handle)) { ext4_std_error(inode->i_sb, PTR_ERR(handle)); @@ -242,6 +247,7 @@ void ext4_evict_inode(struct inode *inode) * cleaned up. */ ext4_orphan_del(NULL, inode); + sb_end_intwrite(inode->i_sb); goto no_delete; } @@ -273,6 +279,7 @@ void ext4_evict_inode(struct inode *inode) stop_handle: ext4_journal_stop(handle); ext4_orphan_del(NULL, inode); + sb_end_intwrite(inode->i_sb); goto no_delete; } } @@ -301,6 +308,7 @@ void ext4_evict_inode(struct inode *inode) else ext4_free_inode(handle, inode); ext4_journal_stop(handle); + sb_end_intwrite(inode->i_sb); return; no_delete: ext4_clear_inode(inode); /* We must guarantee clearing of inode... */ @@ -4779,11 +4787,7 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) get_block_t *get_block; int retries = 0; - /* - * This check is racy but catches the common case. 
We rely on - * __block_page_mkwrite() to do a reliable check. - */ - vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); + sb_start_pagefault(inode->i_sb); /* Delalloc case is easy... */ if (test_opt(inode->i_sb, DELALLOC) && !ext4_should_journal_data(inode) && @@ -4851,5 +4855,6 @@ retry_alloc: out_ret: ret = block_page_mkwrite_return(ret); out: + sb_end_pagefault(inode->i_sb); return ret; } diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c index f99a1311e84..fe7c63f4717 100644 --- a/fs/ext4/mmp.c +++ b/fs/ext4/mmp.c @@ -44,6 +44,11 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh) { struct mmp_struct *mmp = (struct mmp_struct *)(bh->b_data); + /* + * We protect against freezing so that we don't create dirty buffers + * on frozen filesystem. + */ + sb_start_write(sb); ext4_mmp_csum_set(sb, mmp); mark_buffer_dirty(bh); lock_buffer(bh); @@ -51,6 +56,7 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh) get_bh(bh); submit_bh(WRITE_SYNC, bh); wait_on_buffer(bh); + sb_end_write(sb); if (unlikely(!buffer_uptodate(bh))) return 1; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 2d51cd9af22..d76ec8277d3 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -331,33 +331,17 @@ static void ext4_put_nojournal(handle_t *handle) * journal_end calls result in the superblock being marked dirty, so * that sync() will call the filesystem's write_super callback if * appropriate. - * - * To avoid j_barrier hold in userspace when a user calls freeze(), - * ext4 prevents a new handle from being started by s_frozen, which - * is in an upper layer. */ handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks) { journal_t *journal; - handle_t *handle; trace_ext4_journal_start(sb, nblocks, _RET_IP_); if (sb->s_flags & MS_RDONLY) return ERR_PTR(-EROFS); + WARN_ON(sb->s_writers.frozen == SB_FREEZE_COMPLETE); journal = EXT4_SB(sb)->s_journal; - handle = ext4_journal_current_handle(); - - /* - * If a handle has been started, it should be allowed to - * finish, otherwise deadlock could happen between freeze - * and others(e.g. truncate) due to the restart of the - * journal handle if the filesystem is forzen and active - * handles are not stopped. - */ - if (!handle) - vfs_check_frozen(sb, SB_FREEZE_TRANS); - if (!journal) return ext4_get_nojournal(); /* @@ -2747,6 +2731,7 @@ static int ext4_run_li_request(struct ext4_li_request *elr) sb = elr->lr_super; ngroups = EXT4_SB(sb)->s_groups_count; + sb_start_write(sb); for (group = elr->lr_next_group; group < ngroups; group++) { gdp = ext4_get_group_desc(sb, group, NULL); if (!gdp) { @@ -2773,6 +2758,7 @@ static int ext4_run_li_request(struct ext4_li_request *elr) elr->lr_next_sched = jiffies + elr->lr_timeout; elr->lr_next_group = group + 1; } + sb_end_write(sb); return ret; } @@ -4460,10 +4446,8 @@ int ext4_force_commit(struct super_block *sb) return 0; journal = EXT4_SB(sb)->s_journal; - if (journal) { - vfs_check_frozen(sb, SB_FREEZE_TRANS); + if (journal) ret = ext4_journal_force_commit(journal); - } return ret; } @@ -4493,9 +4477,8 @@ static int ext4_sync_fs(struct super_block *sb, int wait) * gives us a chance to flush the journal completely and mark the fs clean. * * Note that only this function cannot bring a filesystem to be in a clean - * state independently, because ext4 prevents a new handle from being started - * by @sb->s_frozen, which stays in an upper layer. It thus needs help from - * the upper layer. + * state independently. It relies on upper layer to stop all data & metadata + * modifications. 
*/ static int ext4_freeze(struct super_block *sb) { @@ -4522,7 +4505,7 @@ static int ext4_freeze(struct super_block *sb) EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); error = ext4_commit_super(sb, 1); out: - /* we rely on s_frozen to stop further updates */ + /* we rely on upper layer to stop further updates */ jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal); return error; } diff --git a/fs/fat/file.c b/fs/fat/file.c index a71fe3715ee..e007b8bd8e5 100644 --- a/fs/fat/file.c +++ b/fs/fat/file.c @@ -43,10 +43,10 @@ static int fat_ioctl_set_attributes(struct file *file, u32 __user *user_attr) if (err) goto out; - mutex_lock(&inode->i_mutex); err = mnt_want_write_file(file); if (err) - goto out_unlock_inode; + goto out; + mutex_lock(&inode->i_mutex); /* * ATTR_VOLUME and ATTR_DIR cannot be changed; this also @@ -73,14 +73,14 @@ static int fat_ioctl_set_attributes(struct file *file, u32 __user *user_attr) /* The root directory has no attributes */ if (inode->i_ino == MSDOS_ROOT_INO && attr != ATTR_DIR) { err = -EINVAL; - goto out_drop_write; + goto out_unlock_inode; } if (sbi->options.sys_immutable && ((attr | oldattr) & ATTR_SYS) && !capable(CAP_LINUX_IMMUTABLE)) { err = -EPERM; - goto out_drop_write; + goto out_unlock_inode; } /* @@ -90,12 +90,12 @@ static int fat_ioctl_set_attributes(struct file *file, u32 __user *user_attr) */ err = security_inode_setattr(file->f_path.dentry, &ia); if (err) - goto out_drop_write; + goto out_unlock_inode; /* This MUST be done before doing anything irreversible... */ err = fat_setattr(file->f_path.dentry, &ia); if (err) - goto out_drop_write; + goto out_unlock_inode; fsnotify_change(file->f_path.dentry, ia.ia_valid); if (sbi->options.sys_immutable) { @@ -107,10 +107,9 @@ static int fat_ioctl_set_attributes(struct file *file, u32 __user *user_attr) fat_save_attrs(inode, attr); mark_inode_dirty(inode); -out_drop_write: - mnt_drop_write_file(file); out_unlock_inode: mutex_unlock(&inode->i_mutex); + mnt_drop_write_file(file); out: return err; } diff --git a/fs/file_table.c b/fs/file_table.c index b3fc4d67a26..701985e4ccd 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -43,7 +43,7 @@ static struct kmem_cache *filp_cachep __read_mostly; static struct percpu_counter nr_files __cacheline_aligned_in_smp; -static inline void file_free_rcu(struct rcu_head *head) +static void file_free_rcu(struct rcu_head *head) { struct file *f = container_of(head, struct file, f_u.fu_rcuhead); @@ -217,7 +217,7 @@ static void drop_file_write_access(struct file *file) return; if (file_check_writeable(file) != 0) return; - mnt_drop_write(mnt); + __mnt_drop_write(mnt); file_release_write(file); } diff --git a/fs/fuse/file.c b/fs/fuse/file.c index b321a688cde..93d8d6c9494 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -944,9 +944,8 @@ static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov, return err; count = ocount; - + sb_start_write(inode->i_sb); mutex_lock(&inode->i_mutex); - vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); /* We can write back this queue in page reclaim */ current->backing_dev_info = mapping->backing_dev_info; @@ -1004,6 +1003,7 @@ static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov, out: current->backing_dev_info = NULL; mutex_unlock(&inode->i_mutex); + sb_end_write(inode->i_sb); return written ? 
written : err; } diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index 9aa6af13823..d1d791ef38d 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -373,11 +373,10 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) loff_t size; int ret; - /* Wait if fs is frozen. This is racy so we check again later on - * and retry if the fs has been frozen after the page lock has - * been acquired - */ - vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); + sb_start_pagefault(inode->i_sb); + + /* Update file times before taking page lock */ + file_update_time(vma->vm_file); ret = gfs2_rs_alloc(ip); if (ret) @@ -462,14 +461,9 @@ out: gfs2_holder_uninit(&gh); if (ret == 0) { set_page_dirty(page); - /* This check must be post dropping of transaction lock */ - if (inode->i_sb->s_frozen == SB_UNFROZEN) { - wait_on_page_writeback(page); - } else { - ret = -EAGAIN; - unlock_page(page); - } + wait_on_page_writeback(page); } + sb_end_pagefault(inode->i_sb); return block_page_mkwrite_return(ret); } diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c index ad3e2fb763d..adbd27875ef 100644 --- a/fs/gfs2/trans.c +++ b/fs/gfs2/trans.c @@ -50,6 +50,7 @@ int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks, if (revokes) tr->tr_reserved += gfs2_struct2blk(sdp, revokes, sizeof(u64)); + sb_start_intwrite(sdp->sd_vfs); gfs2_holder_init(sdp->sd_trans_gl, LM_ST_SHARED, 0, &tr->tr_t_gh); error = gfs2_glock_nq(&tr->tr_t_gh); @@ -68,6 +69,7 @@ fail_gunlock: gfs2_glock_dq(&tr->tr_t_gh); fail_holder_uninit: + sb_end_intwrite(sdp->sd_vfs); gfs2_holder_uninit(&tr->tr_t_gh); kfree(tr); @@ -116,6 +118,7 @@ void gfs2_trans_end(struct gfs2_sbd *sdp) gfs2_holder_uninit(&tr->tr_t_gh); kfree(tr); } + sb_end_intwrite(sdp->sd_vfs); return; } @@ -136,6 +139,7 @@ void gfs2_trans_end(struct gfs2_sbd *sdp) if (sdp->sd_vfs->s_flags & MS_SYNCHRONOUS) gfs2_log_flush(sdp, NULL); + sb_end_intwrite(sdp->sd_vfs); } /** diff --git a/fs/inode.c b/fs/inode.c index 3cc50432046..ac8d904b3f1 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -1542,9 +1542,11 @@ void touch_atime(struct path *path) if (timespec_equal(&inode->i_atime, &now)) return; - if (mnt_want_write(mnt)) + if (!sb_start_write_trylock(inode->i_sb)) return; + if (__mnt_want_write(mnt)) + goto skip_update; /* * File systems can error out when updating inodes if they need to * allocate new space to modify an inode (such is the case for @@ -1555,7 +1557,9 @@ void touch_atime(struct path *path) * of the fs read only, e.g. subvolumes in Btrfs. */ update_time(inode, &now, S_ATIME); - mnt_drop_write(mnt); + __mnt_drop_write(mnt); +skip_update: + sb_end_write(inode->i_sb); } EXPORT_SYMBOL(touch_atime); @@ -1662,11 +1666,11 @@ int file_update_time(struct file *file) return 0; /* Finally allowed to write? Takes lock. 
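
The gfs2_trans_begin()/gfs2_trans_end() hunks above demonstrate the third protection level: filesystem-internal writers hold sb_start_intwrite() for the lifetime of a transaction, so a freeze can wait them out after user writes and page faults are already blocked. Roughly, with hypothetical foo_log_reserve()/foo_log_release() standing in for the filesystem's own log management:

    int foo_trans_begin(struct super_block *sb)
    {
            int error;

            sb_start_intwrite(sb);          /* blocks once frozen >= SB_FREEZE_FS */
            error = foo_log_reserve(sb);    /* hypothetical reservation step */
            if (error)
                    sb_end_intwrite(sb);    /* pair up on failure paths too */
            return error;
    }

    void foo_trans_end(struct super_block *sb)
    {
            foo_log_release(sb);            /* hypothetical commit/release step */
            sb_end_intwrite(sb);
    }
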
*/ - if (mnt_want_write_file(file)) + if (__mnt_want_write_file(file)) return 0; ret = update_time(inode, &now, sync_it); - mnt_drop_write_file(file); + __mnt_drop_write_file(file); return ret; } diff --git a/fs/internal.h b/fs/internal.h index a6fd56c68b1..371bcc4b169 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -61,6 +61,10 @@ extern void __init mnt_init(void); extern struct lglock vfsmount_lock; +extern int __mnt_want_write(struct vfsmount *); +extern int __mnt_want_write_file(struct file *); +extern void __mnt_drop_write(struct vfsmount *); +extern void __mnt_drop_write_file(struct file *); /* * fs_struct.c diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c index 8392cb85bd5..05d29124c6a 100644 --- a/fs/lockd/clntproc.c +++ b/fs/lockd/clntproc.c @@ -156,12 +156,16 @@ int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl) struct nlm_rqst *call; int status; - nlm_get_host(host); call = nlm_alloc_call(host); if (call == NULL) return -ENOMEM; nlmclnt_locks_init_private(fl, host); + if (!fl->fl_u.nfs_fl.owner) { + /* lockowner allocation has failed */ + nlmclnt_release_call(call); + return -ENOMEM; + } /* Set up the argument struct */ nlmclnt_setlockargs(call, fl); @@ -185,9 +189,6 @@ EXPORT_SYMBOL_GPL(nlmclnt_proc); /* * Allocate an NLM RPC call struct - * - * Note: the caller must hold a reference to host. In case of failure, - * this reference will be released. */ struct nlm_rqst *nlm_alloc_call(struct nlm_host *host) { @@ -199,7 +200,7 @@ struct nlm_rqst *nlm_alloc_call(struct nlm_host *host) atomic_set(&call->a_count, 1); locks_init_lock(&call->a_args.lock.fl); locks_init_lock(&call->a_res.lock.fl); - call->a_host = host; + call->a_host = nlm_get_host(host); return call; } if (signalled()) @@ -207,7 +208,6 @@ struct nlm_rqst *nlm_alloc_call(struct nlm_host *host) printk("nlm_alloc_call: failed, waiting for memory\n"); schedule_timeout_interruptible(5*HZ); } - nlmclnt_release_host(host); return NULL; } @@ -750,7 +750,7 @@ static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl dprintk("lockd: blocking lock attempt was interrupted by a signal.\n" " Attempting to cancel lock.\n"); - req = nlm_alloc_call(nlm_get_host(host)); + req = nlm_alloc_call(host); if (!req) return -ENOMEM; req->a_flags = RPC_TASK_ASYNC; diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c index 4a43d253c04..b147d1ae71f 100644 --- a/fs/lockd/svc4proc.c +++ b/fs/lockd/svc4proc.c @@ -257,6 +257,7 @@ static __be32 nlm4svc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_args return rpc_system_err; call = nlm_alloc_call(host); + nlmsvc_release_host(host); if (call == NULL) return rpc_system_err; diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c index afe4488c33d..fb1a2bedbe9 100644 --- a/fs/lockd/svclock.c +++ b/fs/lockd/svclock.c @@ -219,7 +219,6 @@ nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host, struct nlm_block *block; struct nlm_rqst *call = NULL; - nlm_get_host(host); call = nlm_alloc_call(host); if (call == NULL) return NULL; diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c index de8f2caa223..3009a365e08 100644 --- a/fs/lockd/svcproc.c +++ b/fs/lockd/svcproc.c @@ -297,6 +297,7 @@ static __be32 nlmsvc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_args return rpc_system_err; call = nlm_alloc_call(host); + nlmsvc_release_host(host); if (call == NULL) return rpc_system_err; diff --git a/fs/locks.c b/fs/locks.c index cdcf219a739..7e81bfc7516 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -200,11 +200,7 @@ void 
locks_release_private(struct file_lock *fl) fl->fl_ops->fl_release_private(fl); fl->fl_ops = NULL; } - if (fl->fl_lmops) { - if (fl->fl_lmops->lm_release_private) - fl->fl_lmops->lm_release_private(fl); - fl->fl_lmops = NULL; - } + fl->fl_lmops = NULL; } EXPORT_SYMBOL_GPL(locks_release_private); diff --git a/fs/namei.c b/fs/namei.c index 2ccc35c4dc2..1b464390dde 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -650,6 +650,121 @@ static inline void put_link(struct nameidata *nd, struct path *link, void *cooki path_put(link); } +int sysctl_protected_symlinks __read_mostly = 1; +int sysctl_protected_hardlinks __read_mostly = 1; + +/** + * may_follow_link - Check symlink following for unsafe situations + * @link: The path of the symlink + * + * In the case of the sysctl_protected_symlinks sysctl being enabled, + * CAP_DAC_OVERRIDE needs to be specifically ignored if the symlink is + * in a sticky world-writable directory. This is to protect privileged + * processes from failing races against path names that may change out + * from under them by way of other users creating malicious symlinks. + * It will permit symlinks to be followed only when outside a sticky + * world-writable directory, or when the uid of the symlink and follower + * match, or when the directory owner matches the symlink's owner. + * + * Returns 0 if following the symlink is allowed, -ve on error. + */ +static inline int may_follow_link(struct path *link, struct nameidata *nd) +{ + const struct inode *inode; + const struct inode *parent; + + if (!sysctl_protected_symlinks) + return 0; + + /* Allowed if owner and follower match. */ + inode = link->dentry->d_inode; + if (current_cred()->fsuid == inode->i_uid) + return 0; + + /* Allowed if parent directory not sticky and world-writable. */ + parent = nd->path.dentry->d_inode; + if ((parent->i_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH)) + return 0; + + /* Allowed if parent directory and link owner match. */ + if (parent->i_uid == inode->i_uid) + return 0; + + path_put_conditional(link, nd); + path_put(&nd->path); + audit_log_link_denied("follow_link", link); + return -EACCES; +} + +/** + * safe_hardlink_source - Check for safe hardlink conditions + * @inode: the source inode to hardlink from + * + * Return false if at least one of the following conditions: + * - inode is not a regular file + * - inode is setuid + * - inode is setgid and group-exec + * - access failure for read and write + * + * Otherwise returns true. + */ +static bool safe_hardlink_source(struct inode *inode) +{ + umode_t mode = inode->i_mode; + + /* Special files should not get pinned to the filesystem. */ + if (!S_ISREG(mode)) + return false; + + /* Setuid files should not get pinned to the filesystem. */ + if (mode & S_ISUID) + return false; + + /* Executable setgid files should not get pinned to the filesystem. */ + if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) + return false; + + /* Hardlinking to unreadable or unwritable sources is dangerous. */ + if (inode_permission(inode, MAY_READ | MAY_WRITE)) + return false; + + return true; +} + +/** + * may_linkat - Check permissions for creating a hardlink + * @link: the source to hardlink from + * + * Block hardlink when all of: + * - sysctl_protected_hardlinks enabled + * - fsuid does not match inode + * - hardlink source is unsafe (see safe_hardlink_source() above) + * - not CAP_FOWNER + * + * Returns 0 if successful, -ve on error. 
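
may_linkat(), whose body follows, is the enforcement point for the new fs.protected_hardlinks sysctl. An illustrative userspace probe (run as an unprivileged user with the sysctl enabled; /etc/shadow is merely a convenient root-owned, unreadable file):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            /* Fails safe_hardlink_source(): we can neither read nor write
             * the source, so may_linkat() denies the link with EPERM. */
            if (link("/etc/shadow", "/tmp/shadow-hardlink") == -1)
                    printf("link: %s\n", strerror(errno)); /* EPERM expected */
            else
                    puts("link created - protection disabled?");
            return 0;
    }
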
+ */ +static int may_linkat(struct path *link) +{ + const struct cred *cred; + struct inode *inode; + + if (!sysctl_protected_hardlinks) + return 0; + + cred = current_cred(); + inode = link->dentry->d_inode; + + /* Source inode owner (or CAP_FOWNER) can hardlink all they like, + * otherwise, it must be a safe source. + */ + if (cred->fsuid == inode->i_uid || safe_hardlink_source(inode) || + capable(CAP_FOWNER)) + return 0; + + audit_log_link_denied("linkat", link); + return -EPERM; +} + static __always_inline int follow_link(struct path *link, struct nameidata *nd, void **p) { @@ -1818,6 +1933,9 @@ static int path_lookupat(int dfd, const char *name, while (err > 0) { void *cookie; struct path link = path; + err = may_follow_link(&link, nd); + if (unlikely(err)) + break; nd->flags |= LOOKUP_PARENT; err = follow_link(&link, nd, &cookie); if (err) @@ -2277,7 +2395,7 @@ static int may_o_create(struct path *dir, struct dentry *dentry, umode_t mode) static int atomic_open(struct nameidata *nd, struct dentry *dentry, struct path *path, struct file *file, const struct open_flags *op, - bool *want_write, bool need_lookup, + bool got_write, bool need_lookup, int *opened) { struct inode *dir = nd->path.dentry->d_inode; @@ -2300,7 +2418,7 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry, if ((open_flag & O_CREAT) && !IS_POSIXACL(dir)) mode &= ~current_umask(); - if (open_flag & O_EXCL) { + if ((open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT)) { open_flag &= ~O_TRUNC; *opened |= FILE_CREATED; } @@ -2314,12 +2432,9 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry, * Another problem is returing the "right" error value (e.g. for an * O_EXCL open we want to return EEXIST not EROFS). */ - if ((open_flag & (O_CREAT | O_TRUNC)) || - (open_flag & O_ACCMODE) != O_RDONLY) { - error = mnt_want_write(nd->path.mnt); - if (!error) { - *want_write = true; - } else if (!(open_flag & O_CREAT)) { + if (((open_flag & (O_CREAT | O_TRUNC)) || + (open_flag & O_ACCMODE) != O_RDONLY) && unlikely(!got_write)) { + if (!(open_flag & O_CREAT)) { /* * No O_CREATE -> atomicity not a requirement -> fall * back to lookup + open @@ -2327,11 +2442,11 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry, goto no_open; } else if (open_flag & (O_EXCL | O_TRUNC)) { /* Fall back and fail with the right error */ - create_error = error; + create_error = -EROFS; goto no_open; } else { /* No side effects, safe to clear O_CREAT */ - create_error = error; + create_error = -EROFS; open_flag &= ~O_CREAT; } } @@ -2438,7 +2553,7 @@ looked_up: static int lookup_open(struct nameidata *nd, struct path *path, struct file *file, const struct open_flags *op, - bool *want_write, int *opened) + bool got_write, int *opened) { struct dentry *dir = nd->path.dentry; struct inode *dir_inode = dir->d_inode; @@ -2456,7 +2571,7 @@ static int lookup_open(struct nameidata *nd, struct path *path, goto out_no_open; if ((nd->flags & LOOKUP_OPEN) && dir_inode->i_op->atomic_open) { - return atomic_open(nd, dentry, path, file, op, want_write, + return atomic_open(nd, dentry, path, file, op, got_write, need_lookup, opened); } @@ -2480,10 +2595,10 @@ static int lookup_open(struct nameidata *nd, struct path *path, * a permanent write count is taken through * the 'struct file' in finish_open(). 
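
Note the corrected flag test in atomic_open() above (and in do_last() further down): O_EXCL requests exclusive creation only in combination with O_CREAT; on its own it carries no must-not-exist semantics, so it must not set FILE_CREATED or strip O_TRUNC. From userspace the distinction looks like this on Linux (/etc/hostname stands in for any existing file):

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* O_EXCL without O_CREAT: opening an existing file succeeds. */
            int fd = open("/etc/hostname", O_RDONLY | O_EXCL);
            printf("O_EXCL alone: fd=%d\n", fd);

            /* O_CREAT|O_EXCL: refused because the file already exists. */
            fd = open("/etc/hostname", O_WRONLY | O_CREAT | O_EXCL, 0600);
            if (fd == -1)
                    printf("O_CREAT|O_EXCL: %s\n", strerror(errno)); /* EEXIST */
            return 0;
    }
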
*/ - error = mnt_want_write(nd->path.mnt); - if (error) + if (!got_write) { + error = -EROFS; goto out_dput; - *want_write = true; + } *opened |= FILE_CREATED; error = security_path_mknod(&nd->path, dentry, mode, 0); if (error) @@ -2513,7 +2628,7 @@ static int do_last(struct nameidata *nd, struct path *path, struct dentry *dir = nd->path.dentry; int open_flag = op->open_flag; bool will_truncate = (open_flag & O_TRUNC) != 0; - bool want_write = false; + bool got_write = false; int acc_mode = op->acc_mode; struct inode *inode; bool symlink_ok = false; @@ -2582,8 +2697,18 @@ static int do_last(struct nameidata *nd, struct path *path, } retry_lookup: + if (op->open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) { + error = mnt_want_write(nd->path.mnt); + if (!error) + got_write = true; + /* + * do _not_ fail yet - we might not need that or fail with + * a different error; let lookup_open() decide; we'll be + * dropping this one anyway. + */ + } mutex_lock(&dir->d_inode->i_mutex); - error = lookup_open(nd, path, file, op, &want_write, opened); + error = lookup_open(nd, path, file, op, got_write, opened); mutex_unlock(&dir->d_inode->i_mutex); if (error <= 0) { @@ -2608,22 +2733,23 @@ retry_lookup: } /* - * It already exists. + * create/update audit record if it already exists. */ - audit_inode(pathname, path->dentry); + if (path->dentry->d_inode) + audit_inode(pathname, path->dentry); /* * If atomic_open() acquired write access it is dropped now due to * possible mount and symlink following (this might be optimized away if * necessary...) */ - if (want_write) { + if (got_write) { mnt_drop_write(nd->path.mnt); - want_write = false; + got_write = false; } error = -EEXIST; - if (open_flag & O_EXCL) + if ((open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT)) goto exit_dput; error = follow_managed(path, nd->flags); @@ -2684,7 +2810,7 @@ finish_open: error = mnt_want_write(nd->path.mnt); if (error) goto out; - want_write = true; + got_write = true; } finish_open_created: error = may_open(&nd->path, acc_mode, open_flag); @@ -2711,7 +2837,7 @@ opened: goto exit_fput; } out: - if (want_write) + if (got_write) mnt_drop_write(nd->path.mnt); path_put(&save_parent); terminate_walk(nd); @@ -2735,9 +2861,9 @@ stale_open: nd->inode = dir->d_inode; save_parent.mnt = NULL; save_parent.dentry = NULL; - if (want_write) { + if (got_write) { mnt_drop_write(nd->path.mnt); - want_write = false; + got_write = false; } retried = true; goto retry_lookup; @@ -2777,6 +2903,9 @@ static struct file *path_openat(int dfd, const char *pathname, error = -ELOOP; break; } + error = may_follow_link(&link, nd); + if (unlikely(error)) + break; nd->flags |= LOOKUP_PARENT; nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL); error = follow_link(&link, nd, &cookie); @@ -2846,6 +2975,7 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path { struct dentry *dentry = ERR_PTR(-EEXIST); struct nameidata nd; + int err2; int error = do_path_lookup(dfd, pathname, LOOKUP_PARENT, &nd); if (error) return ERR_PTR(error); @@ -2859,16 +2989,19 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path nd.flags &= ~LOOKUP_PARENT; nd.flags |= LOOKUP_CREATE | LOOKUP_EXCL; + /* don't fail immediately if it's r/o, at least try to report other errors */ + err2 = mnt_want_write(nd.path.mnt); /* * Do the final lookup. 
*/ mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT); dentry = lookup_hash(&nd); if (IS_ERR(dentry)) - goto fail; + goto unlock; + error = -EEXIST; if (dentry->d_inode) - goto eexist; + goto fail; /* * Special case - lookup gave negative, but... we had foo/bar/ * From the vfs_mknod() POV we just have a negative dentry - @@ -2876,23 +3009,37 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path * been asking for (non-existent) directory. -ENOENT for you. */ if (unlikely(!is_dir && nd.last.name[nd.last.len])) { - dput(dentry); - dentry = ERR_PTR(-ENOENT); + error = -ENOENT; + goto fail; + } + if (unlikely(err2)) { + error = err2; goto fail; } *path = nd.path; return dentry; -eexist: - dput(dentry); - dentry = ERR_PTR(-EEXIST); fail: + dput(dentry); + dentry = ERR_PTR(error); +unlock: mutex_unlock(&nd.path.dentry->d_inode->i_mutex); + if (!err2) + mnt_drop_write(nd.path.mnt); out: path_put(&nd.path); return dentry; } EXPORT_SYMBOL(kern_path_create); +void done_path_create(struct path *path, struct dentry *dentry) +{ + dput(dentry); + mutex_unlock(&path->dentry->d_inode->i_mutex); + mnt_drop_write(path->mnt); + path_put(path); +} +EXPORT_SYMBOL(done_path_create); + struct dentry *user_path_create(int dfd, const char __user *pathname, struct path *path, int is_dir) { char *tmp = getname(pathname); @@ -2956,8 +3103,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode, struct path path; int error; - if (S_ISDIR(mode)) - return -EPERM; + error = may_mknod(mode); + if (error) + return error; dentry = user_path_create(dfd, filename, &path, 0); if (IS_ERR(dentry)) @@ -2965,15 +3113,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode, if (!IS_POSIXACL(path.dentry->d_inode)) mode &= ~current_umask(); - error = may_mknod(mode); - if (error) - goto out_dput; - error = mnt_want_write(path.mnt); - if (error) - goto out_dput; error = security_path_mknod(&path, dentry, mode, dev); if (error) - goto out_drop_write; + goto out; switch (mode & S_IFMT) { case 0: case S_IFREG: error = vfs_create(path.dentry->d_inode,dentry,mode,true); @@ -2986,13 +3128,8 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode, error = vfs_mknod(path.dentry->d_inode,dentry,mode,0); break; } -out_drop_write: - mnt_drop_write(path.mnt); -out_dput: - dput(dentry); - mutex_unlock(&path.dentry->d_inode->i_mutex); - path_put(&path); - +out: + done_path_create(&path, dentry); return error; } @@ -3038,19 +3175,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode) if (!IS_POSIXACL(path.dentry->d_inode)) mode &= ~current_umask(); - error = mnt_want_write(path.mnt); - if (error) - goto out_dput; error = security_path_mkdir(&path, dentry, mode); - if (error) - goto out_drop_write; - error = vfs_mkdir(path.dentry->d_inode, dentry, mode); -out_drop_write: - mnt_drop_write(path.mnt); -out_dput: - dput(dentry); - mutex_unlock(&path.dentry->d_inode->i_mutex); - path_put(&path); + if (!error) + error = vfs_mkdir(path.dentry->d_inode, dentry, mode); + done_path_create(&path, dentry); return error; } @@ -3144,6 +3272,9 @@ static long do_rmdir(int dfd, const char __user *pathname) } nd.flags &= ~LOOKUP_PARENT; + error = mnt_want_write(nd.path.mnt); + if (error) + goto exit1; mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT); dentry = lookup_hash(&nd); @@ -3154,19 +3285,15 @@ static long do_rmdir(int dfd, const char __user *pathname) error = -ENOENT; goto exit3; 
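
kern_path_create() now takes the write reference itself, and the new done_path_create() unwinds dentry, i_mutex, write access and path in one call, which is what lets the mknodat()/mkdirat() conversions above and symlinkat() below collapse their error ladders. The resulting shape of a creation-style operation (a sketch; do_foo_create() is hypothetical):

    static long do_foo_create(int dfd, const char __user *name, umode_t mode)
    {
            struct path path;
            struct dentry *dentry;
            int error;

            /* parent lookup + i_mutex + mnt_want_write(), all in one */
            dentry = user_path_create(dfd, name, &path, 1);
            if (IS_ERR(dentry))
                    return PTR_ERR(dentry);

            error = security_path_mkdir(&path, dentry, mode);
            if (!error)
                    error = vfs_mkdir(path.dentry->d_inode, dentry, mode);

            /* dput() + mutex_unlock() + mnt_drop_write() + path_put() */
            done_path_create(&path, dentry);
            return error;
    }
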
} - error = mnt_want_write(nd.path.mnt); - if (error) - goto exit3; error = security_path_rmdir(&nd.path, dentry); if (error) - goto exit4; + goto exit3; error = vfs_rmdir(nd.path.dentry->d_inode, dentry); -exit4: - mnt_drop_write(nd.path.mnt); exit3: dput(dentry); exit2: mutex_unlock(&nd.path.dentry->d_inode->i_mutex); + mnt_drop_write(nd.path.mnt); exit1: path_put(&nd.path); putname(name); @@ -3233,6 +3360,9 @@ static long do_unlinkat(int dfd, const char __user *pathname) goto exit1; nd.flags &= ~LOOKUP_PARENT; + error = mnt_want_write(nd.path.mnt); + if (error) + goto exit1; mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT); dentry = lookup_hash(&nd); @@ -3245,21 +3375,17 @@ static long do_unlinkat(int dfd, const char __user *pathname) if (!inode) goto slashes; ihold(inode); - error = mnt_want_write(nd.path.mnt); - if (error) - goto exit2; error = security_path_unlink(&nd.path, dentry); if (error) - goto exit3; + goto exit2; error = vfs_unlink(nd.path.dentry->d_inode, dentry); -exit3: - mnt_drop_write(nd.path.mnt); - exit2: +exit2: dput(dentry); } mutex_unlock(&nd.path.dentry->d_inode->i_mutex); if (inode) iput(inode); /* truncate the inode here */ + mnt_drop_write(nd.path.mnt); exit1: path_put(&nd.path); putname(name); @@ -3324,19 +3450,10 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname, if (IS_ERR(dentry)) goto out_putname; - error = mnt_want_write(path.mnt); - if (error) - goto out_dput; error = security_path_symlink(&path, dentry, from); - if (error) - goto out_drop_write; - error = vfs_symlink(path.dentry->d_inode, dentry, from); -out_drop_write: - mnt_drop_write(path.mnt); -out_dput: - dput(dentry); - mutex_unlock(&path.dentry->d_inode->i_mutex); - path_put(&path); + if (!error) + error = vfs_symlink(path.dentry->d_inode, dentry, from); + done_path_create(&path, dentry); out_putname: putname(from); return error; @@ -3436,19 +3553,15 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname, error = -EXDEV; if (old_path.mnt != new_path.mnt) goto out_dput; - error = mnt_want_write(new_path.mnt); - if (error) + error = may_linkat(&old_path); + if (unlikely(error)) goto out_dput; error = security_path_link(old_path.dentry, &new_path, new_dentry); if (error) - goto out_drop_write; + goto out_dput; error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry); -out_drop_write: - mnt_drop_write(new_path.mnt); out_dput: - dput(new_dentry); - mutex_unlock(&new_path.dentry->d_inode->i_mutex); - path_put(&new_path); + done_path_create(&new_path, new_dentry); out: path_put(&old_path); @@ -3644,6 +3757,10 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname, if (newnd.last_type != LAST_NORM) goto exit2; + error = mnt_want_write(oldnd.path.mnt); + if (error) + goto exit2; + oldnd.flags &= ~LOOKUP_PARENT; newnd.flags &= ~LOOKUP_PARENT; newnd.flags |= LOOKUP_RENAME_TARGET; @@ -3679,23 +3796,19 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname, if (new_dentry == trap) goto exit5; - error = mnt_want_write(oldnd.path.mnt); - if (error) - goto exit5; error = security_path_rename(&oldnd.path, old_dentry, &newnd.path, new_dentry); if (error) - goto exit6; + goto exit5; error = vfs_rename(old_dir->d_inode, old_dentry, new_dir->d_inode, new_dentry); -exit6: - mnt_drop_write(oldnd.path.mnt); exit5: dput(new_dentry); exit4: dput(old_dentry); exit3: unlock_rename(new_dir, old_dir); + mnt_drop_write(oldnd.path.mnt); exit2: path_put(&newnd.path); putname(to); diff --git a/fs/namespace.c b/fs/namespace.c index 
c53d3381b0d..4d31f73e256 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -283,24 +283,22 @@ static int mnt_is_readonly(struct vfsmount *mnt) } /* - * Most r/o checks on a fs are for operations that take - * discrete amounts of time, like a write() or unlink(). - * We must keep track of when those operations start - * (for permission checks) and when they end, so that - * we can determine when writes are able to occur to - * a filesystem. + * Most r/o & frozen checks on a fs are for operations that take discrete + * amounts of time, like a write() or unlink(). We must keep track of when + * those operations start (for permission checks) and when they end, so that we + * can determine when writes are able to occur to a filesystem. */ /** - * mnt_want_write - get write access to a mount + * __mnt_want_write - get write access to a mount without freeze protection * @m: the mount on which to take a write * - * This tells the low-level filesystem that a write is - * about to be performed to it, and makes sure that - * writes are allowed before returning success. When - * the write operation is finished, mnt_drop_write() - * must be called. This is effectively a refcount. + * This tells the low-level filesystem that a write is about to be performed to + * it, and makes sure that writes are allowed (mount is read-write) before + * returning success. This operation does not protect against the filesystem + * being frozen. When the write operation is finished, __mnt_drop_write() must + * be called. This is effectively a refcount. */ -int mnt_want_write(struct vfsmount *m) +int __mnt_want_write(struct vfsmount *m) { struct mount *mnt = real_mount(m); int ret = 0; @@ -326,6 +324,27 @@ int mnt_want_write(struct vfsmount *m) ret = -EROFS; } preempt_enable(); + + return ret; +} + +/** + * mnt_want_write - get write access to a mount + * @m: the mount on which to take a write + * + * This tells the low-level filesystem that a write is about to be performed to + * it, and makes sure that writes are allowed (mount is read-write, filesystem + * is not frozen) before returning success. When the write operation is + * finished, mnt_drop_write() must be called. This is effectively a refcount.
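
With the split above, mnt_want_write() keeps its old contract for ordinary callers, while the double-underscore variants let special cases compose their own protection (touch_atime() earlier pairs sb_start_write_trylock() with __mnt_want_write() so atime updates skip, rather than block on, a frozen filesystem). For everyone else the canonical pairing stays simple (foo_touch() is illustrative):

    static int foo_touch(struct vfsmount *mnt, struct inode *inode)
    {
            int error;

            error = mnt_want_write(mnt);    /* sb_start_write() + r/o check;
                                               -EROFS on a read-only mount, a
                                               frozen fs just blocks until thaw */
            if (error)
                    return error;
            mark_inode_dirty(inode);        /* any write-side work */
            mnt_drop_write(mnt);            /* releases freeze protection too */
            return 0;
    }
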
+ */ +int mnt_want_write(struct vfsmount *m) +{ + int ret; + + sb_start_write(m->mnt_sb); + ret = __mnt_want_write(m); + if (ret) + sb_end_write(m->mnt_sb); return ret; } EXPORT_SYMBOL_GPL(mnt_want_write); @@ -355,38 +374,76 @@ int mnt_clone_write(struct vfsmount *mnt) EXPORT_SYMBOL_GPL(mnt_clone_write); /** - * mnt_want_write_file - get write access to a file's mount + * __mnt_want_write_file - get write access to a file's mount * @file: the file whose mount to take a write on * - * This is like mnt_want_write, but it takes a file and can + * This is like __mnt_want_write, but it takes a file and can * do some optimisations if the file is open for write already */ -int mnt_want_write_file(struct file *file) +int __mnt_want_write_file(struct file *file) { struct inode *inode = file->f_dentry->d_inode; + if (!(file->f_mode & FMODE_WRITE) || special_file(inode->i_mode)) - return mnt_want_write(file->f_path.mnt); + return __mnt_want_write(file->f_path.mnt); else return mnt_clone_write(file->f_path.mnt); } + +/** + * mnt_want_write_file - get write access to a file's mount + * @file: the file whose mount to take a write on + * + * This is like mnt_want_write, but it takes a file and can + * do some optimisations if the file is open for write already + */ +int mnt_want_write_file(struct file *file) +{ + int ret; + + sb_start_write(file->f_path.mnt->mnt_sb); + ret = __mnt_want_write_file(file); + if (ret) + sb_end_write(file->f_path.mnt->mnt_sb); + return ret; +} EXPORT_SYMBOL_GPL(mnt_want_write_file); /** - * mnt_drop_write - give up write access to a mount + * __mnt_drop_write - give up write access to a mount * @mnt: the mount on which to give up write access * * Tells the low-level filesystem that we are done * performing writes to it. Must be matched with - * mnt_want_write() call above. + * __mnt_want_write() call above. */ -void mnt_drop_write(struct vfsmount *mnt) +void __mnt_drop_write(struct vfsmount *mnt) { preempt_disable(); mnt_dec_writers(real_mount(mnt)); preempt_enable(); } + +/** + * mnt_drop_write - give up write access to a mount + * @mnt: the mount on which to give up write access + * + * Tells the low-level filesystem that we are done performing writes to it and + * also allows the filesystem to be frozen again. Must be matched with + * mnt_want_write() call above. + */ +void mnt_drop_write(struct vfsmount *mnt) +{ + __mnt_drop_write(mnt); + sb_end_write(mnt->mnt_sb); +} EXPORT_SYMBOL_GPL(mnt_drop_write); +void __mnt_drop_write_file(struct file *file) +{ + __mnt_drop_write(file->f_path.mnt); +} + void mnt_drop_write_file(struct file *file) { mnt_drop_write(file->f_path.mnt); diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index 5ff0b7b9fc0..43295d45cc2 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -154,6 +154,10 @@ nfsd4_create_clid_dir(struct nfs4_client *clp) if (status < 0) return; + status = mnt_want_write_file(rec_file); + if (status) + return; + dir = rec_file->f_path.dentry; /* lock the parent */ mutex_lock(&dir->d_inode->i_mutex); @@ -173,11 +177,7 @@ nfsd4_create_clid_dir(struct nfs4_client *clp) * as well be forgiving and just succeed silently.
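
The same reordering recurs in every caller touched by this series (fat_ioctl_set_attributes() earlier, nfsd4_create_clid_dir() here): since mnt_want_write() now embeds sb_start_write(), and freeze protection ranks above i_mutex in the locking order, write access has to be taken before i_mutex and dropped after it. Schematically:

    err = mnt_want_write_file(file);        /* may sleep until the fs is thawed */
    if (err)
            return err;
    mutex_lock(&inode->i_mutex);
    /* ... modify the inode ... */
    mutex_unlock(&inode->i_mutex);
    mnt_drop_write_file(file);              /* only after i_mutex is released */
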
*/ goto out_put; - status = mnt_want_write_file(rec_file); - if (status) - goto out_put; status = vfs_mkdir(dir->d_inode, dentry, S_IRWXU); - mnt_drop_write_file(rec_file); out_put: dput(dentry); out_unlock: @@ -189,6 +189,7 @@ out_unlock: " (err %d); please check that %s exists" " and is writeable", status, user_recovery_dirname); + mnt_drop_write_file(rec_file); nfs4_reset_creds(original_cred); } diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c index cc793005a87..032af381b3a 100644 --- a/fs/nfsd/nfsfh.c +++ b/fs/nfsd/nfsfh.c @@ -635,6 +635,7 @@ fh_put(struct svc_fh *fhp) fhp->fh_post_saved = 0; #endif } + fh_drop_write(fhp); if (exp) { exp_put(exp); fhp->fh_export = NULL; diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c index e15dc45fc5e..aad6d457b9e 100644 --- a/fs/nfsd/nfsproc.c +++ b/fs/nfsd/nfsproc.c @@ -196,6 +196,7 @@ nfsd_proc_create(struct svc_rqst *rqstp, struct nfsd_createargs *argp, struct dentry *dchild; int type, mode; __be32 nfserr; + int hosterr; dev_t rdev = 0, wanted = new_decode_dev(attr->ia_size); dprintk("nfsd: CREATE %s %.*s\n", @@ -214,6 +215,12 @@ nfsd_proc_create(struct svc_rqst *rqstp, struct nfsd_createargs *argp, nfserr = nfserr_exist; if (isdotent(argp->name, argp->len)) goto done; + hosterr = fh_want_write(dirfhp); + if (hosterr) { + nfserr = nfserrno(hosterr); + goto done; + } + fh_lock_nested(dirfhp, I_MUTEX_PARENT); dchild = lookup_one_len(argp->name, dirfhp->fh_dentry, argp->len); if (IS_ERR(dchild)) { @@ -330,7 +337,7 @@ nfsd_proc_create(struct svc_rqst *rqstp, struct nfsd_createargs *argp, out_unlock: /* We don't really need to unlock, as fh_put does it. */ fh_unlock(dirfhp); - + fh_drop_write(dirfhp); done: fh_put(dirfhp); return nfsd_return_dirop(nfserr, resp); diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 702f64e820c..a9269f142cc 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -1284,6 +1284,10 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, * If it has, the parent directory should already be locked. */ if (!resfhp->fh_dentry) { + host_err = fh_want_write(fhp); + if (host_err) + goto out_nfserr; + /* called from nfsd_proc_mkdir, or possibly nfsd3_proc_create */ fh_lock_nested(fhp, I_MUTEX_PARENT); dchild = lookup_one_len(fname, dentry, flen); @@ -1327,14 +1331,11 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, goto out; } - host_err = fh_want_write(fhp); - if (host_err) - goto out_nfserr; - /* * Get the dir op function pointer. */ err = 0; + host_err = 0; switch (type) { case S_IFREG: host_err = vfs_create(dirp, dchild, iap->ia_mode, true); @@ -1351,10 +1352,8 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, host_err = vfs_mknod(dirp, dchild, iap->ia_mode, rdev); break; } - if (host_err < 0) { - fh_drop_write(fhp); + if (host_err < 0) goto out_nfserr; - } err = nfsd_create_setattr(rqstp, resfhp, iap); @@ -1366,7 +1365,6 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, err2 = nfserrno(commit_metadata(fhp)); if (err2) err = err2; - fh_drop_write(fhp); /* * Update the file handle to get the new inode info. 
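
fh_put() above can call fh_drop_write() unconditionally only because the svc_fh now records whether write access is held (see the fs/nfsd/vfs.h hunk further down). The nfsd conversions that follow all rely on that: each operation takes fh_want_write() up front, may drop it early, and fh_put() harmlessly drops it again. The call pattern, schematically:

    host_err = fh_want_write(fhp);          /* before fh_lock()/i_mutex */
    if (host_err)
            goto out_nfserr;
    fh_lock_nested(fhp, I_MUTEX_PARENT);
    /* ... vfs_create()/vfs_unlink()/vfs_rename() ... */
    fh_unlock(fhp);
    fh_drop_write(fhp);                     /* idempotent; fh_put() may repeat it */
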
*/ @@ -1425,6 +1423,11 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, err = nfserr_notdir; if (!dirp->i_op->lookup) goto out; + + host_err = fh_want_write(fhp); + if (host_err) + goto out_nfserr; + fh_lock_nested(fhp, I_MUTEX_PARENT); /* @@ -1457,9 +1460,6 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, v_atime = verifier[1]&0x7fffffff; } - host_err = fh_want_write(fhp); - if (host_err) - goto out_nfserr; if (dchild->d_inode) { err = 0; @@ -1530,7 +1530,6 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, if (!err) err = nfserrno(commit_metadata(fhp)); - fh_drop_write(fhp); /* * Update the filehandle to get the new inode info. */ @@ -1541,6 +1540,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, fh_unlock(fhp); if (dchild && !IS_ERR(dchild)) dput(dchild); + fh_drop_write(fhp); return err; out_nfserr: @@ -1621,6 +1621,11 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp, err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE); if (err) goto out; + + host_err = fh_want_write(fhp); + if (host_err) + goto out_nfserr; + fh_lock(fhp); dentry = fhp->fh_dentry; dnew = lookup_one_len(fname, dentry, flen); @@ -1628,10 +1633,6 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp, if (IS_ERR(dnew)) goto out_nfserr; - host_err = fh_want_write(fhp); - if (host_err) - goto out_nfserr; - if (unlikely(path[plen] != 0)) { char *path_alloced = kmalloc(plen+1, GFP_KERNEL); if (path_alloced == NULL) @@ -1691,6 +1692,12 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp, if (isdotent(name, len)) goto out; + host_err = fh_want_write(tfhp); + if (host_err) { + err = nfserrno(host_err); + goto out; + } + fh_lock_nested(ffhp, I_MUTEX_PARENT); ddir = ffhp->fh_dentry; dirp = ddir->d_inode; @@ -1702,18 +1709,13 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp, dold = tfhp->fh_dentry; - host_err = fh_want_write(tfhp); - if (host_err) { - err = nfserrno(host_err); - goto out_dput; - } err = nfserr_noent; if (!dold->d_inode) - goto out_drop_write; + goto out_dput; host_err = nfsd_break_lease(dold->d_inode); if (host_err) { err = nfserrno(host_err); - goto out_drop_write; + goto out_dput; } host_err = vfs_link(dold, dirp, dnew); if (!host_err) { @@ -1726,12 +1728,11 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp, else err = nfserrno(host_err); } -out_drop_write: - fh_drop_write(tfhp); out_dput: dput(dnew); out_unlock: fh_unlock(ffhp); + fh_drop_write(tfhp); out: return err; @@ -1774,6 +1775,12 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen, if (!flen || isdotent(fname, flen) || !tlen || isdotent(tname, tlen)) goto out; + host_err = fh_want_write(ffhp); + if (host_err) { + err = nfserrno(host_err); + goto out; + } + /* cannot use fh_lock as we need deadlock protective ordering * so do it by hand */ trap = lock_rename(tdentry, fdentry); @@ -1804,17 +1811,14 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen, host_err = -EXDEV; if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt) goto out_dput_new; - host_err = fh_want_write(ffhp); - if (host_err) - goto out_dput_new; host_err = nfsd_break_lease(odentry->d_inode); if (host_err) - goto out_drop_write; + goto out_dput_new; if (ndentry->d_inode) { host_err = nfsd_break_lease(ndentry->d_inode); if (host_err) - goto out_drop_write; + goto out_dput_new; } host_err = vfs_rename(fdir, odentry, tdir, ndentry); if (!host_err) { @@ -1822,8 +1826,6 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, 
int flen, if (!host_err) host_err = commit_metadata(ffhp); } -out_drop_write: - fh_drop_write(ffhp); out_dput_new: dput(ndentry); out_dput_old: @@ -1839,6 +1841,7 @@ out_drop_write: fill_post_wcc(tfhp); unlock_rename(tdentry, fdentry); ffhp->fh_locked = tfhp->fh_locked = 0; + fh_drop_write(ffhp); out: return err; @@ -1864,6 +1867,10 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, if (err) goto out; + host_err = fh_want_write(fhp); + if (host_err) + goto out_nfserr; + fh_lock_nested(fhp, I_MUTEX_PARENT); dentry = fhp->fh_dentry; dirp = dentry->d_inode; @@ -1882,21 +1889,15 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, if (!type) type = rdentry->d_inode->i_mode & S_IFMT; - host_err = fh_want_write(fhp); - if (host_err) - goto out_put; - host_err = nfsd_break_lease(rdentry->d_inode); if (host_err) - goto out_drop_write; + goto out_put; if (type != S_IFDIR) host_err = vfs_unlink(dirp, rdentry); else host_err = vfs_rmdir(dirp, rdentry); if (!host_err) host_err = commit_metadata(fhp); -out_drop_write: - fh_drop_write(fhp); out_put: dput(rdentry); diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h index ec0611b2b73..359594c393d 100644 --- a/fs/nfsd/vfs.h +++ b/fs/nfsd/vfs.h @@ -110,12 +110,19 @@ int nfsd_set_posix_acl(struct svc_fh *, int, struct posix_acl *); static inline int fh_want_write(struct svc_fh *fh) { - return mnt_want_write(fh->fh_export->ex_path.mnt); + int ret = mnt_want_write(fh->fh_export->ex_path.mnt); + + if (!ret) + fh->fh_want_write = 1; + return ret; } static inline void fh_drop_write(struct svc_fh *fh) { - mnt_drop_write(fh->fh_export->ex_path.mnt); + if (fh->fh_want_write) { + fh->fh_want_write = 0; + mnt_drop_write(fh->fh_export->ex_path.mnt); + } } #endif /* LINUX_NFSD_VFS_H */ diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c index 62cebc8e1a1..a4d56ac02e6 100644 --- a/fs/nilfs2/file.c +++ b/fs/nilfs2/file.c @@ -69,16 +69,18 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) struct page *page = vmf->page; struct inode *inode = vma->vm_file->f_dentry->d_inode; struct nilfs_transaction_info ti; - int ret; + int ret = 0; if (unlikely(nilfs_near_disk_full(inode->i_sb->s_fs_info))) return VM_FAULT_SIGBUS; /* -ENOSPC */ + sb_start_pagefault(inode->i_sb); lock_page(page); if (page->mapping != inode->i_mapping || page_offset(page) >= i_size_read(inode) || !PageUptodate(page)) { unlock_page(page); - return VM_FAULT_NOPAGE; /* make the VM retry the fault */ + ret = -EFAULT; /* make the VM retry the fault */ + goto out; } /* @@ -112,19 +114,21 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) ret = nilfs_transaction_begin(inode->i_sb, &ti, 1); /* never returns -ENOMEM, but may return -ENOSPC */ if (unlikely(ret)) - return VM_FAULT_SIGBUS; + goto out; - ret = block_page_mkwrite(vma, vmf, nilfs_get_block); - if (ret != VM_FAULT_LOCKED) { + ret = __block_page_mkwrite(vma, vmf, nilfs_get_block); + if (ret) { nilfs_transaction_abort(inode->i_sb); - return ret; + goto out; } nilfs_set_file_dirty(inode, 1 << (PAGE_SHIFT - inode->i_blkbits)); nilfs_transaction_commit(inode->i_sb); mapped: wait_on_page_writeback(page); - return VM_FAULT_LOCKED; + out: + sb_end_pagefault(inode->i_sb); + return block_page_mkwrite_return(ret); } static const struct vm_operations_struct nilfs_file_vm_ops = { diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c index 0b6387c67e6..fdb18076948 100644 --- a/fs/nilfs2/ioctl.c +++ b/fs/nilfs2/ioctl.c @@ -660,8 +660,6 @@ static int nilfs_ioctl_clean_segments(struct inode 
*inode, struct file *filp, goto out_free; } - vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); - ret = nilfs_ioctl_move_blocks(inode->i_sb, &argv[0], kbufs[0]); if (ret < 0) printk(KERN_ERR "NILFS: GC failed during preparation: " diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 88e11fb346b..a5752a58993 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -189,7 +189,7 @@ int nilfs_transaction_begin(struct super_block *sb, if (ret > 0) return 0; - vfs_check_frozen(sb, SB_FREEZE_WRITE); + sb_start_intwrite(sb); nilfs = sb->s_fs_info; down_read(&nilfs->ns_segctor_sem); @@ -205,6 +205,7 @@ int nilfs_transaction_begin(struct super_block *sb, current->journal_info = ti->ti_save; if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC) kmem_cache_free(nilfs_transaction_cachep, ti); + sb_end_intwrite(sb); return ret; } @@ -246,6 +247,7 @@ int nilfs_transaction_commit(struct super_block *sb) err = nilfs_construct_segment(sb); if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC) kmem_cache_free(nilfs_transaction_cachep, ti); + sb_end_intwrite(sb); return err; } @@ -264,6 +266,7 @@ void nilfs_transaction_abort(struct super_block *sb) current->journal_info = ti->ti_save; if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC) kmem_cache_free(nilfs_transaction_cachep, ti); + sb_end_intwrite(sb); } void nilfs_relax_pressure_in_lock(struct super_block *sb) diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c index 7389d2d5e51..1ecf46448f8 100644 --- a/fs/ntfs/file.c +++ b/fs/ntfs/file.c @@ -2084,7 +2084,6 @@ static ssize_t ntfs_file_aio_write_nolock(struct kiocb *iocb, if (err) return err; pos = *ppos; - vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); /* We can write back this queue in page reclaim. */ current->backing_dev_info = mapping->backing_dev_info; written = 0; @@ -2119,6 +2118,7 @@ static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov, BUG_ON(iocb->ki_pos != pos); + sb_start_write(inode->i_sb); mutex_lock(&inode->i_mutex); ret = ntfs_file_aio_write_nolock(iocb, iov, nr_segs, &iocb->ki_pos); mutex_unlock(&inode->i_mutex); @@ -2127,6 +2127,7 @@ static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov, if (err < 0) ret = err; } + sb_end_write(inode->i_sb); return ret; } diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 7602783d7f4..46a1f6d7510 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -1971,6 +1971,7 @@ int ocfs2_change_file_space(struct file *file, unsigned int cmd, { struct inode *inode = file->f_path.dentry->d_inode; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + int ret; if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) && !ocfs2_writes_unwritten_extents(osb)) @@ -1985,7 +1986,12 @@ int ocfs2_change_file_space(struct file *file, unsigned int cmd, if (!(file->f_mode & FMODE_WRITE)) return -EBADF; - return __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0); + ret = mnt_want_write_file(file); + if (ret) + return ret; + ret = __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0); + mnt_drop_write_file(file); + return ret; } static long ocfs2_fallocate(struct file *file, int mode, loff_t offset, @@ -2261,7 +2267,7 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb, if (iocb->ki_left == 0) return 0; - vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); + sb_start_write(inode->i_sb); appending = file->f_flags & O_APPEND ? 1 : 0; direct_io = file->f_flags & O_DIRECT ? 
1 : 0; @@ -2436,6 +2442,7 @@ out_sems: ocfs2_iocb_clear_sem_locked(iocb); mutex_unlock(&inode->i_mutex); + sb_end_write(inode->i_sb); if (written) ret = written; diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c index d96f7f81d8d..f20edcbfe70 100644 --- a/fs/ocfs2/ioctl.c +++ b/fs/ocfs2/ioctl.c @@ -928,7 +928,12 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if (get_user(new_clusters, (int __user *)arg)) return -EFAULT; - return ocfs2_group_extend(inode, new_clusters); + status = mnt_want_write_file(filp); + if (status) + return status; + status = ocfs2_group_extend(inode, new_clusters); + mnt_drop_write_file(filp); + return status; case OCFS2_IOC_GROUP_ADD: case OCFS2_IOC_GROUP_ADD64: if (!capable(CAP_SYS_RESOURCE)) @@ -937,7 +942,12 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if (copy_from_user(&input, (int __user *) arg, sizeof(input))) return -EFAULT; - return ocfs2_group_add(inode, &input); + status = mnt_want_write_file(filp); + if (status) + return status; + status = ocfs2_group_add(inode, &input); + mnt_drop_write_file(filp); + return status; case OCFS2_IOC_REFLINK: if (copy_from_user(&args, argp, sizeof(args))) return -EFAULT; diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index 0a42ae96dca..2dd36af79e2 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -355,11 +355,14 @@ handle_t *ocfs2_start_trans(struct ocfs2_super *osb, int max_buffs) if (journal_current_handle()) return jbd2_journal_start(journal, max_buffs); + sb_start_intwrite(osb->sb); + down_read(&osb->journal->j_trans_barrier); handle = jbd2_journal_start(journal, max_buffs); if (IS_ERR(handle)) { up_read(&osb->journal->j_trans_barrier); + sb_end_intwrite(osb->sb); mlog_errno(PTR_ERR(handle)); @@ -388,8 +391,10 @@ int ocfs2_commit_trans(struct ocfs2_super *osb, if (ret < 0) mlog_errno(ret); - if (!nested) + if (!nested) { up_read(&journal->j_trans_barrier); + sb_end_intwrite(osb->sb); + } return ret; } diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c index 9cd41083e99..d150372fd81 100644 --- a/fs/ocfs2/mmap.c +++ b/fs/ocfs2/mmap.c @@ -136,6 +136,7 @@ static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) sigset_t oldset; int ret; + sb_start_pagefault(inode->i_sb); ocfs2_block_signals(&oldset); /* @@ -165,6 +166,7 @@ static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) out: ocfs2_unblock_signals(&oldset); + sb_end_pagefault(inode->i_sb); return ret; } diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 9f32d7cbb7a..30a055049e1 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -4466,20 +4466,11 @@ int ocfs2_reflink_ioctl(struct inode *inode, goto out_dput; } - error = mnt_want_write(new_path.mnt); - if (error) { - mlog_errno(error); - goto out_dput; - } - error = ocfs2_vfs_reflink(old_path.dentry, new_path.dentry->d_inode, new_dentry, preserve); - mnt_drop_write(new_path.mnt); out_dput: - dput(new_dentry); - mutex_unlock(&new_path.dentry->d_inode->i_mutex); - path_put(&new_path); + done_path_create(&new_path, new_dentry); out: path_put(&old_path); diff --git a/fs/open.c b/fs/open.c index 1e914b397e1..f3d96e7e7b1 100644 --- a/fs/open.c +++ b/fs/open.c @@ -164,11 +164,13 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small) if (IS_APPEND(inode)) goto out_putf; + sb_start_write(inode->i_sb); error = locks_verify_truncate(inode, file, length); if (!error) error = security_path_truncate(&file->f_path); if (!error) error = do_truncate(dentry, 
length, ATTR_MTIME|ATTR_CTIME, file); + sb_end_write(inode->i_sb); out_putf: fput(file); out: @@ -266,7 +268,10 @@ int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len) if (!file->f_op->fallocate) return -EOPNOTSUPP; - return file->f_op->fallocate(file, mode, offset, len); + sb_start_write(inode->i_sb); + ret = file->f_op->fallocate(file, mode, offset, len); + sb_end_write(inode->i_sb); + return ret; } SYSCALL_DEFINE(fallocate)(int fd, int mode, loff_t offset, loff_t len) @@ -620,7 +625,7 @@ static inline int __get_file_write_access(struct inode *inode, /* * Balanced in __fput() */ - error = mnt_want_write(mnt); + error = __mnt_want_write(mnt); if (error) put_write_access(inode); } @@ -654,6 +659,7 @@ static int do_dentry_open(struct file *f, if (unlikely(f->f_flags & O_PATH)) f->f_mode = FMODE_PATH; + path_get(&f->f_path); inode = f->f_path.dentry->d_inode; if (f->f_mode & FMODE_WRITE) { error = __get_file_write_access(inode, f->f_path.mnt); @@ -739,9 +745,7 @@ int finish_open(struct file *file, struct dentry *dentry, int error; BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */ - mntget(file->f_path.mnt); - file->f_path.dentry = dget(dentry); - + file->f_path.dentry = dentry; error = do_dentry_open(file, open, current_cred()); if (!error) *opened |= FILE_OPENED; @@ -784,7 +788,6 @@ struct file *dentry_open(const struct path *path, int flags, f->f_flags = flags; f->f_path = *path; - path_get(&f->f_path); error = do_dentry_open(f, NULL, cred); if (!error) { error = open_check_o_direct(f); diff --git a/fs/pipe.c b/fs/pipe.c index 95cbd6b227e..8d85d7068c1 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -1016,18 +1016,16 @@ fail_inode: return NULL; } -struct file *create_write_pipe(int flags) +int create_pipe_files(struct file **res, int flags) { int err; - struct inode *inode; + struct inode *inode = get_pipe_inode(); struct file *f; struct path path; - struct qstr name = { .name = "" }; + static struct qstr name = { .name = "" }; - err = -ENFILE; - inode = get_pipe_inode(); if (!inode) - goto err; + return -ENFILE; err = -ENOMEM; path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &name); @@ -1041,62 +1039,43 @@ struct file *create_write_pipe(int flags) f = alloc_file(&path, FMODE_WRITE, &write_pipefifo_fops); if (!f) goto err_dentry; - f->f_mapping = inode->i_mapping; f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)); - f->f_version = 0; - return f; + res[0] = alloc_file(&path, FMODE_READ, &read_pipefifo_fops); + if (!res[0]) + goto err_file; + + path_get(&path); + res[0]->f_flags = O_RDONLY | (flags & O_NONBLOCK); + res[1] = f; + return 0; - err_dentry: +err_file: + put_filp(f); +err_dentry: free_pipe_info(inode); path_put(&path); - return ERR_PTR(err); + return err; - err_inode: +err_inode: free_pipe_info(inode); iput(inode); - err: - return ERR_PTR(err); -} - -void free_write_pipe(struct file *f) -{ - free_pipe_info(f->f_dentry->d_inode); - path_put(&f->f_path); - put_filp(f); -} - -struct file *create_read_pipe(struct file *wrf, int flags) -{ - /* Grab pipe from the writer */ - struct file *f = alloc_file(&wrf->f_path, FMODE_READ, - &read_pipefifo_fops); - if (!f) - return ERR_PTR(-ENFILE); - - path_get(&wrf->f_path); - f->f_flags = O_RDONLY | (flags & O_NONBLOCK); - - return f; + return err; } int do_pipe_flags(int *fd, int flags) { - struct file *fw, *fr; + struct file *files[2]; int error; int fdw, fdr; if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT)) return -EINVAL; - fw = create_write_pipe(flags); - if (IS_ERR(fw)) - return PTR_ERR(fw); - fr = 
create_read_pipe(fw, flags); - error = PTR_ERR(fr); - if (IS_ERR(fr)) - goto err_write_pipe; + error = create_pipe_files(files, flags); + if (error) + return error; error = get_unused_fd_flags(flags); if (error < 0) @@ -1109,8 +1088,8 @@ int do_pipe_flags(int *fd, int flags) fdw = error; audit_fd_pair(fdr, fdw); - fd_install(fdr, fr); - fd_install(fdw, fw); + fd_install(fdr, files[0]); + fd_install(fdw, files[1]); fd[0] = fdr; fd[1] = fdw; @@ -1119,10 +1098,8 @@ int do_pipe_flags(int *fd, int flags) err_fdr: put_unused_fd(fdr); err_read_pipe: - path_put(&fr->f_path); - put_filp(fr); - err_write_pipe: - free_write_pipe(fw); + fput(files[0]); + fput(files[1]); return error; } diff --git a/fs/splice.c b/fs/splice.c index 7bf08fa22ec..41514dd8946 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -996,6 +996,8 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out, }; ssize_t ret; + sb_start_write(inode->i_sb); + pipe_lock(pipe); splice_from_pipe_begin(&sd); @@ -1034,6 +1036,7 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out, *ppos += ret; balance_dirty_pages_ratelimited_nr(mapping, nr_pages); } + sb_end_write(inode->i_sb); return ret; } diff --git a/fs/super.c b/fs/super.c index 4bf714459a4..b05cf47463d 100644 --- a/fs/super.c +++ b/fs/super.c @@ -33,12 +33,19 @@ #include <linux/rculist_bl.h> #include <linux/cleancache.h> #include <linux/fsnotify.h> +#include <linux/lockdep.h> #include "internal.h" LIST_HEAD(super_blocks); DEFINE_SPINLOCK(sb_lock); +static char *sb_writers_name[SB_FREEZE_LEVELS] = { + "sb_writers", + "sb_pagefaults", + "sb_internal", +}; + /* * One thing we have to be careful of with a per-sb shrinker is that we don't * drop the last active reference to the superblock from within the shrinker. @@ -102,6 +109,35 @@ static int prune_super(struct shrinker *shrink, struct shrink_control *sc) return total_objects; } +static int init_sb_writers(struct super_block *s, struct file_system_type *type) +{ + int err; + int i; + + for (i = 0; i < SB_FREEZE_LEVELS; i++) { + err = percpu_counter_init(&s->s_writers.counter[i], 0); + if (err < 0) + goto err_out; + lockdep_init_map(&s->s_writers.lock_map[i], sb_writers_name[i], + &type->s_writers_key[i], 0); + } + init_waitqueue_head(&s->s_writers.wait); + init_waitqueue_head(&s->s_writers.wait_unfrozen); + return 0; +err_out: + while (--i >= 0) + percpu_counter_destroy(&s->s_writers.counter[i]); + return err; +} + +static void destroy_sb_writers(struct super_block *s) +{ + int i; + + for (i = 0; i < SB_FREEZE_LEVELS; i++) + percpu_counter_destroy(&s->s_writers.counter[i]); +} + /** * alloc_super - create new superblock * @type: filesystem type superblock should belong to @@ -117,18 +153,19 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags) if (s) { if (security_sb_alloc(s)) { + /* + * We cannot call security_sb_free() without + * security_sb_alloc() succeeding. 
So bail out manually. + */ + kfree(s); + s = NULL; + goto out; } #ifdef CONFIG_SMP s->s_files = alloc_percpu(struct list_head); - if (!s->s_files) { - security_sb_free(s); - kfree(s); - s = NULL; - goto out; - } else { + if (!s->s_files) + goto err_out; + else { int i; for_each_possible_cpu(i) @@ -137,6 +174,8 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags) #else INIT_LIST_HEAD(&s->s_files); #endif + if (init_sb_writers(s, type)) + goto err_out; s->s_flags = flags; s->s_bdi = &default_backing_dev_info; INIT_HLIST_NODE(&s->s_instances); @@ -178,7 +217,6 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags) mutex_init(&s->s_dquot.dqio_mutex); mutex_init(&s->s_dquot.dqonoff_mutex); init_rwsem(&s->s_dquot.dqptr_sem); - init_waitqueue_head(&s->s_wait_unfrozen); s->s_maxbytes = MAX_NON_LFS; s->s_op = &default_op; s->s_time_gran = 1000000000; @@ -190,6 +228,16 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags) } out: return s; +err_out: + security_sb_free(s); +#ifdef CONFIG_SMP + if (s->s_files) + free_percpu(s->s_files); +#endif + destroy_sb_writers(s); + kfree(s); + s = NULL; + goto out; } /** @@ -203,6 +251,7 @@ static inline void destroy_super(struct super_block *s) #ifdef CONFIG_SMP free_percpu(s->s_files); #endif + destroy_sb_writers(s); security_sb_free(s); WARN_ON(!list_empty(&s->s_mounts)); kfree(s->s_subtype); @@ -651,10 +700,11 @@ struct super_block *get_super_thawed(struct block_device *bdev) { while (1) { struct super_block *s = get_super(bdev); - if (!s || s->s_frozen == SB_UNFROZEN) + if (!s || s->s_writers.frozen == SB_UNFROZEN) return s; up_read(&s->s_umount); - vfs_check_frozen(s, SB_FREEZE_WRITE); + wait_event(s->s_writers.wait_unfrozen, + s->s_writers.frozen == SB_UNFROZEN); put_super(s); } } @@ -732,7 +782,7 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force) int retval; int remount_ro; - if (sb->s_frozen != SB_UNFROZEN) + if (sb->s_writers.frozen != SB_UNFROZEN) return -EBUSY; #ifdef CONFIG_BLOCK @@ -1163,6 +1213,120 @@ out: return ERR_PTR(error); } +/* + * This is an internal function, please use sb_end_{write,pagefault,intwrite} + * instead. + */ +void __sb_end_write(struct super_block *sb, int level) +{ + percpu_counter_dec(&sb->s_writers.counter[level-1]); + /* + * Make sure s_writers are updated before we wake up waiters in + * freeze_super(). + */ + smp_mb(); + if (waitqueue_active(&sb->s_writers.wait)) + wake_up(&sb->s_writers.wait); + rwsem_release(&sb->s_writers.lock_map[level-1], 1, _RET_IP_); +} +EXPORT_SYMBOL(__sb_end_write); + +#ifdef CONFIG_LOCKDEP +/* + * We want lockdep to tell us about possible deadlocks with freezing but + * it's a bit tricky to properly instrument it. Getting freeze protection + * works like taking a read lock but there are subtle problems. XFS for example + * gets freeze protection on the internal level twice in some cases, which is OK + * only because we already hold freeze protection also on a higher level. Due + * to these cases we have to tell lockdep we are doing a trylock when we + * already hold freeze protection for a higher freeze level.
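
The counters, waitqueues and lockdep maps manipulated here live in the new struct sb_writers embedded in struct super_block. The include/linux/fs.h side of the change is not part of this section; reconstructed from the usage above, it looks approximately like this:

    enum {
            SB_UNFROZEN = 0,                /* fs is unfrozen */
            SB_FREEZE_WRITE = 1,            /* new writes blocked */
            SB_FREEZE_PAGEFAULT,            /* page faults blocked too */
            SB_FREEZE_FS,                   /* internal fs writers blocked */
            SB_FREEZE_COMPLETE,             /* ->freeze_fs() has finished */
    };
    #define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1)

    struct sb_writers {
            /* one counter of active writers per protection level */
            struct percpu_counter   counter[SB_FREEZE_LEVELS];
            wait_queue_head_t       wait;           /* freezer waits here */
            int                     frozen;         /* current freeze state */
            wait_queue_head_t       wait_unfrozen;  /* blocked writers wait here */
    #ifdef CONFIG_LOCKDEP
            struct lockdep_map      lock_map[SB_FREEZE_LEVELS];
    #endif
    };
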
+ */ +static void acquire_freeze_lock(struct super_block *sb, int level, bool trylock, + unsigned long ip) +{ + int i; + + if (!trylock) { + for (i = 0; i < level - 1; i++) + if (lock_is_held(&sb->s_writers.lock_map[i])) { + trylock = true; + break; + } + } + rwsem_acquire_read(&sb->s_writers.lock_map[level-1], 0, trylock, ip); +} +#endif + +/* + * This is an internal function, please use sb_start_{write,pagefault,intwrite} + * instead. + */ +int __sb_start_write(struct super_block *sb, int level, bool wait) +{ +retry: + if (unlikely(sb->s_writers.frozen >= level)) { + if (!wait) + return 0; + wait_event(sb->s_writers.wait_unfrozen, + sb->s_writers.frozen < level); + } + +#ifdef CONFIG_LOCKDEP + acquire_freeze_lock(sb, level, !wait, _RET_IP_); +#endif + percpu_counter_inc(&sb->s_writers.counter[level-1]); + /* + * Make sure counter is updated before we check for frozen. + * freeze_super() first sets frozen and then checks the counter. + */ + smp_mb(); + if (unlikely(sb->s_writers.frozen >= level)) { + __sb_end_write(sb, level); + goto retry; + } + return 1; +} +EXPORT_SYMBOL(__sb_start_write); + +/** + * sb_wait_write - wait until all writers to given file system finish + * @sb: the super for which we wait + * @level: type of writers we wait for (normal vs page fault) + * + * This function waits until there are no writers of given type to given file + * system. Caller of this function should make sure there can be no new writers + * of type @level before calling this function. Otherwise this function can + * livelock. + */ +static void sb_wait_write(struct super_block *sb, int level) +{ + s64 writers; + + /* + * We just cycle-through lockdep here so that it does not complain + * about returning with lock to userspace + */ + rwsem_acquire(&sb->s_writers.lock_map[level-1], 0, 0, _THIS_IP_); + rwsem_release(&sb->s_writers.lock_map[level-1], 1, _THIS_IP_); + + do { + DEFINE_WAIT(wait); + + /* + * We use a barrier in prepare_to_wait() to separate setting + * of frozen and checking of the counter + */ + prepare_to_wait(&sb->s_writers.wait, &wait, + TASK_UNINTERRUPTIBLE); + + writers = percpu_counter_sum(&sb->s_writers.counter[level-1]); + if (writers) + schedule(); + + finish_wait(&sb->s_writers.wait, &wait); + } while (writers); +} + /** * freeze_super - lock the filesystem and force it into a consistent state * @sb: the super to lock @@ -1170,6 +1334,31 @@ out: * Syncs the super to make sure the filesystem is consistent and calls the fs's * freeze_fs. Subsequent calls to this without first thawing the fs will return * -EBUSY. + * + * During this function, sb->s_writers.frozen goes through these values: + * + * SB_UNFROZEN: File system is normal, all writes progress as usual. + * + * SB_FREEZE_WRITE: The file system is in the process of being frozen. New + * writes should be blocked, though page faults are still allowed. We wait for + * all writes to complete and then proceed to the next stage. + * + * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked + * but internal fs threads can still modify the filesystem (although they + * should not dirty new pages or inodes), writeback can run etc. After waiting + * for all running page faults we sync the filesystem which will clean all + * dirty pages and inodes (no new dirty pages or inodes can be created when + * sync is running). + * + * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs + * modification are blocked (e.g. XFS preallocation truncation on inode + * reclaim). 
This is usually implemented by blocking new transactions for + * filesystems that have them and need this additional guard. After all + * internal writers are finished we call ->freeze_fs() to finish filesystem + * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is + * mostly auxiliary for filesystems to verify they do not modify frozen fs. + * + * sb->s_writers.frozen is protected by sb->s_umount. */ int freeze_super(struct super_block *sb) { @@ -1177,7 +1366,7 @@ int freeze_super(struct super_block *sb) atomic_inc(&sb->s_active); down_write(&sb->s_umount); - if (sb->s_frozen) { + if (sb->s_writers.frozen != SB_UNFROZEN) { deactivate_locked_super(sb); return -EBUSY; } @@ -1188,33 +1377,53 @@ int freeze_super(struct super_block *sb) } if (sb->s_flags & MS_RDONLY) { - sb->s_frozen = SB_FREEZE_TRANS; - smp_wmb(); + /* Nothing to do really... */ + sb->s_writers.frozen = SB_FREEZE_COMPLETE; up_write(&sb->s_umount); return 0; } - sb->s_frozen = SB_FREEZE_WRITE; + /* From now on, no new normal writers can start */ + sb->s_writers.frozen = SB_FREEZE_WRITE; + smp_wmb(); + + /* Release s_umount to preserve sb_start_write -> s_umount ordering */ + up_write(&sb->s_umount); + + sb_wait_write(sb, SB_FREEZE_WRITE); + + /* Now we go and block page faults... */ + down_write(&sb->s_umount); + sb->s_writers.frozen = SB_FREEZE_PAGEFAULT; smp_wmb(); + sb_wait_write(sb, SB_FREEZE_PAGEFAULT); + + /* All writers are done so after syncing there won't be dirty data */ sync_filesystem(sb); - sb->s_frozen = SB_FREEZE_TRANS; + /* Now wait for internal filesystem counter */ + sb->s_writers.frozen = SB_FREEZE_FS; smp_wmb(); + sb_wait_write(sb, SB_FREEZE_FS); - sync_blockdev(sb->s_bdev); if (sb->s_op->freeze_fs) { ret = sb->s_op->freeze_fs(sb); if (ret) { printk(KERN_ERR "VFS:Filesystem freeze failed\n"); - sb->s_frozen = SB_UNFROZEN; + sb->s_writers.frozen = SB_UNFROZEN; smp_wmb(); - wake_up(&sb->s_wait_unfrozen); + wake_up(&sb->s_writers.wait_unfrozen); deactivate_locked_super(sb); return ret; } } + /* + * This is just for debugging purposes so that fs can warn if it + * sees write activity when frozen is set to SB_FREEZE_COMPLETE. + */ + sb->s_writers.frozen = SB_FREEZE_COMPLETE; up_write(&sb->s_umount); return 0; } @@ -1231,7 +1440,7 @@ int thaw_super(struct super_block *sb) int error; down_write(&sb->s_umount); - if (sb->s_frozen == SB_UNFROZEN) { + if (sb->s_writers.frozen == SB_UNFROZEN) { up_write(&sb->s_umount); return -EINVAL; } @@ -1244,16 +1453,15 @@ int thaw_super(struct super_block *sb) if (error) { printk(KERN_ERR "VFS:Filesystem thaw failed\n"); - sb->s_frozen = SB_FREEZE_TRANS; up_write(&sb->s_umount); return error; } } out: - sb->s_frozen = SB_UNFROZEN; + sb->s_writers.frozen = SB_UNFROZEN; smp_wmb(); - wake_up(&sb->s_wait_unfrozen); + wake_up(&sb->s_writers.wait_unfrozen); deactivate_locked_super(sb); return 0; diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c index a4759833d62..614b2b54488 100644 --- a/fs/sysfs/bin.c +++ b/fs/sysfs/bin.c @@ -228,6 +228,8 @@ static int bin_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) ret = 0; if (bb->vm_ops->page_mkwrite) ret = bb->vm_ops->page_mkwrite(vma, vmf); + else + file_update_time(file); sysfs_put_active(attr_sd); return ret; diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 15052ff916e..e562dd43f41 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -124,6 +124,12 @@ xfs_setfilesize_trans_alloc( ioend->io_append_trans = tp; /* + * We will pass freeze protection with a transaction. 
So tell lockdep + * we released it. + */ + rwsem_release(&ioend->io_inode->i_sb->s_writers.lock_map[SB_FREEZE_FS-1], + 1, _THIS_IP_); + /* * We hand off the transaction to the completion thread now, so * clear the flag here. */ @@ -199,6 +205,15 @@ xfs_end_io( struct xfs_inode *ip = XFS_I(ioend->io_inode); int error = 0; + if (ioend->io_append_trans) { + /* + * We've got freeze protection passed with the transaction. + * Tell lockdep about it. + */ + rwsem_acquire_read( + &ioend->io_inode->i_sb->s_writers.lock_map[SB_FREEZE_FS-1], + 0, 1, _THIS_IP_); + } if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { ioend->io_error = -EIO; goto done; @@ -1425,6 +1440,9 @@ out_trans_cancel: if (ioend->io_append_trans) { current_set_flags_nested(&ioend->io_append_trans->t_pflags, PF_FSTRANS); + rwsem_acquire_read( + &inode->i_sb->s_writers.lock_map[SB_FREEZE_FS-1], + 0, 1, _THIS_IP_); xfs_trans_cancel(ioend->io_append_trans, 0); } out_destroy_ioend: diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index c4559c6e6f2..56afcdb2377 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -770,10 +770,12 @@ xfs_file_aio_write( if (ocount == 0) return 0; - xfs_wait_for_freeze(ip->i_mount, SB_FREEZE_WRITE); + sb_start_write(inode->i_sb); - if (XFS_FORCED_SHUTDOWN(ip->i_mount)) - return -EIO; + if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { + ret = -EIO; + goto out; + } if (unlikely(file->f_flags & O_DIRECT)) ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos, ocount); @@ -792,6 +794,8 @@ xfs_file_aio_write( ret = err; } +out: + sb_end_write(inode->i_sb); return ret; } diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index 1f1535d25a9..0e0232c3b6d 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -364,9 +364,15 @@ xfs_fssetdm_by_handle( if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t))) return -XFS_ERROR(EFAULT); + error = mnt_want_write_file(parfilp); + if (error) + return error; + dentry = xfs_handlereq_to_dentry(parfilp, &dmhreq.hreq); - if (IS_ERR(dentry)) + if (IS_ERR(dentry)) { + mnt_drop_write_file(parfilp); return PTR_ERR(dentry); + } if (IS_IMMUTABLE(dentry->d_inode) || IS_APPEND(dentry->d_inode)) { error = -XFS_ERROR(EPERM); @@ -382,6 +388,7 @@ xfs_fssetdm_by_handle( fsd.fsd_dmstate); out: + mnt_drop_write_file(parfilp); dput(dentry); return error; } @@ -634,7 +641,11 @@ xfs_ioc_space( if (ioflags & IO_INVIS) attr_flags |= XFS_ATTR_DMI; + error = mnt_want_write_file(filp); + if (error) + return error; error = xfs_change_file_space(ip, cmd, bf, filp->f_pos, attr_flags); + mnt_drop_write_file(filp); return -error; } @@ -1163,6 +1174,7 @@ xfs_ioc_fssetxattr( { struct fsxattr fa; unsigned int mask; + int error; if (copy_from_user(&fa, arg, sizeof(fa))) return -EFAULT; @@ -1171,7 +1183,12 @@ xfs_ioc_fssetxattr( if (filp->f_flags & (O_NDELAY|O_NONBLOCK)) mask |= FSX_NONBLOCK; - return -xfs_ioctl_setattr(ip, &fa, mask); + error = mnt_want_write_file(filp); + if (error) + return error; + error = xfs_ioctl_setattr(ip, &fa, mask); + mnt_drop_write_file(filp); + return -error; } STATIC int @@ -1196,6 +1213,7 @@ xfs_ioc_setxflags( struct fsxattr fa; unsigned int flags; unsigned int mask; + int error; if (copy_from_user(&flags, arg, sizeof(flags))) return -EFAULT; @@ -1210,7 +1228,12 @@ xfs_ioc_setxflags( mask |= FSX_NONBLOCK; fa.fsx_xflags = xfs_merge_ioc_xflags(flags, xfs_ip2xflags(ip)); - return -xfs_ioctl_setattr(ip, &fa, mask); + error = mnt_want_write_file(filp); + if (error) + return error; + error = xfs_ioctl_setattr(ip, &fa, mask); + mnt_drop_write_file(filp); + return -error; } 
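All of these ioctl conversions follow one bracketing discipline: take write access before modifying anything and drop it on every exit path. In this series mnt_want_write() is also taught to take freeze protection (the fs/namespace.c side of that is outside the portion shown here), so the one pair guards against both read-only remounts and a frozen filesystem. A minimal sketch of the shape; the handler and helper names are illustrative, not from the patch:

extern int example_apply_change(struct file *filp);	/* hypothetical */

static long example_ioctl_set(struct file *filp)
{
	int error;

	error = mnt_want_write_file(filp);	/* fails on r/o mounts, waits on a frozen sb */
	if (error)
		return error;
	error = example_apply_change(filp);	/* hypothetical helper doing the real work */
	mnt_drop_write_file(filp);		/* dropped on every exit path */
	return error;
}

The plain write path uses the lower-level pair directly, as in the xfs_file_aio_write() hunk above: sb_start_write() before the first byte can be dirtied, sb_end_write() once the write has completed or failed.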
STATIC int @@ -1385,8 +1408,13 @@ xfs_file_ioctl( if (copy_from_user(&dmi, arg, sizeof(dmi))) return -XFS_ERROR(EFAULT); + error = mnt_want_write_file(filp); + if (error) + return error; + error = xfs_set_dmattrs(ip, dmi.fsd_dmevmask, dmi.fsd_dmstate); + mnt_drop_write_file(filp); return -error; } @@ -1434,7 +1462,11 @@ xfs_file_ioctl( if (copy_from_user(&sxp, arg, sizeof(xfs_swapext_t))) return -XFS_ERROR(EFAULT); + error = mnt_want_write_file(filp); + if (error) + return error; error = xfs_swapext(&sxp); + mnt_drop_write_file(filp); return -error; } @@ -1463,9 +1495,14 @@ xfs_file_ioctl( if (copy_from_user(&inout, arg, sizeof(inout))) return -XFS_ERROR(EFAULT); + error = mnt_want_write_file(filp); + if (error) + return error; + /* input parameter is passed in resblks field of structure */ in = inout.resblks; error = xfs_reserve_blocks(mp, &in, &inout); + mnt_drop_write_file(filp); if (error) return -error; @@ -1496,7 +1533,11 @@ xfs_file_ioctl( if (copy_from_user(&in, arg, sizeof(in))) return -XFS_ERROR(EFAULT); + error = mnt_want_write_file(filp); + if (error) + return error; error = xfs_growfs_data(mp, &in); + mnt_drop_write_file(filp); return -error; } @@ -1506,7 +1547,11 @@ xfs_file_ioctl( if (copy_from_user(&in, arg, sizeof(in))) return -XFS_ERROR(EFAULT); + error = mnt_want_write_file(filp); + if (error) + return error; error = xfs_growfs_log(mp, &in); + mnt_drop_write_file(filp); return -error; } @@ -1516,7 +1561,11 @@ xfs_file_ioctl( if (copy_from_user(&in, arg, sizeof(in))) return -XFS_ERROR(EFAULT); + error = mnt_want_write_file(filp); + if (error) + return error; error = xfs_growfs_rt(mp, &in); + mnt_drop_write_file(filp); return -error; } diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c index c4f2da0d2bf..1244274a567 100644 --- a/fs/xfs/xfs_ioctl32.c +++ b/fs/xfs/xfs_ioctl32.c @@ -600,7 +600,11 @@ xfs_file_compat_ioctl( if (xfs_compat_growfs_data_copyin(&in, arg)) return -XFS_ERROR(EFAULT); + error = mnt_want_write_file(filp); + if (error) + return error; error = xfs_growfs_data(mp, &in); + mnt_drop_write_file(filp); return -error; } case XFS_IOC_FSGROWFSRT_32: { @@ -608,7 +612,11 @@ xfs_file_compat_ioctl( if (xfs_compat_growfs_rt_copyin(&in, arg)) return -XFS_ERROR(EFAULT); + error = mnt_want_write_file(filp); + if (error) + return error; error = xfs_growfs_rt(mp, &in); + mnt_drop_write_file(filp); return -error; } #endif @@ -627,7 +635,11 @@ xfs_file_compat_ioctl( offsetof(struct xfs_swapext, sx_stat)) || xfs_ioctl32_bstat_copyin(&sxp.sx_stat, &sxu->sx_stat)) return -XFS_ERROR(EFAULT); + error = mnt_want_write_file(filp); + if (error) + return error; error = xfs_swapext(&sxp); + mnt_drop_write_file(filp); return -error; } case XFS_IOC_FSBULKSTAT_32: diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 915edf6639f..973dff6ad93 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -680,9 +680,9 @@ xfs_iomap_write_unwritten( * the same inode that we complete here and might deadlock * on the iolock. 
*/ - xfs_wait_for_freeze(mp, SB_FREEZE_TRANS); + sb_start_intwrite(mp->m_super); tp = _xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE, KM_NOFS); - tp->t_flags |= XFS_TRANS_RESERVE; + tp->t_flags |= XFS_TRANS_RESERVE | XFS_TRANS_FREEZE_PROT; error = xfs_trans_reserve(tp, resblks, XFS_WRITE_LOG_RES(mp), 0, XFS_TRANS_PERM_LOG_RES, diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 711ca51ca3d..29c2f83d414 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -1551,7 +1551,7 @@ xfs_unmountfs( int xfs_fs_writable(xfs_mount_t *mp) { - return !(xfs_test_for_freeze(mp) || XFS_FORCED_SHUTDOWN(mp) || + return !(mp->m_super->s_writers.frozen || XFS_FORCED_SHUTDOWN(mp) || (mp->m_flags & XFS_MOUNT_RDONLY)); } diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 8724336a9a0..05a05a7b611 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -311,9 +311,6 @@ void xfs_do_force_shutdown(struct xfs_mount *mp, int flags, char *fname, #define SHUTDOWN_REMOTE_REQ 0x0010 /* shutdown came from remote cell */ #define SHUTDOWN_DEVICE_REQ 0x0020 /* failed all paths to the device */ -#define xfs_test_for_freeze(mp) ((mp)->m_super->s_frozen) -#define xfs_wait_for_freeze(mp,l) vfs_check_frozen((mp)->m_super, (l)) - /* * Flags for xfs_mountfs */ diff --git a/fs/xfs/xfs_sync.c b/fs/xfs/xfs_sync.c index 97304f10e78..96548176db8 100644 --- a/fs/xfs/xfs_sync.c +++ b/fs/xfs/xfs_sync.c @@ -403,7 +403,7 @@ xfs_sync_worker( if (!(mp->m_super->s_flags & MS_ACTIVE) && !(mp->m_flags & XFS_MOUNT_RDONLY)) { /* dgc: errors ignored here */ - if (mp->m_super->s_frozen == SB_UNFROZEN && + if (mp->m_super->s_writers.frozen == SB_UNFROZEN && xfs_log_need_covered(mp)) error = xfs_fs_log_dummy(mp); else diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index fdf324508c5..06ed520a767 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -576,8 +576,12 @@ xfs_trans_alloc( xfs_mount_t *mp, uint type) { - xfs_wait_for_freeze(mp, SB_FREEZE_TRANS); - return _xfs_trans_alloc(mp, type, KM_SLEEP); + xfs_trans_t *tp; + + sb_start_intwrite(mp->m_super); + tp = _xfs_trans_alloc(mp, type, KM_SLEEP); + tp->t_flags |= XFS_TRANS_FREEZE_PROT; + return tp; } xfs_trans_t * @@ -588,6 +592,7 @@ _xfs_trans_alloc( { xfs_trans_t *tp; + WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE); atomic_inc(&mp->m_active_trans); tp = kmem_zone_zalloc(xfs_trans_zone, memflags); @@ -611,6 +616,8 @@ xfs_trans_free( xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false); atomic_dec(&tp->t_mountp->m_active_trans); + if (tp->t_flags & XFS_TRANS_FREEZE_PROT) + sb_end_intwrite(tp->t_mountp->m_super); xfs_trans_free_dqinfo(tp); kmem_zone_free(xfs_trans_zone, tp); } @@ -643,7 +650,11 @@ xfs_trans_dup( ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); ASSERT(tp->t_ticket != NULL); - ntp->t_flags = XFS_TRANS_PERM_LOG_RES | (tp->t_flags & XFS_TRANS_RESERVE); + ntp->t_flags = XFS_TRANS_PERM_LOG_RES | + (tp->t_flags & XFS_TRANS_RESERVE) | + (tp->t_flags & XFS_TRANS_FREEZE_PROT); + /* We gave our writer reference to the new transaction */ + tp->t_flags &= ~XFS_TRANS_FREEZE_PROT; ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket); ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used; tp->t_blk_res = tp->t_blk_res_used; diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h index bc2afd52a0b..db056544cbb 100644 --- a/fs/xfs/xfs_trans.h +++ b/fs/xfs/xfs_trans.h @@ -179,6 +179,8 @@ struct xfs_log_item_desc { #define XFS_TRANS_SYNC 0x08 /* make commit synchronous */ #define XFS_TRANS_DQ_DIRTY 0x10 /* at least one dquot in trx dirty */ #define XFS_TRANS_RESERVE 
0x20 /* OK to use reserved data blocks */ +#define XFS_TRANS_FREEZE_PROT 0x40 /* Transaction has elevated writer + count in superblock */ /* * Values for call flags parameter. diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h index 02549017212..2a5f64a11b7 100644 --- a/include/linux/amba/pl08x.h +++ b/include/linux/amba/pl08x.h @@ -21,8 +21,9 @@ #include <linux/dmaengine.h> #include <linux/interrupt.h> -struct pl08x_lli; struct pl08x_driver_data; +struct pl08x_phy_chan; +struct pl08x_txd; /* Bitmasks for selecting AHB ports for DMA transfers */ enum { @@ -46,170 +47,29 @@ enum { * devices with static assignments * @muxval: a number usually used to poke into some mux register to * mux in the signal to this channel - * @cctl_opt: default options for the channel control register + * @cctl_memcpy: options for the channel control register for memcpy + * *** not used for slave channels *** * @addr: source/target address in physical memory for this DMA channel, * can be the address of a FIFO register for burst requests for example. * This can be left undefined if the PrimeCell API is used for configuring * this. - * @circular_buffer: whether the buffer passed in is circular and - * shall simply be looped round round (like a record baby round - * round round round) * @single: the device connected to this channel will request single DMA * transfers, not bursts. (Bursts are default.) * @periph_buses: the device connected to this channel is accessible via * these buses (use PL08X_AHB1 | PL08X_AHB2). */ struct pl08x_channel_data { - char *bus_id; + const char *bus_id; int min_signal; int max_signal; u32 muxval; - u32 cctl; + u32 cctl_memcpy; dma_addr_t addr; - bool circular_buffer; bool single; u8 periph_buses; }; /** - * Struct pl08x_bus_data - information of source or destination - * busses for a transfer - * @addr: current address - * @maxwidth: the maximum width of a transfer on this bus - * @buswidth: the width of this bus in bytes: 1, 2 or 4 - */ -struct pl08x_bus_data { - dma_addr_t addr; - u8 maxwidth; - u8 buswidth; -}; - -/** - * struct pl08x_phy_chan - holder for the physical channels - * @id: physical index to this channel - * @lock: a lock to use when altering an instance of this struct - * @signal: the physical signal (aka channel) serving this physical channel - * right now - * @serving: the virtual channel currently being served by this physical - * channel - * @locked: channel unavailable for the system, e.g.
dedicated to secure - world - */ -struct pl08x_phy_chan { - unsigned int id; - void __iomem *base; - spinlock_t lock; - int signal; - struct pl08x_dma_chan *serving; - bool locked; -}; - -/** - * struct pl08x_sg - structure containing data per sg - * @src_addr: src address of sg - * @dst_addr: dst address of sg - * @len: transfer len in bytes - * @node: node for txd's dsg_list - */ -struct pl08x_sg { - dma_addr_t src_addr; - dma_addr_t dst_addr; - size_t len; - struct list_head node; -}; - -/** - * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor - * @tx: async tx descriptor - * @node: node for txd list for channels - * @dsg_list: list of children sg's - * @direction: direction of transfer - * @llis_bus: DMA memory address (physical) start for the LLIs - * @llis_va: virtual memory address start for the LLIs - * @cctl: control reg values for current txd - * @ccfg: config reg values for current txd - */ -struct pl08x_txd { - struct dma_async_tx_descriptor tx; - struct list_head node; - struct list_head dsg_list; - enum dma_transfer_direction direction; - dma_addr_t llis_bus; - struct pl08x_lli *llis_va; - /* Default cctl value for LLIs */ - u32 cctl; - /* - * Settings to be put into the physical channel when we - * trigger this txd. Other registers are in llis_va[0]. - */ - u32 ccfg; -}; - -/** - * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel - * states - * @PL08X_CHAN_IDLE: the channel is idle - * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport - * channel and is running a transfer on it - * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport - * channel, but the transfer is currently paused - * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport - * channel to become available (only pertains to memcpy channels) - */ -enum pl08x_dma_chan_state { - PL08X_CHAN_IDLE, - PL08X_CHAN_RUNNING, - PL08X_CHAN_PAUSED, - PL08X_CHAN_WAITING, -}; - -/** - * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel - * @chan: wrapped abstract channel - * @phychan: the physical channel utilized by this channel, if there is one - * @phychan_hold: if non-zero, hold on to the physical channel even if we - * have no pending entries - * @tasklet: tasklet scheduled by the IRQ to handle actual work etc - * @name: name of channel - * @cd: channel platform data - * @runtime_addr: address for RX/TX according to the runtime config - * @runtime_direction: current direction of this channel according to - * runtime config - * @pend_list: queued transactions pending on this channel - * @at: active transaction on this channel - * @lock: a lock for this channel data - * @host: a pointer to the host (internal use) - * @state: whether the channel is idle, paused, running etc - * @slave: whether this channel is a device (slave) or for memcpy - * @device_fc: Flow Controller Settings for ccfg register. Only valid for slave - * channels. Fill with 'true' if peripheral should be flow controller. Direction - * will be selected at Runtime.
- * @waiting: a TX descriptor on this channel which is waiting for a physical - * channel to become available - */ -struct pl08x_dma_chan { - struct dma_chan chan; - struct pl08x_phy_chan *phychan; - int phychan_hold; - struct tasklet_struct tasklet; - char *name; - const struct pl08x_channel_data *cd; - dma_addr_t src_addr; - dma_addr_t dst_addr; - u32 src_cctl; - u32 dst_cctl; - enum dma_transfer_direction runtime_direction; - struct list_head pend_list; - struct pl08x_txd *at; - spinlock_t lock; - struct pl08x_driver_data *host; - enum pl08x_dma_chan_state state; - bool slave; - bool device_fc; - struct pl08x_txd *waiting; -}; - -/** * struct pl08x_platform_data - the platform configuration for the PL08x * PrimeCells. * @slave_channels: the channels defined for the different devices on the @@ -229,8 +89,8 @@ struct pl08x_platform_data { const struct pl08x_channel_data *slave_channels; unsigned int num_slave_channels; struct pl08x_channel_data memcpy_channel; - int (*get_signal)(struct pl08x_dma_chan *); - void (*put_signal)(struct pl08x_dma_chan *); + int (*get_signal)(const struct pl08x_channel_data *); + void (*put_signal)(const struct pl08x_channel_data *, int); u8 lli_buses; u8 mem_buses; }; diff --git a/include/linux/audit.h b/include/linux/audit.h index 22f292a917a..36abf2aa7e6 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -130,6 +130,7 @@ #define AUDIT_LAST_KERN_ANOM_MSG 1799 #define AUDIT_ANOM_PROMISCUOUS 1700 /* Device changed promiscuous mode */ #define AUDIT_ANOM_ABEND 1701 /* Process ended abnormally */ +#define AUDIT_ANOM_LINK 1702 /* Suspicious use of file links */ #define AUDIT_INTEGRITY_DATA 1800 /* Data integrity verification */ #define AUDIT_INTEGRITY_METADATA 1801 /* Metadata integrity verification */ #define AUDIT_INTEGRITY_STATUS 1802 /* Integrity enable status */ @@ -687,6 +688,8 @@ extern void audit_log_d_path(struct audit_buffer *ab, const struct path *path); extern void audit_log_key(struct audit_buffer *ab, char *key); +extern void audit_log_link_denied(const char *operation, + struct path *link); extern void audit_log_lost(const char *message); #ifdef CONFIG_SECURITY extern void audit_log_secctx(struct audit_buffer *ab, u32 secid); @@ -716,6 +719,7 @@ extern int audit_enabled; #define audit_log_untrustedstring(a,s) do { ; } while (0) #define audit_log_d_path(b, p, d) do { ; } while (0) #define audit_log_key(b, k) do { ; } while (0) +#define audit_log_link_denied(o, l) do { ; } while (0) #define audit_log_secctx(b,s) do { ; } while (0) #define audit_enabled 0 #endif diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 07954b05b86..4e72a9d4823 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -46,16 +46,23 @@ struct blkcg_gq; struct request; typedef void (rq_end_io_fn)(struct request *, int); +#define BLK_RL_SYNCFULL (1U << 0) +#define BLK_RL_ASYNCFULL (1U << 1) + struct request_list { + struct request_queue *q; /* the queue this rl belongs to */ +#ifdef CONFIG_BLK_CGROUP + struct blkcg_gq *blkg; /* blkg this request pool belongs to */ +#endif /* * count[], starved[], and wait[] are indexed by * BLK_RW_SYNC/BLK_RW_ASYNC */ - int count[2]; - int starved[2]; - int elvpriv; - mempool_t *rq_pool; - wait_queue_head_t wait[2]; + int count[2]; + int starved[2]; + mempool_t *rq_pool; + wait_queue_head_t wait[2]; + unsigned int flags; }; /* @@ -138,6 +145,7 @@ struct request { struct hd_struct *part; unsigned long start_time; #ifdef CONFIG_BLK_CGROUP + struct request_list *rl; /* rl this rq is alloced from */ unsigned 
long long start_time_ns; unsigned long long io_start_time_ns; /* when passed to hardware */ #endif @@ -282,11 +290,16 @@ struct request_queue { struct list_head queue_head; struct request *last_merge; struct elevator_queue *elevator; + int nr_rqs[2]; /* # allocated [a]sync rqs */ + int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */ /* - * the queue request freelist, one for reads and one for writes + * If blkcg is not used, @q->root_rl serves all requests. If blkcg + * is used, root blkg allocates from @q->root_rl and all other + * blkgs from their own blkg->rl. Which one to use should be + * determined using bio_request_list(). */ - struct request_list rq; + struct request_list root_rl; request_fn_proc *request_fn; make_request_fn *make_request_fn; @@ -561,27 +574,25 @@ static inline bool rq_is_sync(struct request *rq) return rw_is_sync(rq->cmd_flags); } -static inline int blk_queue_full(struct request_queue *q, int sync) +static inline bool blk_rl_full(struct request_list *rl, bool sync) { - if (sync) - return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags); - return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags); + unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; + + return rl->flags & flag; } -static inline void blk_set_queue_full(struct request_queue *q, int sync) +static inline void blk_set_rl_full(struct request_list *rl, bool sync) { - if (sync) - queue_flag_set(QUEUE_FLAG_SYNCFULL, q); - else - queue_flag_set(QUEUE_FLAG_ASYNCFULL, q); + unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; + + rl->flags |= flag; } -static inline void blk_clear_queue_full(struct request_queue *q, int sync) +static inline void blk_clear_rl_full(struct request_list *rl, bool sync) { - if (sync) - queue_flag_clear(QUEUE_FLAG_SYNCFULL, q); - else - queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q); + unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; + + rl->flags &= ~flag; } @@ -911,11 +922,15 @@ struct blk_plug { }; #define BLK_MAX_REQUEST_COUNT 16 +struct blk_plug_cb; +typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool); struct blk_plug_cb { struct list_head list; - void (*callback)(struct blk_plug_cb *); + blk_plug_cb_fn callback; + void *data; }; - +extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, + void *data, int size); extern void blk_start_plug(struct blk_plug *); extern void blk_finish_plug(struct blk_plug *); extern void blk_flush_plug_list(struct blk_plug *, bool); diff --git a/include/linux/blkpg.h b/include/linux/blkpg.h index faf8a45af21..a8519446c11 100644 --- a/include/linux/blkpg.h +++ b/include/linux/blkpg.h @@ -40,6 +40,7 @@ struct blkpg_ioctl_arg { /* The subfunctions (for the op field) */ #define BLKPG_ADD_PARTITION 1 #define BLKPG_DEL_PARTITION 2 +#define BLKPG_RESIZE_PARTITION 3 /* Sizes of name fields. Unused at present. 
*/ #define BLKPG_DEVNAMELTH 64 diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h index f55ab8cdc10..4d0fb3df2f4 100644 --- a/include/linux/bsg-lib.h +++ b/include/linux/bsg-lib.h @@ -67,7 +67,6 @@ void bsg_job_done(struct bsg_job *job, int result, int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name, bsg_job_fn *job_fn, int dd_job_size); void bsg_request_fn(struct request_queue *q); -void bsg_remove_queue(struct request_queue *q); void bsg_goose_queue(struct request_queue *q); #endif diff --git a/include/linux/fs.h b/include/linux/fs.h index d7eed5b98ae..38dba16c417 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -414,6 +414,7 @@ struct inodes_stat_t { #include <linux/shrinker.h> #include <linux/migrate_mode.h> #include <linux/uidgid.h> +#include <linux/lockdep.h> #include <asm/byteorder.h> @@ -440,6 +441,8 @@ extern unsigned long get_max_files(void); extern int sysctl_nr_open; extern struct inodes_stat_t inodes_stat; extern int leases_enable, lease_break_time; +extern int sysctl_protected_symlinks; +extern int sysctl_protected_hardlinks; struct buffer_head; typedef int (get_block_t)(struct inode *inode, sector_t iblock, @@ -1162,7 +1165,6 @@ struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock *, struct file_lock *); void (*lm_notify)(struct file_lock *); /* unblock callback */ int (*lm_grant)(struct file_lock *, struct file_lock *, int); - void (*lm_release_private)(struct file_lock *); void (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock **, int); }; @@ -1446,6 +1448,8 @@ extern void f_delown(struct file *filp); extern pid_t f_getown(struct file *filp); extern int send_sigurg(struct fown_struct *fown); +struct mm_struct; + /* * Umount options */ @@ -1459,6 +1463,31 @@ extern int send_sigurg(struct fown_struct *fown); extern struct list_head super_blocks; extern spinlock_t sb_lock; +/* Possible states of 'frozen' field */ +enum { + SB_UNFROZEN = 0, /* FS is unfrozen */ + SB_FREEZE_WRITE = 1, /* Writes, dir ops, ioctls frozen */ + SB_FREEZE_PAGEFAULT = 2, /* Page faults stopped as well */ + SB_FREEZE_FS = 3, /* For internal FS use (e.g. to stop + * internal threads if needed) */ + SB_FREEZE_COMPLETE = 4, /* ->freeze_fs finished successfully */ +}; + +#define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1) + +struct sb_writers { + /* Counters for counting writers at each level */ + struct percpu_counter counter[SB_FREEZE_LEVELS]; + wait_queue_head_t wait; /* queue for waiting for + writers / faults to finish */ + int frozen; /* Is sb frozen? */ + wait_queue_head_t wait_unfrozen; /* queue for waiting for + sb to be thawed */ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map lock_map[SB_FREEZE_LEVELS]; +#endif +}; + struct super_block { struct list_head s_list; /* Keep this first */ dev_t s_dev; /* search index; _not_ kdev_t */ @@ -1506,8 +1535,7 @@ struct super_block { struct hlist_node s_instances; struct quota_info s_dquot; /* Diskquota specific options */ - int s_frozen; - wait_queue_head_t s_wait_unfrozen; + struct sb_writers s_writers; char s_id[32]; /* Informational name */ u8 s_uuid[16]; /* UUID */ @@ -1562,14 +1590,117 @@ extern struct timespec current_fs_time(struct super_block *sb); /* * Snapshotting support. 
*/ -enum { - SB_UNFROZEN = 0, - SB_FREEZE_WRITE = 1, - SB_FREEZE_TRANS = 2, -}; -#define vfs_check_frozen(sb, level) \ - wait_event((sb)->s_wait_unfrozen, ((sb)->s_frozen < (level))) +void __sb_end_write(struct super_block *sb, int level); +int __sb_start_write(struct super_block *sb, int level, bool wait); + +/** + * sb_end_write - drop write access to a superblock + * @sb: the super we wrote to + * + * Decrement number of writers to the filesystem. Wake up possible waiters + * wanting to freeze the filesystem. + */ +static inline void sb_end_write(struct super_block *sb) +{ + __sb_end_write(sb, SB_FREEZE_WRITE); +} + +/** + * sb_end_pagefault - drop write access to a superblock from a page fault + * @sb: the super we wrote to + * + * Decrement number of processes handling write page fault to the filesystem. + * Wake up possible waiters wanting to freeze the filesystem. + */ +static inline void sb_end_pagefault(struct super_block *sb) +{ + __sb_end_write(sb, SB_FREEZE_PAGEFAULT); +} + +/** + * sb_end_intwrite - drop write access to a superblock for internal fs purposes + * @sb: the super we wrote to + * + * Decrement fs-internal number of writers to the filesystem. Wake up possible + * waiters wanting to freeze the filesystem. + */ +static inline void sb_end_intwrite(struct super_block *sb) +{ + __sb_end_write(sb, SB_FREEZE_FS); +} + +/** + * sb_start_write - get write access to a superblock + * @sb: the super we write to + * + * When a process wants to write data or metadata to a file system (i.e. dirty + * a page or an inode), it should embed the operation in a sb_start_write() - + * sb_end_write() pair to get exclusion against file system freezing. This + * function increments number of writers preventing freezing. If the file + * system is already frozen, the function waits until the file system is + * thawed. + * + * Since freeze protection behaves as a lock, users have to preserve + * ordering of freeze protection and other filesystem locks. Generally, + * freeze protection should be the outermost lock. In particular, we have: + * + * sb_start_write + * -> i_mutex (write path, truncate, directory ops, ...) + * -> s_umount (freeze_super, thaw_super) + */ +static inline void sb_start_write(struct super_block *sb) +{ + __sb_start_write(sb, SB_FREEZE_WRITE, true); +} + +static inline int sb_start_write_trylock(struct super_block *sb) +{ + return __sb_start_write(sb, SB_FREEZE_WRITE, false); +} + +/** + * sb_start_pagefault - get write access to a superblock from a page fault + * @sb: the super we write to + * + * When a process starts handling write page fault, it should embed the + * operation into sb_start_pagefault() - sb_end_pagefault() pair to get + * exclusion against file system freezing. This is needed since the page fault + * is going to dirty a page. This function increments number of running page + * faults preventing freezing. If the file system is already frozen, the + * function waits until the file system is thawed. + * + * Since page fault freeze protection behaves as a lock, users have to preserve + * ordering of freeze protection and other filesystem locks. It is advised to + * put sb_start_pagefault() close to mmap_sem in lock ordering. 
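+ * For illustration (a sketch, not part of this patch), a ->page_mkwrite()
+ * handler built on this API takes roughly the shape of the
+ * filemap_page_mkwrite() added in mm/filemap.c below:
+ *
+ *	sb_start_pagefault(inode->i_sb);
+ *	file_update_time(vma->vm_file);
+ *	lock_page(page);
+ *	... check the mapping and mark the page dirty ...
+ *	sb_end_pagefault(inode->i_sb);
+ *	return VM_FAULT_LOCKED;
+ *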
Page fault + * handling code implies lock dependency: + * + * mmap_sem + * -> sb_start_pagefault + */ +static inline void sb_start_pagefault(struct super_block *sb) +{ + __sb_start_write(sb, SB_FREEZE_PAGEFAULT, true); +} + +/* + * sb_start_intwrite - get write access to a superblock for internal fs purposes + * @sb: the super we write to + * + * This is the third level of protection against filesystem freezing. It is + * free for use by a filesystem. The only requirement is that it must rank + * below sb_start_pagefault. + * + * For example, a filesystem can call sb_start_intwrite() when starting a + * transaction which somewhat eases handling of freezing for internal sources + * of filesystem changes (internal fs threads, discarding preallocation on file + * close, etc.). + */ +static inline void sb_start_intwrite(struct super_block *sb) +{ + __sb_start_write(sb, SB_FREEZE_FS, true); +} + extern bool inode_owner_or_capable(const struct inode *inode); @@ -1893,6 +2024,7 @@ struct file_system_type { struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; struct lock_class_key s_vfs_rename_key; + struct lock_class_key s_writers_key[SB_FREEZE_LEVELS]; struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; @@ -2335,9 +2467,6 @@ static inline void i_readcount_inc(struct inode *inode) } #endif extern int do_pipe_flags(int *, int); -extern struct file *create_read_pipe(struct file *f, int flags); -extern struct file *create_write_pipe(int flags); -extern void free_write_pipe(struct file *); extern int kernel_read(struct file *, loff_t, char *, unsigned long); extern struct file * open_exec(const char *); diff --git a/include/linux/genhd.h b/include/linux/genhd.h index ae0aaa9d42f..4f440b3e89f 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -97,7 +97,13 @@ struct partition_meta_info { struct hd_struct { sector_t start_sect; + /* + * nr_sects is protected by sequence counter. One might extend a + * partition while IO is happening to it and update of nr_sects + * can be non-atomic on 32bit machines with 64bit sector_t. + */ sector_t nr_sects; + seqcount_t nr_sects_seq; sector_t alignment_offset; unsigned int discard_alignment; struct device __dev; @@ -647,6 +653,57 @@ static inline void hd_struct_put(struct hd_struct *part) __delete_partition(part); } +/* + * Any access of part->nr_sects which is not protected by partition + * bd_mutex or gendisk bdev bd_mutex should be done using this + * accessor function. + * + * Code written along the lines of i_size_read() and i_size_write(). + * CONFIG_PREEMPT case optimizes the case of UP kernel with preemption + * on. + */ +static inline sector_t part_nr_sects_read(struct hd_struct *part) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_SMP) + sector_t nr_sects; + unsigned seq; + do { + seq = read_seqcount_begin(&part->nr_sects_seq); + nr_sects = part->nr_sects; + } while (read_seqcount_retry(&part->nr_sects_seq, seq)); + return nr_sects; +#elif BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_PREEMPT) + sector_t nr_sects; + + preempt_disable(); + nr_sects = part->nr_sects; + preempt_enable(); + return nr_sects; +#else + return part->nr_sects; +#endif +} + +/* + * Should be called with mutex lock held (typically bd_mutex) of partition + * to provide mutual exclusion among writers, otherwise seqcount might be + * left in wrong state leaving the readers spinning infinitely.
+ */ +static inline void part_nr_sects_write(struct hd_struct *part, sector_t size) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_SMP) + write_seqcount_begin(&part->nr_sects_seq); + part->nr_sects = size; + write_seqcount_end(&part->nr_sects_seq); +#elif BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_PREEMPT) + preempt_disable(); + part->nr_sects = size; + preempt_enable(); +#else + part->nr_sects = size; +#endif +} + #else /* CONFIG_BLOCK */ static inline void printk_all_partitions(void) { } diff --git a/include/linux/mempool.h b/include/linux/mempool.h index 7c08052e332..39ed62ab5b8 100644 --- a/include/linux/mempool.h +++ b/include/linux/mempool.h @@ -26,7 +26,8 @@ typedef struct mempool_s { extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data); extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, - mempool_free_t *free_fn, void *pool_data, int nid); + mempool_free_t *free_fn, void *pool_data, + gfp_t gfp_mask, int nid); extern int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask); extern void mempool_destroy(mempool_t *pool); diff --git a/include/linux/mm.h b/include/linux/mm.h index bd079a1b0fd..311be906b57 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1441,6 +1441,7 @@ extern void truncate_inode_pages_range(struct address_space *, /* generic vm_area_ops exported for stackable file systems */ extern int filemap_fault(struct vm_area_struct *, struct vm_fault *); +extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); /* mm/page-writeback.c */ int write_one_page(struct page *page, int wait); diff --git a/include/linux/namei.h b/include/linux/namei.h index d2ef8b34b96..4bf19d8174e 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -67,6 +67,7 @@ extern int kern_path(const char *, unsigned, struct path *); extern struct dentry *kern_path_create(int, const char *, struct path *, int); extern struct dentry *user_path_create(int, const char __user *, struct path *, int); +extern void done_path_create(struct path *, struct dentry *); extern struct dentry *kern_path_locked(const char *, struct path *); extern int vfs_path_lookup(struct dentry *, struct vfsmount *, const char *, unsigned int, struct path *); diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h index ce4743a2601..fa63048fecf 100644 --- a/include/linux/nfsd/nfsfh.h +++ b/include/linux/nfsd/nfsfh.h @@ -143,6 +143,7 @@ typedef struct svc_fh { int fh_maxsize; /* max size for fh_handle */ unsigned char fh_locked; /* inode locked by us */ + unsigned char fh_want_write; /* remount protection taken */ #ifdef CONFIG_NFSD_V3 unsigned char fh_post_saved; /* post-op attrs saved */ diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h new file mode 100644 index 00000000000..eb475a8ea25 --- /dev/null +++ b/include/linux/omap-dma.h @@ -0,0 +1,22 @@ +/* + * OMAP DMA Engine support + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef __LINUX_OMAP_DMA_H +#define __LINUX_OMAP_DMA_H + +struct dma_chan; + +#if defined(CONFIG_DMA_OMAP) || defined(CONFIG_DMA_OMAP_MODULE) +bool omap_dma_filter_fn(struct dma_chan *, void *); +#else +static inline bool omap_dma_filter_fn(struct dma_chan *c, void *d) +{ + return false; +} +#endif + +#endif diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index e11d1c0fc60..ad1a427b526 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h @@ -160,4 +160,6 @@ void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *); long pipe_fcntl(struct file *, unsigned int, unsigned long arg); struct pipe_inode_info *get_pipe_info(struct file *file); +int create_pipe_files(struct file **, int); + #endif diff --git a/include/sound/es1688.h b/include/sound/es1688.h index 3ec7ecbe250..f752dd33dfa 100644 --- a/include/sound/es1688.h +++ b/include/sound/es1688.h @@ -29,6 +29,7 @@ #define ES1688_HW_AUTO 0x0000 #define ES1688_HW_688 0x0001 #define ES1688_HW_1688 0x0002 +#define ES1688_HW_UNDEF 0x0003 struct snd_es1688 { unsigned long port; /* port of ESS chip */ diff --git a/include/video/da8xx-fb.h b/include/video/da8xx-fb.h index 89d43b3d4cb..5a0e4f9efb5 100644 --- a/include/video/da8xx-fb.h +++ b/include/video/da8xx-fb.h @@ -82,6 +82,9 @@ struct lcd_ctrl_config { /* Raster Data Order Select: 1=Most-to-least 0=Least-to-most */ unsigned char raster_order; + + /* DMA FIFO threshold */ + int fifo_th; }; struct lcd_sync_arg { diff --git a/include/video/omapdss.h b/include/video/omapdss.h index c8e59b4a336..a6267a2d292 100644 --- a/include/video/omapdss.h +++ b/include/video/omapdss.h @@ -48,6 +48,10 @@ #define DISPC_IRQ_FRAMEDONEWB (1 << 23) #define DISPC_IRQ_FRAMEDONETV (1 << 24) #define DISPC_IRQ_WBBUFFEROVERFLOW (1 << 25) +#define DISPC_IRQ_FRAMEDONE3 (1 << 26) +#define DISPC_IRQ_VSYNC3 (1 << 27) +#define DISPC_IRQ_ACBIAS_COUNT_STAT3 (1 << 28) +#define DISPC_IRQ_SYNC_LOST3 (1 << 29) struct omap_dss_device; struct omap_overlay_manager; @@ -75,6 +79,7 @@ enum omap_channel { OMAP_DSS_CHANNEL_LCD = 0, OMAP_DSS_CHANNEL_DIGIT = 1, OMAP_DSS_CHANNEL_LCD2 = 2, + OMAP_DSS_CHANNEL_LCD3 = 3, }; enum omap_color_mode { @@ -99,11 +104,6 @@ enum omap_color_mode { OMAP_DSS_COLOR_XRGB16_1555 = 1 << 18, /* xRGB16 - 1555 */ }; -enum omap_lcd_display_type { - OMAP_DSS_LCD_DISPLAY_STN, - OMAP_DSS_LCD_DISPLAY_TFT, -}; - enum omap_dss_load_mode { OMAP_DSS_LOAD_CLUT_AND_FRAME = 0, OMAP_DSS_LOAD_CLUT_ONLY = 1, @@ -121,15 +121,15 @@ enum omap_rfbi_te_mode { OMAP_DSS_RFBI_TE_MODE_2 = 2, }; -enum omap_panel_config { - OMAP_DSS_LCD_IVS = 1<<0, - OMAP_DSS_LCD_IHS = 1<<1, - OMAP_DSS_LCD_IPC = 1<<2, - OMAP_DSS_LCD_IEO = 1<<3, - OMAP_DSS_LCD_RF = 1<<4, - OMAP_DSS_LCD_ONOFF = 1<<5, +enum omap_dss_signal_level { + OMAPDSS_SIG_ACTIVE_HIGH = 0, + OMAPDSS_SIG_ACTIVE_LOW = 1, +}; - OMAP_DSS_LCD_TFT = 1<<20, +enum omap_dss_signal_edge { + OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, + OMAPDSS_DRIVE_SIG_RISING_EDGE, + OMAPDSS_DRIVE_SIG_FALLING_EDGE, }; enum omap_dss_venc_type { @@ -167,13 +167,6 @@ enum omap_dss_audio_state { OMAP_DSS_AUDIO_PLAYING, }; -/* XXX perhaps this should be removed */ -enum omap_dss_overlay_managers { - OMAP_DSS_OVL_MGR_LCD, - OMAP_DSS_OVL_MGR_TV, - OMAP_DSS_OVL_MGR_LCD2, -}; - enum omap_dss_rotation_type { OMAP_DSS_ROT_DMA = 1 << 0, OMAP_DSS_ROT_VRFB = 1 << 1, @@ -268,9 +261,6 @@ struct omap_dss_dsi_videomode_data { int hfp_blanking_mode; /* Video port sync events */ - int vp_de_pol; - int vp_hsync_pol; - int vp_vsync_pol; bool vp_vsync_end; bool vp_hsync_end; @@ -346,6 
+336,19 @@ struct omap_video_timings { u16 vfp; /* Vertical front porch */ /* Unit: line clocks */ u16 vbp; /* Vertical back porch */ + + /* Vsync logic level */ + enum omap_dss_signal_level vsync_level; + /* Hsync logic level */ + enum omap_dss_signal_level hsync_level; + /* Interlaced or Progressive timings */ + bool interlace; + /* Pixel clock edge to drive LCD data */ + enum omap_dss_signal_edge data_pclk_edge; + /* Data enable logic level */ + enum omap_dss_signal_level de_level; + /* Pixel clock edges to drive HSYNC and VSYNC signals */ + enum omap_dss_signal_edge sync_pclk_edge; }; #ifdef CONFIG_OMAP2_DSS_VENC @@ -559,8 +562,6 @@ struct omap_dss_device { /* Unit: line clocks */ int acb; /* ac-bias pin frequency */ - enum omap_panel_config config; - enum omap_dss_dsi_pixel_format dsi_pix_fmt; enum omap_dss_dsi_mode dsi_mode; struct omap_dss_dsi_videomode_data dsi_vm_data; diff --git a/include/video/sh_mobile_lcdc.h b/include/video/sh_mobile_lcdc.h index 7571b27a0ba..ff43ffc1aab 100644 --- a/include/video/sh_mobile_lcdc.h +++ b/include/video/sh_mobile_lcdc.h @@ -166,6 +166,12 @@ struct sh_mobile_lcdc_bl_info { int (*get_brightness)(void); }; +struct sh_mobile_lcdc_overlay_cfg { + int fourcc; + unsigned int max_xres; + unsigned int max_yres; +}; + struct sh_mobile_lcdc_chan_cfg { int chan; int fourcc; @@ -186,6 +192,7 @@ struct sh_mobile_lcdc_chan_cfg { struct sh_mobile_lcdc_info { int clock_source; struct sh_mobile_lcdc_chan_cfg ch[2]; + struct sh_mobile_lcdc_overlay_cfg overlays[4]; struct sh_mobile_meram_info *meram_dev; }; diff --git a/include/video/sh_mobile_meram.h b/include/video/sh_mobile_meram.h index 29b2fd3b147..062e6e7f955 100644 --- a/include/video/sh_mobile_meram.h +++ b/include/video/sh_mobile_meram.h @@ -15,7 +15,6 @@ enum { struct sh_mobile_meram_priv; -struct sh_mobile_meram_ops; /* * struct sh_mobile_meram_info - MERAM platform data @@ -24,7 +23,6 @@ struct sh_mobile_meram_ops; struct sh_mobile_meram_info { int addr_mode; u32 reserved_icbs; - struct sh_mobile_meram_ops *ops; struct sh_mobile_meram_priv *priv; struct platform_device *pdev; }; @@ -38,26 +36,59 @@ struct sh_mobile_meram_cfg { struct sh_mobile_meram_icb_cfg icb[2]; }; -struct module; -struct sh_mobile_meram_ops { - struct module *module; - /* register usage of meram */ - void *(*meram_register)(struct sh_mobile_meram_info *meram_dev, - const struct sh_mobile_meram_cfg *cfg, - unsigned int xres, unsigned int yres, - unsigned int pixelformat, - unsigned int *pitch); - - /* unregister usage of meram */ - void (*meram_unregister)(struct sh_mobile_meram_info *meram_dev, - void *data); - - /* update meram settings */ - void (*meram_update)(struct sh_mobile_meram_info *meram_dev, void *data, +#if defined(CONFIG_FB_SH_MOBILE_MERAM) || \ + defined(CONFIG_FB_SH_MOBILE_MERAM_MODULE) +unsigned long sh_mobile_meram_alloc(struct sh_mobile_meram_info *meram_dev, + size_t size); +void sh_mobile_meram_free(struct sh_mobile_meram_info *meram_dev, + unsigned long mem, size_t size); +void *sh_mobile_meram_cache_alloc(struct sh_mobile_meram_info *dev, + const struct sh_mobile_meram_cfg *cfg, + unsigned int xres, unsigned int yres, + unsigned int pixelformat, + unsigned int *pitch); +void sh_mobile_meram_cache_free(struct sh_mobile_meram_info *dev, void *data); +void sh_mobile_meram_cache_update(struct sh_mobile_meram_info *dev, void *data, + unsigned long base_addr_y, + unsigned long base_addr_c, + unsigned long *icb_addr_y, + unsigned long *icb_addr_c); +#else +static inline unsigned long +sh_mobile_meram_alloc(struct 
sh_mobile_meram_info *meram_dev, size_t size) +{ + return 0; +} + +static inline void +sh_mobile_meram_free(struct sh_mobile_meram_info *meram_dev, + unsigned long mem, size_t size) +{ +} + +static inline void * +sh_mobile_meram_cache_alloc(struct sh_mobile_meram_info *dev, + const struct sh_mobile_meram_cfg *cfg, + unsigned int xres, unsigned int yres, + unsigned int pixelformat, + unsigned int *pitch) +{ + return ERR_PTR(-ENODEV); +} + +static inline void +sh_mobile_meram_cache_free(struct sh_mobile_meram_info *dev, void *data) +{ +} + +static inline void +sh_mobile_meram_cache_update(struct sh_mobile_meram_info *dev, void *data, unsigned long base_addr_y, unsigned long base_addr_c, unsigned long *icb_addr_y, - unsigned long *icb_addr_c); -}; + unsigned long *icb_addr_c) +{ +} +#endif #endif /* __VIDEO_SH_MOBILE_MERAM_H__ */ diff --git a/kernel/audit.c b/kernel/audit.c index 4a3f28d2ca6..ea3b7b6191c 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -1456,6 +1456,27 @@ void audit_log_key(struct audit_buffer *ab, char *key) } /** + * audit_log_link_denied - report a link restriction denial + * @operation: specific link operation + * @link: the path that triggered the restriction + */ +void audit_log_link_denied(const char *operation, struct path *link) +{ + struct audit_buffer *ab; + + ab = audit_log_start(current->audit_context, GFP_KERNEL, + AUDIT_ANOM_LINK); + audit_log_format(ab, "op=%s action=denied", operation); + audit_log_format(ab, " pid=%d comm=", current->pid); + audit_log_untrustedstring(ab, current->comm); + audit_log_d_path(ab, " path=", link); + audit_log_format(ab, " dev="); + audit_log_untrustedstring(ab, link->dentry->d_inode->i_sb->s_id); + audit_log_format(ab, " ino=%lu", link->dentry->d_inode->i_ino); + audit_log_end(ab); +} + +/** * audit_log_end - end one audit record * @ab: the audit_buffer * diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 6502d35a25b..87174ef5916 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -1498,6 +1498,24 @@ static struct ctl_table fs_table[] = { #endif #endif { + .procname = "protected_symlinks", + .data = &sysctl_protected_symlinks, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &one, + }, + { + .procname = "protected_hardlinks", + .data = &sysctl_protected_hardlinks, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &one, + }, + { .procname = "suid_dumpable", .data = &suid_dumpable, .maxlen = sizeof(int), diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c index f8a3f1a829b..ba6085d9c74 100644 --- a/lib/percpu_counter.c +++ b/lib/percpu_counter.c @@ -12,7 +12,7 @@ #ifdef CONFIG_HOTPLUG_CPU static LIST_HEAD(percpu_counters); -static DEFINE_MUTEX(percpu_counters_lock); +static DEFINE_SPINLOCK(percpu_counters_lock); #endif #ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER @@ -123,9 +123,9 @@ int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, #ifdef CONFIG_HOTPLUG_CPU INIT_LIST_HEAD(&fbc->list); - mutex_lock(&percpu_counters_lock); + spin_lock(&percpu_counters_lock); list_add(&fbc->list, &percpu_counters); - mutex_unlock(&percpu_counters_lock); + spin_unlock(&percpu_counters_lock); #endif return 0; } @@ -139,9 +139,9 @@ void percpu_counter_destroy(struct percpu_counter *fbc) debug_percpu_counter_deactivate(fbc); #ifdef CONFIG_HOTPLUG_CPU - mutex_lock(&percpu_counters_lock); + spin_lock(&percpu_counters_lock); list_del(&fbc->list); - mutex_unlock(&percpu_counters_lock); +
spin_unlock(&percpu_counters_lock); #endif free_percpu(fbc->counters); fbc->counters = NULL; @@ -170,7 +170,7 @@ static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb, return NOTIFY_OK; cpu = (unsigned long)hcpu; - mutex_lock(&percpu_counters_lock); + spin_lock(&percpu_counters_lock); list_for_each_entry(fbc, &percpu_counters, list) { s32 *pcount; unsigned long flags; @@ -181,7 +181,7 @@ static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb, *pcount = 0; raw_spin_unlock_irqrestore(&fbc->lock, flags); } - mutex_unlock(&percpu_counters_lock); + spin_unlock(&percpu_counters_lock); #endif return NOTIFY_OK; } diff --git a/mm/filemap.c b/mm/filemap.c index a4a5260b027..fa5ca304148 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1712,8 +1712,35 @@ page_not_uptodate: } EXPORT_SYMBOL(filemap_fault); +int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct page *page = vmf->page; + struct inode *inode = vma->vm_file->f_path.dentry->d_inode; + int ret = VM_FAULT_LOCKED; + + sb_start_pagefault(inode->i_sb); + file_update_time(vma->vm_file); + lock_page(page); + if (page->mapping != inode->i_mapping) { + unlock_page(page); + ret = VM_FAULT_NOPAGE; + goto out; + } + /* + * We mark the page dirty already here so that when freeze is in + * progress, we are guaranteed that writeback during freezing will + * see the dirty page and writeprotect it again. + */ + set_page_dirty(page); +out: + sb_end_pagefault(inode->i_sb); + return ret; +} +EXPORT_SYMBOL(filemap_page_mkwrite); + const struct vm_operations_struct generic_file_vm_ops = { .fault = filemap_fault, + .page_mkwrite = filemap_page_mkwrite, }; /* This is used for a general mmap of a disk file */ @@ -2407,8 +2434,6 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov, count = ocount; pos = *ppos; - vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); - /* We can write back this queue in page reclaim */ current->backing_dev_info = mapping->backing_dev_info; written = 0; @@ -2507,6 +2532,7 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov, BUG_ON(iocb->ki_pos != pos); + sb_start_write(inode->i_sb); mutex_lock(&inode->i_mutex); blk_start_plug(&plug); ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos); @@ -2520,6 +2546,7 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov, ret = err; } blk_finish_plug(&plug); + sb_end_write(inode->i_sb); return ret; } EXPORT_SYMBOL(generic_file_aio_write); diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c index 213ca1f5340..13e013b1270 100644 --- a/mm/filemap_xip.c +++ b/mm/filemap_xip.c @@ -304,6 +304,7 @@ out: static const struct vm_operations_struct xip_file_vm_ops = { .fault = xip_file_fault, + .page_mkwrite = filemap_page_mkwrite, }; int xip_file_mmap(struct file * file, struct vm_area_struct * vma) @@ -401,6 +402,8 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len, loff_t pos; ssize_t ret; + sb_start_write(inode->i_sb); + mutex_lock(&inode->i_mutex); if (!access_ok(VERIFY_READ, buf, len)) { @@ -411,8 +414,6 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len, pos = *ppos; count = len; - vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); - /* We can write back this queue in page reclaim */ current->backing_dev_info = mapping->backing_dev_info; @@ -436,6 +437,7 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len, current->backing_dev_info = NULL; out_up: mutex_unlock(&inode->i_mutex); + 
sb_end_write(inode->i_sb); return ret; } EXPORT_SYMBOL_GPL(xip_file_write); diff --git a/mm/memory.c b/mm/memory.c index 482f089765f..57361708d1a 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2650,6 +2650,9 @@ reuse: if (!page_mkwrite) { wait_on_page_locked(dirty_page); set_page_dirty_balance(dirty_page, page_mkwrite); + /* file_update_time outside page_lock */ + if (vma->vm_file) + file_update_time(vma->vm_file); } put_page(dirty_page); if (page_mkwrite) { @@ -2667,10 +2670,6 @@ reuse: } } - /* file_update_time outside page_lock */ - if (vma->vm_file) - file_update_time(vma->vm_file); - return ret; } @@ -3339,12 +3338,13 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, if (dirty_page) { struct address_space *mapping = page->mapping; + int dirtied = 0; if (set_page_dirty(dirty_page)) - page_mkwrite = 1; + dirtied = 1; unlock_page(dirty_page); put_page(dirty_page); - if (page_mkwrite && mapping) { + if ((dirtied || page_mkwrite) && mapping) { /* * Some device drivers do not set page.mapping but still * dirty their pages @@ -3353,7 +3353,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, } /* file_update_time outside page_lock */ - if (vma->vm_file) + if (vma->vm_file && !page_mkwrite) file_update_time(vma->vm_file); } else { unlock_page(vmf.page); diff --git a/mm/mempool.c b/mm/mempool.c index d9049811f35..54990476c04 100644 --- a/mm/mempool.c +++ b/mm/mempool.c @@ -63,19 +63,21 @@ EXPORT_SYMBOL(mempool_destroy); mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data) { - return mempool_create_node(min_nr,alloc_fn,free_fn, pool_data,-1); + return mempool_create_node(min_nr,alloc_fn,free_fn, pool_data, + GFP_KERNEL, NUMA_NO_NODE); } EXPORT_SYMBOL(mempool_create); mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, - mempool_free_t *free_fn, void *pool_data, int node_id) + mempool_free_t *free_fn, void *pool_data, + gfp_t gfp_mask, int node_id) { mempool_t *pool; - pool = kmalloc_node(sizeof(*pool), GFP_KERNEL | __GFP_ZERO, node_id); + pool = kmalloc_node(sizeof(*pool), gfp_mask | __GFP_ZERO, node_id); if (!pool) return NULL; pool->elements = kmalloc_node(min_nr * sizeof(void *), - GFP_KERNEL, node_id); + gfp_mask, node_id); if (!pool->elements) { kfree(pool); return NULL; @@ -93,7 +95,7 @@ mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, while (pool->curr_nr < pool->min_nr) { void *element; - element = pool->alloc(GFP_KERNEL, pool->pool_data); + element = pool->alloc(gfp_mask, pool->pool_data); if (unlikely(!element)) { mempool_destroy(pool); return NULL; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 889532b8e6c..009ac285fea 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4511,7 +4511,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size, pg_data_t *pgdat = NODE_DATA(nid); /* pg_data_t should be reset to zero when it's allocated */ - WARN_ON(pgdat->nr_zones || pgdat->node_start_pfn || pgdat->classzone_idx); + WARN_ON(pgdat->nr_zones || pgdat->classzone_idx); pgdat->node_id = nid; pgdat->node_start_pfn = node_start_pfn; diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c index b780cb7947d..9da7fdd3cd8 100644 --- a/net/ceph/crypto.c +++ b/net/ceph/crypto.c @@ -466,6 +466,7 @@ void ceph_key_destroy(struct key *key) { struct ceph_crypto_key *ckey = key->payload.data; ceph_crypto_key_destroy(ckey); + kfree(ckey); } struct key_type key_type_ceph = { diff --git a/net/ceph/crypto.h b/net/ceph/crypto.h index 
1919d1550d7..3572dc518bc 100644 --- a/net/ceph/crypto.h +++ b/net/ceph/crypto.h @@ -16,7 +16,8 @@ struct ceph_crypto_key { static inline void ceph_crypto_key_destroy(struct ceph_crypto_key *key) { - kfree(key->key); + if (key) + kfree(key->key); } extern int ceph_crypto_key_clone(struct ceph_crypto_key *dst, diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 79981d97bc9..e4768c180da 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -823,6 +823,34 @@ fail: return NULL; } +static int unix_mknod(const char *sun_path, umode_t mode, struct path *res) +{ + struct dentry *dentry; + struct path path; + int err = 0; + /* + * Get the parent directory, calculate the hash for last + * component. + */ + dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0); + err = PTR_ERR(dentry); + if (IS_ERR(dentry)) + return err; + + /* + * All right, let's create it. + */ + err = security_path_mknod(&path, dentry, mode, 0); + if (!err) { + err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0); + if (!err) { + res->mnt = mntget(path.mnt); + res->dentry = dget(dentry); + } + } + done_path_create(&path, dentry); + return err; +} static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { @@ -831,8 +859,6 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) struct unix_sock *u = unix_sk(sk); struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr; char *sun_path = sunaddr->sun_path; - struct dentry *dentry = NULL; - struct path path; int err; unsigned int hash; struct unix_address *addr; @@ -869,43 +895,23 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) atomic_set(&addr->refcnt, 1); if (sun_path[0]) { - umode_t mode; - err = 0; - /* - * Get the parent directory, calculate the hash for last - * component. - */ - dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0); - err = PTR_ERR(dentry); - if (IS_ERR(dentry)) - goto out_mknod_parent; - - /* - * All right, let's create it. 
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 79981d97bc9..e4768c180da 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -823,6 +823,34 @@ fail:
 	return NULL;
 }
 
+static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
+{
+	struct dentry *dentry;
+	struct path path;
+	int err = 0;
+	/*
+	 * Get the parent directory, calculate the hash for last
+	 * component.
+	 */
+	dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
+	err = PTR_ERR(dentry);
+	if (IS_ERR(dentry))
+		return err;
+
+	/*
+	 * All right, let's create it.
+	 */
+	err = security_path_mknod(&path, dentry, mode, 0);
+	if (!err) {
+		err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
+		if (!err) {
+			res->mnt = mntget(path.mnt);
+			res->dentry = dget(dentry);
+		}
+	}
+	done_path_create(&path, dentry);
+	return err;
+}
+
 static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 {
@@ -831,8 +859,6 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	struct unix_sock *u = unix_sk(sk);
 	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
 	char *sun_path = sunaddr->sun_path;
-	struct dentry *dentry = NULL;
-	struct path path;
 	int err;
 	unsigned int hash;
 	struct unix_address *addr;
@@ -869,43 +895,23 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	atomic_set(&addr->refcnt, 1);
 
 	if (sun_path[0]) {
-		umode_t mode;
-		err = 0;
-		/*
-		 * Get the parent directory, calculate the hash for last
-		 * component.
-		 */
-		dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
-		err = PTR_ERR(dentry);
-		if (IS_ERR(dentry))
-			goto out_mknod_parent;
-
-		/*
-		 * All right, let's create it.
-		 */
-		mode = S_IFSOCK |
+		struct path path;
+		umode_t mode = S_IFSOCK |
 		       (SOCK_INODE(sock)->i_mode & ~current_umask());
-		err = mnt_want_write(path.mnt);
-		if (err)
-			goto out_mknod_dput;
-		err = security_path_mknod(&path, dentry, mode, 0);
-		if (err)
-			goto out_mknod_drop_write;
-		err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
-out_mknod_drop_write:
-		mnt_drop_write(path.mnt);
-		if (err)
-			goto out_mknod_dput;
-		mutex_unlock(&path.dentry->d_inode->i_mutex);
-		dput(path.dentry);
-		path.dentry = dentry;
-
+		err = unix_mknod(sun_path, mode, &path);
+		if (err) {
+			if (err == -EEXIST)
+				err = -EADDRINUSE;
+			unix_release_addr(addr);
+			goto out_up;
+		}
 		addr->hash = UNIX_HASH_SIZE;
-	}
-
-	spin_lock(&unix_table_lock);
-
-	if (!sun_path[0]) {
+		hash = path.dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1);
+		spin_lock(&unix_table_lock);
+		u->path = path;
+		list = &unix_socket_table[hash];
+	} else {
+		spin_lock(&unix_table_lock);
 		err = -EADDRINUSE;
 		if (__unix_find_socket_byname(net, sunaddr, addr_len,
 					      sk->sk_type, hash)) {
@@ -914,9 +920,6 @@ out_mknod_drop_write:
 		}
 
 		list = &unix_socket_table[addr->hash];
-	} else {
-		list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
-		u->path = path;
 	}
 
 	err = 0;
@@ -930,16 +933,6 @@ out_up:
 	mutex_unlock(&u->readlock);
 out:
 	return err;
-
-out_mknod_dput:
-	dput(dentry);
-	mutex_unlock(&path.dentry->d_inode->i_mutex);
-	path_put(&path);
-out_mknod_parent:
-	if (err == -EEXIST)
-		err = -EADDRINUSE;
-	unix_release_addr(addr);
-	goto out_up;
 }
 
 static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
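The refactored unix_bind() keeps the old userspace contract: when vfs_mknod()
finds the filesystem node already present it returns -EEXIST, which is mapped
to -EADDRINUSE before the syscall returns. A small self-contained program that
exercises exactly that path (the socket path is arbitrary):

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <sys/un.h>
	#include <unistd.h>

	int main(void)
	{
		struct sockaddr_un sa = { .sun_family = AF_UNIX };
		int s1 = socket(AF_UNIX, SOCK_STREAM, 0);
		int s2 = socket(AF_UNIX, SOCK_STREAM, 0);

		strncpy(sa.sun_path, "/tmp/unix-bind-demo", sizeof(sa.sun_path) - 1);
		unlink(sa.sun_path);		/* start from a clean slate */

		if (bind(s1, (struct sockaddr *)&sa, sizeof(sa)) < 0)
			perror("first bind");
		/* the node now exists, so unix_mknod() gets -EEXIST ... */
		if (bind(s2, (struct sockaddr *)&sa, sizeof(sa)) < 0)
			printf("second bind: %s\n", strerror(errno));
		/* ... and userspace sees "Address already in use" */

		close(s1);
		close(s2);
		unlink(sa.sun_path);
		return 0;
	}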
diff --git a/sound/drivers/mpu401/mpu401_uart.c b/sound/drivers/mpu401/mpu401_uart.c
index 1cff331a228..4608c2ca43f 100644
--- a/sound/drivers/mpu401/mpu401_uart.c
+++ b/sound/drivers/mpu401/mpu401_uart.c
@@ -554,6 +554,7 @@ int snd_mpu401_uart_new(struct snd_card *card, int device,
 	spin_lock_init(&mpu->output_lock);
 	spin_lock_init(&mpu->timer_lock);
 	mpu->hardware = hardware;
+	mpu->irq = -1;
 	if (! (info_flags & MPU401_INFO_INTEGRATED)) {
 		int res_size = hardware == MPU401_HW_PC98II ? 4 : 2;
 		mpu->res = request_region(port, res_size, "MPU401 UART");
diff --git a/sound/isa/es1688/es1688_lib.c b/sound/isa/es1688/es1688_lib.c
index 1d47be8170b..b3b4f15e45b 100644
--- a/sound/isa/es1688/es1688_lib.c
+++ b/sound/isa/es1688/es1688_lib.c
@@ -612,10 +612,10 @@ static int snd_es1688_capture_close(struct snd_pcm_substream *substream)
 
 static int snd_es1688_free(struct snd_es1688 *chip)
 {
-	if (chip->res_port) {
+	if (chip->hardware != ES1688_HW_UNDEF)
 		snd_es1688_init(chip, 0);
+	if (chip->res_port)
 		release_and_free_resource(chip->res_port);
-	}
 	if (chip->irq >= 0)
 		free_irq(chip->irq, (void *) chip);
 	if (chip->dma8 >= 0) {
@@ -657,19 +657,27 @@ int snd_es1688_create(struct snd_card *card,
 		return -ENOMEM;
 	chip->irq = -1;
 	chip->dma8 = -1;
+	chip->hardware = ES1688_HW_UNDEF;
 
-	if ((chip->res_port = request_region(port + 4, 12, "ES1688")) == NULL) {
+	chip->res_port = request_region(port + 4, 12, "ES1688");
+	if (chip->res_port == NULL) {
 		snd_printk(KERN_ERR "es1688: can't grab port 0x%lx\n", port + 4);
-		return -EBUSY;
+		err = -EBUSY;
+		goto exit;
 	}
-	if (request_irq(irq, snd_es1688_interrupt, 0, "ES1688", (void *) chip)) {
+
+	err = request_irq(irq, snd_es1688_interrupt, 0, "ES1688", (void *) chip);
+	if (err < 0) {
 		snd_printk(KERN_ERR "es1688: can't grab IRQ %d\n", irq);
-		return -EBUSY;
+		goto exit;
 	}
+
 	chip->irq = irq;
-	if (request_dma(dma8, "ES1688")) {
+	err = request_dma(dma8, "ES1688");
+
+	if (err < 0) {
 		snd_printk(KERN_ERR "es1688: can't grab DMA8 %d\n", dma8);
-		return -EBUSY;
+		goto exit;
 	}
 	chip->dma8 = dma8;
@@ -685,14 +693,18 @@ int snd_es1688_create(struct snd_card *card,
 	err = snd_es1688_probe(chip);
 	if (err < 0)
-		return err;
+		goto exit;
 
 	err = snd_es1688_init(chip, 1);
 	if (err < 0)
-		return err;
+		goto exit;
 
 	/* Register device */
-	return snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+	err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+exit:
+	if (err)
+		snd_es1688_free(chip);
+	return err;
 }
 
 static struct snd_pcm_ops snd_es1688_playback_ops = {
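Both sound fixes above serve the same goal: making teardown safe on a
half-constructed object. mpu->irq and chip->hardware are given sentinel values
before anything can fail, and snd_es1688_create() now funnels every error
through a single exit label that calls snd_es1688_free(), which checks each
sentinel. The shape of the pattern in a tiny standalone form (all names are
invented for the sketch; free(NULL) being a no-op plays the role of the
sentinel checks):

	#include <stdlib.h>

	struct chip { void *port, *irq, *dma; };

	/* safe at any construction stage: unclaimed resources are still NULL */
	static void chip_free(struct chip *c)
	{
		free(c->dma);
		free(c->irq);
		free(c->port);
	}

	static int chip_create(struct chip *c)
	{
		int err = -1;

		c->port = malloc(16);
		if (!c->port)
			goto exit;
		c->irq = malloc(16);
		if (!c->irq)
			goto exit;
		c->dma = malloc(16);
		if (!c->dma)
			goto exit;
		err = 0;		/* fully constructed */
	exit:
		if (err)
			chip_free(c);	/* one teardown path for every failure */
		return err;
	}

	int main(void)
	{
		struct chip c = { 0 };
		int err = chip_create(&c);

		if (!err)
			chip_free(&c);
		return !!err;
	}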
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 641408dc28c..69b92844978 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -877,6 +877,8 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
 	struct hdmi_eld *eld;
 	struct hdmi_spec_per_cvt *per_cvt = NULL;
 
+	hinfo->nid = 0; /* clear the leftover value */
+
 	/* Validate hinfo */
 	pin_idx = hinfo_to_pin_index(spec, hinfo);
 	if (snd_BUG_ON(pin_idx < 0))
@@ -1161,9 +1163,9 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
 	return hdmi_setup_stream(codec, cvt_nid, pin_nid, stream_tag, format);
 }
 
-static int generic_hdmi_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
-					     struct hda_codec *codec,
-					     struct snd_pcm_substream *substream)
+static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
+			  struct hda_codec *codec,
+			  struct snd_pcm_substream *substream)
 {
 	struct hdmi_spec *spec = codec->spec;
 	int cvt_idx, pin_idx;
@@ -1171,8 +1173,6 @@ static int generic_hdmi_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
 	struct hdmi_spec_per_pin *per_pin;
 	int pinctl;
 
-	snd_hda_codec_cleanup_stream(codec, hinfo->nid);
-
 	if (hinfo->nid) {
 		cvt_idx = cvt_nid_to_cvt_index(spec, hinfo->nid);
 		if (snd_BUG_ON(cvt_idx < 0))
@@ -1195,14 +1195,13 @@ static int generic_hdmi_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
 				    pinctl & ~PIN_OUT);
 		snd_hda_spdif_ctls_unassign(codec, pin_idx);
 	}
-
 	return 0;
 }
 
 static const struct hda_pcm_ops generic_ops = {
 	.open = hdmi_pcm_open,
+	.close = hdmi_pcm_close,
 	.prepare = generic_hdmi_playback_pcm_prepare,
-	.cleanup = generic_hdmi_playback_pcm_cleanup,
 };
 
 static int generic_hdmi_build_pcms(struct hda_codec *codec)
@@ -1221,6 +1220,7 @@ static int generic_hdmi_build_pcms(struct hda_codec *codec)
 		pstr = &info->stream[SNDRV_PCM_STREAM_PLAYBACK];
 		pstr->substreams = 1;
 		pstr->ops = generic_ops;
+		pstr->nid = 1; /* FIXME: just for avoiding a debug WARNING */
 		/* other pstr fields are set in open */
 	}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index f141395dfee..344b221d210 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -203,6 +203,7 @@ struct alc_spec {
 	unsigned int shared_mic_hp:1; /* HP/Mic-in sharing */
 	unsigned int inv_dmic_fixup:1; /* has inverted digital-mic workaround */
 	unsigned int inv_dmic_muted:1; /* R-ch of inv d-mic is muted? */
+	unsigned int no_primary_hp:1; /* Don't prefer HP pins to speaker pins */
 
 	/* auto-mute control */
 	int automute_mode;
@@ -4323,7 +4324,8 @@ static int alc_parse_auto_config(struct hda_codec *codec,
 		return 0; /* can't find valid BIOS pin config */
 	}
 
-	if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT &&
+	if (!spec->no_primary_hp &&
+	    cfg->line_out_type == AUTO_PIN_SPEAKER_OUT &&
 	    cfg->line_outs <= cfg->hp_outs) {
 		/* use HP as primary out */
 		cfg->speaker_outs = cfg->line_outs;
@@ -5050,6 +5052,7 @@ enum {
 	ALC889_FIXUP_MBP_VREF,
 	ALC889_FIXUP_IMAC91_VREF,
 	ALC882_FIXUP_INV_DMIC,
+	ALC882_FIXUP_NO_PRIMARY_HP,
 };
 
 static void alc889_fixup_coef(struct hda_codec *codec,
@@ -5171,6 +5174,17 @@ static void alc889_fixup_imac91_vref(struct hda_codec *codec,
 	spec->keep_vref_in_automute = 1;
 }
 
+/* Don't take HP output as primary
+ * strangely, the speaker output doesn't work on VAIO Z through DAC 0x05
+ */
+static void alc882_fixup_no_primary_hp(struct hda_codec *codec,
+				       const struct alc_fixup *fix, int action)
+{
+	struct alc_spec *spec = codec->spec;
+	if (action == ALC_FIXUP_ACT_PRE_PROBE)
+		spec->no_primary_hp = 1;
+}
+
 static const struct alc_fixup alc882_fixups[] = {
 	[ALC882_FIXUP_ABIT_AW9D_MAX] = {
 		.type = ALC_FIXUP_PINS,
@@ -5357,6 +5371,10 @@ static const struct alc_fixup alc882_fixups[] = {
 		.type = ALC_FIXUP_FUNC,
 		.v.func = alc_fixup_inv_dmic_0x12,
 	},
+	[ALC882_FIXUP_NO_PRIMARY_HP] = {
+		.type = ALC_FIXUP_FUNC,
+		.v.func = alc882_fixup_no_primary_hp,
+	},
 };
 
 static const struct snd_pci_quirk alc882_fixup_tbl[] = {
@@ -5391,6 +5409,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC),
 	SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
 	SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
+	SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
 
 	/* All Apple entries are in codec SSIDs */
 	SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
@@ -5432,6 +5451,7 @@ static const struct alc_model_fixup alc882_fixup_models[] = {
 	{.id = ALC882_FIXUP_ACER_ASPIRE_8930G, .name = "acer-aspire-8930g"},
 	{.id = ALC883_FIXUP_ACER_EAPD, .name = "acer-aspire"},
 	{.id = ALC882_FIXUP_INV_DMIC, .name = "inv-dmic"},
+	{.id = ALC882_FIXUP_NO_PRIMARY_HP, .name = "no-primary-hp"},
 	{}
 };
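The new no_primary_hp bit simply short-circuits the heuristic by which
alc_parse_auto_config() promotes headphone pins to the primary output when a
machine reports its speakers on line-out pins; per the comment above, on the
Vaio Z that promotion leaves the speaker path on a DAC that doesn't work. A
standalone rendering of just that condition (function and variable names are
mine, not the driver's):

	#include <stdbool.h>
	#include <stdio.h>

	static const char *pick_primary(int line_outs, int hp_outs,
					bool line_out_is_speaker,
					bool no_primary_hp)
	{
		/* the quirk wins before the pin-count comparison is made */
		if (!no_primary_hp && line_out_is_speaker &&
		    line_outs <= hp_outs)
			return "headphone pins";
		return "line/speaker pins";
	}

	int main(void)
	{
		printf("default: %s\n", pick_primary(1, 2, true, false));
		printf("vaio-z : %s\n", pick_primary(1, 2, true, true));
		return 0;
	}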
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index a1596a3b171..94040ccf8e8 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -101,6 +101,8 @@ enum {
 	STAC_92HD83XXX_HP_cNB11_INTQUAD,
 	STAC_HP_DV7_4000,
 	STAC_HP_ZEPHYR,
+	STAC_92HD83XXX_HP_LED,
+	STAC_92HD83XXX_HP_INV_LED,
 	STAC_92HD83XXX_MODELS
 };
@@ -1675,6 +1677,8 @@ static const char * const stac92hd83xxx_models[STAC_92HD83XXX_MODELS] = {
 	[STAC_92HD83XXX_HP_cNB11_INTQUAD] = "hp_cNB11_intquad",
 	[STAC_HP_DV7_4000] = "hp-dv7-4000",
 	[STAC_HP_ZEPHYR] = "hp-zephyr",
+	[STAC_92HD83XXX_HP_LED] = "hp-led",
+	[STAC_92HD83XXX_HP_INV_LED] = "hp-inv-led",
 };
 
 static const struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = {
@@ -1729,6 +1733,8 @@ static const struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = {
 			  "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
 	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3561,
 			  "HP", STAC_HP_ZEPHYR),
+	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3660,
+			  "HP Mini", STAC_92HD83XXX_HP_LED),
 	{} /* terminator */
 };
@@ -4414,7 +4420,12 @@ static int stac92xx_init(struct hda_codec *codec)
 	snd_hda_jack_report_sync(codec);
 
 	/* sync mute LED */
-	snd_hda_sync_vmaster_hook(&spec->vmaster_mute);
+	if (spec->gpio_led) {
+		if (spec->vmaster_mute.hook)
+			snd_hda_sync_vmaster_hook(&spec->vmaster_mute);
+		else /* the very first init call doesn't have vmaster yet */
+			stac92xx_update_led_status(codec, false);
+	}
 
 	/* sync the power-map */
 	if (spec->num_pwrs)
@@ -5507,6 +5518,7 @@ static void stac92hd8x_fill_auto_spec(struct hda_codec *codec)
 static int patch_stac92hd83xxx(struct hda_codec *codec)
 {
 	struct sigmatel_spec *spec;
+	int default_polarity = -1; /* no default cfg */
 	int err;
 
 	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
@@ -5555,9 +5567,15 @@ again:
 	case STAC_HP_ZEPHYR:
 		spec->init = stac92hd83xxx_hp_zephyr_init;
 		break;
+	case STAC_92HD83XXX_HP_LED:
+		default_polarity = 0;
+		break;
+	case STAC_92HD83XXX_HP_INV_LED:
+		default_polarity = 1;
+		break;
 	}
 
-	if (find_mute_led_cfg(codec, -1/*no default cfg*/))
+	if (find_mute_led_cfg(codec, default_polarity))
 		snd_printd("mute LED gpio %d polarity %d\n",
 				spec->gpio_led,
 				spec->gpio_led_polarity);
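The hp-led and hp-inv-led models differ only in the default polarity handed to
find_mute_led_cfg(), i.e. which GPIO level is taken to mean "LED lit". A toy
model of the inversion (this is one plausible reading of the polarity flag;
the hunk itself doesn't spell out the driver's exact GPIO semantics):

	#include <stdbool.h>
	#include <stdio.h>

	/* level to drive on the mute-LED GPIO for a given mute state */
	static int led_level(bool muted, int polarity)
	{
		return muted ^ polarity;	/* polarity 1 flips the sense */
	}

	int main(void)
	{
		printf("hp-led     (polarity 0), muted: drive %d\n",
		       led_level(true, 0));
		printf("hp-inv-led (polarity 1), muted: drive %d\n",
		       led_level(true, 1));
		return 0;
	}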
	update_power_state(codec, 0x21, imux_is_smixer ? AC_PWRST_D0 : parm);
diff --git a/sound/sound_firmware.c b/sound/sound_firmware.c
index 7e96249536b..37711a5d0d6 100644
--- a/sound/sound_firmware.c
+++ b/sound/sound_firmware.c
@@ -23,14 +23,14 @@ static int do_mod_firmware_load(const char *fn, char **fp)
 	if (l <= 0 || l > 131072)
 	{
 		printk(KERN_INFO "Invalid firmware '%s'\n", fn);
-		filp_close(filp, current->files);
+		filp_close(filp, NULL);
 		return 0;
 	}
 	dp = vmalloc(l);
 	if (dp == NULL)
 	{
 		printk(KERN_INFO "Out of memory loading '%s'.\n", fn);
-		filp_close(filp, current->files);
+		filp_close(filp, NULL);
 		return 0;
 	}
 	pos = 0;
@@ -38,10 +38,10 @@ static int do_mod_firmware_load(const char *fn, char **fp)
 	{
 		printk(KERN_INFO "Failed to read '%s'.\n", fn);
 		vfree(dp);
-		filp_close(filp, current->files);
+		filp_close(filp, NULL);
 		return 0;
 	}
-	filp_close(filp, current->files);
+	filp_close(filp, NULL);
 	*fp = dp;
 	return (int) l;
 }
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index 379baad3d5a..5e634a2eb28 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -111,7 +111,8 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip, int source_id)
 		return 0;
 
 	/* If a clock source can't tell us whether it's valid, we assume it is */
-	if (!uac2_control_is_readable(cs_desc->bmControls, UAC2_CS_CONTROL_CLOCK_VALID))
+	if (!uac2_control_is_readable(cs_desc->bmControls,
+				      UAC2_CS_CONTROL_CLOCK_VALID - 1))
 		return 1;
 
 	err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR,
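The clock.c fix is an off-by-one between two numbering schemes: the
UAC2_CS_CONTROL_* constants are 1-based control selectors as used in class
requests, while uac2_control_is_readable() indexes the 2-bit fields of
bmControls from zero. A standalone model of that layout (the helper body
mirrors the kernel's two-bits-per-control scheme; the bitmap value is invented
for the demo):

	#include <stdbool.h>
	#include <stdio.h>

	#define UAC2_CS_CONTROL_CLOCK_VALID	0x2 /* 1-based control selector */

	/* two bits per control; bit pairs counted from zero */
	static bool uac2_control_is_readable(unsigned int bmControls,
					     unsigned char control)
	{
		return (bmControls >> (control * 2)) & 0x1;
	}

	int main(void)
	{
		unsigned int bmControls = 0x4;	/* pair 1 = clock validity, read-only */

		printf("unpatched lookup: %d\n",	/* reads the wrong pair */
		       uac2_control_is_readable(bmControls,
						UAC2_CS_CONTROL_CLOCK_VALID));
		printf("patched lookup:   %d\n",	/* reads pair 1 as intended */
		       uac2_control_is_readable(bmControls,
						UAC2_CS_CONTROL_CLOCK_VALID - 1));
		return 0;
	}

Without the subtraction the readability test lands on the wrong control's
bits, so a device that does report clock validity could be treated as if it
didn't, and vice versa.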