| field | value | date |
|---|---|---|
| author | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2015-02-10 11:35:36 -0800 |
| committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2015-02-10 11:35:36 -0800 |
| commit | 4ba24fef3eb3b142197135223b90ced2f319cd53 (patch) | |
| tree | a20c125b27740ec7b4c761b11d801108e1b316b2 /sound/core/pcm_native.c | |
| parent | 47c1ffb2b6b630894e9a16442611c056ab21c057 (diff) | |
| parent | 98a4a59ee31a12105a2b84f5b8b515ac2cb208ef (diff) | |
Merge branch 'next' into for-linus
Prepare first round of input updates for 3.20.
Diffstat (limited to 'sound/core/pcm_native.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | sound/core/pcm_native.c | 256 |

1 file changed, 194 insertions, 62 deletions
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 8cd2f930ad0..095d9572ad2 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -35,9 +35,6 @@
 #include <sound/timer.h>
 #include <sound/minors.h>
 #include <asm/io.h>
-#if defined(CONFIG_MIPS) && defined(CONFIG_DMA_NONCOHERENT)
-#include <dma-coherence.h>
-#endif
 
 /*
  *  Compatibility
@@ -74,11 +71,103 @@ static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream);
  *
  */
 
-DEFINE_RWLOCK(snd_pcm_link_rwlock);
-EXPORT_SYMBOL(snd_pcm_link_rwlock);
-
+static DEFINE_RWLOCK(snd_pcm_link_rwlock);
 static DECLARE_RWSEM(snd_pcm_link_rwsem);
 
+/**
+ * snd_pcm_stream_lock - Lock the PCM stream
+ * @substream: PCM substream
+ *
+ * This locks the PCM stream's spinlock or mutex depending on the nonatomic
+ * flag of the given substream.  This also takes the global link rw lock
+ * (or rw sem), too, for avoiding the race with linked streams.
+ */
+void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
+{
+        if (substream->pcm->nonatomic) {
+                down_read(&snd_pcm_link_rwsem);
+                mutex_lock(&substream->self_group.mutex);
+        } else {
+                read_lock(&snd_pcm_link_rwlock);
+                spin_lock(&substream->self_group.lock);
+        }
+}
+EXPORT_SYMBOL_GPL(snd_pcm_stream_lock);
+
+/**
+ * snd_pcm_stream_unlock - Unlock the PCM stream
+ * @substream: PCM substream
+ *
+ * This unlocks the PCM stream that has been locked via snd_pcm_stream_lock().
+ */
+void snd_pcm_stream_unlock(struct snd_pcm_substream *substream)
+{
+        if (substream->pcm->nonatomic) {
+                mutex_unlock(&substream->self_group.mutex);
+                up_read(&snd_pcm_link_rwsem);
+        } else {
+                spin_unlock(&substream->self_group.lock);
+                read_unlock(&snd_pcm_link_rwlock);
+        }
+}
+EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock);
+
+/**
+ * snd_pcm_stream_lock_irq - Lock the PCM stream
+ * @substream: PCM substream
+ *
+ * This locks the PCM stream like snd_pcm_stream_lock() and disables the local
+ * IRQ (only when nonatomic is false).  In nonatomic case, this is identical
+ * as snd_pcm_stream_lock().
+ */
+void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
+{
+        if (!substream->pcm->nonatomic)
+                local_irq_disable();
+        snd_pcm_stream_lock(substream);
+}
+EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
+
+/**
+ * snd_pcm_stream_unlock_irq - Unlock the PCM stream
+ * @substream: PCM substream
+ *
+ * This is a counter-part of snd_pcm_stream_lock_irq().
+ */
+void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
+{
+        snd_pcm_stream_unlock(substream);
+        if (!substream->pcm->nonatomic)
+                local_irq_enable();
+}
+EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irq);
+
+unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream)
+{
+        unsigned long flags = 0;
+        if (!substream->pcm->nonatomic)
+                local_irq_save(flags);
+        snd_pcm_stream_lock(substream);
+        return flags;
+}
+EXPORT_SYMBOL_GPL(_snd_pcm_stream_lock_irqsave);
+
+/**
+ * snd_pcm_stream_unlock_irqrestore - Unlock the PCM stream
+ * @substream: PCM substream
+ * @flags: irq flags
+ *
+ * This is a counter-part of snd_pcm_stream_lock_irqsave().
+ */
+void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
+                                      unsigned long flags)
+{
+        snd_pcm_stream_unlock(substream);
+        if (!substream->pcm->nonatomic)
+                local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore);
+
 static inline mm_segment_t snd_enter_user(void)
 {
         mm_segment_t fs = get_fs();
@@ -138,6 +227,21 @@ int snd_pcm_info_user(struct snd_pcm_substream *substream,
         return err;
 }
 
+static bool hw_support_mmap(struct snd_pcm_substream *substream)
+{
+        if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_MMAP))
+                return false;
+        /* check architectures that return -EINVAL from dma_mmap_coherent() */
+        /* FIXME: this should be some global flag */
+#if defined(CONFIG_C6X) || defined(CONFIG_FRV) || defined(CONFIG_MN10300) ||\
+        defined(CONFIG_PARISC) || defined(CONFIG_XTENSA)
+        if (!substream->ops->mmap &&
+            substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
+                return false;
+#endif
+        return true;
+}
+
 #undef RULES_DEBUG
 
 #ifdef RULES_DEBUG
@@ -315,8 +419,12 @@
         }
 
         hw = &substream->runtime->hw;
-        if (!params->info)
+        if (!params->info) {
                 params->info = hw->info & ~SNDRV_PCM_INFO_FIFO_IN_FRAMES;
+                if (!hw_support_mmap(substream))
+                        params->info &= ~(SNDRV_PCM_INFO_MMAP |
+                                          SNDRV_PCM_INFO_MMAP_VALID);
+        }
         if (!params->fifo_size) {
                 m = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
                 i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
@@ -724,12 +832,16 @@ static int snd_pcm_action_group(struct action_ops *ops,
 {
         struct snd_pcm_substream *s = NULL;
         struct snd_pcm_substream *s1;
-        int res = 0;
+        int res = 0, depth = 1;
 
         snd_pcm_group_for_each_entry(s, substream) {
-                if (do_lock && s != substream)
-                        spin_lock_nested(&s->self_group.lock,
-                                         SINGLE_DEPTH_NESTING);
+                if (do_lock && s != substream) {
+                        if (s->pcm->nonatomic)
+                                mutex_lock_nested(&s->self_group.mutex, depth);
+                        else
+                                spin_lock_nested(&s->self_group.lock, depth);
+                        depth++;
+                }
                 res = ops->pre_action(s, state);
                 if (res < 0)
                         goto _unlock;
@@ -755,8 +867,12 @@ static int snd_pcm_action_group(struct action_ops *ops,
         if (do_lock) {
                 /* unlock streams */
                 snd_pcm_group_for_each_entry(s1, substream) {
-                        if (s1 != substream)
-                                spin_unlock(&s1->self_group.lock);
+                        if (s1 != substream) {
+                                if (s1->pcm->nonatomic)
+                                        mutex_unlock(&s1->self_group.mutex);
+                                else
+                                        spin_unlock(&s1->self_group.lock);
+                        }
                         if (s1 == s)    /* end */
                                 break;
                 }
@@ -793,7 +909,18 @@ static int snd_pcm_action(struct action_ops *ops,
 {
         int res;
 
-        if (snd_pcm_stream_linked(substream)) {
+        if (!snd_pcm_stream_linked(substream))
+                return snd_pcm_action_single(ops, substream, state);
+
+        if (substream->pcm->nonatomic) {
+                if (!mutex_trylock(&substream->group->mutex)) {
+                        mutex_unlock(&substream->self_group.mutex);
+                        mutex_lock(&substream->group->mutex);
+                        mutex_lock(&substream->self_group.mutex);
+                }
+                res = snd_pcm_action_group(ops, substream, state, 1);
+                mutex_unlock(&substream->group->mutex);
+        } else {
                 if (!spin_trylock(&substream->group->lock)) {
                         spin_unlock(&substream->self_group.lock);
                         spin_lock(&substream->group->lock);
@@ -801,8 +928,6 @@ static int snd_pcm_action(struct action_ops *ops,
                 }
                 res = snd_pcm_action_group(ops, substream, state, 1);
                 spin_unlock(&substream->group->lock);
-        } else {
-                res = snd_pcm_action_single(ops, substream, state);
         }
         return res;
 }
@@ -816,19 +941,9 @@ static int snd_pcm_action_lock_irq(struct action_ops *ops,
 {
         int res;
 
-        read_lock_irq(&snd_pcm_link_rwlock);
-        if (snd_pcm_stream_linked(substream)) {
-                spin_lock(&substream->group->lock);
-                spin_lock(&substream->self_group.lock);
-                res = snd_pcm_action_group(ops, substream, state, 1);
-                spin_unlock(&substream->self_group.lock);
-                spin_unlock(&substream->group->lock);
-        } else {
-                spin_lock(&substream->self_group.lock);
-                res = snd_pcm_action_single(ops, substream, state);
-                spin_unlock(&substream->self_group.lock);
-        }
-        read_unlock_irq(&snd_pcm_link_rwlock);
+        snd_pcm_stream_lock_irq(substream);
+        res = snd_pcm_action(ops, substream, state);
+        snd_pcm_stream_unlock_irq(substream);
         return res;
 }
 
@@ -937,10 +1052,10 @@ static void snd_pcm_post_stop(struct snd_pcm_substream *substream, int state)
         struct snd_pcm_runtime *runtime = substream->runtime;
         if (runtime->status->state != state) {
                 snd_pcm_trigger_tstamp(substream);
+                runtime->status->state = state;
                 if (substream->timer)
                         snd_timer_notify(substream->timer, SNDRV_TIMER_EVENT_MSTOP,
                                          &runtime->trigger_tstamp);
-                runtime->status->state = state;
         }
         wake_up(&runtime->sleep);
         wake_up(&runtime->tsleep);
@@ -983,6 +1098,28 @@ int snd_pcm_drain_done(struct snd_pcm_substream *substream)
                               SNDRV_PCM_STATE_SETUP);
 }
 
+/**
+ * snd_pcm_stop_xrun - stop the running streams as XRUN
+ * @substream: the PCM substream instance
+ *
+ * This stops the given running substream (and all linked substreams) as XRUN.
+ * Unlike snd_pcm_stop(), this function takes the substream lock by itself.
+ *
+ * Return: Zero if successful, or a negative error code.
+ */
+int snd_pcm_stop_xrun(struct snd_pcm_substream *substream)
+{
+        unsigned long flags;
+        int ret = 0;
+
+        snd_pcm_stream_lock_irqsave(substream, flags);
+        if (snd_pcm_running(substream))
+                ret = snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
+        snd_pcm_stream_unlock_irqrestore(substream, flags);
+        return ret;
+}
+EXPORT_SYMBOL_GPL(snd_pcm_stop_xrun);
+
 /*
  * pause callbacks
  */
@@ -1089,11 +1226,11 @@ static void snd_pcm_post_suspend(struct snd_pcm_substream *substream, int state)
 {
         struct snd_pcm_runtime *runtime = substream->runtime;
         snd_pcm_trigger_tstamp(substream);
+        runtime->status->suspended_state = runtime->status->state;
+        runtime->status->state = SNDRV_PCM_STATE_SUSPENDED;
         if (substream->timer)
                 snd_timer_notify(substream->timer, SNDRV_TIMER_EVENT_MSUSPEND,
                                  &runtime->trigger_tstamp);
-        runtime->status->suspended_state = runtime->status->state;
-        runtime->status->state = SNDRV_PCM_STATE_SUSPENDED;
         wake_up(&runtime->sleep);
         wake_up(&runtime->tsleep);
 }
@@ -1196,10 +1333,10 @@ static void snd_pcm_post_resume(struct snd_pcm_substream *substream, int state)
 {
         struct snd_pcm_runtime *runtime = substream->runtime;
         snd_pcm_trigger_tstamp(substream);
+        runtime->status->state = runtime->status->suspended_state;
         if (substream->timer)
                 snd_timer_notify(substream->timer, SNDRV_TIMER_EVENT_MRESUME,
                                  &runtime->trigger_tstamp);
-        runtime->status->state = runtime->status->suspended_state;
 }
 
 static struct action_ops snd_pcm_action_resume = {
@@ -1634,7 +1771,8 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
         down_write(&snd_pcm_link_rwsem);
         write_lock_irq(&snd_pcm_link_rwlock);
         if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
-            substream->runtime->status->state != substream1->runtime->status->state) {
+            substream->runtime->status->state != substream1->runtime->status->state ||
+            substream->pcm->nonatomic != substream1->pcm->nonatomic) {
                 res = -EBADFD;
                 goto _end;
         }
@@ -1646,6 +1784,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
                 substream->group = group;
                 group = NULL;
                 spin_lock_init(&substream->group->lock);
+                mutex_init(&substream->group->mutex);
                 INIT_LIST_HEAD(&substream->group->substreams);
                 list_add_tail(&substream->link_list, &substream->group->substreams);
                 substream->group->count = 1;
@@ -1954,7 +2093,7 @@ int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream)
                 mask |= 1 << SNDRV_PCM_ACCESS_RW_INTERLEAVED;
         if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
                 mask |= 1 << SNDRV_PCM_ACCESS_RW_NONINTERLEAVED;
-        if (hw->info & SNDRV_PCM_INFO_MMAP) {
+        if (hw_support_mmap(substream)) {
                 if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
                         mask |= 1 << SNDRV_PCM_ACCESS_MMAP_INTERLEAVED;
                 if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
@@ -3133,20 +3272,6 @@ static inline struct page *
 snd_pcm_default_page_ops(struct snd_pcm_substream *substream, unsigned long ofs)
 {
         void *vaddr = substream->runtime->dma_area + ofs;
-#if defined(CONFIG_MIPS) && defined(CONFIG_DMA_NONCOHERENT)
-        if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
-                return virt_to_page(CAC_ADDR(vaddr));
-#endif
-#if defined(CONFIG_PPC32) && defined(CONFIG_NOT_COHERENT_CACHE)
-        if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV) {
-                dma_addr_t addr = substream->runtime->dma_addr + ofs;
-                addr -= get_dma_offset(substream->dma_buffer.dev.dev);
-                /* assume dma_handle set via pfn_to_phys() in
-                 * mm/dma-noncoherent.c
-                 */
-                return pfn_to_page(addr >> PAGE_SHIFT);
-        }
-#endif
         return virt_to_page(vaddr);
 }
 
@@ -3191,16 +3316,18 @@ static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = {
         .fault =        snd_pcm_mmap_data_fault,
 };
 
-#ifndef ARCH_HAS_DMA_MMAP_COHERENT
-/* This should be defined / handled globally! */
-#ifdef CONFIG_ARM
-#define ARCH_HAS_DMA_MMAP_COHERENT
-#endif
-#endif
-
 /*
  * mmap the DMA buffer on RAM
  */
+
+/**
+ * snd_pcm_lib_default_mmap - Default PCM data mmap function
+ * @substream: PCM substream
+ * @area: VMA
+ *
+ * This is the default mmap handler for PCM data.  When mmap pcm_ops is NULL,
+ * this function is invoked implicitly.
+ */
 int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
                              struct vm_area_struct *area)
 {
@@ -3213,7 +3340,7 @@ int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
                                 area->vm_end - area->vm_start, area->vm_page_prot);
         }
 #endif /* CONFIG_GENERIC_ALLOCATOR */
-#ifdef ARCH_HAS_DMA_MMAP_COHERENT
+#ifndef CONFIG_X86 /* for avoiding warnings arch/x86/mm/pat.c */
         if (!substream->ops->page &&
             substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
                 return dma_mmap_coherent(substream->dma_buffer.dev.dev,
@@ -3221,11 +3348,7 @@ int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
                                          substream->runtime->dma_area,
                                          substream->runtime->dma_addr,
                                          area->vm_end - area->vm_start);
-#elif defined(CONFIG_MIPS) && defined(CONFIG_DMA_NONCOHERENT)
-        if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV &&
-            !plat_device_is_coherent(substream->dma_buffer.dev.dev))
-                area->vm_page_prot = pgprot_noncached(area->vm_page_prot);
-#endif /* ARCH_HAS_DMA_MMAP_COHERENT */
+#endif /* CONFIG_X86 */
         /* mmap with fault handler */
         area->vm_ops = &snd_pcm_vm_ops_data_fault;
         return 0;
@@ -3236,6 +3359,15 @@ EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap);
  * mmap the DMA buffer on I/O memory area
  */
 #if SNDRV_PCM_INFO_MMAP_IOMEM
+/**
+ * snd_pcm_lib_mmap_iomem - Default PCM data mmap function for I/O mem
+ * @substream: PCM substream
+ * @area: VMA
+ *
+ * When your hardware uses the iomapped pages as the hardware buffer and
+ * wants to mmap it, pass this function as mmap pcm_ops.  Note that this
+ * is supposed to work only on limited architectures.
+ */
 int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream,
                            struct vm_area_struct *area)
 {
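For orientation, a minimal sketch of how a driver interrupt handler might use the stream-locking helpers and snd_pcm_stop_xrun() exported above. The my_chip structure, its overrun flag, and my_chip_interrupt() are hypothetical illustrations, not part of this commit:

```c
#include <linux/interrupt.h>
#include <sound/core.h>
#include <sound/pcm.h>

struct my_chip {                        /* hypothetical driver data */
	struct snd_pcm_substream *substream;
	bool overrun;                   /* set by (omitted) hardware code */
};

static irqreturn_t my_chip_interrupt(int irq, void *dev_id)
{
	struct my_chip *chip = dev_id;
	struct snd_pcm_substream *substream = chip->substream;
	unsigned long flags;

	/* The helpers pick the spinlock or, for a nonatomic PCM, the mutex;
	 * nonatomic streams therefore have to be handled from sleepable
	 * (e.g. threaded-IRQ) context rather than hard-IRQ context. */
	snd_pcm_stream_lock_irqsave(substream, flags);
	if (!snd_pcm_running(substream)) {
		snd_pcm_stream_unlock_irqrestore(substream, flags);
		return IRQ_HANDLED;
	}
	/* ... read the hardware pointer, detect an overrun ... */
	snd_pcm_stream_unlock_irqrestore(substream, flags);

	if (chip->overrun)
		/* takes the stream lock internally, unlike snd_pcm_stop() */
		snd_pcm_stop_xrun(substream);
	else
		snd_pcm_period_elapsed(substream);

	return IRQ_HANDLED;
}
```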
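A second sketch, under the same caveat, of where snd_pcm_lib_mmap_iomem() and the nonatomic flag used by the new locking helpers would plug into a driver; my_chip_pcm_ops, my_chip_new_pcm(), and the "my-chip" id are hypothetical:

```c
#include <sound/core.h>
#include <sound/pcm.h>

/* Hypothetical ops table: for hardware whose buffer lives in iomapped memory,
 * the helper documented above is passed as the mmap pcm_ops callback. */
static struct snd_pcm_ops my_chip_pcm_ops = {
	/* ... the usual open/close/hw_params/trigger/pointer callbacks ... */
	.ioctl = snd_pcm_lib_ioctl,
#if SNDRV_PCM_INFO_MMAP_IOMEM
	.mmap  = snd_pcm_lib_mmap_iomem,    /* limited architectures only */
#endif
};

/* Hypothetical constructor: a driver whose trigger/pointer callbacks need to
 * sleep opts into the mutex-based stream locking above by setting
 * pcm->nonatomic before the ops are registered. */
static int my_chip_new_pcm(struct snd_card *card, void *chip)
{
	struct snd_pcm *pcm;
	int err;

	err = snd_pcm_new(card, "my-chip", 0, 1, 1, &pcm);
	if (err < 0)
		return err;

	pcm->nonatomic = true;              /* stream lock becomes a mutex */
	pcm->private_data = chip;
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &my_chip_pcm_ops);
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &my_chip_pcm_ops);
	return 0;
}
```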