Diffstat (limited to 'Documentation')
9 files changed, 359 insertions, 15 deletions
diff --git a/Documentation/DocBook/writing-an-alsa-driver.tmpl b/Documentation/DocBook/writing-an-alsa-driver.tmpl
index 5de23c00707..cab4ec58e46 100644
--- a/Documentation/DocBook/writing-an-alsa-driver.tmpl
+++ b/Documentation/DocBook/writing-an-alsa-driver.tmpl
@@ -404,7 +404,7 @@
   /* SNDRV_CARDS: maximum number of cards supported by this module */
   static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
   static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
-  static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
+  static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
   /* definition of the chip-specific record */
   struct mychip {
diff --git a/Documentation/devicetree/bindings/sound/tegra-audio-wm8903.txt b/Documentation/devicetree/bindings/sound/tegra-audio-wm8903.txt
new file mode 100644
index 00000000000..d5b0da8bf1d
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/tegra-audio-wm8903.txt
@@ -0,0 +1,71 @@
+NVIDIA Tegra audio complex
+
+Required properties:
+- compatible : "nvidia,tegra-audio-wm8903"
+- nvidia,model : The user-visible name of this sound complex.
+- nvidia,audio-routing : A list of the connections between audio components.
+  Each entry is a pair of strings, the first being the connection's sink,
+  the second being the connection's source. Valid names for sources and
+  sinks are the WM8903's pins and the jacks on the board:
+
+  WM8903 pins:
+
+  * IN1L
+  * IN1R
+  * IN2L
+  * IN2R
+  * IN3L
+  * IN3R
+  * DMICDAT
+  * HPOUTL
+  * HPOUTR
+  * LINEOUTL
+  * LINEOUTR
+  * LOP
+  * LON
+  * ROP
+  * RON
+  * MICBIAS
+
+  Board connectors:
+
+  * Headphone Jack
+  * Int Spk
+  * Mic Jack
+
+- nvidia,i2s-controller : The phandle of the Tegra I2S1 controller
+- nvidia,audio-codec : The phandle of the WM8903 audio codec
+
+Optional properties:
+- nvidia,spkr-en-gpios : The GPIO that enables the speakers
+- nvidia,hp-mute-gpios : The GPIO that mutes the headphones
+- nvidia,hp-det-gpios : The GPIO that detects whether the headphones are plugged in
+- nvidia,int-mic-en-gpios : The GPIO that enables the internal microphone
+- nvidia,ext-mic-en-gpios : The GPIO that enables the external microphone
+
+Example:
+
+sound {
+        compatible = "nvidia,tegra-audio-wm8903-harmony",
+                     "nvidia,tegra-audio-wm8903";
+        nvidia,model = "tegra-wm8903-harmony";
+
+        nvidia,audio-routing =
+                "Headphone Jack", "HPOUTR",
+                "Headphone Jack", "HPOUTL",
+                "Int Spk", "ROP",
+                "Int Spk", "RON",
+                "Int Spk", "LOP",
+                "Int Spk", "LON",
+                "Mic Jack", "MICBIAS",
+                "IN1L", "Mic Jack";
+
+        nvidia,i2s-controller = <&i2s1>;
+        nvidia,audio-codec = <&wm8903>;
+
+        nvidia,spkr-en-gpios = <&codec 2 0>;
+        nvidia,hp-det-gpios = <&gpio 178 0>; /* gpio PW2 */
+        nvidia,int-mic-en-gpios = <&gpio 184 0>; /* gpio PX0 */
+        nvidia,ext-mic-en-gpios = <&gpio 185 0>; /* gpio PX1 */
+};
+
diff --git a/Documentation/devicetree/bindings/sound/tegra20-das.txt b/Documentation/devicetree/bindings/sound/tegra20-das.txt
new file mode 100644
index 00000000000..6de3a7ee4ef
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/tegra20-das.txt
@@ -0,0 +1,12 @@
+NVIDIA Tegra 20 DAS (Digital Audio Switch) controller
+
+Required properties:
+- compatible : "nvidia,tegra20-das"
+- reg : Should contain DAS registers location and length
+
+Example:
+
+das@70000c00 {
+        compatible = "nvidia,tegra20-das";
+        reg = <0x70000c00 0x80>;
+};
diff --git a/Documentation/devicetree/bindings/sound/tegra20-i2s.txt b/Documentation/devicetree/bindings/sound/tegra20-i2s.txt
new file mode 100644
index 00000000000..0df2b5c816e
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/tegra20-i2s.txt
@@ -0,0 +1,17 @@
+NVIDIA Tegra 20 I2S controller
+
+Required properties:
+- compatible : "nvidia,tegra20-i2s"
+- reg : Should contain I2S registers location and length
+- interrupts : Should contain I2S interrupt
+- nvidia,dma-request-selector : The Tegra DMA controller's phandle and
+  request selector for this I2S controller
+
+Example:
+
+i2s@70002800 {
+        compatible = "nvidia,tegra20-i2s";
+        reg = <0x70002800 0x200>;
+        interrupts = < 45 >;
+        nvidia,dma-request-selector = < &apbdma 2 >;
+};
diff --git a/Documentation/devicetree/bindings/sound/wm8903.txt b/Documentation/devicetree/bindings/sound/wm8903.txt
new file mode 100644
index 00000000000..f102cbc4269
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/wm8903.txt
@@ -0,0 +1,50 @@
+WM8903 audio CODEC
+
+This device supports I2C only.
+
+Required properties:
+
+  - compatible : "wlf,wm8903"
+
+  - reg : the I2C address of the device.
+
+  - gpio-controller : Indicates this device is a GPIO controller.
+
+  - #gpio-cells : Should be two. The first cell is the pin number and the
+    second cell is used to specify optional parameters (currently unused).
+
+Optional properties:
+
+  - interrupts : The interrupt line the codec is connected to.
+
+  - micdet-cfg : Default register value for R6 (Mic Bias). If absent, the
+    default is 0.
+
+  - micdet-delay : The debounce delay for microphone detection, in ms. If
+    absent, the default is 100.
+
+  - gpio-cfg : A list of GPIO configuration register values. The list must
+    be 5 entries long. If absent, no configuration of these registers is
+    performed. If any entry has the value 0xffffffff, that GPIO's
+    configuration will not be modified.
+
+Example:
+
+codec: wm8903@1a {
+        compatible = "wlf,wm8903";
+        reg = <0x1a>;
+        interrupts = < 347 >;
+
+        gpio-controller;
+        #gpio-cells = <2>;
+
+        micdet-cfg = <0>;
+        micdet-delay = <100>;
+        gpio-cfg = <
+                0x0600 /* DMIC_LR, output */
+                0x0680 /* DMIC_DAT, input */
+                0x0000 /* GPIO, output, low */
+                0x0200 /* Interrupt, output */
+                0x01a0 /* BCLK, input, active high */
+        >;
+};
diff --git a/Documentation/devicetree/bindings/sound/wm8994.txt b/Documentation/devicetree/bindings/sound/wm8994.txt
new file mode 100644
index 00000000000..7a7eb1e7bda
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/wm8994.txt
@@ -0,0 +1,18 @@
+WM1811/WM8994/WM8958 audio CODEC
+
+These devices support both I2C and SPI (configured with pin strapping
+on the board).
+
+Required properties:
+
+  - compatible : "wlf,wm1811", "wlf,wm8994", "wlf,wm8958"
+
+  - reg : the I2C address of the device for I2C, the chip select
+    number for SPI.
+
+Example:
+
+codec: wm8994@1a {
+        compatible = "wlf,wm8994";
+        reg = <0x1a>;
+};
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 6fdb450b05f..ecc6a6cd26c 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -42,4 +42,5 @@ sirf	SiRF Technology, Inc.
 st	STMicroelectronics
 stericsson	ST-Ericsson
 ti	Texas Instruments
+wlf	Wolfson Microelectronics
 xlnx	Xilinx
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index edad99abec2..c8c54544abc 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -42,19 +42,7 @@ ALC260
 
 ALC262
 ======
-  fujitsu	Fujitsu Laptop
-  benq		Benq ED8
-  benq-t31	Benq T31
-  hippo		Hippo (ATI) with jack detection, Sony UX-90s
-  hippo_1	Hippo (Benq) with jack detection
-  toshiba-s06	Toshiba S06
-  toshiba-rx1	Toshiba RX1
-  tyan		Tyan Thunder n6650W (S2915-E)
-  ultra		Samsung Q1 Ultra Vista model
-  lenovo-3000	Lenovo 3000 y410
-  nec		NEC Versa S9100
-  basic		fixed pin assignment w/o SPDIF
-  auto		auto-config reading BIOS (default)
+  N/A
 
 ALC267/268
 ==========
@@ -350,7 +338,6 @@ STAC92HD83*
   mic-ref	Reference board with power management for ports
   dell-s14	Dell laptop
   dell-vostro-3500	Dell Vostro 3500 laptop
-  hp		HP laptops with (inverted) mute-LED
   hp-dv7-4000	HP dv-7 4000
   auto		BIOS setup (default)
 
diff --git a/Documentation/sound/alsa/compress_offload.txt b/Documentation/sound/alsa/compress_offload.txt
new file mode 100644
index 00000000000..c83a835350f
--- /dev/null
+++ b/Documentation/sound/alsa/compress_offload.txt
@@ -0,0 +1,188 @@
+            compress_offload.txt
+            =====================
+     Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+        Vinod Koul <vinod.koul@linux.intel.com>
+
+Overview
+
+Since its early days, the ALSA API was defined with PCM support or
+constant-bitrate payloads such as IEC61937 in mind. Arguments and
+returned values in frames are the norm, making it a challenge to
+extend the existing API to compressed data streams.
+
+In recent years, audio digital signal processors (DSPs) have been
+integrated in system-on-chip designs, and DSPs are also integrated in
+audio codecs. Processing compressed data on such DSPs results in a
+dramatic reduction of power consumption compared to host-based
+processing. Support for such hardware has not been very good in Linux,
+mostly because of the lack of a generic API available in the mainline
+kernel.
+
+Rather than requiring a compatibility break with an API change of the
+ALSA PCM interface, a new 'Compressed Data' API is introduced to
+provide a control and data-streaming interface for audio DSPs.
+
+The design of this API was inspired by two years of experience with
+the Intel Moorestown SoC, with many corrections required to upstream
+the API in the mainline kernel instead of the staging tree and make it
+usable by others.
+
+Requirements
+
+The main requirements are:
+
+- Separation between byte counts and time. Compressed formats may have
+  a header per file, per frame, or no header at all. The payload size
+  may vary from frame to frame. As a result, it is not possible to
+  reliably estimate the duration of audio buffers when handling
+  compressed data. Dedicated mechanisms are required to allow for
+  reliable audio-video synchronization, which requires precise
+  reporting of the number of samples rendered at any given time.
+
+- Handling of multiple formats. PCM data only requires a specification
+  of the sampling rate, number of channels and bits per sample. In
+  contrast, compressed data comes in a variety of formats. Audio DSPs
+  may also provide support for a limited number of audio encoders and
+  decoders embedded in firmware, or may support more choices through
+  dynamic download of libraries.
+
+- Focus on main formats. This API provides support for the most
+  popular formats used for audio and video capture and playback. It is
+  likely that, as audio compression technology advances, new formats
+  will be added.
+
+- Handling of multiple configurations. Even for a given format like
+  AAC, some implementations may support AAC multichannel but only
+  HE-AAC stereo. Likewise, WMA10 level M3 may require too much memory
+  and too many CPU cycles. The new API needs to provide a generic way
+  of listing these formats.
+
+- Rendering/Grabbing only. This API does not provide any means of
+  hardware acceleration, where PCM samples are provided back to
+  user-space for additional processing. This API focuses instead on
+  streaming compressed data to a DSP, with the assumption that the
+  decoded samples are routed to a physical output or logical back-end.
+
+- Complexity hiding. Existing user-space multimedia frameworks all
+  have their own enums/structures for each compressed format. This new
+  API assumes the existence of a platform-specific compatibility layer
+  to expose, translate and make use of the capabilities of the audio
+  DSP, e.g. an Android HAL or PulseAudio sinks. By construction,
+  regular applications are not supposed to make use of this API.
+
+
+Design
+
+The new API shares a number of concepts with the PCM API for flow
+control. Start, pause, resume, drain and stop commands have the same
+semantics no matter what the content is.
+
+The concept of a memory ring buffer divided into a set of fragments is
+borrowed from the ALSA PCM API. However, only sizes in bytes can be
+specified.
+
+Seeks/trick modes are assumed to be handled by the host.
+
+The notion of rewinds/forwards is not supported. Data committed to the
+ring buffer cannot be invalidated, except when dropping all buffers.
+
+The Compressed Data API does not make any assumptions on how the data
+is transmitted to the audio DSP. DMA transfers from main memory to an
+embedded audio cluster or to a SPI interface for an external DSP are
+possible. As in the ALSA PCM case, a core set of routines is exposed;
+each driver implementer will have to write support for a set of
+mandatory routines and possibly make use of optional ones.
+
+The main additions are:
+
+- get_caps
+This routine returns the list of audio formats supported. Querying the
+codecs on a capture stream will return encoders; decoders will be
+listed for playback streams.
+
+- get_codec_caps
+For each codec, this routine returns a list of capabilities. The
+intent is to make sure all the capabilities correspond to valid
+settings, and to minimize the risks of configuration failures. For
+example, for a complex codec such as AAC, the number of channels
+supported may depend on a specific profile. If the capabilities were
+exposed with a single descriptor, it might happen that a specific
+combination of profiles/channels/formats is not supported. Likewise,
+since embedded DSPs have limited memory and CPU cycles, it is likely
+that some implementations make the list of capabilities dynamic and
+dependent on existing workloads. In addition to codec settings, this
+routine returns the minimum buffer size handled by the implementation.
+This information can be a function of the DMA buffer sizes, the number
+of bytes required to synchronize, etc., and can be used by user-space
+to determine how much needs to be written in the ring buffer before
+playback can start.
+
+- set_params
+This routine sets the configuration chosen for a specific codec. The
+most important field in the parameters is the codec type; in most
+cases decoders will ignore other fields, while encoders will strictly
+comply with the settings.
+
+- get_params
+This routine returns the actual settings used by the DSP. Changes to
+the settings should remain the exception.
+
+- get_timestamp
+The timestamp becomes a multiple-field structure. It lists the number
+of bytes transferred, the number of samples processed and the number
+of samples rendered/grabbed. All these values can be used to determine
+the average bitrate, to figure out if the ring buffer needs to be
+refilled, or to estimate the delay due to decoding/encoding/IO on the
+DSP.
+
+Note that the list of codecs/profiles/modes was derived from the
+OpenMAX AL specification instead of reinventing the wheel.
+Modifications include:
+- Addition of FLAC and IEC formats
+- Merge of encoder/decoder capabilities
+- Profiles/modes listed as bitmasks to make descriptors more compact
+- Addition of set_params for decoders (missing in OpenMAX AL)
+- Addition of AMR/AMR-WB encoding modes (missing in OpenMAX AL)
+- Addition of format information for WMA
+- Addition of encoding options when required (derived from OpenMAX IL)
+- Addition of rateControlSupported (missing in OpenMAX AL)
+
+Not supported:
+
+- Support for VoIP/circuit-switched calls is not the target of this
+  API. Support for dynamic bit-rate changes would require a tight
+  coupling between the DSP and the host stack, limiting power savings.
+
+- Packet-loss concealment is not supported. This would require an
+  additional interface to let the decoder synthesize data when frames
+  are lost during transmission. This may be added in the future.
+
+- Volume control/routing is not handled by this API. Devices exposing a
+  compressed data interface will be considered as regular ALSA devices;
+  volume changes and routing information will be provided with regular
+  ALSA kcontrols.
+
+- Embedded audio effects. Such effects should be enabled in the same
+  manner, no matter if the input was PCM or compressed.
+
+- Multichannel IEC encoding. Unclear if this is required.
+
+- Encoding/decoding acceleration is not supported, as mentioned
+  above. It is possible to route the output of a decoder to a capture
+  stream, or even implement transcoding capabilities. This routing
+  would be enabled with ALSA kcontrols.
+
+- Audio policy/resource management. This API does not provide any
+  hooks to query the utilization of the audio DSP, nor any preemption
+  mechanisms.
+
+- No notion of underrun/overrun. Since the bytes written are compressed
+  in nature and the data written/read does not translate directly to
+  rendered output in time, this API does not deal with underrun/overrun;
+  that may be handled in a user library.
+
+Credits:
+- Mark Brown and Liam Girdwood for discussions on the need for this API
+- Harsha Priya for her work on the intel_sst compressed API
+- Rakesh Ughreja for valuable feedback
+- Sing Nallasellan, Sikkandar Madar and Prasanna Samaga for
+  demonstrating and quantifying the benefits of audio offload on a
+  real platform.
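
The flow implied by the routines described above (query capabilities, set codec parameters, stream bytes, query the timestamp, drain) can be illustrated with a minimal user-space playback sketch. This is an illustration only: the device node path, ioctl names and structure fields below are assumptions taken from the uapi headers <sound/compress_offload.h> and <sound/compress_params.h>, not definitions made by this document, and a real application would normally use a helper library (for example tinycompress) with proper error handling rather than raw ioctls.

/* Minimal, hypothetical playback sketch for the compressed data API.
 * Assumptions: the stream is exposed as /dev/snd/comprC0D0, and the
 * ioctls/structures match the uapi headers named above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sound/compress_params.h>
#include <sound/compress_offload.h>

int main(int argc, char **argv)
{
	struct snd_compr_caps caps;
	struct snd_compr_params params;
	struct snd_compr_tstamp tstamp;
	char buf[4096];
	ssize_t n;
	int fd, in;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file.mp3>\n", argv[0]);
		return 1;
	}

	fd = open("/dev/snd/comprC0D0", O_WRONLY);	/* assumed device node */
	in = open(argv[1], O_RDONLY);
	if (fd < 0 || in < 0)
		return 1;

	/* get_caps: codecs and fragment limits supported by the DSP */
	if (ioctl(fd, SNDRV_COMPRESS_GET_CAPS, &caps) < 0)
		return 1;

	/* set_params: codec type plus ring-buffer (fragment) geometry;
	 * decoders typically ignore most of the other codec fields. */
	memset(&params, 0, sizeof(params));
	params.codec.id = SND_AUDIOCODEC_MP3;
	params.codec.ch_in = 2;
	params.codec.ch_out = 2;
	params.codec.sample_rate = 44100;	/* assumption: rate in Hz */
	params.codec.bit_rate = 128000;
	params.buffer.fragment_size = caps.min_fragment_size;
	params.buffer.fragments = caps.min_fragments;
	if (ioctl(fd, SNDRV_COMPRESS_SET_PARAMS, &params) < 0)
		return 1;

	/* Pre-fill the ring buffer with one chunk, then start rendering */
	n = read(in, buf, sizeof(buf));
	if (n > 0)
		write(fd, buf, n);
	ioctl(fd, SNDRV_COMPRESS_START);

	/* Keep feeding compressed bytes; write() blocks while the ring
	 * buffer is full, so no byte-to-frame conversion is needed. */
	while ((n = read(in, buf, sizeof(buf))) > 0)
		if (write(fd, buf, n) < 0)
			break;

	/* get_timestamp: bytes consumed vs. samples actually rendered */
	if (ioctl(fd, SNDRV_COMPRESS_TSTAMP, &tstamp) == 0)
		printf("copied %u bytes, rendered %u frames\n",
		       tstamp.copied_total, tstamp.pcm_io_frames);

	/* Let the DSP render everything queued, then tear down */
	ioctl(fd, SNDRV_COMPRESS_DRAIN);
	close(in);
	close(fd);
	return 0;
}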