author     Len Brown <len.brown@intel.com>   2008-10-22 23:57:26 -0400
committer  Len Brown <len.brown@intel.com>   2008-10-23 00:11:07 -0400
commit     057316cc6a5b521b332a1d7ccc871cd60c904c74 (patch)
tree       4333e608da237c73ff69b10878025cca96dcb4c8 /Documentation
parent     3e2dab9a1c2deb03c311eb3f83466009147ed4d3 (diff)
parent     2515ddc6db8eb49a79f0fe5e67ff09ac7c81eab4 (diff)
Merge branch 'linus' into test
Conflicts:
	MAINTAINERS
	arch/x86/kernel/acpi/boot.c
	arch/x86/kernel/acpi/sleep.c
	drivers/acpi/Kconfig
	drivers/pnp/Makefile
	drivers/pnp/quirks.c

Signed-off-by: Len Brown <len.brown@intel.com>
Diffstat (limited to 'Documentation')
-rw-r--r--  Documentation/00-INDEX | 7
-rw-r--r--  Documentation/ABI/stable/sysfs-driver-usb-usbtmc | 62
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-usb | 16
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-usb-devices-usbsevseg | 43
-rw-r--r--  Documentation/ABI/testing/sysfs-class-regulator | 55
-rw-r--r--  Documentation/ABI/testing/sysfs-profiling | 13
-rw-r--r--  Documentation/DMA-API.txt | 2
-rw-r--r--  Documentation/DocBook/Makefile | 2
-rw-r--r--  Documentation/DocBook/gadget.tmpl | 3
-rw-r--r--  Documentation/DocBook/kernel-api.tmpl | 5
-rw-r--r--  Documentation/DocBook/kernel-hacking.tmpl | 2
-rw-r--r--  Documentation/DocBook/mac80211.tmpl | 12
-rw-r--r--  Documentation/DocBook/procfs-guide.tmpl | 29
-rw-r--r--  Documentation/DocBook/procfs_example.c | 20
-rw-r--r--  Documentation/DocBook/videobook.tmpl | 1654
-rw-r--r--  Documentation/HOWTO | 4
-rw-r--r--  Documentation/MSI-HOWTO.txt | 6
-rw-r--r--  Documentation/PCI/pci.txt | 4
-rw-r--r--  Documentation/PCI/pcieaer-howto.txt | 11
-rw-r--r--  Documentation/RCU/checklist.txt | 2
-rw-r--r--  Documentation/RCU/rcuref.txt | 16
-rw-r--r--  Documentation/RCU/whatisRCU.txt | 2
-rw-r--r--  Documentation/SAK.txt | 2
-rw-r--r--  Documentation/SELinux.txt | 27
-rw-r--r--  Documentation/SubmitChecklist | 3
-rw-r--r--  Documentation/SubmittingDrivers | 2
-rw-r--r--  Documentation/SubmittingPatches | 11
-rw-r--r--  Documentation/blackfin/kgdb.txt | 155
-rw-r--r--  Documentation/block/data-integrity.txt | 4
-rw-r--r--  Documentation/block/deadline-iosched.txt | 14
-rw-r--r--  Documentation/cdrom/ide-cd | 3
-rw-r--r--  Documentation/cgroups/cgroups.txt (renamed from Documentation/cgroups.txt) | 0
-rw-r--r--  Documentation/cgroups/freezer-subsystem.txt | 99
-rw-r--r--  Documentation/controllers/memory.txt | 24
-rw-r--r--  Documentation/cpusets.txt | 2
-rw-r--r--  Documentation/cris/README | 2
-rw-r--r--  Documentation/development-process/1.Intro | 274
-rw-r--r--  Documentation/development-process/2.Process | 459
-rw-r--r--  Documentation/development-process/3.Early-stage | 195
-rw-r--r--  Documentation/development-process/4.Coding | 384
-rw-r--r--  Documentation/development-process/5.Posting | 278
-rw-r--r--  Documentation/development-process/6.Followthrough | 202
-rw-r--r--  Documentation/development-process/7.AdvancedTopics | 173
-rw-r--r--  Documentation/development-process/8.Conclusion | 74
-rw-r--r--  Documentation/devices.txt | 3
-rw-r--r--  Documentation/dontdiff | 59
-rw-r--r--  Documentation/fb/intelfb.txt | 1
-rw-r--r--  Documentation/fb/uvesafb.txt | 4
-rw-r--r--  Documentation/fb/viafb.modes | 870
-rw-r--r--  Documentation/fb/viafb.txt | 214
-rw-r--r--  Documentation/feature-removal-schedule.txt | 39
-rw-r--r--  Documentation/filesystems/autofs4-mount-control.txt | 393
-rw-r--r--  Documentation/filesystems/ext3.txt | 8
-rw-r--r--  Documentation/filesystems/ext4.txt | 51
-rw-r--r--  Documentation/filesystems/fiemap.txt | 228
-rw-r--r--  Documentation/filesystems/nfsroot.txt | 2
-rw-r--r--  Documentation/filesystems/ocfs2.txt | 6
-rw-r--r--  Documentation/filesystems/proc.txt | 114
-rw-r--r--  Documentation/filesystems/ramfs-rootfs-initramfs.txt | 2
-rw-r--r--  Documentation/filesystems/ubifs.txt | 9
-rw-r--r--  Documentation/gpio.txt | 9
-rw-r--r--  Documentation/hwmon/adt7470 | 76
-rw-r--r--  Documentation/hwmon/it87 | 4
-rw-r--r--  Documentation/hwmon/lm85 | 10
-rw-r--r--  Documentation/hwmon/lm87 | 9
-rw-r--r--  Documentation/hwmon/lm90 | 45
-rw-r--r--  Documentation/hwmon/pc87360 | 7
-rw-r--r--  Documentation/hwmon/pc87427 | 2
-rw-r--r--  Documentation/hwmon/w83781d | 37
-rw-r--r--  Documentation/hwmon/w83791d | 43
-rw-r--r--  Documentation/i2c/busses/i2c-viapro | 8
-rw-r--r--  Documentation/i2c/dev-interface | 110
-rw-r--r--  Documentation/i2c/smbus-protocol | 4
-rw-r--r--  Documentation/i2c/writing-clients | 4
-rw-r--r--  Documentation/ia64/kvm.txt | 9
-rw-r--r--  Documentation/ioctl-number.txt | 3
-rw-r--r--  Documentation/kernel-doc-nano-HOWTO.txt | 4
-rw-r--r--  Documentation/kernel-parameters.txt | 123
-rw-r--r--  Documentation/kobject.txt | 4
-rw-r--r--  Documentation/laptops/disk-shock-protection.txt | 149
-rw-r--r--  Documentation/markers.txt | 10
-rw-r--r--  Documentation/mtd/nand_ecc.txt | 714
-rw-r--r--  Documentation/networking/LICENSE.qlge | 46
-rw-r--r--  Documentation/networking/can.txt | 44
-rw-r--r--  Documentation/networking/cs89x0.txt | 4
-rw-r--r--  Documentation/networking/multiqueue.txt | 54
-rw-r--r--  Documentation/networking/phonet.txt | 175
-rw-r--r--  Documentation/networking/regulatory.txt | 194
-rw-r--r--  Documentation/networking/tproxy.txt | 85
-rw-r--r--  Documentation/networking/vortex.txt | 9
-rw-r--r--  Documentation/pcmcia/driver-changes.txt | 6
-rw-r--r--  Documentation/power/regulator/machine.txt | 140
-rw-r--r--  Documentation/power/regulator/regulator.txt | 8
-rw-r--r--  Documentation/power/s2ram.txt | 18
-rw-r--r--  Documentation/powerpc/00-INDEX | 4
-rw-r--r--  Documentation/powerpc/dts-bindings/fsl/83xx-512x-pci.txt | 40
-rw-r--r--  Documentation/powerpc/dts-bindings/fsl/8xxx_gpio.txt | 40
-rw-r--r--  Documentation/powerpc/dts-bindings/fsl/dma.txt | 13
-rw-r--r--  Documentation/powerpc/dts-bindings/fsl/ssi.txt | 23
-rw-r--r--  Documentation/powerpc/ppc_htab.txt | 118
-rw-r--r--  Documentation/powerpc/smp.txt | 34
-rw-r--r--  Documentation/rfkill.txt | 32
-rw-r--r--  Documentation/s390/CommonIO | 11
-rw-r--r--  Documentation/scheduler/sched-design-CFS.txt | 395
-rw-r--r--  Documentation/scsi/ChangeLog.megaraid | 6
-rw-r--r--  Documentation/scsi/scsi_fc_transport.txt | 36
-rw-r--r--  Documentation/sound/alsa/ALSA-Configuration.txt | 62
-rw-r--r--  Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl | 65
-rw-r--r--  Documentation/sound/alsa/soc/dapm.txt | 12
-rw-r--r--  Documentation/sparc/sbus_drivers.txt | 309
-rw-r--r--  Documentation/spi/pxa2xx | 34
-rw-r--r--  Documentation/sysctl/kernel.txt | 1
-rw-r--r--  Documentation/sysrq.txt | 4
-rw-r--r--  Documentation/timers/00-INDEX | 10
-rw-r--r--  Documentation/timers/hpet.txt (renamed from Documentation/hpet.txt) | 43
-rw-r--r--  Documentation/tracepoints.txt | 101
-rw-r--r--  Documentation/tracers/mmiotrace.txt | 5
-rw-r--r--  Documentation/usb/anchors.txt | 17
-rw-r--r--  Documentation/usb/misc_usbsevseg.txt | 46
-rw-r--r--  Documentation/usb/power-management.txt | 8
-rw-r--r--  Documentation/video4linux/CARDLIST.au0828 | 2
-rw-r--r--  Documentation/video4linux/CARDLIST.bttv | 1
-rw-r--r--  Documentation/video4linux/CARDLIST.cx23885 | 2
-rw-r--r--  Documentation/video4linux/CARDLIST.cx88 | 8
-rw-r--r--  Documentation/video4linux/CARDLIST.em28xx | 4
-rw-r--r--  Documentation/video4linux/CARDLIST.saa7134 | 8
-rw-r--r--  Documentation/video4linux/CARDLIST.tuner | 2
-rw-r--r--  Documentation/video4linux/gspca.txt | 28
-rw-r--r--  Documentation/video4linux/m5602.txt | 12
-rw-r--r--  Documentation/video4linux/soc-camera.txt | 120
-rw-r--r--  Documentation/vm/unevictable-lru.txt | 615
-rw-r--r--  Documentation/w1/00-INDEX | 2
-rw-r--r--  Documentation/w1/masters/ds2490 | 52
-rw-r--r--  Documentation/w1/slaves/00-INDEX | 4
-rw-r--r--  Documentation/w1/slaves/w1_therm | 41
-rw-r--r--  Documentation/w1/w1.generic | 11
-rw-r--r--  Documentation/x86/00-INDEX | 4
-rw-r--r--  Documentation/x86/boot.txt (renamed from Documentation/x86/i386/boot.txt) | 2
-rw-r--r--  Documentation/x86/mtrr.txt (renamed from Documentation/mtrr.txt) | 4
-rw-r--r--  Documentation/x86/pat.txt | 54
-rw-r--r--  Documentation/x86/usb-legacy-support.txt (renamed from Documentation/x86/i386/usb-legacy-support.txt) | 0
-rw-r--r--  Documentation/x86/x86_64/boot-options.txt | 4
-rw-r--r--  Documentation/x86/zero-page.txt (renamed from Documentation/x86/i386/zero-page.txt) | 0
143 files changed, 7992 insertions, 2984 deletions
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index 5b5aba404aa..7286ad090db 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -21,6 +21,9 @@ Changes
- list of changes that break older software packages.
CodingStyle
- how the boss likes the C code in the kernel to look.
+development-process/
+ - An extended tutorial on how to work with the kernel development
+ process.
DMA-API.txt
- DMA API, pci_ API & extensions for non-consistent memory machines.
DMA-ISA-LPC.txt
@@ -159,8 +162,6 @@ hayes-esp.txt
- info on using the Hayes ESP serial driver.
highuid.txt
- notes on the change from 16 bit to 32 bit user/group IDs.
-hpet.txt
- - High Precision Event Timer Driver for Linux.
timers/
- info on the timer related topics
hw_random.txt
@@ -251,8 +252,6 @@ mono.txt
- how to execute Mono-based .NET binaries with the help of BINFMT_MISC.
moxa-smartio
- file with info on installing/using Moxa multiport serial driver.
-mtrr.txt
- - how to use PPro Memory Type Range Registers to increase performance.
mutex-design.txt
- info on the generic mutex subsystem.
namespaces/
diff --git a/Documentation/ABI/stable/sysfs-driver-usb-usbtmc b/Documentation/ABI/stable/sysfs-driver-usb-usbtmc
new file mode 100644
index 00000000000..9a75fb22187
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-driver-usb-usbtmc
@@ -0,0 +1,62 @@
+What: /sys/bus/usb/drivers/usbtmc/devices/*/interface_capabilities
+What: /sys/bus/usb/drivers/usbtmc/devices/*/device_capabilities
+Date: August 2008
+Contact: Greg Kroah-Hartman <gregkh@suse.de>
+Description:
+ These files show the various USB TMC capabilities as described
+ by the device itself. The full description of the bitfields
+ can be found in the USB TMC documents from the USB-IF entitled
+ "Universal Serial Bus Test and Measurement Class Specification
+ (USBTMC) Revision 1.0" section 4.2.1.8.
+
+ The files are read only.
+
+
+What: /sys/bus/usb/drivers/usbtmc/devices/*/usb488_interface_capabilities
+What: /sys/bus/usb/drivers/usbtmc/devices/*/usb488_device_capabilities
+Date: August 2008
+Contact: Greg Kroah-Hartman <gregkh@suse.de>
+Description:
+ These files show the various USB TMC capabilities as described
+ by the device itself. The full description of the bitfields
+ can be found in the USB TMC documents from the USB-IF entitled
+ "Universal Serial Bus Test and Measurement Class, Subclass
+ USB488 Specification (USBTMC-USB488) Revision 1.0" section
+ 4.2.2.
+
+ The files are read only.
+
+
+What: /sys/bus/usb/drivers/usbtmc/devices/*/TermChar
+Date: August 2008
+Contact: Greg Kroah-Hartman <gregkh@suse.de>
+Description:
+ This file is the TermChar value to be sent to the USB TMC
+ device as described by the document, "Universal Serial Bus Test
+ and Measurement Class Specification
+ (USBTMC) Revision 1.0" as published by the USB-IF.
+
+ Note that the TermCharEnabled file determines if this value is
+ sent to the device or not.
+
+
+What: /sys/bus/usb/drivers/usbtmc/devices/*/TermCharEnabled
+Date: August 2008
+Contact: Greg Kroah-Hartman <gregkh@suse.de>
+Description:
+ This file determines if the TermChar is to be sent to the
+ device on every transaction or not. For more details about
+ this, please see the document, "Universal Serial Bus Test and
+ Measurement Class Specification (USBTMC) Revision 1.0" as
+ published by the USB-IF.
+
+
+What: /sys/bus/usb/drivers/usbtmc/devices/*/auto_abort
+Date: August 2008
+Contact: Greg Kroah-Hartman <gregkh@suse.de>
+Description:
+ This file determines if the transaction of the USB TMC
+ device is to be automatically aborted if there is any error.
+ For more details about this, please see the document,
+ "Universal Serial Bus Test and Measurement Class Specification
+ (USBTMC) Revision 1.0" as published by the USB-IF.
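
The attributes added above are ordinary sysfs text files, so they can be exercised with plain file I/O. The sketch below is illustrative only; the instance directory name (2-1:1.0) and the choice of newline as the termination character are assumptions, not part of the patch.

    #include <stdio.h>

    /* Hypothetical instance directory; real systems enumerate
     * /sys/bus/usb/drivers/usbtmc/devices/ to find the bound interface. */
    #define USBTMC_DIR "/sys/bus/usb/drivers/usbtmc/devices/2-1:1.0"

    int main(void)
    {
        char buf[64];
        FILE *f;

        /* The capability attributes are read-only bitfields. */
        f = fopen(USBTMC_DIR "/interface_capabilities", "r");
        if (f) {
            if (fgets(buf, sizeof(buf), f))
                printf("interface_capabilities: %s", buf);
            fclose(f);
        }

        /* Set a termination character (here newline) and enable it. */
        f = fopen(USBTMC_DIR "/TermChar", "w");
        if (f) {
            fputs("\n", f);
            fclose(f);
        }
        f = fopen(USBTMC_DIR "/TermCharEnabled", "w");
        if (f) {
            fputs("1", f);
            fclose(f);
        }
        return 0;
    }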
diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb
index 11a3c1682ce..df6c8a0159f 100644
--- a/Documentation/ABI/testing/sysfs-bus-usb
+++ b/Documentation/ABI/testing/sysfs-bus-usb
@@ -85,3 +85,19 @@ Description:
Users:
PowerTOP <power@bughost.org>
http://www.lesswatts.org/projects/powertop/
+
+What: /sys/bus/usb/device/<busnum>-<devnum>...:<config num>-<interface num>/supports_autosuspend
+Date: January 2008
+KernelVersion: 2.6.27
+Contact: Sarah Sharp <sarah.a.sharp@intel.com>
+Description:
+ When read, this file returns 1 if the interface driver
+ for this interface supports autosuspend. It also
+ returns 1 if no driver has claimed this interface, as an
+ unclaimed interface will not stop the device from being
+ autosuspended if all other interface drivers are idle.
+ The file returns 0 if autosuspend support has not been
+ added to the driver.
+Users:
+ USB PM tool
+ git://git.moblin.org/users/sarah/usb-pm-tool/
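
Because supports_autosuspend simply contains 0 or 1, a userspace check reduces to reading a single character. A minimal sketch, assuming a hypothetical interface path that follows the <busnum>-<devnum>...:<config num>-<interface num> pattern documented above:

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical interface directory; adjust to the real device. */
        const char *attr =
            "/sys/bus/usb/devices/1-2:1.0/supports_autosuspend";
        FILE *f = fopen(attr, "r");
        int c;

        if (!f) {
            perror(attr);
            return 1;
        }
        c = fgetc(f);
        fclose(f);

        /* '1': the bound driver supports autosuspend (or the interface is
         * unclaimed); '0': the driver has no autosuspend support yet. */
        printf("supports_autosuspend = %c\n", c);
        return 0;
    }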
diff --git a/Documentation/ABI/testing/sysfs-bus-usb-devices-usbsevseg b/Documentation/ABI/testing/sysfs-bus-usb-devices-usbsevseg
new file mode 100644
index 00000000000..cb830df8777
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-usb-devices-usbsevseg
@@ -0,0 +1,43 @@
+Where: /sys/bus/usb/.../powered
+Date: August 2008
+Kernel Version: 2.6.26
+Contact: Harrison Metzger <harrisonmetz@gmail.com>
+Description: Controls whether the device's display will be powered.
+ A value of 0 is off and a non-zero value is on.
+
+Where: /sys/bus/usb/.../mode_msb
+Where: /sys/bus/usb/.../mode_lsb
+Date: August 2008
+Kernel Version: 2.6.26
+Contact: Harrison Metzger <harrisonmetz@gmail.com>
+Description: Controls the device's display mode.
+ For a 6 character display the values are
+ MSB 0x06; LSB 0x3F, and
+ for an 8 character display the values are
+ MSB 0x08; LSB 0xFF.
+
+Where: /sys/bus/usb/.../textmode
+Date: August 2008
+Kernel Version: 2.6.26
+Contact: Harrison Metzger <harrisonmetz@gmail.com>
+Description: Controls the way the device interprets its text buffer.
+ raw: each character controls its segment manually
+ hex: each character is between 0-15
+ ascii: each character is between '0'-'9' and 'A'-'F'.
+
+Where: /sys/bus/usb/.../text
+Date: August 2008
+Kernel Version: 2.6.26
+Contact: Harrison Metzger <harrisonmetz@gmail.com>
+Description: The text (or data) for the device to display
+
+Where: /sys/bus/usb/.../decimals
+Date: August 2008
+Kernel Version: 2.6.26
+Contact: Harrison Metzger <harrisonmetz@gmail.com>
+Description: Controls the decimal places on the device.
+ To set the nth decimal place, give this field
+ the value of 10 ** n. Assume this field has
+ the value k and has 1 or more decimal places set;
+ to set the mth place (where m is not already set),
+ change this field's value to k + 10 ** m.
\ No newline at end of file
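
The 10 ** n encoding above is easiest to see with a worked example: to light the decimal points after digits 1 and 3 you would write 10**1 + 10**3 = 1010 to the decimals attribute. A small sketch of that arithmetic (the helper name is invented for illustration):

    #include <stdio.h>

    /* Mirrors the encoding described above: each decimal point n
     * contributes 10**n to the value written to 'decimals'. */
    static unsigned long decimals_value(const int *places, int count)
    {
        unsigned long v = 0;
        int i;

        for (i = 0; i < count; i++) {
            unsigned long bit = 1;
            int n;

            for (n = 0; n < places[i]; n++)
                bit *= 10;          /* 10**places[i] */
            v += bit;
        }
        return v;
    }

    int main(void)
    {
        int places[] = { 1, 3 };    /* points after digits 1 and 3 */

        /* Prints 1010: write this value to .../decimals. */
        printf("%lu\n", decimals_value(places, 2));
        return 0;
    }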
diff --git a/Documentation/ABI/testing/sysfs-class-regulator b/Documentation/ABI/testing/sysfs-class-regulator
index 79a4a75b2d2..3731f6f29bc 100644
--- a/Documentation/ABI/testing/sysfs-class-regulator
+++ b/Documentation/ABI/testing/sysfs-class-regulator
@@ -1,7 +1,7 @@
What: /sys/class/regulator/.../state
Date: April 2008
KernelVersion: 2.6.26
-Contact: Liam Girdwood <lg@opensource.wolfsonmicro.com>
+Contact: Liam Girdwood <lrg@slimlogic.co.uk>
Description:
Each regulator directory will contain a field called
state. This holds the regulator output state.
@@ -27,7 +27,7 @@ Description:
What: /sys/class/regulator/.../type
Date: April 2008
KernelVersion: 2.6.26
-Contact: Liam Girdwood <lg@opensource.wolfsonmicro.com>
+Contact: Liam Girdwood <lrg@slimlogic.co.uk>
Description:
Each regulator directory will contain a field called
type. This holds the regulator type.
@@ -51,7 +51,7 @@ Description:
What: /sys/class/regulator/.../microvolts
Date: April 2008
KernelVersion: 2.6.26
-Contact: Liam Girdwood <lg@opensource.wolfsonmicro.com>
+Contact: Liam Girdwood <lrg@slimlogic.co.uk>
Description:
Each regulator directory will contain a field called
microvolts. This holds the regulator output voltage setting
@@ -65,7 +65,7 @@ Description:
What: /sys/class/regulator/.../microamps
Date: April 2008
KernelVersion: 2.6.26
-Contact: Liam Girdwood <lg@opensource.wolfsonmicro.com>
+Contact: Liam Girdwood <lrg@slimlogic.co.uk>
Description:
Each regulator directory will contain a field called
microamps. This holds the regulator output current limit
@@ -79,7 +79,7 @@ Description:
What: /sys/class/regulator/.../opmode
Date: April 2008
KernelVersion: 2.6.26
-Contact: Liam Girdwood <lg@opensource.wolfsonmicro.com>
+Contact: Liam Girdwood <lrg@slimlogic.co.uk>
Description:
Each regulator directory will contain a field called
opmode. This holds the regulator operating mode setting.
@@ -102,7 +102,7 @@ Description:
What: /sys/class/regulator/.../min_microvolts
Date: April 2008
KernelVersion: 2.6.26
-Contact: Liam Girdwood <lg@opensource.wolfsonmicro.com>
+Contact: Liam Girdwood <lrg@slimlogic.co.uk>
Description:
Each regulator directory will contain a field called
min_microvolts. This holds the minimum safe working regulator
@@ -116,7 +116,7 @@ Description:
What: /sys/class/regulator/.../max_microvolts
Date: April 2008
KernelVersion: 2.6.26
-Contact: Liam Girdwood <lg@opensource.wolfsonmicro.com>
+Contact: Liam Girdwood <lrg@slimlogic.co.uk>
Description:
Each regulator directory will contain a field called
max_microvolts. This holds the maximum safe working regulator
@@ -130,7 +130,7 @@ Description:
What: /sys/class/regulator/.../min_microamps
Date: April 2008
KernelVersion: 2.6.26
-Contact: Liam Girdwood <lg@opensource.wolfsonmicro.com>
+Contact: Liam Girdwood <lrg@slimlogic.co.uk>
Description:
Each regulator directory will contain a field called
min_microamps. This holds the minimum safe working regulator
@@ -145,7 +145,7 @@ Description:
What: /sys/class/regulator/.../max_microamps
Date: April 2008
KernelVersion: 2.6.26
-Contact: Liam Girdwood <lg@opensource.wolfsonmicro.com>
+Contact: Liam Girdwood <lrg@slimlogic.co.uk>
Description:
Each regulator directory will contain a field called
max_microamps. This holds the maximum safe working regulator
@@ -157,10 +157,23 @@ Description:
platform code.
+What: /sys/class/regulator/.../name
+Date: October 2008
+KernelVersion: 2.6.28
+Contact: Liam Girdwood <lrg@slimlogic.co.uk>
+Description:
+ Each regulator directory will contain a field called
+ name. This holds a string identifying the regulator for
+ display purposes.
+
+ NOTE: this will be empty if no suitable name is provided
+ by platform or regulator drivers.
+
+
What: /sys/class/regulator/.../num_users
Date: April 2008
KernelVersion: 2.6.26
-Contact: Liam Girdwood <lg@opensource.wolfsonmicro.com>
+Contact: Liam Girdwood <lrg@slimlogic.co.uk>
Description:
Each regulator directory will contain a field called
num_users. This holds the number of consumer devices that
@@ -170,7 +183,7 @@ Description:
What: /sys/class/regulator/.../requested_microamps
Date: April 2008
KernelVersion: 2.6.26
-Contact: Liam Girdwood <lg@opensource.wolfsonmicro.com>
+Contact: Liam Girdwood <lrg@slimlogic.co.uk>
Description:
Each regulator directory will contain a field called
requested_microamps. This holds the total requested load
@@ -181,7 +194,7 @@ Description:
What: /sys/class/regulator/.../parent
Date: April 2008
KernelVersion: 2.6.26
-Contact: Liam Girdwood <lg@opensource.wolfsonmicro.com>
+Contact: Liam Girdwood <lrg@slimlogic.co.uk>
Description:
Some regulator directories will contain a link called parent.
This points to the parent or supply regulator if one exists.
@@ -189,7 +202,7 @@ Description:
What: /sys/class/regulator/.../suspend_mem_microvolts
Date: May 2008
KernelVersion: 2.6.26
-Contact: Liam Girdwood <lg@opensource.wolfsonmicro.com>
+Contact: Liam Girdwood <lrg@slimlogic.co.uk>
Description:
Each regulator directory will contain a field called
suspend_mem_microvolts. This holds the regulator output
@@ -203,7 +216,7 @@ Description:
What: /sys/class/regulator/.../suspend_disk_microvolts
Date: May 2008
KernelVersion: 2.6.26
-Contact: Liam Girdwood <lg@opensource.wolfsonmicro.com>
+Contact: Liam Girdwood <lrg@slimlogic.co.uk>
Description:
Each regulator directory will contain a field called
suspend_disk_microvolts. This holds the regulator output
@@ -217,7 +230,7 @@ Description:
What: /sys/class/regulator/.../suspend_standby_microvolts
Date: May 2008
KernelVersion: 2.6.26
-Contact: Liam Girdwood <lg@opensource.wolfsonmicro.com>
+Contact: Liam Girdwood <lrg@slimlogic.co.uk>
Description:
Each regulator directory will contain a field called
suspend_standby_microvolts. This holds the regulator output
@@ -231,7 +244,7 @@ Description:
What: /sys/class/regulator/.../suspend_mem_mode
Date: May 2008
KernelVersion: 2.6.26
-Contact: Liam Girdwood <lg@opensource.wolfsonmicro.com>
+Contact: Liam Girdwood <lrg@slimlogic.co.uk>
Description:
Each regulator directory will contain a field called
suspend_mem_mode. This holds the regulator operating mode
@@ -245,7 +258,7 @@ Description:
What: /sys/class/regulator/.../suspend_disk_mode
Date: May 2008
KernelVersion: 2.6.26
-Contact: Liam Girdwood <lg@opensource.wolfsonmicro.com>
+Contact: Liam Girdwood <lrg@slimlogic.co.uk>
Description:
Each regulator directory will contain a field called
suspend_disk_mode. This holds the regulator operating mode
@@ -258,7 +271,7 @@ Description:
What: /sys/class/regulator/.../suspend_standby_mode
Date: May 2008
KernelVersion: 2.6.26
-Contact: Liam Girdwood <lg@opensource.wolfsonmicro.com>
+Contact: Liam Girdwood <lrg@slimlogic.co.uk>
Description:
Each regulator directory will contain a field called
suspend_standby_mode. This holds the regulator operating mode
@@ -272,7 +285,7 @@ Description:
What: /sys/class/regulator/.../suspend_mem_state
Date: May 2008
KernelVersion: 2.6.26
-Contact: Liam Girdwood <lg@opensource.wolfsonmicro.com>
+Contact: Liam Girdwood <lrg@slimlogic.co.uk>
Description:
Each regulator directory will contain a field called
suspend_mem_state. This holds the regulator operating state
@@ -287,7 +300,7 @@ Description:
What: /sys/class/regulator/.../suspend_disk_state
Date: May 2008
KernelVersion: 2.6.26
-Contact: Liam Girdwood <lg@opensource.wolfsonmicro.com>
+Contact: Liam Girdwood <lrg@slimlogic.co.uk>
Description:
Each regulator directory will contain a field called
suspend_disk_state. This holds the regulator operating state
@@ -302,7 +315,7 @@ Description:
What: /sys/class/regulator/.../suspend_standby_state
Date: May 2008
KernelVersion: 2.6.26
-Contact: Liam Girdwood <lg@opensource.wolfsonmicro.com>
+Contact: Liam Girdwood <lrg@slimlogic.co.uk>
Description:
Each regulator directory will contain a field called
suspend_standby_state. This holds the regulator operating
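
All of the regulator attributes above, including the new name field, read back as short text strings. A minimal sketch of dumping a few of them, assuming a hypothetical regulator.0 instance under /sys/class/regulator/:

    #include <stdio.h>

    /* Hypothetical instance; the class directory holds one
     * subdirectory per registered regulator. */
    #define REG_DIR "/sys/class/regulator/regulator.0"

    static void show(const char *attr)
    {
        char path[256], buf[64];
        FILE *f;

        snprintf(path, sizeof(path), REG_DIR "/%s", attr);
        f = fopen(path, "r");
        if (f && fgets(buf, sizeof(buf), f))
            printf("%-12s %s", attr, buf);
        if (f)
            fclose(f);
    }

    int main(void)
    {
        /* 'name' is the new attribute added above; the others are
         * existing read-only fields documented in this file. */
        show("name");
        show("state");
        show("type");
        show("microvolts");
        return 0;
    }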
diff --git a/Documentation/ABI/testing/sysfs-profiling b/Documentation/ABI/testing/sysfs-profiling
new file mode 100644
index 00000000000..b02d8b8c173
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-profiling
@@ -0,0 +1,13 @@
+What: /sys/kernel/profile
+Date: September 2008
+Contact: Dave Hansen <dave@linux.vnet.ibm.com>
+Description:
+ /sys/kernel/profile is the runtime equivalent
+ of the boot-time profile= option.
+
+ You can get the same effect running:
+
+ echo 2 > /sys/kernel/profile
+
+ as you would by issuing profile=2 on the boot
+ command line.
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index d8b63d164e4..b8e86460046 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -337,7 +337,7 @@ With scatterlists, you use the resulting mapping like this:
int i, count = dma_map_sg(dev, sglist, nents, direction);
struct scatterlist *sg;
- for (i = 0, sg = sglist; i < count; i++, sg++) {
+ for_each_sg(sglist, sg, count, i) {
hw_address[i] = sg_dma_address(sg);
hw_len[i] = sg_dma_len(sg);
}
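
The hunk above replaces the open-coded sg++ walk with the for_each_sg() iterator, which also handles chained scatterlists. A minimal kernel-side sketch of the resulting pattern; dev, sglist, nents and the hw_address/hw_len arrays are assumed to be supplied by the calling driver:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/scatterlist.h>

    /* Sketch only: all parameters come from the calling driver;
     * hw_address[]/hw_len[] stand in for device registers. */
    static int map_sg_example(struct device *dev, struct scatterlist *sglist,
                              int nents, dma_addr_t *hw_address,
                              unsigned int *hw_len)
    {
        struct scatterlist *sg;
        int i, count;

        count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
        if (!count)
            return -ENOMEM;

        /* for_each_sg() follows chained scatterlists correctly,
         * unlike the old "sg++" pointer walk. */
        for_each_sg(sglist, sg, count, i) {
            hw_address[i] = sg_dma_address(sg);
            hw_len[i] = sg_dma_len(sg);
        }

        /* ... program the hardware, then eventually: */
        dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
        return 0;
    }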
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index 1615350b7b5..fabc06466b9 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -6,7 +6,7 @@
# To add a new book the only step required is to add the book to the
# list of DOCBOOKS.
-DOCBOOKS := wanbook.xml z8530book.xml mcabook.xml videobook.xml \
+DOCBOOKS := wanbook.xml z8530book.xml mcabook.xml \
kernel-hacking.xml kernel-locking.xml deviceiobook.xml \
procfs-guide.xml writing_usb_driver.xml networking.xml \
kernel-api.xml filesystems.xml lsm.xml usb.xml kgdb.xml \
diff --git a/Documentation/DocBook/gadget.tmpl b/Documentation/DocBook/gadget.tmpl
index ea3bc9565e6..6ef2f0073e5 100644
--- a/Documentation/DocBook/gadget.tmpl
+++ b/Documentation/DocBook/gadget.tmpl
@@ -557,6 +557,9 @@ Near-term plans include converting all of them, except for "gadgetfs".
</para>
!Edrivers/usb/gadget/f_acm.c
+!Edrivers/usb/gadget/f_ecm.c
+!Edrivers/usb/gadget/f_subset.c
+!Edrivers/usb/gadget/f_obex.c
!Edrivers/usb/gadget/f_serial.c
</sect1>
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index b7b1482f6e0..9d0058e788e 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -283,6 +283,7 @@ X!Earch/x86/kernel/mca_32.c
<chapter id="security">
<title>Security Framework</title>
!Isecurity/security.c
+!Esecurity/inode.c
</chapter>
<chapter id="audit">
@@ -364,6 +365,10 @@ X!Edrivers/pnp/system.c
!Eblock/blk-barrier.c
!Eblock/blk-tag.c
!Iblock/blk-tag.c
+!Eblock/blk-integrity.c
+!Iblock/blktrace.c
+!Iblock/genhd.c
+!Eblock/genhd.c
</chapter>
<chapter id="chrdev">
diff --git a/Documentation/DocBook/kernel-hacking.tmpl b/Documentation/DocBook/kernel-hacking.tmpl
index 4c63e586416..ae15d55350e 100644
--- a/Documentation/DocBook/kernel-hacking.tmpl
+++ b/Documentation/DocBook/kernel-hacking.tmpl
@@ -1105,7 +1105,7 @@ static struct block_device_operations opt_fops = {
</listitem>
<listitem>
<para>
- Function names as strings (__FUNCTION__).
+ Function names as strings (__func__).
</para>
</listitem>
<listitem>
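
__func__ is the C99 predefined identifier that replaces the gcc-specific __FUNCTION__; both expand to the name of the enclosing function as a string. A trivial userspace illustration (the function name is made up):

    #include <stdio.h>

    /* __func__ expands to "start_engine" here; kernel code uses it
     * the same way inside printk()/pr_debug() messages. */
    static void start_engine(void)
    {
        printf("%s: starting\n", __func__);
    }

    int main(void)
    {
        start_engine();
        return 0;
    }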
diff --git a/Documentation/DocBook/mac80211.tmpl b/Documentation/DocBook/mac80211.tmpl
index b651e0a4b1c..77c3c202991 100644
--- a/Documentation/DocBook/mac80211.tmpl
+++ b/Documentation/DocBook/mac80211.tmpl
@@ -145,7 +145,6 @@ usage should require reading the full document.
this though and the recommendation to allow only a single
interface in STA mode at first!
</para>
-!Finclude/net/mac80211.h ieee80211_if_types
!Finclude/net/mac80211.h ieee80211_if_init_conf
!Finclude/net/mac80211.h ieee80211_if_conf
</chapter>
@@ -177,8 +176,7 @@ usage should require reading the full document.
<title>functions/definitions</title>
!Finclude/net/mac80211.h ieee80211_rx_status
!Finclude/net/mac80211.h mac80211_rx_flags
-!Finclude/net/mac80211.h ieee80211_tx_control
-!Finclude/net/mac80211.h ieee80211_tx_status_flags
+!Finclude/net/mac80211.h ieee80211_tx_info
!Finclude/net/mac80211.h ieee80211_rx
!Finclude/net/mac80211.h ieee80211_rx_irqsafe
!Finclude/net/mac80211.h ieee80211_tx_status
@@ -189,12 +187,11 @@ usage should require reading the full document.
!Finclude/net/mac80211.h ieee80211_ctstoself_duration
!Finclude/net/mac80211.h ieee80211_generic_frame_duration
!Finclude/net/mac80211.h ieee80211_get_hdrlen_from_skb
-!Finclude/net/mac80211.h ieee80211_get_hdrlen
+!Finclude/net/mac80211.h ieee80211_hdrlen
!Finclude/net/mac80211.h ieee80211_wake_queue
!Finclude/net/mac80211.h ieee80211_stop_queue
-!Finclude/net/mac80211.h ieee80211_start_queues
-!Finclude/net/mac80211.h ieee80211_stop_queues
!Finclude/net/mac80211.h ieee80211_wake_queues
+!Finclude/net/mac80211.h ieee80211_stop_queues
</sect1>
</chapter>
@@ -230,8 +227,7 @@ usage should require reading the full document.
<title>Multiple queues and QoS support</title>
<para>TBD</para>
!Finclude/net/mac80211.h ieee80211_tx_queue_params
-!Finclude/net/mac80211.h ieee80211_tx_queue_stats_data
-!Finclude/net/mac80211.h ieee80211_tx_queue
+!Finclude/net/mac80211.h ieee80211_tx_queue_stats
</chapter>
<chapter id="AP">
diff --git a/Documentation/DocBook/procfs-guide.tmpl b/Documentation/DocBook/procfs-guide.tmpl
index 8a5dc6e021f..9eba4b7af73 100644
--- a/Documentation/DocBook/procfs-guide.tmpl
+++ b/Documentation/DocBook/procfs-guide.tmpl
@@ -14,17 +14,20 @@
<othername>(J.A.K.)</othername>
<surname>Mouw</surname>
<affiliation>
- <orgname>Delft University of Technology</orgname>
- <orgdiv>Faculty of Information Technology and Systems</orgdiv>
<address>
- <email>J.A.K.Mouw@its.tudelft.nl</email>
- <pob>PO BOX 5031</pob>
- <postcode>2600 GA</postcode>
- <city>Delft</city>
- <country>The Netherlands</country>
+ <email>mouw@nl.linux.org</email>
</address>
</affiliation>
</author>
+ <othercredit>
+ <contrib>
+ This software and documentation were written while working on the
+ LART computing board
+ (<ulink url="http://www.lartmaker.nl/">http://www.lartmaker.nl/</ulink>),
+ which was sponsored by the Delft University of Technology projects
+ Mobile Multi-media Communications and Ubiquitous Communications.
+ </contrib>
+ </othercredit>
</authorgroup>
<revhistory>
@@ -109,18 +112,6 @@
</para>
<para>
- This documentation was written while working on the LART
- computing board (<ulink
- url="http://www.lart.tudelft.nl/">http://www.lart.tudelft.nl/</ulink>),
- which is sponsored by the Mobile Multi-media Communications
- (<ulink
- url="http://www.mmc.tudelft.nl/">http://www.mmc.tudelft.nl/</ulink>)
- and Ubiquitous Communications (<ulink
- url="http://www.ubicom.tudelft.nl/">http://www.ubicom.tudelft.nl/</ulink>)
- projects.
- </para>
-
- <para>
Erik
</para>
</preface>
diff --git a/Documentation/DocBook/procfs_example.c b/Documentation/DocBook/procfs_example.c
index 2f3de0fb836..8c6396e4bf3 100644
--- a/Documentation/DocBook/procfs_example.c
+++ b/Documentation/DocBook/procfs_example.c
@@ -1,28 +1,16 @@
/*
* procfs_example.c: an example proc interface
*
- * Copyright (C) 2001, Erik Mouw (J.A.K.Mouw@its.tudelft.nl)
+ * Copyright (C) 2001, Erik Mouw (mouw@nl.linux.org)
*
* This file accompanies the procfs-guide in the Linux kernel
* source. Its main use is to demonstrate the concepts and
* functions described in the guide.
*
* This software has been developed while working on the LART
- * computing board (http://www.lart.tudelft.nl/), which is
- * sponsored by the Mobile Multi-media Communications
- * (http://www.mmc.tudelft.nl/) and Ubiquitous Communications
- * (http://www.ubicom.tudelft.nl/) projects.
- *
- * The author can be reached at:
- *
- * Erik Mouw
- * Information and Communication Theory Group
- * Faculty of Information Technology and Systems
- * Delft University of Technology
- * P.O. Box 5031
- * 2600 GA Delft
- * The Netherlands
- *
+ * computing board (http://www.lartmaker.nl), which was sponsored
+ * by the Delft University of Technology projects Mobile Multi-media
+ * Communications and Ubiquitous Communications.
*
* This program is free software; you can redistribute
* it and/or modify it under the terms of the GNU General
diff --git a/Documentation/DocBook/videobook.tmpl b/Documentation/DocBook/videobook.tmpl
deleted file mode 100644
index 0bc25949b66..00000000000
--- a/Documentation/DocBook/videobook.tmpl
+++ /dev/null
@@ -1,1654 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
- "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
-
-<book id="V4LGuide">
- <bookinfo>
- <title>Video4Linux Programming</title>
-
- <authorgroup>
- <author>
- <firstname>Alan</firstname>
- <surname>Cox</surname>
- <affiliation>
- <address>
- <email>alan@redhat.com</email>
- </address>
- </affiliation>
- </author>
- </authorgroup>
-
- <copyright>
- <year>2000</year>
- <holder>Alan Cox</holder>
- </copyright>
-
- <legalnotice>
- <para>
- This documentation is free software; you can redistribute
- it and/or modify it under the terms of the GNU General Public
- License as published by the Free Software Foundation; either
- version 2 of the License, or (at your option) any later
- version.
- </para>
-
- <para>
- This program is distributed in the hope that it will be
- useful, but WITHOUT ANY WARRANTY; without even the implied
- warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- </para>
-
- <para>
- You should have received a copy of the GNU General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- MA 02111-1307 USA
- </para>
-
- <para>
- For more details see the file COPYING in the source
- distribution of Linux.
- </para>
- </legalnotice>
- </bookinfo>
-
-<toc></toc>
-
- <chapter id="intro">
- <title>Introduction</title>
- <para>
- Parts of this document first appeared in Linux Magazine under a
- ninety day exclusivity.
- </para>
- <para>
- Video4Linux is intended to provide a common programming interface
- for the many TV and capture cards now on the market, as well as
- parallel port and USB video cameras. Radio, teletext decoders and
- vertical blanking data interfaces are also provided.
- </para>
- </chapter>
- <chapter id="radio">
- <title>Radio Devices</title>
- <para>
- There are a wide variety of radio interfaces available for PC's, and these
- are generally very simple to program. The biggest problem with supporting
- such devices is normally extracting documentation from the vendor.
- </para>
- <para>
- The radio interface supports a simple set of control ioctls standardised
- across all radio and tv interfaces. It does not support read or write, which
- are used for video streams. The reason radio cards do not allow you to read
- the audio stream into an application is that without exception they provide
- a connection on to a soundcard. Soundcards can be used to read the radio
- data just fine.
- </para>
- <sect1 id="registerradio">
- <title>Registering Radio Devices</title>
- <para>
- The Video4linux core provides an interface for registering devices. The
- first step in writing our radio card driver is to register it.
- </para>
- <programlisting>
-
-
-static struct video_device my_radio
-{
- "My radio",
- VID_TYPE_TUNER,
- radio_open.
- radio_close,
- NULL, /* no read */
- NULL, /* no write */
- NULL, /* no poll */
- radio_ioctl,
- NULL, /* no special init function */
- NULL /* no private data */
-};
-
-
- </programlisting>
- <para>
- This declares our video4linux device driver interface. The VID_TYPE_ value
- defines what kind of an interface we are, and defines basic capabilities.
- </para>
- <para>
- The only defined value relevant for a radio card is VID_TYPE_TUNER which
- indicates that the device can be tuned. Clearly our radio is going to have some
- way to change channel so it is tuneable.
- </para>
- <para>
- We declare an open and close routine, but we do not need read or write,
- which are used to read and write video data to or from the card itself. As
- we have no read or write there is no poll function.
- </para>
- <para>
- The private initialise function is run when the device is registered. In
- this driver we've already done all the work needed. The final pointer is a
- private data pointer that can be used by the device driver to attach and
- retrieve private data structures. We set this field "priv" to NULL for
- the moment.
- </para>
- <para>
- Having the structure defined is all very well but we now need to register it
- with the kernel.
- </para>
- <programlisting>
-
-
-static int io = 0x320;
-
-int __init myradio_init(struct video_init *v)
-{
- if(!request_region(io, MY_IO_SIZE, "myradio"))
- {
- printk(KERN_ERR
- "myradio: port 0x%03X is in use.\n", io);
- return -EBUSY;
- }
-
- if(video_device_register(&amp;my_radio, VFL_TYPE_RADIO)==-1) {
- release_region(io, MY_IO_SIZE);
- return -EINVAL;
- }
- return 0;
-}
-
- </programlisting>
- <para>
- The first stage of the initialisation, as is normally the case, is to check
- that the I/O space we are about to fiddle with doesn't belong to some other
- driver. If it is we leave well alone. If the user gives the address of the
- wrong device then we will spot this. These policies will generally avoid
- crashing the machine.
- </para>
- <para>
- Now we ask the Video4Linux layer to register the device for us. We hand it
- our carefully designed video_device structure and also tell it which group
- of devices we want it registered with. In this case VFL_TYPE_RADIO.
- </para>
- <para>
- The types available are
- </para>
- <table frame="all" id="Device_Types"><title>Device Types</title>
- <tgroup cols="3" align="left">
- <tbody>
- <row>
- <entry>VFL_TYPE_RADIO</entry><entry>/dev/radio{n}</entry><entry>
-
- Radio devices are assigned in this block. As with all of these
- selections the actual number assignment is done by the video layer
- accordijng to what is free.</entry>
- </row><row>
- <entry>VFL_TYPE_GRABBER</entry><entry>/dev/video{n}</entry><entry>
- Video capture devices and also -- counter-intuitively for the name --
- hardware video playback devices such as MPEG2 cards.</entry>
- </row><row>
- <entry>VFL_TYPE_VBI</entry><entry>/dev/vbi{n}</entry><entry>
- The VBI devices capture the hidden lines on a television picture
- that carry further information like closed caption data, teletext
- (primarily in Europe) and now Intercast and the ATVEC internet
- television encodings.</entry>
- </row><row>
- <entry>VFL_TYPE_VTX</entry><entry>/dev/vtx[n}</entry><entry>
- VTX is 'Videotext' also known as 'Teletext'. This is a system for
- sending numbered, 40x25, mostly textual page images over the hidden
- lines. Unlike the /dev/vbi interfaces, this is for 'smart' decoder
- chips. (The use of the word smart here has to be taken in context,
- the smartest teletext chips are fairly dumb pieces of technology).
- </entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- <para>
- We are most definitely a radio.
- </para>
- <para>
- Finally we allocate our I/O space so that nobody treads on us and return 0
- to signify general happiness with the state of the universe.
- </para>
- </sect1>
- <sect1 id="openradio">
- <title>Opening And Closing The Radio</title>
-
- <para>
- The functions we declared in our video_device are mostly very simple.
- Firstly we can drop in what is basically standard code for open and close.
- </para>
- <programlisting>
-
-
-static int users = 0;
-
-static int radio_open(struct video_device *dev, int flags)
-{
- if(users)
- return -EBUSY;
- users++;
- return 0;
-}
-
- </programlisting>
- <para>
- At open time we need to do nothing but check if someone else is also using
- the radio card. If nobody is using it we make a note that we are using it,
- then we ensure that nobody unloads our driver on us.
- </para>
- <programlisting>
-
-
-static int radio_close(struct video_device *dev)
-{
- users--;
-}
-
- </programlisting>
- <para>
- At close time we simply need to reduce the user count and allow the module
- to become unloadable.
- </para>
- <para>
- If you are sharp you will have noticed neither the open nor the close
- routines attempt to reset or change the radio settings. This is intentional.
- It allows an application to set up the radio and exit. It avoids a user
- having to leave an application running all the time just to listen to the
- radio.
- </para>
- </sect1>
- <sect1 id="ioctlradio">
- <title>The Ioctl Interface</title>
- <para>
- This leaves the ioctl routine, without which the driver will not be
- terribly useful to anyone.
- </para>
- <programlisting>
-
-
-static int radio_ioctl(struct video_device *dev, unsigned int cmd, void *arg)
-{
- switch(cmd)
- {
- case VIDIOCGCAP:
- {
- struct video_capability v;
- v.type = VID_TYPE_TUNER;
- v.channels = 1;
- v.audios = 1;
- v.maxwidth = 0;
- v.minwidth = 0;
- v.maxheight = 0;
- v.minheight = 0;
- strcpy(v.name, "My Radio");
- if(copy_to_user(arg, &amp;v, sizeof(v)))
- return -EFAULT;
- return 0;
- }
-
- </programlisting>
- <para>
- VIDIOCGCAP is the first ioctl all video4linux devices must support. It
- allows the applications to find out what sort of a card they have found and
- to figure out what they want to do about it. The fields in the structure are
- </para>
- <table frame="all" id="video_capability_fields"><title>struct video_capability fields</title>
- <tgroup cols="2" align="left">
- <tbody>
- <row>
- <entry>name</entry><entry>The device text name. This is intended for the user.</entry>
- </row><row>
- <entry>channels</entry><entry>The number of different channels you can tune on
- this card. It could even by zero for a card that has
- no tuning capability. For our simple FM radio it is 1.
- An AM/FM radio would report 2.</entry>
- </row><row>
- <entry>audios</entry><entry>The number of audio inputs on this device. For our
- radio there is only one audio input.</entry>
- </row><row>
- <entry>minwidth,minheight</entry><entry>The smallest size the card is capable of capturing
- images in. We set these to zero. Radios do not
- capture pictures</entry>
- </row><row>
- <entry>maxwidth,maxheight</entry><entry>The largest image size the card is capable of
- capturing. For our radio we report 0.
- </entry>
- </row><row>
- <entry>type</entry><entry>This reports the capabilities of the device, and
- matches the field we filled in in the struct
- video_device when registering.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- <para>
- Having filled in the fields, we use copy_to_user to copy the structure into
- the users buffer. If the copy fails we return an EFAULT to the application
- so that it knows it tried to feed us garbage.
- </para>
- <para>
- The next pair of ioctl operations select which tuner is to be used and let
- the application find the tuner properties. We have only a single FM band
- tuner in our example device.
- </para>
- <programlisting>
-
-
- case VIDIOCGTUNER:
- {
- struct video_tuner v;
- if(copy_from_user(&amp;v, arg, sizeof(v))!=0)
- return -EFAULT;
- if(v.tuner)
- return -EINVAL;
- v.rangelow=(87*16000);
- v.rangehigh=(108*16000);
- v.flags = VIDEO_TUNER_LOW;
- v.mode = VIDEO_MODE_AUTO;
- v.signal = 0xFFFF;
- strcpy(v.name, "FM");
- if(copy_to_user(&amp;v, arg, sizeof(v))!=0)
- return -EFAULT;
- return 0;
- }
-
- </programlisting>
- <para>
- The VIDIOCGTUNER ioctl allows applications to query a tuner. The application
- sets the tuner field to the tuner number it wishes to query. The query does
- not change the tuner that is being used, it merely enquires about the tuner
- in question.
- </para>
- <para>
- We have exactly one tuner so after copying the user buffer to our temporary
- structure we complain if they asked for a tuner other than tuner 0.
- </para>
- <para>
- The video_tuner structure has the following fields
- </para>
- <table frame="all" id="video_tuner_fields"><title>struct video_tuner fields</title>
- <tgroup cols="2" align="left">
- <tbody>
- <row>
- <entry>int tuner</entry><entry>The number of the tuner in question</entry>
- </row><row>
- <entry>char name[32]</entry><entry>A text description of this tuner. "FM" will do fine.
- This is intended for the application.</entry>
- </row><row>
- <entry>u32 flags</entry>
- <entry>Tuner capability flags</entry>
- </row>
- <row>
- <entry>u16 mode</entry><entry>The current reception mode</entry>
-
- </row><row>
- <entry>u16 signal</entry><entry>The signal strength scaled between 0 and 65535. If
- a device cannot tell the signal strength it should
- report 65535. Many simple cards contain only a
- signal/no signal bit. Such cards will report either
- 0 or 65535.</entry>
-
- </row><row>
- <entry>u32 rangelow, rangehigh</entry><entry>
- The range of frequencies supported by the radio
- or TV. It is scaled according to the VIDEO_TUNER_LOW
- flag.</entry>
-
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table frame="all" id="video_tuner_flags"><title>struct video_tuner flags</title>
- <tgroup cols="2" align="left">
- <tbody>
- <row>
- <entry>VIDEO_TUNER_PAL</entry><entry>A PAL TV tuner</entry>
- </row><row>
- <entry>VIDEO_TUNER_NTSC</entry><entry>An NTSC (US) TV tuner</entry>
- </row><row>
- <entry>VIDEO_TUNER_SECAM</entry><entry>A SECAM (French) TV tuner</entry>
- </row><row>
- <entry>VIDEO_TUNER_LOW</entry><entry>
- The tuner frequency is scaled in 1/16th of a KHz
- steps. If not it is in 1/16th of a MHz steps
- </entry>
- </row><row>
- <entry>VIDEO_TUNER_NORM</entry><entry>The tuner can set its format</entry>
- </row><row>
- <entry>VIDEO_TUNER_STEREO_ON</entry><entry>The tuner is currently receiving a stereo signal</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table frame="all" id="video_tuner_modes"><title>struct video_tuner modes</title>
- <tgroup cols="2" align="left">
- <tbody>
- <row>
- <entry>VIDEO_MODE_PAL</entry><entry>PAL Format</entry>
- </row><row>
- <entry>VIDEO_MODE_NTSC</entry><entry>NTSC Format (USA)</entry>
- </row><row>
- <entry>VIDEO_MODE_SECAM</entry><entry>French Format</entry>
- </row><row>
- <entry>VIDEO_MODE_AUTO</entry><entry>A device that does not need to do
- TV format switching</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- <para>
- The settings for the radio card are thus fairly simple. We report that we
- are a tuner called "FM" for FM radio. In order to get the best tuning
- resolution we report VIDEO_TUNER_LOW and select tuning to 1/16th of KHz. Its
- unlikely our card can do that resolution but it is a fair bet the card can
- do better than 1/16th of a MHz. VIDEO_TUNER_LOW is appropriate to almost all
- radio usage.
- </para>
- <para>
- We report that the tuner automatically handles deciding what format it is
- receiving - true enough as it only handles FM radio. Our example card is
- also incapable of detecting stereo or signal strengths so it reports a
- strength of 0xFFFF (maximum) and no stereo detected.
- </para>
- <para>
- To finish off we set the range that can be tuned to be 87-108Mhz, the normal
- FM broadcast radio range. It is important to find out what the card is
- actually capable of tuning. It is easy enough to simply use the FM broadcast
- range. Unfortunately if you do this you will discover the FM broadcast
- ranges in the USA, Europe and Japan are all subtly different and some users
- cannot receive all the stations they wish.
- </para>
- <para>
- The application also needs to be able to set the tuner it wishes to use. In
- our case, with a single tuner this is rather simple to arrange.
- </para>
- <programlisting>
-
- case VIDIOCSTUNER:
- {
- struct video_tuner v;
- if(copy_from_user(&amp;v, arg, sizeof(v)))
- return -EFAULT;
- if(v.tuner != 0)
- return -EINVAL;
- return 0;
- }
-
- </programlisting>
- <para>
- We copy the user supplied structure into kernel memory so we can examine it.
- If the user has selected a tuner other than zero we reject the request. If
- they wanted tuner 0 then, surprisingly enough, that is the current tuner already.
- </para>
- <para>
- The next two ioctls we need to provide are to get and set the frequency of
- the radio. These both use an unsigned long argument which is the frequency.
- The scale of the frequency depends on the VIDEO_TUNER_LOW flag as I
- mentioned earlier on. Since we have VIDEO_TUNER_LOW set this will be in
- 1/16ths of a KHz.
- </para>
- <programlisting>
-
-static unsigned long current_freq;
-
-
-
- case VIDIOCGFREQ:
- if(copy_to_user(arg, &amp;current_freq,
- sizeof(unsigned long))
- return -EFAULT;
- return 0;
-
- </programlisting>
- <para>
- Querying the frequency in our case is relatively simple. Our radio card is
- too dumb to let us query the signal strength so we remember our setting if
- we know it. All we have to do is copy it to the user.
- </para>
- <programlisting>
-
-
- case VIDIOCSFREQ:
- {
- u32 freq;
- if(copy_from_user(arg, &amp;freq,
- sizeof(unsigned long))!=0)
- return -EFAULT;
- if(hardware_set_freq(freq)&lt;0)
- return -EINVAL;
- current_freq = freq;
- return 0;
- }
-
- </programlisting>
- <para>
- Setting the frequency is a little more complex. We begin by copying the
- desired frequency into kernel space. Next we call a hardware specific routine
- to set the radio up. This might be as simple as some scaling and a few
- writes to an I/O port. For most radio cards it turns out a good deal more
- complicated and may involve programming things like a phase locked loop on
- the card. This is what documentation is for.
- </para>
- <para>
- The final set of operations we need to provide for our radio are the
- volume controls. Not all radio cards can even do volume control. After all
- there is a perfectly good volume control on the sound card. We will assume
- our radio card has a simple 4 step volume control.
- </para>
- <para>
- There are two ioctls with audio we need to support
- </para>
- <programlisting>
-
-static int current_volume=0;
-
- case VIDIOCGAUDIO:
- {
- struct video_audio v;
- if(copy_from_user(&amp;v, arg, sizeof(v)))
- return -EFAULT;
- if(v.audio != 0)
- return -EINVAL;
- v.volume = 16384*current_volume;
- v.step = 16384;
- strcpy(v.name, "Radio");
- v.mode = VIDEO_SOUND_MONO;
- v.balance = 0;
- v.base = 0;
- v.treble = 0;
-
- if(copy_to_user(arg. &amp;v, sizeof(v)))
- return -EFAULT;
- return 0;
- }
-
- </programlisting>
- <para>
- Much like the tuner we start by copying the user structure into kernel
- space. Again we check if the user has asked for a valid audio input. We have
- only input 0 and we punt if they ask for another input.
- </para>
- <para>
- Then we fill in the video_audio structure. This has the following format
- </para>
- <table frame="all" id="video_audio_fields"><title>struct video_audio fields</title>
- <tgroup cols="2" align="left">
- <tbody>
- <row>
- <entry>audio</entry><entry>The input the user wishes to query</entry>
- </row><row>
- <entry>volume</entry><entry>The volume setting on a scale of 0-65535</entry>
- </row><row>
- <entry>base</entry><entry>The base level on a scale of 0-65535</entry>
- </row><row>
- <entry>treble</entry><entry>The treble level on a scale of 0-65535</entry>
- </row><row>
- <entry>flags</entry><entry>The features this audio device supports
- </entry>
- </row><row>
- <entry>name</entry><entry>A text name to display to the user. We picked
- "Radio" as it explains things quite nicely.</entry>
- </row><row>
- <entry>mode</entry><entry>The current reception mode for the audio
-
- We report MONO because our card is too stupid to know if it is in
- mono or stereo.
- </entry>
- </row><row>
- <entry>balance</entry><entry>The stereo balance on a scale of 0-65535, 32768 is
- middle.</entry>
- </row><row>
- <entry>step</entry><entry>The step by which the volume control jumps. This is
- used to help make it easy for applications to set
- slider behaviour.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table frame="all" id="video_audio_flags"><title>struct video_audio flags</title>
- <tgroup cols="2" align="left">
- <tbody>
- <row>
- <entry>VIDEO_AUDIO_MUTE</entry><entry>The audio is currently muted. We
- could fake this in our driver but we
- choose not to bother.</entry>
- </row><row>
- <entry>VIDEO_AUDIO_MUTABLE</entry><entry>The input has a mute option</entry>
- </row><row>
- <entry>VIDEO_AUDIO_TREBLE</entry><entry>The input has a treble control</entry>
- </row><row>
- <entry>VIDEO_AUDIO_BASS</entry><entry>The input has a base control</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table frame="all" id="video_audio_modes"><title>struct video_audio modes</title>
- <tgroup cols="2" align="left">
- <tbody>
- <row>
- <entry>VIDEO_SOUND_MONO</entry><entry>Mono sound</entry>
- </row><row>
- <entry>VIDEO_SOUND_STEREO</entry><entry>Stereo sound</entry>
- </row><row>
- <entry>VIDEO_SOUND_LANG1</entry><entry>Alternative language 1 (TV specific)</entry>
- </row><row>
- <entry>VIDEO_SOUND_LANG2</entry><entry>Alternative language 2 (TV specific)</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- <para>
- Having filled in the structure we copy it back to user space.
- </para>
- <para>
- The VIDIOCSAUDIO ioctl allows the user to set the audio parameters in the
- video_audio structure. The driver does its best to honour the request.
- </para>
- <programlisting>
-
- case VIDIOCSAUDIO:
- {
- struct video_audio v;
- if(copy_from_user(&amp;v, arg, sizeof(v)))
- return -EFAULT;
- if(v.audio)
- return -EINVAL;
- current_volume = v/16384;
- hardware_set_volume(current_volume);
- return 0;
- }
-
- </programlisting>
- <para>
- In our case there is very little that the user can set. The volume is
- basically the limit. Note that we could pretend to have a mute feature
- by rewriting this to
- </para>
- <programlisting>
-
- case VIDIOCSAUDIO:
- {
- struct video_audio v;
- if(copy_from_user(&amp;v, arg, sizeof(v)))
- return -EFAULT;
- if(v.audio)
- return -EINVAL;
- current_volume = v/16384;
- if(v.flags&amp;VIDEO_AUDIO_MUTE)
- hardware_set_volume(0);
- else
- hardware_set_volume(current_volume);
- current_muted = v.flags &amp;
- VIDEO_AUDIO_MUTE;
- return 0;
- }
-
- </programlisting>
- <para>
- This with the corresponding changes to the VIDIOCGAUDIO code to report the
- state of the mute flag we save and to report the card has a mute function,
- will allow applications to use a mute facility with this card. It is
- questionable whether this is a good idea however. User applications can already
- fake this themselves and kernel space is precious.
- </para>
- <para>
- We now have a working radio ioctl handler. So we just wrap up the function
- </para>
- <programlisting>
-
-
- }
- return -ENOIOCTLCMD;
-}
-
- </programlisting>
- <para>
- and pass the Video4Linux layer back an error so that it knows we did not
- understand the request we got passed.
- </para>
- </sect1>
- <sect1 id="modradio">
- <title>Module Wrapper</title>
- <para>
- Finally we add in the usual module wrapping and the driver is done.
- </para>
- <programlisting>
-
-#ifndef MODULE
-
-static int io = 0x300;
-
-#else
-
-static int io = -1;
-
-#endif
-
-MODULE_AUTHOR("Alan Cox");
-MODULE_DESCRIPTION("A driver for an imaginary radio card.");
-module_param(io, int, 0444);
-MODULE_PARM_DESC(io, "I/O address of the card.");
-
-static int __init init(void)
-{
- if(io==-1)
- {
- printk(KERN_ERR
- "You must set an I/O address with io=0x???\n");
- return -EINVAL;
- }
- return myradio_init(NULL);
-}
-
-static void __exit cleanup(void)
-{
- video_unregister_device(&amp;my_radio);
- release_region(io, MY_IO_SIZE);
-}
-
-module_init(init);
-module_exit(cleanup);
-
- </programlisting>
- <para>
- In this example we set the IO base by default if the driver is compiled into
- the kernel: you can still set it using "my_radio.irq" if this file is called <filename>my_radio.c</filename>. For the module we require the
- user sets the parameter. We set io to a nonsense port (-1) so that we can
- tell if the user supplied an io parameter or not.
- </para>
- <para>
- We use MODULE_ defines to give an author for the card driver and a
- description. We also use them to declare that io is an integer and it is the
- address of the card, and can be read by anyone from sysfs.
- </para>
- <para>
- The clean-up routine unregisters the video_device we registered, and frees
- up the I/O space. Note that the unregister takes the actual video_device
- structure as its argument. Unlike the file operations structure which can be
- shared by all instances of a device a video_device structure as an actual
- instance of the device. If you are registering multiple radio devices you
- need to fill in one structure per device (most likely by setting up a
- template and copying it to each of the actual device structures).
- </para>
- </sect1>
- </chapter>
- <chapter id="Video_Capture_Devices">
- <title>Video Capture Devices</title>
- <sect1 id="introvid">
- <title>Video Capture Device Types</title>
- <para>
- The video capture devices share the same interfaces as radio devices. In
- order to explain the video capture interface I will use the example of a
- camera that has no tuners or audio input. This keeps the example relatively
- clean. To get both combine the two driver examples.
- </para>
- <para>
- Video capture devices divide into four categories. A little technology
- backgrounder. Full motion video even at television resolution (which is
- actually fairly low) is pretty resource-intensive. You are continually
- passing megabytes of data every second from the capture card to the display.
- several alternative approaches have emerged because copying this through the
- processor and the user program is a particularly bad idea .
- </para>
- <para>
- The first is to add the television image onto the video output directly.
- This is also how some 3D cards work. These basic cards can generally drop the
- video into any chosen rectangle of the display. Cards like this, which
- include most mpeg1 cards that used the feature connector, aren't very
- friendly in a windowing environment. They don't understand windows or
- clipping. The video window is always on the top of the display.
- </para>
- <para>
- Chroma keying is a technique used by cards to get around this. It is an old
- television mixing trick where you mark all the areas you wish to replace
- with a single clear colour that isn't used in the image - TV people use an
- incredibly bright blue while computing people often use a particularly
- virulent purple. Bright blue occurs on the desktop. Anyone with virulent
- purple windows has another problem besides their TV overlay.
- </para>
- <para>
- The third approach is to copy the data from the capture card to the video
- card, but to do it directly across the PCI bus. This relieves the processor
- from doing the work but does require some smartness on the part of the video
- capture chip, as well as a suitable video card. Programming this kind of
- card, and even more so debugging it, can be extremely tricky. There are some quite
- complicated interactions with the display and you may also have to cope with
- various chipset bugs that show up when PCI cards start talking to each
- other.
- </para>
- <para>
- To keep our example fairly simple we will assume a card that supports
- overlaying a flat rectangular image onto the frame buffer output, and which
- can also capture frames into processor memory.
- </para>
- </sect1>
- <sect1 id="regvid">
- <title>Registering Video Capture Devices</title>
- <para>
- This time we need to add more functions for our camera device.
- </para>
- <programlisting>
-static struct video_device my_camera =
-{
-	"My Camera",
-	VID_TYPE_OVERLAY|VID_TYPE_SCALES|\
-	VID_TYPE_CAPTURE|VID_TYPE_CHROMAKEY,
-	camera_open,
-	camera_close,
-	camera_read,	/* capture read */
-	NULL,		/* no write */
-	camera_poll,	/* wait for a frame */
-	camera_ioctl,
-	NULL,		/* no special init function */
-	NULL		/* no private data */
-};
- </programlisting>
- <para>
- We need a read() function which is used for capturing data from
- the card, and we need a poll function so that an application can wait for
- the next frame to be captured.
- </para>
- <para>
- We use the extra video capability flags that did not apply to the
- radio interface. The video related flags are
- </para>
- <table frame="all" id="Capture_Capabilities"><title>Capture Capabilities</title>
- <tgroup cols="2" align="left">
- <tbody>
- <row>
-<entry>VID_TYPE_CAPTURE</entry><entry>We support image capture</entry>
-</row><row>
-<entry>VID_TYPE_TELETEXT</entry><entry>A teletext capture device (vbi[n])</entry>
-</row><row>
-<entry>VID_TYPE_OVERLAY</entry><entry>The image can be directly overlaid onto the
- frame buffer</entry>
-</row><row>
-<entry>VID_TYPE_CHROMAKEY</entry><entry>Chromakey can be used to select which parts
- of the image to display</entry>
-</row><row>
-<entry>VID_TYPE_CLIPPING</entry><entry>It is possible to give the board a list of
- rectangles to draw around. </entry>
-</row><row>
-<entry>VID_TYPE_FRAMERAM</entry><entry>The video capture goes into the video memory
- and actually changes it. Applications need
- to know this so they can clean up after the
- card</entry>
-</row><row>
-<entry>VID_TYPE_SCALES</entry><entry>The image can be scaled to various sizes,
- rather than being a single fixed size.</entry>
-</row><row>
-<entry>VID_TYPE_MONOCHROME</entry><entry>The capture will be monochrome. This isn't a
- complete answer to the question since a mono
- camera on a colour capture card will still
- produce mono output.</entry>
-</row><row>
-<entry>VID_TYPE_SUBCAPTURE</entry><entry>The card allows only part of its field of
- view to be captured. This enables
- applications to avoid copying all of a large
- image into memory when only some section is
- relevant.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- <para>
- We set VID_TYPE_CAPTURE so that we are seen as a capture card,
- VID_TYPE_OVERLAY because we can overlay the image onto the frame buffer,
- VID_TYPE_CHROMAKEY so the application knows it is time to draw in virulent
- purple, and VID_TYPE_SCALES because we can be resized.
- </para>
- <para>
- Our setup is fairly similar. This time we also want an interrupt line
- for the 'frame captured' signal. Not all cards have this so some of them
- cannot handle poll().
- </para>
- <programlisting>
-
-
-static int io = 0x320;
-static int irq = 11;
-
-int __init mycamera_init(struct video_init *v)
-{
- if(!request_region(io, MY_IO_SIZE, "mycamera"))
- {
- printk(KERN_ERR
- "mycamera: port 0x%03X is in use.\n", io);
- return -EBUSY;
- }
-
- if(video_register_device(&amp;my_camera,
- VFL_TYPE_GRABBER)==-1) {
- release_region(io, MY_IO_SIZE);
- return -EINVAL;
- }
- return 0;
-}
-
- </programlisting>
- <para>
- This is little changed from the needs of the radio card. We specify
- VFL_TYPE_GRABBER this time as we want to be allocated a /dev/video name.
- </para>
- </sect1>
- <sect1 id="opvid">
- <title>Opening And Closing The Capture Device</title>
- <programlisting>
-
-
-static int users = 0;
-
-static int camera_open(struct video_device *dev, int flags)
-{
- if(users)
- return -EBUSY;
- if(request_irq(irq, camera_irq, 0, "camera", dev)&lt;0)
- return -EBUSY;
- users++;
- return 0;
-}
-
-
-static int camera_close(struct video_device *dev)
-{
-	users--;
-	free_irq(irq, dev);
-	return 0;
-}
- </programlisting>
- <para>
- The open and close routines are also quite similar. The only real change is
- that we now request an interrupt for the camera device interrupt line. If we
- cannot get the interrupt we report EBUSY to the application and give up.
- </para>
- </sect1>
- <sect1 id="irqvid">
- <title>Interrupt Handling</title>
- <para>
- Our example handler is for an ISA bus device. If it were PCI you would be
- able to share the interrupt and would have set IRQF_SHARED to indicate a
- shared IRQ. We pass the device pointer as the interrupt routine argument. We
- don't strictly need to, since we only support one card, but doing this will
- make it easier to upgrade the driver for multiple devices in the future.
- </para>
- <para>
- Our interrupt routine needs to do little if we assume the card can simply
- queue one frame to be read after it captures it.
- </para>
- <programlisting>
-
-
-static struct wait_queue *capture_wait;
-static int capture_ready = 0;
-
-static void camera_irq(int irq, void *dev_id,
- struct pt_regs *regs)
-{
- capture_ready=1;
- wake_up_interruptible(&amp;capture_wait);
-}
- </programlisting>
- <para>
- The interrupt handler is nice and simple for this card as we are assuming
- the card is buffering the frame for us. This means we have little to do but
- wake up anybody interested. We also set a capture_ready flag, as we may
- capture a frame before an application needs it. In this case we need to know
- that a frame is ready. If we had to collect the frame on the interrupt life
- would be more complex.
- </para>
- <para>
- The two new routines we need to supply are camera_read which returns a
- frame, and camera_poll which waits for a frame to become ready.
- </para>
- <programlisting>
-
-
-static int camera_poll(struct video_device *dev,
- struct file *file, struct poll_table *wait)
-{
- poll_wait(file, &amp;capture_wait, wait);
- if(capture_ready)
- return POLLIN|POLLRDNORM;
- return 0;
-}
-
- </programlisting>
- <para>
- Our wait queue for polling is the capture_wait queue. This will cause the
- task to be woken up by our camera_irq routine. We check capture_ready to see
- if there is an image present and if so report that it is readable.
- </para>
- </sect1>
- <sect1 id="rdvid">
- <title>Reading The Video Image</title>
- <programlisting>
-
-
-static long camera_read(struct video_device *dev, struct file *file,
-	char *buf, unsigned long count)
-{
- struct wait_queue wait = { current, NULL };
- u8 *ptr;
- int len;
- int i;
-
- add_wait_queue(&amp;capture_wait, &amp;wait);
- current->state = TASK_INTERRUPTIBLE;
-
- while(!capture_ready)
- {
- if(file->f_flags&amp;O_NDELAY)
- {
- remove_wait_queue(&amp;capture_wait, &amp;wait);
- current->state = TASK_RUNNING;
- return -EWOULDBLOCK;
- }
- if(signal_pending(current))
- {
- remove_wait_queue(&amp;capture_wait, &amp;wait);
- current->state = TASK_RUNNING;
- return -ERESTARTSYS;
- }
- schedule();
- current->state = TASK_INTERRUPTIBLE;
- }
- remove_wait_queue(&amp;capture_wait, &amp;wait);
- current->state = TASK_RUNNING;
-
- </programlisting>
- <para>
- The first thing we have to do is to ensure that the application waits until
- the next frame is ready. The code here is the classic sleep-and-wake-up
- sequence used by many character drivers. It is one of the common building blocks of
- Linux device driver code and probably one which you will find occurs in any
- drivers you write.
- </para>
- <para>
- We wait for a frame to be ready, or for a signal to interrupt our waiting. If a
- signal occurs we need to return from the system call so that the signal can
- be sent to the application itself. We also check to see if the user actually
- wanted to avoid waiting - ie if they are using non-blocking I/O and have other things
- to get on with.
- </para>
- <para>
- Next we copy the data from the card to the user application. This is rarely
- as easy as our example makes out. We will add capture_w and capture_h here
- to hold the width and height of the captured image. We assume the card only
- supports 24bit RGB for now.
- </para>
- <programlisting>
-
-
-
- capture_ready = 0;
-
- ptr=(u8 *)buf;
- len = capture_w * 3 * capture_h; /* 24bit RGB */
-
- if(len>count)
- len=count; /* Doesn't all fit */
-
- for(i=0; i&lt;len; i++)
- {
- put_user(inb(io+IMAGE_DATA), ptr);
- ptr++;
- }
-
- hardware_restart_capture();
-
- return i;
-}
-
- </programlisting>
- <para>
- For a real hardware device you would try to avoid the loop with put_user().
- Each call to put_user() has a time overhead checking whether the accesses to user
- space are allowed. It would be better to read a line into a temporary buffer
- then copy this to user space in one go.
- </para>
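- <para>
- A minimal sketch of that approach, reusing the io, IMAGE_DATA, capture_w,
- capture_h and ptr names from the example above (and, for brevity, ignoring
- the partial-read case handled by the count check earlier), might replace the
- put_user() loop with something like:
- </para>
- <programlisting>
-
-	u8 line[640*3];		/* one line of 24bit RGB; a real driver
-				   would probably kmalloc() this */
-	int line_len = capture_w * 3;
-	int x, y;
-
-	for(y = 0; y &lt; capture_h; y++)
-	{
-		/* Gather a whole line into kernel memory first... */
-		for(x = 0; x &lt; line_len; x++)
-			line[x] = inb(io + IMAGE_DATA);
-		/* ...then hand it to the application in one call, so the
-		   user space access is checked once per line, not per byte */
-		if(copy_to_user(ptr, line, line_len))
-			return -EFAULT;
-		ptr += line_len;
-	}
-
- </programlisting>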
- <para>
- Having captured the image and put it into user space we can kick the card to
- get the next frame acquired.
- </para>
- </sect1>
- <sect1 id="iocvid">
- <title>Video Ioctl Handling</title>
- <para>
- As with the radio driver the major control interface is via the ioctl()
- function. Video capture devices support the same tuner calls as a radio
- device and also support additional calls to control how the video functions
- are handled. In this simple example the card has no tuners to avoid making
- the code complex.
- </para>
- <programlisting>
-
-
-
-static int camera_ioctl(struct video_device *dev, unsigned int cmd, void *arg)
-{
- switch(cmd)
- {
- case VIDIOCGCAP:
- {
- struct video_capability v;
- /* Clear the structure: the name field is only partially filled by
-    strcpy() below and we must not leak stack data to user space */
- memset(&amp;v, 0, sizeof(v));
- v.type = VID_TYPE_CAPTURE|\
- VID_TYPE_CHROMAKEY|\
- VID_TYPE_SCALES|\
- VID_TYPE_OVERLAY;
- v.channels = 1;
- v.audios = 0;
- v.maxwidth = 640;
- v.minwidth = 16;
- v.maxheight = 480;
- v.minheight = 16;
- strcpy(v.name, "My Camera");
- if(copy_to_user(arg, &amp;v, sizeof(v)))
- return -EFAULT;
- return 0;
- }
-
-
- </programlisting>
- <para>
- The first ioctl we must support and which all video capture and radio
- devices are required to support is VIDIOCGCAP. This behaves exactly the same
- as with a radio device. This time, however, we report the extra capabilities
- we outlined earlier on when defining our video_device structure.
- </para>
- <para>
- We now set the video flags saying that we support overlay, capture,
- scaling and chromakey. We also report size limits - our smallest image is
- 16x16 pixels, our largest is 640x480.
- </para>
- <para>
- To keep things simple we report no audio and no tuning capabilities at all.
- </para>
- <programlisting>
-
- case VIDIOCGCHAN:
- {
- struct video_channel v;
- if(copy_from_user(&amp;v, arg, sizeof(v)))
- return -EFAULT;
- if(v.channel != 0)
- return -EINVAL;
- v.flags = 0;
- v.tuners = 0;
- v.type = VIDEO_TYPE_CAMERA;
- v.norm = VIDEO_MODE_AUTO;
- strcpy(v.name, "Camera Input");
- if(copy_to_user(arg, &amp;v, sizeof(v)))
- return -EFAULT;
- return 0;
- }
-
-
- </programlisting>
- <para>
- This follows what is very much the standard way an ioctl handler looks
- in Linux. We copy the data into a kernel space variable and we check that the
- request is valid (in this case that the input is 0). Finally we copy the
- camera info back to the user.
- </para>
- <para>
- The VIDIOCGCHAN ioctl allows a user to ask about video channels (that is
- inputs to the video card). Our example card has a single camera input. The
- fields in the structure are
- </para>
- <table frame="all" id="video_channel_fields"><title>struct video_channel fields</title>
- <tgroup cols="2" align="left">
- <tbody>
- <row>
-
- <entry>channel</entry><entry>The channel number we are selecting</entry>
- </row><row>
- <entry>name</entry><entry>The name for this channel. This is intended
- to describe the port to the user.
- Appropriate names are therefore things like
- "Camera" or "SCART input"</entry>
- </row><row>
- <entry>flags</entry><entry>Channel properties</entry>
- </row><row>
- <entry>type</entry><entry>Input type</entry>
- </row><row>
- <entry>norm</entry><entry>The current television encoding being used
- if relevant for this channel.
- </entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- <table frame="all" id="video_channel_flags"><title>struct video_channel flags</title>
- <tgroup cols="2" align="left">
- <tbody>
- <row>
- <entry>VIDEO_VC_TUNER</entry><entry>Channel has a tuner.</entry>
- </row><row>
- <entry>VIDEO_VC_AUDIO</entry><entry>Channel has audio.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- <table frame="all" id="video_channel_types"><title>struct video_channel types</title>
- <tgroup cols="2" align="left">
- <tbody>
- <row>
- <entry>VIDEO_TYPE_TV</entry><entry>Television input.</entry>
- </row><row>
- <entry>VIDEO_TYPE_CAMERA</entry><entry>Fixed camera input.</entry>
- </row><row>
- <entry>0</entry><entry>Type is unknown.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- <table frame="all" id="video_channel_norms"><title>struct video_channel norms</title>
- <tgroup cols="2" align="left">
- <tbody>
- <row>
- <entry>VIDEO_MODE_PAL</entry><entry>PAL encoded Television</entry>
- </row><row>
- <entry>VIDEO_MODE_NTSC</entry><entry>NTSC (US) encoded Television</entry>
- </row><row>
- <entry>VIDEO_MODE_SECAM</entry><entry>SECAM (French) Television </entry>
- </row><row>
- <entry>VIDEO_MODE_AUTO</entry><entry>Automatic switching, or format does not
- matter</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- <para>
- The corresponding VIDIOCSCHAN ioctl allows a user to change channel and to
- request that the norm be changed - for example to switch between a PAL and
- an NTSC format camera.
- </para>
- <programlisting>
-
-
- case VIDIOCSCHAN:
- {
- struct video_channel v;
- if(copy_from_user(&amp;v, arg, sizeof(v)))
- return -EFAULT;
- if(v.channel != 0)
- return -EINVAL;
- if(v.norm != VIDEO_MODE_AUTO)
- return -EINVAL;
- return 0;
- }
-
-
- </programlisting>
- <para>
- The implementation of this call in our driver is remarkably easy. Because we
- are assuming fixed format hardware we need only check that the user has not
- tried to change anything.
- </para>
- <para>
- The user also needs to be able to configure and adjust the picture they are
- seeing. This is much like adjusting a television set. A user application
- also needs to know the palette being used so that it knows how to display
- the image that has been captured. The VIDIOCGPICT and VIDIOCSPICT ioctl
- calls provide this information.
- </para>
- <programlisting>
-
-
- case VIDIOCGPICT:
- {
- struct video_picture v;
- v.brightness = hardware_brightness();
- v.hue = hardware_hue();
- v.colour = hardware_saturation();
- v.contrast = hardware_contrast();
- /* Not settable */
- v.whiteness = 32768;
- v.depth = 24; /* 24bit */
- v.palette = VIDEO_PALETTE_RGB24;
- if(copy_to_user(arg, &amp;v,
- sizeof(v)))
- return -EFAULT;
- return 0;
- }
-
-
- </programlisting>
- <para>
- The brightness, hue, colour, and contrast provide the picture controls that
- are akin to a conventional television. Whiteness provides additional
- control for greyscale images. All of these values are scaled between 0 and
- 65535 and have 32768 as the mid point setting. The scaling means that applications
- do not have to worry about the capability range of the hardware but can let
- it make a best effort attempt.
- </para>
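- <para>
- As an illustration, here is one possible implementation of the card
- specific set_hardware_brightness() helper used in the VIDIOCSPICT handler
- below. The 6 bit register width and the BRIGHTNESS_REG offset are
- assumptions made purely for this sketch; the point is simply that the
- driver, not the application, maps the 0-65535 range onto whatever range the
- hardware provides.
- </para>
- <programlisting>
-
-static void set_hardware_brightness(int value)
-{
-	/* Scale the 0-65535 control range down to the 0-63 range
-	   of our (hypothetical) 6 bit brightness register */
-	outb((value >> 10) &amp; 0x3f, io + BRIGHTNESS_REG);
-}
-
- </programlisting>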
- <para>
- Our depth is 24, as this is in bits. We will be returning RGB24 format. This
- has one byte of red, then one of green, then one of blue. This pattern then
- repeats for each subsequent pixel in the image. The other common formats the interface
- defines are
- </para>
- <table frame="all" id="Framebuffer_Encodings"><title>Framebuffer Encodings</title>
- <tgroup cols="2" align="left">
- <tbody>
- <row>
- <entry>GREY</entry><entry>Linear greyscale. This is for simple cameras and the
- like</entry>
- </row><row>
- <entry>RGB565</entry><entry>The top 5 bits hold 32 red levels, the next six bits
- hold green and the low 5 bits hold blue. </entry>
- </row><row>
- <entry>RGB555</entry><entry>The top bit is clear. The red green and blue levels
- each occupy five bits.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
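- <para>
- To make the bit layouts above concrete, the following small sketch (not part
- of the example driver) expands a single RGB565 pixel into the three RGB24
- bytes our driver returns; the shifts replicate the top bits of each channel
- so that full-scale values map to 255.
- </para>
- <programlisting>
-
-static void rgb565_to_rgb24(u16 pixel, u8 *out)
-{
-	u8 r = (pixel >> 11) &amp; 0x1f;	/* top five bits are red */
-	u8 g = (pixel >> 5) &amp; 0x3f;	/* next six bits are green */
-	u8 b = pixel &amp; 0x1f;		/* low five bits are blue */
-
-	out[0] = (r &lt;&lt; 3) | (r >> 2);	/* expand 5 bits to 8 */
-	out[1] = (g &lt;&lt; 2) | (g >> 4);	/* expand 6 bits to 8 */
-	out[2] = (b &lt;&lt; 3) | (b >> 2);
-}
-
- </programlisting>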
- <para>
- Additional YUV capture modes are also supported. These are common for
- TV and video conferencing applications.
- </para>
- <para>
- The VIDIOCSPICT ioctl allows a user to set some of the picture parameters.
- Exactly which ones are supported depends heavily on the card itself. It is
- possible to support many modes and effects in software. In general doing
- this in the kernel is a bad idea. Video capture is a performance-sensitive
- application and the programs can often do better if they aren't being
- 'helped' by an overkeen driver writer. Thus for our device we will report
- RGB24 only and refuse to allow a change.
- </para>
- <programlisting>
-
-
- case VIDIOCSPICT:
- {
- struct video_picture v;
- if(copy_from_user(&amp;v, arg, sizeof(v)))
- return -EFAULT;
- if(v.depth!=24 ||
- v.palette != VIDEO_PALETTE_RGB24)
- return -EINVAL;
- set_hardware_brightness(v.brightness);
- set_hardware_hue(v.hue);
- set_hardware_saturation(v.colour);
- set_hardware_contrast(v.contrast);
- return 0;
- }
-
-
- </programlisting>
- <para>
- We check the user has not tried to change the palette or the depth. We do
- not want to carry out some of the changes and then return an error. This may
- confuse the application which will be assuming no change occurred.
- </para>
- <para>
- In much the same way as you need to be able to set the picture controls to
- get the right capture images, many cards need to know what they are
- displaying onto when generating overlay output. In some cases getting this
- wrong even makes a nasty mess or may crash the computer. For that reason
- the VIDIOCSFBUF ioctl used to set up the frame buffer information may well
- only be usable by root.
- </para>
- <para>
- We will assume our card is one of the old ISA devices with feature connector
- and only supports a couple of standard video modes. This was very common for
- older cards, although the PCI devices are way smarter than this.
- </para>
- <programlisting>
-
-
-static struct video_buffer capture_fb;
-
- case VIDIOCGFBUF:
- {
- if(copy_to_user(arg, &amp;capture_fb,
- sizeof(capture_fb)))
- return -EFAULT;
- return 0;
-
- }
-
-
- </programlisting>
- <para>
- We keep the frame buffer information in the format the ioctl uses. This
- makes it nice and easy to work with in the ioctl calls.
- </para>
- <programlisting>
-
- case VIDIOCSFBUF:
- {
- struct video_buffer v;
-
- if(!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if(copy_from_user(&amp;v, arg, sizeof(v)))
- return -EFAULT;
- if(v.width!=320 &amp;&amp; v.width!=640)
- return -EINVAL;
- if(v.height!=200 &amp;&amp; v.height!=240
- &amp;&amp; v.height!=400
- &amp;&amp; v.height !=480)
- return -EINVAL;
- memcpy(&amp;capture_fb, &amp;v, sizeof(v));
- hardware_set_fb(&amp;v);
- return 0;
- }
-
-
-
- </programlisting>
- <para>
- The capable() function checks a user has the required capability. The Linux
- operating system has a set of about 30 capabilities indicating privileged
- access to services. The default set up gives the superuser (uid 0) all of
- them and nobody else has any.
- </para>
- <para>
- We check that the user has the SYS_ADMIN capability, that is they are
- allowed to operate as the machine administrator. We don't want anyone but
- the administrator making a mess of the display.
- </para>
- <para>
- Next we check for standard PC video modes (320 or 640 wide with the usual
- EGA or VGA line counts). If the mode is not a standard video mode we reject
- it as not supported by our card. If the mode is acceptable we save it so that
- VIDIOCGFBUF will give the right answer next time it is called. The
- hardware_set_fb() function is some undescribed card specific function to
- program the card for the desired mode.
- </para>
- <para>
- Before the driver can display an overlay window it needs to know where the
- window should be placed, and also how large it should be. If the card
- supports clipping it needs to know which rectangles to omit from the
- display. The video_window structure is used to describe the way the image
- should be displayed.
- </para>
- <table frame="all" id="video_window_fields"><title>struct video_window fields</title>
- <tgroup cols="2" align="left">
- <tbody>
- <row>
- <entry>width</entry><entry>The width in pixels of the desired image. The card
- may use a smaller size if this size is not available</entry>
- </row><row>
- <entry>height</entry><entry>The height of the image. The card may use a smaller
- size if this size is not available.</entry>
- </row><row>
- <entry>x</entry><entry> The X position of the top left of the window. This
- is in pixels relative to the left hand edge of the
- picture. Not all cards can display images aligned on
- any pixel boundary. If the position is unsuitable
- the card adjusts the image right and reduces the
- width.</entry>
- </row><row>
- <entry>y</entry><entry> The Y position of the top left of the window. This
- is counted in pixels relative to the top edge of the
- picture. As with the width if the card cannot
- display starting on this line it will adjust the
- values.</entry>
- </row><row>
- <entry>chromakey</entry><entry>The colour (expressed in RGB32 format) to use as the
- chromakey if chroma keying is being used. </entry>
- </row><row>
- <entry>clips</entry><entry>An array of rectangles that must not be drawn
- over.</entry>
- </row><row>
- <entry>clipcount</entry><entry>The number of clips in this array.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- <para>
- Each clip is a struct video_clip which has the following fields
- </para>
- <table frame="all" id="video_clip_fields"><title>video_clip fields</title>
- <tgroup cols="2" align="left">
- <tbody>
- <row>
- <entry>x, y</entry><entry>Co-ordinates relative to the display</entry>
- </row><row>
- <entry>width, height</entry><entry>Width and height in pixels</entry>
- </row><row>
- <entry>next</entry><entry>A spare field for the application to use</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- <para>
- The driver is required to ensure it always draws in the area requested or a
- smaller area, and that it never draws in any of the areas that are clipped.
- This may well mean it has to leave alone small areas that the application
- wished to be drawn.
- </para>
- <para>
- Our example card uses chromakey so does not have to address most of the
- clipping. We will add a video_window structure to our global variables to
- remember our parameters, as we did with the frame buffer.
- </para>
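- <para>
- The globals in question are simply (capture_w and capture_h being the width
- and height variables we introduced for camera_read earlier):
- </para>
- <programlisting>
-
-static struct video_window capture_win;	/* parameters set by VIDIOCSWIN */
-static int capture_w;			/* current capture width in pixels */
-static int capture_h;			/* current capture height in pixels */
-
- </programlisting>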
- <programlisting>
-
-
- case VIDIOCGWIN:
- {
- if(copy_to_user(arg, &amp;capture_win,
- sizeof(capture_win)))
- return -EFAULT;
- return 0;
- }
-
-
- case VIDIOCSWIN:
- {
- struct video_window v;
- if(copy_from_user(&amp;v, arg, sizeof(v)))
- return -EFAULT;
- if(v.width &gt; 640 || v.height &gt; 480)
- return -EINVAL;
- if(v.width &lt; 16 || v.height &lt; 16)
- return -EINVAL;
- hardware_set_key(v.chromakey);
- hardware_set_window(v);
- memcpy(&amp;capture_win, &amp;v, sizeof(v));
- capture_w = v.width;
- capture_h = v.height;
- return 0;
- }
-
-
- </programlisting>
- <para>
- Because we are using Chromakey our setup is fairly simple. Mostly we have to
- check the values are sane and load them into the capture card.
- </para>
- <para>
- With all the setup done we can now turn on the actual capture/overlay. This
- is done with the VIDIOCCAPTURE ioctl. This takes a single integer argument
- where 0 is off and any non-zero value is on.
- </para>
- <programlisting>
-
-
- case VIDIOCCAPTURE:
- {
- int v;
- if(get_user(v, (int *)arg))
- return -EFAULT;
- if(v==0)
- hardware_capture_off();
- else
- {
- if(capture_fb.width == 0
- || capture_w == 0)
- return -EINVAL;
- hardware_capture_on();
- }
- return 0;
- }
-
-
- </programlisting>
- <para>
- We grab the flag from user space and either enable or disable according to
- its value. There is one small corner case we have to consider here. Suppose
- that the capture was requested before the video window or the frame buffer
- had been set up. In those cases there will be unconfigured fields in our
- card data, as well as unconfigured hardware settings. We check for this case and
- return an error if the frame buffer or the capture window width is zero.
- </para>
- <programlisting>
-
-
- default:
- return -ENOIOCTLCMD;
- }
-}
- </programlisting>
- <para>
-
- We don't need to support any other ioctls, so if we get this far, it is time
- to tell the video layer that we don't know what the user is talking about.
- </para>
- </sect1>
- <sect1 id="endvid">
- <title>Other Functionality</title>
- <para>
- The Video4Linux layer supports additional features, including a high
- performance mmap() based capture mode and capturing part of the image.
- These features are beyond the scope of this book. You should however have
- enough example code to implement most simple video4linux devices for radio
- and TV cards.
- </para>
- </sect1>
- </chapter>
- <chapter id="bugs">
- <title>Known Bugs And Assumptions</title>
- <para>
- <variablelist>
- <varlistentry><term>Multiple Opens</term>
- <listitem>
- <para>
- The driver assumes multiple opens should not be allowed. A driver
- can work around this but not cleanly.
- </para>
- </listitem></varlistentry>
-
- <varlistentry><term>API Deficiencies</term>
- <listitem>
- <para>
- The existing API poorly reflects compression capable devices. There
- are plans afoot to merge V4L, V4L2 and some other ideas into a
- better interface.
- </para>
- </listitem></varlistentry>
- </variablelist>
-
- </para>
- </chapter>
-
- <chapter id="pubfunctions">
- <title>Public Functions Provided</title>
-!Edrivers/media/video/v4l2-dev.c
- </chapter>
-
-</book>
diff --git a/Documentation/HOWTO b/Documentation/HOWTO
index 48a3955f05f..8495fc97039 100644
--- a/Documentation/HOWTO
+++ b/Documentation/HOWTO
@@ -112,7 +112,7 @@ required reading:
Other excellent descriptions of how to create patches properly are:
"The Perfect Patch"
- http://www.zip.com.au/~akpm/linux/patches/stuff/tpp.txt
+ http://userweb.kernel.org/~akpm/stuff/tpp.txt
"Linux kernel patch submission format"
http://linux.yyz.us/patch-format.html
@@ -620,7 +620,7 @@ all time. It should describe the patch completely, containing:
For more details on what this should all look like, please see the
ChangeLog section of the document:
"The Perfect Patch"
- http://www.zip.com.au/~akpm/linux/patches/stuff/tpp.txt
+ http://userweb.kernel.org/~akpm/stuff/tpp.txt
diff --git a/Documentation/MSI-HOWTO.txt b/Documentation/MSI-HOWTO.txt
index a51f693c154..256defd7e17 100644
--- a/Documentation/MSI-HOWTO.txt
+++ b/Documentation/MSI-HOWTO.txt
@@ -236,10 +236,8 @@ software system can set different pages for controlling accesses to the
MSI-X structure. The implementation of MSI support requires the PCI
subsystem, not a device driver, to maintain full control of the MSI-X
table/MSI-X PBA (Pending Bit Array) and MMIO address space of the MSI-X
-table/MSI-X PBA. A device driver is prohibited from requesting the MMIO
-address space of the MSI-X table/MSI-X PBA. Otherwise, the PCI subsystem
-will fail enabling MSI-X on its hardware device when it calls the function
-pci_enable_msix().
+table/MSI-X PBA. A device driver should not access the MMIO address
+space of the MSI-X table/MSI-X PBA.
5.3.2 API pci_enable_msix
diff --git a/Documentation/PCI/pci.txt b/Documentation/PCI/pci.txt
index 8d4dc6250c5..fd4907a2968 100644
--- a/Documentation/PCI/pci.txt
+++ b/Documentation/PCI/pci.txt
@@ -163,6 +163,10 @@ need pass only as many optional fields as necessary:
o class and classmask fields default to 0
o driver_data defaults to 0UL.
+Note that driver_data must match the value used by any of the pci_device_id
+entries defined in the driver. This makes the driver_data field mandatory
+if all the pci_device_id entries have a non-zero driver_data value.
+
Once added, the driver probe routine will be invoked for any unclaimed
PCI devices listed in its (newly updated) pci_ids list.
diff --git a/Documentation/PCI/pcieaer-howto.txt b/Documentation/PCI/pcieaer-howto.txt
index 16c251230c8..ddeb14beacc 100644
--- a/Documentation/PCI/pcieaer-howto.txt
+++ b/Documentation/PCI/pcieaer-howto.txt
@@ -203,22 +203,17 @@ to mmio_enabled.
3.3 helper functions
-3.3.1 int pci_find_aer_capability(struct pci_dev *dev);
-pci_find_aer_capability locates the PCI Express AER capability
-in the device configuration space. If the device doesn't support
-PCI-Express AER, the function returns 0.
-
-3.3.2 int pci_enable_pcie_error_reporting(struct pci_dev *dev);
+3.3.1 int pci_enable_pcie_error_reporting(struct pci_dev *dev);
pci_enable_pcie_error_reporting enables the device to send error
messages to the root port when an error is detected. Note that devices
don't enable the error reporting by default, so device drivers need
to call this function to enable it.
-3.3.3 int pci_disable_pcie_error_reporting(struct pci_dev *dev);
+3.3.2 int pci_disable_pcie_error_reporting(struct pci_dev *dev);
pci_disable_pcie_error_reporting disables the device from sending error
messages to the root port when an error is detected.
-3.3.4 int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev);
+3.3.3 int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev);
pci_cleanup_aer_uncorrect_error_status cleans up the uncorrectable
error status register.
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt
index cf5562cbe35..6e253407b3d 100644
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
@@ -210,7 +210,7 @@ over a rather long period of time, but improvements are always welcome!
number of updates per grace period.
9. All RCU list-traversal primitives, which include
- rcu_dereference(), list_for_each_rcu(), list_for_each_entry_rcu(),
+ rcu_dereference(), list_for_each_entry_rcu(),
list_for_each_continue_rcu(), and list_for_each_safe_rcu(),
must be either within an RCU read-side critical section or
must be protected by appropriate update-side locks. RCU
diff --git a/Documentation/RCU/rcuref.txt b/Documentation/RCU/rcuref.txt
index 451de2ad832..4202ad09313 100644
--- a/Documentation/RCU/rcuref.txt
+++ b/Documentation/RCU/rcuref.txt
@@ -29,9 +29,9 @@ release_referenced() delete()
}
If this list/array is made lock free using RCU as in changing the
-write_lock() in add() and delete() to spin_lock and changing read_lock
-in search_and_reference to rcu_read_lock(), the atomic_get in
-search_and_reference could potentially hold reference to an element which
+write_lock() in add() and delete() to spin_lock() and changing read_lock()
+in search_and_reference() to rcu_read_lock(), the atomic_inc() in
+search_and_reference() could potentially hold reference to an element which
has already been deleted from the list/array. Use atomic_inc_not_zero()
in this scenario as follows:
@@ -40,20 +40,20 @@ add() search_and_reference()
{ {
alloc_object rcu_read_lock();
... search_for_element
- atomic_set(&el->rc, 1); if (atomic_inc_not_zero(&el->rc)) {
- write_lock(&list_lock); rcu_read_unlock();
+ atomic_set(&el->rc, 1); if (!atomic_inc_not_zero(&el->rc)) {
+ spin_lock(&list_lock); rcu_read_unlock();
return FAIL;
add_element }
... ...
- write_unlock(&list_lock); rcu_read_unlock();
+ spin_unlock(&list_lock); rcu_read_unlock();
} }
3. 4.
release_referenced() delete()
{ {
- ... write_lock(&list_lock);
+ ... spin_lock(&list_lock);
if (atomic_dec_and_test(&el->rc)) ...
call_rcu(&el->head, el_free); delete_element
- ... write_unlock(&list_lock);
+ ... spin_unlock(&list_lock);
} ...
if (atomic_dec_and_test(&el->rc))
call_rcu(&el->head, el_free);
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index e04d643a9f5..96170824a71 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -786,8 +786,6 @@ RCU pointer/list traversal:
list_for_each_entry_rcu
hlist_for_each_entry_rcu
- list_for_each_rcu (to be deprecated in favor of
- list_for_each_entry_rcu)
list_for_each_continue_rcu (to be deprecated in favor of new
list_for_each_entry_continue_rcu)
diff --git a/Documentation/SAK.txt b/Documentation/SAK.txt
index b9019ca872e..74be14679ed 100644
--- a/Documentation/SAK.txt
+++ b/Documentation/SAK.txt
@@ -1,5 +1,5 @@
Linux 2.4.2 Secure Attention Key (SAK) handling
-18 March 2001, Andrew Morton <akpm@osdl.org>
+18 March 2001, Andrew Morton
An operating system's Secure Attention Key is a security tool which is
provided as protection against trojan password capturing programs. It
diff --git a/Documentation/SELinux.txt b/Documentation/SELinux.txt
new file mode 100644
index 00000000000..07eae00f331
--- /dev/null
+++ b/Documentation/SELinux.txt
@@ -0,0 +1,27 @@
+If you want to use SELinux, chances are you will want
+to use the distro-provided policies, or install the
+latest reference policy release from
+ http://oss.tresys.com/projects/refpolicy
+
+However, if you want to install a dummy policy for
+testing, you can do so using 'mdp' provided under
+scripts/selinux. Note that this requires the selinux
+userspace to be installed - in particular you will
+need checkpolicy to compile the policy, and setfiles and
+fixfiles to label the filesystem.
+
+ 1. Compile the kernel with selinux enabled.
+ 2. Type 'make' to compile mdp.
+ 3. Make sure that you are not running with
+ SELinux enabled and a real policy. If
+ you are, reboot with selinux disabled
+ before continuing.
+ 4. Run install_policy.sh:
+ cd scripts/selinux
+ sh install_policy.sh
+
+Step 4 will create a new dummy policy valid for your
+kernel, with a single selinux user, role, and type.
+It will compile the policy, set your SELINUXTYPE to
+dummy in /etc/selinux/config, install the compiled policy
+as 'dummy', and relabel your filesystem.
diff --git a/Documentation/SubmitChecklist b/Documentation/SubmitChecklist
index 21f0795af20..ac5e0b2f109 100644
--- a/Documentation/SubmitChecklist
+++ b/Documentation/SubmitChecklist
@@ -85,3 +85,6 @@ kernel patches.
23: Tested after it has been merged into the -mm patchset to make sure
that it still works with all of the other queued patches and various
changes in the VM, VFS, and other subsystems.
+
+24: All memory barriers {e.g., barrier(), rmb(), wmb()} need a comment in the
+ source code that explains the logic of what they are doing and why.
diff --git a/Documentation/SubmittingDrivers b/Documentation/SubmittingDrivers
index 24f2eb40cae..99e72a81fa2 100644
--- a/Documentation/SubmittingDrivers
+++ b/Documentation/SubmittingDrivers
@@ -41,7 +41,7 @@ Linux 2.4:
Linux 2.6:
The same rules apply as 2.4 except that you should follow linux-kernel
to track changes in API's. The final contact point for Linux 2.6
- submissions is Andrew Morton <akpm@osdl.org>.
+ submissions is Andrew Morton.
What Criteria Determine Acceptance
----------------------------------
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index f79ad9ff603..f309d3c6221 100644
--- a/Documentation/SubmittingPatches
+++ b/Documentation/SubmittingPatches
@@ -77,7 +77,7 @@ Quilt:
http://savannah.nongnu.org/projects/quilt
Andrew Morton's patch scripts:
-http://www.zip.com.au/~akpm/linux/patches/
+http://userweb.kernel.org/~akpm/stuff/patch-scripts.tar.gz
Instead of these scripts, quilt is the recommended patch management
tool (see above).
@@ -405,7 +405,7 @@ person it names. This tag documents that potentially interested parties
have been included in the discussion
-14) Using Test-by: and Reviewed-by:
+14) Using Tested-by: and Reviewed-by:
A Tested-by: tag indicates that the patch has been successfully tested (in
some environment) by the person named. This tag informs maintainers that
@@ -653,7 +653,7 @@ SECTION 3 - REFERENCES
----------------------
Andrew Morton, "The perfect patch" (tpp).
- <http://www.zip.com.au/~akpm/linux/patches/stuff/tpp.txt>
+ <http://userweb.kernel.org/~akpm/stuff/tpp.txt>
Jeff Garzik, "Linux kernel patch submission format".
<http://linux.yyz.us/patch-format.html>
@@ -672,4 +672,9 @@ Kernel Documentation/CodingStyle:
Linus Torvalds's mail on the canonical patch format:
<http://lkml.org/lkml/2005/4/7/183>
+
+Andi Kleen, "On submitting kernel patches"
+ Some strategies to get difficult or controversial changes in.
+ http://halobates.de/on-submitting-patches.pdf
+
--
diff --git a/Documentation/blackfin/kgdb.txt b/Documentation/blackfin/kgdb.txt
deleted file mode 100644
index 84f6a484ae9..00000000000
--- a/Documentation/blackfin/kgdb.txt
+++ /dev/null
@@ -1,155 +0,0 @@
- A Simple Guide to Configure KGDB
-
- Sonic Zhang <sonic.zhang@analog.com>
- Aug. 24th 2006
-
-
-This KGDB patch enables the kernel developer to do source level debugging on
-the kernel for the Blackfin architecture. The debugging works over either the
-ethernet interface or one of the uarts. Both software breakpoints and
-hardware breakpoints are supported in this version.
-http://docs.blackfin.uclinux.org/doku.php?id=kgdb
-
-
-2 known issues:
-1. This bug:
- http://blackfin.uclinux.org/tracker/index.php?func=detail&aid=544&group_id=18&atid=145
- The GDB client for Blackfin uClinux causes incorrect values of local
- variables to be displayed when the user breaks the running kernel in GDB.
-2. Because of a hardware bug in Blackfin 533 v1.0.3:
- 05000067 - Watchpoints (Hardware Breakpoints) are not supported
- Hardware breakpoints cannot be set properly.
-
-
-Debug over Ethernet:
-
-1. Compile and install the cross platform version of gdb for blackfin, which
- can be found at $(BINROOT)/bfin-elf-gdb.
-
-2. Apply this patch to the 2.6.x kernel. Select the menuconfig option under
- "Kernel hacking" -> "Kernel debugging" -> "KGDB: kernel debug with remote gdb".
- With this selected, option "Full Symbolic/Source Debugging support" and
- "Compile the kernel with frame pointers" are also selected.
-
-3. Select option "KGDB: connect over (Ethernet)". Add "kgdboe=@target-IP/,@host-IP/" to
- the option "Compiled-in Kernel Boot Parameter" under "Kernel hacking".
-
-4. Connect minicom to the serial port and boot the kernel image.
-
-5. Configure the IP "/> ifconfig eth0 target-IP"
-
-6. Start GDB client "bfin-elf-gdb vmlinux".
-
-7. Connect to the target "(gdb) target remote udp:target-IP:6443".
-
-8. Set software breakpoint "(gdb) break sys_open".
-
-9. Continue "(gdb) c".
-
-10. Run ls in the target console "/> ls".
-
-11. Breakpoint hits. "Breakpoint 1: sys_open(..."
-
-12. Display local variables and function parameters.
- (*) This operation gives wrong results, see known issue 1.
-
-13. Single stepping "(gdb) si".
-
-14. Remove breakpoint 1. "(gdb) del 1"
-
-15. Set hardware breakpoint "(gdb) hbreak sys_open".
-
-16. Continue "(gdb) c".
-
-17. Run ls in the target console "/> ls".
-
-18. Hardware breakpoint hits. "Breakpoint 1: sys_open(...".
- (*) This hardware breakpoint will not be hit, see known issue 2.
-
-19. Continue "(gdb) c".
-
-20. Interrupt the target in GDB "Ctrl+C".
-
-21. Detach from the target "(gdb) detach".
-
-22. Exit GDB "(gdb) quit".
-
-
-Debug over the UART:
-
-1. Compile and install the cross platform version of gdb for blackfin, which
- can be found at $(BINROOT)/bfin-elf-gdb.
-
-2. Apply this patch to the 2.6.x kernel. Select the menuconfig option under
- "Kernel hacking" -> "Kernel debugging" -> "KGDB: kernel debug with remote gdb".
- With this selected, option "Full Symbolic/Source Debugging support" and
- "Compile the kernel with frame pointers" are also selected.
-
-3. Select option "KGDB: connect over (UART)". Set "KGDB: UART port number" to be
- a different one from the console. Don't forget to change the mode of
- blackfin serial driver to PIO. Otherwise kgdb works incorrectly on UART.
-
-4. If you want to connect to kgdb when the kernel boots, enable
- "KGDB: Wait for gdb connection early"
-
-5. Compile kernel.
-
-6. Connect minicom to the serial port of the console and boot the kernel image.
-
-7. Start GDB client "bfin-elf-gdb vmlinux".
-
-8. Set the baud rate in GDB "(gdb) set remotebaud 57600".
-
-9. Connect to the target on the second serial port "(gdb) target remote /dev/ttyS1".
-
-10. Set software breakpoint "(gdb) break sys_open".
-
-11. Continue "(gdb) c".
-
-12. Run ls in the target console "/> ls".
-
-13. A breakpoint is hit. "Breakpoint 1: sys_open(..."
-
-14. All other operations are the same as those in KGDB over Ethernet.
-
-
-Debug over the same UART as console:
-
-1. Compile and install the cross platform version of gdb for blackfin, which
- can be found at $(BINROOT)/bfin-elf-gdb.
-
-2. Apply this patch to the 2.6.x kernel. Select the menuconfig option under
- "Kernel hacking" -> "Kernel debugging" -> "KGDB: kernel debug with remote gdb".
- With this selected, option "Full Symbolic/Source Debugging support" and
- "Compile the kernel with frame pointers" are also selected.
-
-3. Select option "KGDB: connect over UART". Set "KGDB: UART port number" to console.
- Don't forget to change the mode of blackfin serial driver to PIO.
- Otherwise kgdb works incorrectly on UART.
-
-4. If you want to connect to kgdb when the kernel boots, enable
- "KGDB: Wait for gdb connection early"
-
-5. Connect minicom to the serial port and boot the kernel image.
-
-6. (Optional) Ask target to wait for gdb connection by entering Ctrl+A. In minicom, you should enter Ctrl+A+A.
-
-7. Start GDB client "bfin-elf-gdb vmlinux".
-
-8. Set the baud rate in GDB "(gdb) set remotebaud 57600".
-
-9. Connect to the target "(gdb) target remote /dev/ttyS0".
-
-10. Set software breakpoint "(gdb) break sys_open".
-
-11. Continue "(gdb) c". Then enter Ctrl+C twice to stop GDB connection.
-
-12. Run ls in the target console "/> ls". A dummy string can be seen on the console.
-
-13. Then connect the gdb to target again. "(gdb) target remote /dev/ttyS0".
- Now you will find a breakpoint is hit. "Breakpoint 1: sys_open(..."
-
-14. All other operations are the same as those in KGDB over Ethernet. The only
- difference is that after the continue command in GDB, please stop the GDB
- connection with two "Ctrl+C"s and connect again after breakpoints are hit or
- Ctrl+A is entered.
diff --git a/Documentation/block/data-integrity.txt b/Documentation/block/data-integrity.txt
index e9dc8d86adc..e8ca040ba2c 100644
--- a/Documentation/block/data-integrity.txt
+++ b/Documentation/block/data-integrity.txt
@@ -246,7 +246,7 @@ will require extra work due to the application tag.
retrieve the tag buffer using bio_integrity_get_tag().
-6.3 PASSING EXISTING INTEGRITY METADATA
+5.3 PASSING EXISTING INTEGRITY METADATA
Filesystems that either generate their own integrity metadata or
are capable of transferring IMD from user space can use the
@@ -283,7 +283,7 @@ will require extra work due to the application tag.
integrity upon completion.
-6.4 REGISTERING A BLOCK DEVICE AS CAPABLE OF EXCHANGING INTEGRITY
+5.4 REGISTERING A BLOCK DEVICE AS CAPABLE OF EXCHANGING INTEGRITY
METADATA
To enable integrity exchange on a block device the gendisk must be
diff --git a/Documentation/block/deadline-iosched.txt b/Documentation/block/deadline-iosched.txt
index c23cab13c3d..72576769e0f 100644
--- a/Documentation/block/deadline-iosched.txt
+++ b/Documentation/block/deadline-iosched.txt
@@ -30,12 +30,18 @@ write_expire (in ms)
Similar to read_expire mentioned above, but for writes.
-fifo_batch
+fifo_batch (number of requests)
----------
-When a read request expires its deadline, we must move some requests from
-the sorted io scheduler list to the block device dispatch queue. fifo_batch
-controls how many requests we move.
+Requests are grouped into ``batches'' of a particular data direction (read or
+write) which are serviced in increasing sector order. To limit extra seeking,
+deadline expiries are only checked between batches. fifo_batch controls the
+maximum number of requests per batch.
+
+This parameter tunes the balance between per-request latency and aggregate
+throughput. When low latency is the primary concern, smaller is better (where
+a value of 1 yields first-come first-served behaviour). Increasing fifo_batch
+generally improves throughput, at the cost of latency variation.
writes_starved (number of dispatches)
diff --git a/Documentation/cdrom/ide-cd b/Documentation/cdrom/ide-cd
index 91c0dcc6fa5..2c558cd6c1e 100644
--- a/Documentation/cdrom/ide-cd
+++ b/Documentation/cdrom/ide-cd
@@ -145,8 +145,7 @@ useful for reading photocds.
To play an audio CD, you should first unmount and remove any data
CDROM. Any of the CDROM player programs should then work (workman,
-workbone, cdplayer, etc.). Lacking anything else, you could use the
-cdtester program in Documentation/cdrom/sbpcd.
+workbone, cdplayer, etc.).
On a few drives, you can read digital audio directly using a program
such as cdda2wav. The only types of drive which I've heard support
diff --git a/Documentation/cgroups.txt b/Documentation/cgroups/cgroups.txt
index d9014aa0eb6..d9014aa0eb6 100644
--- a/Documentation/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
diff --git a/Documentation/cgroups/freezer-subsystem.txt b/Documentation/cgroups/freezer-subsystem.txt
new file mode 100644
index 00000000000..c50ab58b72e
--- /dev/null
+++ b/Documentation/cgroups/freezer-subsystem.txt
@@ -0,0 +1,99 @@
+ The cgroup freezer is useful to batch job management systems which start
+and stop sets of tasks in order to schedule the resources of a machine
+according to the desires of a system administrator. This sort of program
+is often used on HPC clusters to schedule access to the cluster as a
+whole. The cgroup freezer uses cgroups to describe the set of tasks to
+be started/stopped by the batch job management system. It also provides
+a means to start and stop the tasks composing the job.
+
+ The cgroup freezer will also be useful for checkpointing running groups
+of tasks. The freezer allows the checkpoint code to obtain a consistent
+image of the tasks by attempting to force the tasks in a cgroup into a
+quiescent state. Once the tasks are quiescent another task can
+walk /proc or invoke a kernel interface to gather information about the
+quiesced tasks. Checkpointed tasks can be restarted later should a
+recoverable error occur. This also allows the checkpointed tasks to be
+migrated between nodes in a cluster by copying the gathered information
+to another node and restarting the tasks there.
+
+ Sequences of SIGSTOP and SIGCONT are not always sufficient for stopping
+and resuming tasks in userspace. Both of these signals are observable
+from within the tasks we wish to freeze. While SIGSTOP cannot be caught,
+blocked, or ignored it can be seen by waiting or ptracing parent tasks.
+SIGCONT is especially unsuitable since it can be caught by the task. Any
+programs designed to watch for SIGSTOP and SIGCONT could be broken by
+attempting to use SIGSTOP and SIGCONT to stop and resume tasks. We can
+demonstrate this problem using nested bash shells:
+
+ $ echo $$
+ 16644
+ $ bash
+ $ echo $$
+ 16690
+
+ From a second, unrelated bash shell:
+ $ kill -SIGSTOP 16690
+ $ kill -SIGCONT 16690
+
+ <at this point 16690 exits and causes 16644 to exit too>
+
+ This happens because bash can observe both signals and choose how it
+responds to them.
+
+ Another example of a program which catches and responds to these
+signals is gdb. In fact any program designed to use ptrace is likely to
+have a problem with this method of stopping and resuming tasks.
+
+ In contrast, the cgroup freezer uses the kernel freezer code to
+prevent the freeze/unfreeze cycle from becoming visible to the tasks
+being frozen. This allows the bash example above and gdb to run as
+expected.
+
+ The freezer subsystem in the container filesystem defines a file named
+freezer.state. Writing "FROZEN" to the state file will freeze all tasks in the
+cgroup. Subsequently writing "THAWED" will unfreeze the tasks in the cgroup.
+Reading will return the current state.
+
+* Examples of usage :
+
+ # mkdir /containers
+ # mount -t cgroup -ofreezer freezer /containers
+ # mkdir /containers/0
+ # echo $some_pid > /containers/0/tasks
+
+to get status of the freezer subsystem :
+
+ # cat /containers/0/freezer.state
+ THAWED
+
+to freeze all tasks in the container :
+
+ # echo FROZEN > /containers/0/freezer.state
+ # cat /containers/0/freezer.state
+ FREEZING
+ # cat /containers/0/freezer.state
+ FROZEN
+
+to unfreeze all tasks in the container :
+
+ # echo THAWED > /containers/0/freezer.state
+ # cat /containers/0/freezer.state
+ THAWED
+
+This is the basic mechanism which should do the right thing for user space tasks
+in a simple scenario.
+
+It's important to note that freezing can be incomplete. In that case we return
+EBUSY. This means that some tasks in the cgroup are busy doing something that
+prevents us from completely freezing the cgroup at this time. After EBUSY,
+the cgroup will remain partially frozen -- reflected by freezer.state reporting
+"FREEZING" when read. The state will remain "FREEZING" until one of these
+things happens:
+
+ 1) Userspace cancels the freezing operation by writing "THAWED" to
+ the freezer.state file
+ 2) Userspace retries the freezing operation by writing "FROZEN" to
+ the freezer.state file (writing "FREEZING" is not legal
+ and returns EIO)
+ 3) The tasks that blocked the cgroup from entering the "FROZEN"
+ state disappear from the cgroup's set of tasks.
diff --git a/Documentation/controllers/memory.txt b/Documentation/controllers/memory.txt
index 9b53d582736..1c07547d3f8 100644
--- a/Documentation/controllers/memory.txt
+++ b/Documentation/controllers/memory.txt
@@ -112,14 +112,22 @@ the per cgroup LRU.
2.2.1 Accounting details
-All mapped pages (RSS) and unmapped user pages (Page Cache) are accounted.
-RSS pages are accounted at the time of page_add_*_rmap() unless they've already
-been accounted for earlier. A file page will be accounted for as Page Cache;
-it's mapped into the page tables of a process, duplicate accounting is carefully
-avoided. Page Cache pages are accounted at the time of add_to_page_cache().
-The corresponding routines that remove a page from the page tables or removes
-a page from Page Cache is used to decrement the accounting counters of the
-cgroup.
+All mapped anon pages (RSS) and cache pages (Page Cache) are accounted.
+(some pages which can never be reclaimed and will not be on the global LRU
+ are not accounted. We only account pages under usual VM management.)
+
+RSS pages are accounted at page_fault unless they've already been accounted
+for earlier. A file page will be accounted for as Page Cache when it's
+inserted into the inode's radix-tree. Even while it's mapped into the page
+tables of processes, duplicate accounting is carefully avoided.
+
+An RSS page is unaccounted when it's fully unmapped. A PageCache page is
+unaccounted when it's removed from radix-tree.
+
+At page migration, accounting information is kept.
+
+Note: we just account pages-on-LRU because our purpose is to control the amount
+of used pages. Not-on-LRU pages tend to be out of control from the VM's view.
2.3 Shared Page Accounting
diff --git a/Documentation/cpusets.txt b/Documentation/cpusets.txt
index 47e568a9370..5c86c258c79 100644
--- a/Documentation/cpusets.txt
+++ b/Documentation/cpusets.txt
@@ -48,7 +48,7 @@ hooks, beyond what is already present, required to manage dynamic
job placement on large systems.
Cpusets use the generic cgroup subsystem described in
-Documentation/cgroup.txt.
+Documentation/cgroups/cgroups.txt.
Requests by a task, using the sched_setaffinity(2) system call to
include CPUs in its CPU affinity mask, and using the mbind(2) and
diff --git a/Documentation/cris/README b/Documentation/cris/README
index 795a1dabe6c..d9b086869a6 100644
--- a/Documentation/cris/README
+++ b/Documentation/cris/README
@@ -27,7 +27,7 @@ operating system.
The ETRAX 100LX chip
--------------------
-For reference, plase see the press-release:
+For reference, please see the press-release:
http://www.axis.com/news/us/001101_etrax.htm
diff --git a/Documentation/development-process/1.Intro b/Documentation/development-process/1.Intro
new file mode 100644
index 00000000000..8cc2cba2b10
--- /dev/null
+++ b/Documentation/development-process/1.Intro
@@ -0,0 +1,274 @@
+1: A GUIDE TO THE KERNEL DEVELOPMENT PROCESS
+
+The purpose of this document is to help developers (and their managers)
+work with the development community with a minimum of frustration. It is
+an attempt to document how this community works in a way which is
+accessible to those who are not intimately familiar with Linux kernel
+development (or, indeed, free software development in general). While
+there is some technical material here, this is very much a process-oriented
+discussion which does not require a deep knowledge of kernel programming to
+understand.
+
+
+1.1: EXECUTIVE SUMMARY
+
+The rest of this section covers the scope of the kernel development process
+and the kinds of frustrations that developers and their employers can
+encounter there. There are a great many reasons why kernel code should be
+merged into the official ("mainline") kernel, including automatic
+availability to users, community support in many forms, and the ability to
+influence the direction of kernel development. Code contributed to the
+Linux kernel must be made available under a GPL-compatible license.
+
+Section 2 introduces the development process, the kernel release cycle, and
+the mechanics of the merge window. The various phases in the patch
+development, review, and merging cycle are covered. There is some
+discussion of tools and mailing lists. Developers wanting to get started
+with kernel development are encouraged to track down and fix bugs as an
+initial exercise.
+
+Section 3 covers early-stage project planning, with an emphasis on
+involving the development community as soon as possible.
+
+Section 4 is about the coding process; several pitfalls which have been
+encountered by other developers are discussed. Some requirements for
+patches are covered, and there is an introduction to some of the tools
+which can help to ensure that kernel patches are correct.
+
+Section 5 talks about the process of posting patches for review. To be
+taken seriously by the development community, patches must be properly
+formatted and described, and they must be sent to the right place.
+Following the advice in this section should help to ensure the best
+possible reception for your work.
+
+Section 6 covers what happens after posting patches; the job is far from
+done at that point. Working with reviewers is a crucial part of the
+development process; this section offers a number of tips on how to avoid
+problems at this important stage. Developers are cautioned against
+assuming that the job is done when a patch is merged into the mainline.
+
+Section 7 introduces a couple of "advanced" topics: managing patches with
+git and reviewing patches posted by others.
+
+Section 8 concludes the document with pointers to sources for more
+information on kernel development.
+
+
+1.2: WHAT THIS DOCUMENT IS ABOUT
+
+The Linux kernel, at over 6 million lines of code and well over 1000 active
+contributors, is one of the largest and most active free software projects
+in existence. Since its humble beginning in 1991, this kernel has evolved
+into a best-of-breed operating system component which runs on pocket-sized
+digital music players, desktop PCs, the largest supercomputers in
+existence, and all types of systems in between. It is a robust, efficient,
+and scalable solution for almost any situation.
+
+With the growth of Linux has come an increase in the number of developers
+(and companies) wishing to participate in its development. Hardware
+vendors want to ensure that Linux supports their products well, making
+those products attractive to Linux users. Embedded systems vendors, who
+use Linux as a component in an integrated product, want Linux to be as
+capable and well-suited to the task at hand as possible. Distributors and
+other software vendors who base their products on Linux have a clear
+interest in the capabilities, performance, and reliability of the Linux
+kernel. And end users, too, will often wish to change Linux to make it
+better suit their needs.
+
+One of the most compelling features of Linux is that it is accessible to
+these developers; anybody with the requisite skills can improve Linux and
+influence the direction of its development. Proprietary products cannot
+offer this kind of openness, which is a characteristic of the free software
+process. But, if anything, the kernel is even more open than most other
+free software projects. A typical three-month kernel development cycle can
+involve over 1000 developers working for more than 100 different companies
+(or for no company at all).
+
+Working with the kernel development community is not especially hard. But,
+that notwithstanding, many potential contributors have experienced
+difficulties when trying to do kernel work. The kernel community has
+evolved its own distinct ways of operating which allow it to function
+smoothly (and produce a high-quality product) in an environment where
+thousands of lines of code are being changed every day. So it is not
+surprising that the Linux kernel development process differs greatly from
+proprietary development methods.
+
+The kernel's development process may come across as strange and
+intimidating to new developers, but there are good reasons and solid
+experience behind it. A developer who does not understand the kernel
+community's ways (or, worse, who tries to flout or circumvent them) will
+have a frustrating experience in store. The development community, while
+being helpful to those who are trying to learn, has little time for those
+who will not listen or who do not care about the development process.
+
+It is hoped that those who read this document will be able to avoid that
+frustrating experience. There is a lot of material here, but the effort
+involved in reading it will be repaid in short order. The development
+community is always in need of developers who will help to make the kernel
+better; the following text should help you - or those who work for you -
+join our community.
+
+
+1.3: CREDITS
+
+This document was written by Jonathan Corbet, corbet@lwn.net. It has been
+improved by comments from Johannes Berg, James Berry, Alex Chiang, Roland
+Dreier, Randy Dunlap, Jake Edge, Jiri Kosina, Matt Mackall, Arthur Marsh,
+Amanda McPherson, Andrew Morton, Andrew Price, Tsugikazu Shibata, and
+Jochen Voß.
+
+This work was supported by the Linux Foundation; thanks especially to
+Amanda McPherson, who saw the value of this effort and made it all happen.
+
+
+1.4: THE IMPORTANCE OF GETTING CODE INTO THE MAINLINE
+
+Some companies and developers occasionally wonder why they should bother
+learning how to work with the kernel community and get their code into the
+mainline kernel (the "mainline" being the kernel maintained by Linus
+Torvalds and used as a base by Linux distributors). In the short term,
+contributing code can look like an avoidable expense; it seems easier to
+just keep the code separate and support users directly. The truth of the
+matter is that keeping code separate ("out of tree") is a false economy.
+
+As a way of illustrating the costs of out-of-tree code, here are a few
+relevant aspects of the kernel development process; most of these will be
+discussed in greater detail later in this document. Consider:
+
+- Code which has been merged into the mainline kernel is available to all
+ Linux users. It will automatically be present on all distributions which
+ enable it. There is no need for driver disks, downloads, or the hassles
+ of supporting multiple versions of multiple distributions; it all just
+ works, for the developer and for the user. Incorporation into the
+ mainline solves a large number of distribution and support problems.
+
+- While kernel developers strive to maintain a stable interface to user
+ space, the internal kernel API is in constant flux. The lack of a stable
+ internal interface is a deliberate design decision; it allows fundamental
+ improvements to be made at any time and results in higher-quality code.
+ But one result of that policy is that any out-of-tree code requires
+ constant upkeep if it is to work with new kernels. Maintaining
+ out-of-tree code requires significant amounts of work just to keep that
+ code working.
+
+  Code which is in the mainline, instead, does not require this work;
+  a simple rule requires any developer who makes an API change to also
+  fix any code that breaks as a result of that change.  So code
+ which has been merged into the mainline has significantly lower
+ maintenance costs.
+
+- Beyond that, code which is in the kernel will often be improved by other
+ developers. Surprising results can come from empowering your user
+ community and customers to improve your product.
+
+- Kernel code is subjected to review, both before and after merging into
+ the mainline. No matter how strong the original developer's skills are,
+ this review process invariably finds ways in which the code can be
+ improved. Often review finds severe bugs and security problems. This is
+ especially true for code which has been developed in a closed
+ environment; such code benefits strongly from review by outside
+ developers. Out-of-tree code is lower-quality code.
+
+- Participation in the development process is your way to influence the
+ direction of kernel development. Users who complain from the sidelines
+ are heard, but active developers have a stronger voice - and the ability
+ to implement changes which make the kernel work better for their needs.
+
+- When code is maintained separately, the possibility that a third party
+ will contribute a different implementation of a similar feature always
+ exists. Should that happen, getting your code merged will become much
+ harder - to the point of impossibility. Then you will be faced with the
+ unpleasant alternatives of either (1) maintaining a nonstandard feature
+ out of tree indefinitely, or (2) abandoning your code and migrating your
+ users over to the in-tree version.
+
+- Contribution of code is the fundamental action which makes the whole
+ process work. By contributing your code you can add new functionality to
+ the kernel and provide capabilities and examples which are of use to
+ other kernel developers. If you have developed code for Linux (or are
+ thinking about doing so), you clearly have an interest in the continued
+ success of this platform; contributing code is one of the best ways to
+ help ensure that success.
+
+All of the reasoning above applies to any out-of-tree kernel code,
+including code which is distributed in proprietary, binary-only form.
+There are, however, additional factors which should be taken into account
+before considering any sort of binary-only kernel code distribution. These
+include:
+
+- The legal issues around the distribution of proprietary kernel modules
+ are cloudy at best; quite a few kernel copyright holders believe that
+ most binary-only modules are derived products of the kernel and that, as
+ a result, their distribution is a violation of the GNU General Public
+License (about which more will be said below). Your author is not a
+ lawyer, and nothing in this document can possibly be considered to be
+ legal advice. The true legal status of closed-source modules can only be
+ determined by the courts. But the uncertainty which haunts those modules
+ is there regardless.
+
+- Binary modules greatly increase the difficulty of debugging kernel
+ problems, to the point that most kernel developers will not even try. So
+ the distribution of binary-only modules will make it harder for your
+ users to get support from the community.
+
+- Support is also harder for distributors of binary-only modules, who must
+ provide a version of the module for every distribution and every kernel
+ version they wish to support. Dozens of builds of a single module can
+ be required to provide reasonably comprehensive coverage, and your users
+ will have to upgrade your module separately every time they upgrade their
+ kernel.
+
+- Everything that was said above about code review applies doubly to
+ closed-source code. Since this code is not available at all, it cannot
+ have been reviewed by the community and will, beyond doubt, have serious
+ problems.
+
+Makers of embedded systems, in particular, may be tempted to disregard much
+of what has been said in this section in the belief that they are shipping
+a self-contained product which uses a frozen kernel version and requires no
+more development after its release. This argument misses the value of
+widespread code review and the value of allowing your users to add
+capabilities to your product. But these products, too, have a limited
+commercial life, after which a new version must be released. At that
+point, vendors whose code is in the mainline and well maintained will be
+much better positioned to get the new product ready for market quickly.
+
+
+1.5: LICENSING
+
+Code is contributed to the Linux kernel under a number of licenses, but all
+code must be compatible with version 2 of the GNU General Public License
+(GPLv2), which is the license covering the kernel distribution as a whole.
+In practice, that means that all code contributions are covered either by
+GPLv2 (with, optionally, language allowing distribution under later
+versions of the GPL) or the three-clause BSD license. Any contributions
+which are not covered by a compatible license will not be accepted into the
+kernel.
+
+Copyright assignments are not required (or requested) for code contributed
+to the kernel. All code merged into the mainline kernel retains its
+original ownership; as a result, the kernel now has thousands of owners.
+
+One implication of this ownership structure is that any attempt to change
+the licensing of the kernel is doomed to almost certain failure. There are
+few practical scenarios where the agreement of all copyright holders could
+be obtained (or their code removed from the kernel). So, in particular,
+there is no prospect of a migration to version 3 of the GPL in the
+foreseeable future.
+
+It is imperative that all code contributed to the kernel be legitimately
+free software. For that reason, code from anonymous (or pseudonymous)
+contributors will not be accepted. All contributors are required to "sign
+off" on their code, stating that the code can be distributed with the
+kernel under the GPL. Code which has not been licensed as free software by
+its owner, or which risks creating copyright-related problems for the
+kernel (such as code which derives from reverse-engineering efforts lacking
+proper safeguards) cannot be contributed.
+
+Questions about copyright-related issues are common on Linux development
+mailing lists. Such questions will normally receive no shortage of
+answers, but one should bear in mind that the people answering those
+questions are not lawyers and cannot provide legal advice. If you have
+legal questions relating to Linux source code, there is no substitute for
+talking with a lawyer who understands this field. Relying on answers
+obtained on technical mailing lists is a risky affair.
diff --git a/Documentation/development-process/2.Process b/Documentation/development-process/2.Process
new file mode 100644
index 00000000000..d750321acd5
--- /dev/null
+++ b/Documentation/development-process/2.Process
@@ -0,0 +1,459 @@
+2: HOW THE DEVELOPMENT PROCESS WORKS
+
+Linux kernel development in the early 1990's was a pretty loose affair,
+with relatively small numbers of users and developers involved. With a
+user base in the millions and with some 2,000 developers involved over the
+course of one year, the kernel has since had to evolve a number of
+processes to keep development happening smoothly. A solid understanding of
+how the process works is required in order to be an effective part of it.
+
+
+2.1: THE BIG PICTURE
+
+The kernel developers use a loosely time-based release process, with a new
+major kernel release happening every two or three months. The recent
+release history looks like this:
+
+ 2.6.26 July 13, 2008
+ 2.6.25 April 16, 2008
+ 2.6.24 January 24, 2008
+ 2.6.23 October 9, 2007
+ 2.6.22 July 8, 2007
+ 2.6.21 April 25, 2007
+ 2.6.20 February 4, 2007
+
+Every 2.6.x release is a major kernel release with new features, internal
+API changes, and more. A typical 2.6 release can contain over 10,000
+changesets with changes to several hundred thousand lines of code. 2.6 is
+thus the leading edge of Linux kernel development; the kernel uses a
+rolling development model which is continually integrating major changes.
+
+A relatively straightforward discipline is followed with regard to the
+merging of patches for each release. At the beginning of each development
+cycle, the "merge window" is said to be open. At that time, code which is
+deemed to be sufficiently stable (and which is accepted by the development
+community) is merged into the mainline kernel. The bulk of changes for a
+new development cycle (and all of the major changes) will be merged during
+this time, at a rate approaching 1,000 changes ("patches," or "changesets")
+per day.
+
+(As an aside, it is worth noting that the changes integrated during the
+merge window do not come out of thin air; they have been collected, tested,
+and staged ahead of time. How that process works will be described in
+detail later on).
+
+The merge window lasts for two weeks. At the end of this time, Linus
+Torvalds will declare that the window is closed and release the first of
+the "rc" kernels. For the kernel which is destined to be 2.6.26, for
+example, the release which happens at the end of the merge window will be
+called 2.6.26-rc1. The -rc1 release is the signal that the time to merge
+new features has passed, and that the time to stabilize the next kernel has
+begun.
+
+Over the next six to ten weeks, only patches which fix problems should be
+submitted to the mainline. On occasion a more significant change will be
+allowed, but such occasions are rare; developers who try to merge new
+features outside of the merge window tend to get an unfriendly reception.
+As a general rule, if you miss the merge window for a given feature, the
+best thing to do is to wait for the next development cycle. (An occasional
+exception is made for drivers for previously-unsupported hardware; if they
+touch no in-tree code, they cannot cause regressions and should be safe to
+add at any time).
+
+As fixes make their way into the mainline, the patch rate will slow over
+time. Linus releases new -rc kernels about once a week; a normal series
+will get up to somewhere between -rc6 and -rc9 before the kernel is
+considered to be sufficiently stable and the final 2.6.x release is made.
+At that point the whole process starts over again.
+
+As an example, here is how the 2.6.25 development cycle went (all dates in
+2008):
+
+ January 24 2.6.24 stable release
+ February 10 2.6.25-rc1, merge window closes
+ February 15 2.6.25-rc2
+ February 24 2.6.25-rc3
+ March 4 2.6.25-rc4
+ March 9 2.6.25-rc5
+ March 16 2.6.25-rc6
+ March 25 2.6.25-rc7
+ April 1 2.6.25-rc8
+ April 11 2.6.25-rc9
+ April 16 2.6.25 stable release
+
+How do the developers decide when to close the development cycle and create
+the stable release? The most significant metric used is the list of
+regressions from previous releases. No bugs are welcome, but those which
+break systems which worked in the past are considered to be especially
+serious. For this reason, patches which cause regressions are looked upon
+unfavorably and are quite likely to be reverted during the stabilization
+period.
+
+The developers' goal is to fix all known regressions before the stable
+release is made. In the real world, this kind of perfection is hard to
+achieve; there are just too many variables in a project of this size.
+There comes a point where delaying the final release just makes the problem
+worse; the pile of changes waiting for the next merge window will grow
+larger, creating even more regressions the next time around. So most 2.6.x
+kernels go out with a handful of known regressions though, hopefully, none
+of them are serious.
+
+Once a stable release is made, its ongoing maintenance is passed off to the
+"stable team," currently comprised of Greg Kroah-Hartman and Chris Wright.
+The stable team will release occasional updates to the stable release using
+the 2.6.x.y numbering scheme. To be considered for an update release, a
+patch must (1) fix a significant bug, and (2) already be merged into the
+mainline for the next development kernel. Continuing our 2.6.25 example,
+the history (as of this writing) is:
+
+ May 1 2.6.25.1
+ May 6 2.6.25.2
+ May 9 2.6.25.3
+ May 15 2.6.25.4
+ June 7 2.6.25.5
+ June 9 2.6.25.6
+ June 16 2.6.25.7
+ June 21 2.6.25.8
+ June 24 2.6.25.9
+
+Stable updates for a given kernel are made for approximately six months;
+after that, the maintenance of stable releases is solely the responsibility
+of the distributors which have shipped that particular kernel.
+
+
+2.2: THE LIFECYCLE OF A PATCH
+
+Patches do not go directly from the developer's keyboard into the mainline
+kernel. There is, instead, a somewhat involved (if somewhat informal)
+process designed to ensure that each patch is reviewed for quality and that
+each patch implements a change which is desirable to have in the mainline.
+This process can happen quickly for minor fixes, or, in the case of large
+and controversial changes, go on for years. Much developer frustration
+comes from a lack of understanding of this process or from attempts to
+circumvent it.
+
+In the hopes of reducing that frustration, this document will describe how
+a patch gets into the kernel. What follows below is an introduction which
+describes the process in a somewhat idealized way. A much more detailed
+treatment will come in later sections.
+
+The stages that a patch goes through are, generally:
+
+ - Design. This is where the real requirements for the patch - and the way
+ those requirements will be met - are laid out. Design work is often
+ done without involving the community, but it is better to do this work
+ in the open if at all possible; it can save a lot of time redesigning
+ things later.
+
+ - Early review. Patches are posted to the relevant mailing list, and
+ developers on that list reply with any comments they may have. This
+ process should turn up any major problems with a patch if all goes
+ well.
+
+ - Wider review. When the patch is getting close to ready for mainline
+ inclusion, it will be accepted by a relevant subsystem maintainer -
+ though this acceptance is not a guarantee that the patch will make it
+ all the way to the mainline. The patch will show up in the maintainer's
+ subsystem tree and into the staging trees (described below). When the
+ process works, this step leads to more extensive review of the patch and
+ the discovery of any problems resulting from the integration of this
+ patch with work being done by others.
+
+ - Merging into the mainline. Eventually, a successful patch will be
+ merged into the mainline repository managed by Linus Torvalds. More
+ comments and/or problems may surface at this time; it is important that
+ the developer be responsive to these and fix any issues which arise.
+
+ - Stable release. The number of users potentially affected by the patch
+ is now large, so, once again, new problems may arise.
+
+ - Long-term maintenance. While it is certainly possible for a developer
+ to forget about code after merging it, that sort of behavior tends to
+ leave a poor impression in the development community. Merging code
+ eliminates some of the maintenance burden, in that others will fix
+ problems caused by API changes. But the original developer should
+ continue to take responsibility for the code if it is to remain useful
+ in the longer term.
+
+One of the largest mistakes made by kernel developers (or their employers)
+is to try to cut the process down to a single "merging into the mainline"
+step. This approach invariably leads to frustration for everybody
+involved.
+
+
+2.3: HOW PATCHES GET INTO THE KERNEL
+
+There is exactly one person who can merge patches into the mainline kernel
+repository: Linus Torvalds. But, of the over 12,000 patches which went
+into the 2.6.25 kernel, only 250 (around 2%) were directly chosen by Linus
+himself. The kernel project has long since grown to a size where no single
+developer could possibly inspect and select every patch unassisted. The
+way the kernel developers have addressed this growth is through the use of
+a lieutenant system built around a chain of trust.
+
+The kernel code base is logically broken down into a set of subsystems:
+networking, specific architecture support, memory management, video
+devices, etc. Most subsystems have a designated maintainer, a developer
+who has overall responsibility for the code within that subsystem. These
+subsystem maintainers are the gatekeepers (in a loose way) for the portion
+of the kernel they manage; they are the ones who will (usually) accept a
+patch for inclusion into the mainline kernel.
+
+Subsystem maintainers each manage their own version of the kernel source
+tree, usually (but certainly not always) using the git source management
+tool. Tools like git (and related tools like quilt or mercurial) allow
+maintainers to track a list of patches, including authorship information
+and other metadata. At any given time, the maintainer can identify which
+patches in his or her repository are not found in the mainline.
+
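+As a purely illustrative sketch (the remote and branch names here are
+invented), a maintainer who tracks the mainline as a remote called "linus"
+could list the not-yet-merged patches with something like:
+
+    git fetch linus
+    git log --pretty=oneline linus/master..my-subsystem-work
+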
+When the merge window opens, top-level maintainers will ask Linus to "pull"
+the patches they have selected for merging from their repositories. If
+Linus agrees, the stream of patches will flow up into his repository,
+becoming part of the mainline kernel. The amount of attention that Linus
+pays to specific patches received in a pull operation varies. It is clear
+that, sometimes, he looks quite closely. But, as a general rule, Linus
+trusts the subsystem maintainers to not send bad patches upstream.
+
+Subsystem maintainers, in turn, can pull patches from other maintainers.
+For example, the networking tree is built from patches which accumulated
+first in trees dedicated to network device drivers, wireless networking,
+etc. This chain of repositories can be arbitrarily long, though it rarely
+exceeds two or three links. Since each maintainer in the chain trusts
+those managing lower-level trees, this process is known as the "chain of
+trust."
+
+Clearly, in a system like this, getting patches into the kernel depends on
+finding the right maintainer. Sending patches directly to Linus is not
+normally the right way to go.
+
+
+2.4: STAGING TREES
+
+The chain of subsystem trees guides the flow of patches into the kernel,
+but it also raises an interesting question: what if somebody wants to look
+at all of the patches which are being prepared for the next merge window?
+Developers will be interested in what other changes are pending to see
+whether there are any conflicts to worry about; a patch which changes a
+core kernel function prototype, for example, will conflict with any other
+patches which use the older form of that function. Reviewers and testers
+want access to the changes in their integrated form before all of those
+changes land in the mainline kernel. One could pull changes from all of
+the interesting subsystem trees, but that would be a big and error-prone
+job.
+
+The answer comes in the form of staging trees, where subsystem trees are
+collected for testing and review. The older of these trees, maintained by
+Andrew Morton, is called "-mm" (for memory management, which is how it got
+started). The -mm tree integrates patches from a long list of subsystem
+trees; it also has some patches aimed at helping with debugging.
+
+Beyond that, -mm contains a significant collection of patches which have
+been selected by Andrew directly. These patches may have been posted on a
+mailing list, or they may apply to a part of the kernel for which there is
+no designated subsystem tree. As a result, -mm operates as a sort of
+subsystem tree of last resort; if there is no other obvious path for a
+patch into the mainline, it is likely to end up in -mm. Miscellaneous
+patches which accumulate in -mm will eventually either be forwarded on to
+an appropriate subsystem tree or be sent directly to Linus. In a typical
+development cycle, approximately 10% of the patches going into the mainline
+get there via -mm.
+
+The current -mm patch can always be found from the front page of
+
+ http://kernel.org/
+
+Those who want to see the current state of -mm can get the "-mm of the
+moment" tree, found at:
+
+ http://userweb.kernel.org/~akpm/mmotm/
+
+Use of the MMOTM tree is likely to be a frustrating experience, though;
+there is a definite chance that it will not even compile.
+
+The other staging tree, started more recently, is linux-next, maintained by
+Stephen Rothwell. The linux-next tree is, by design, a snapshot of what
+the mainline is expected to look like after the next merge window closes.
+Linux-next trees are announced on the linux-kernel and linux-next mailing
+lists when they are assembled; they can be downloaded from:
+
+ http://www.kernel.org/pub/linux/kernel/people/sfr/linux-next/
+
+Some information about linux-next has been gathered at:
+
+ http://linux.f-seidel.de/linux-next/pmwiki/
+
+How the linux-next tree will fit into the development process is still
+changing. As of this writing, the first full development cycle involving
+linux-next (2.6.26) is coming to an end; thus far, it has proved to be a
+valuable resource for finding and fixing integration problems before the
+beginning of the merge window. See http://lwn.net/Articles/287155/ for
+more information on how linux-next has worked to set up the 2.6.27 merge
+window.
+
+Some developers have begun to suggest that linux-next should be used as the
+target for future development as well. The linux-next tree does tend to be
+far ahead of the mainline and is more representative of the tree into which
+any new work will be merged. The downside to this idea is that the
+volatility of linux-next tends to make it a difficult development target.
+See http://lwn.net/Articles/289013/ for more information on this topic, and
+stay tuned; much is still in flux where linux-next is involved.
+
+
+2.5: TOOLS
+
+As can be seen from the above text, the kernel development process depends
+heavily on the ability to herd collections of patches in various
+directions. The whole thing would not work anywhere near as well as it
+does without suitably powerful tools. Tutorials on how to use these tools
+are well beyond the scope of this document, but there is space for a few
+pointers.
+
+By far the dominant source code management system used by the kernel
+community is git. Git is one of a number of distributed version control
+systems being developed in the free software community. It is well tuned
+for kernel development, in that it performs quite well when dealing with
+large repositories and large numbers of patches. It also has a reputation
+for being difficult to learn and use, though it has gotten better over
+time. Some sort of familiarity with git is almost a requirement for kernel
+developers; even if they do not use it for their own work, they'll need git
+to keep up with what other developers (and the mainline) are doing.
+
+Git is now packaged by almost all Linux distributions. There is a home
+page at
+
+ http://git.or.cz/
+
+That page has pointers to documentation and tutorials. One should be
+aware, in particular, of the Kernel Hacker's Guide to git, which has
+information specific to kernel development:
+
+ http://linux.yyz.us/git-howto.html
+
+Among the kernel developers who do not use git, the most popular choice is
+almost certainly Mercurial:
+
+ http://www.selenic.com/mercurial/
+
+Mercurial shares many features with git, but it provides an interface which
+many find easier to use.
+
+The other tool worth knowing about is Quilt:
+
+ http://savannah.nongnu.org/projects/quilt/
+
+Quilt is a patch management system, rather than a source code management
+system. It does not track history over time; it is, instead, oriented
+toward tracking a specific set of changes against an evolving code base.
+Some major subsystem maintainers use quilt to manage patches intended to go
+upstream. For the management of certain kinds of trees (-mm, for example),
+quilt is the best tool for the job.
+
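+As a hypothetical illustration (the patch and file names are invented), a
+typical quilt session looks something like:
+
+    quilt new frob-fix-overflow.patch
+    quilt add drivers/frob/frob_main.c
+    # ... edit drivers/frob/frob_main.c ...
+    quilt refresh
+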
+
+2.6: MAILING LISTS
+
+A great deal of Linux kernel development work is done by way of mailing
+lists. It is hard to be a fully-functioning member of the community
+without joining at least one list somewhere. But Linux mailing lists also
+represent a potential hazard to developers, who risk getting buried under a
+load of electronic mail, running afoul of the conventions used on the Linux
+lists, or both.
+
+Most kernel mailing lists are run on vger.kernel.org; the master list can
+be found at:
+
+ http://vger.kernel.org/vger-lists.html
+
+There are lists hosted elsewhere, though; a number of them are at
+lists.redhat.com.
+
+The core mailing list for kernel development is, of course, linux-kernel.
+This list is an intimidating place to be; volume can reach 500 messages per
+day, the amount of noise is high, the conversation can be severely
+technical, and participants are not always concerned with showing a high
+degree of politeness. But there is no other place where the kernel
+development community comes together as a whole; developers who avoid this
+list will miss important information.
+
+There are a few hints which can help with linux-kernel survival:
+
+- Have the list delivered to a separate folder, rather than your main
+ mailbox. One must be able to ignore the stream for sustained periods of
+ time.
+
+- Do not try to follow every conversation - nobody else does. It is
+ important to filter on both the topic of interest (though note that
+ long-running conversations can drift away from the original subject
+ without changing the email subject line) and the people who are
+ participating.
+
+- Do not feed the trolls. If somebody is trying to stir up an angry
+ response, ignore them.
+
+- When responding to linux-kernel email (or that on other lists) preserve
+ the Cc: header for all involved. In the absence of a strong reason (such
+ as an explicit request), you should never remove recipients. Always make
+ sure that the person you are responding to is in the Cc: list. This
+ convention also makes it unnecessary to explicitly ask to be copied on
+ replies to your postings.
+
+- Search the list archives (and the net as a whole) before asking
+ questions. Some developers can get impatient with people who clearly
+ have not done their homework.
+
+- Avoid top-posting (the practice of putting your answer above the quoted
+ text you are responding to). It makes your response harder to read and
+ makes a poor impression.
+
+- Ask on the correct mailing list. Linux-kernel may be the general meeting
+ point, but it is not the best place to find developers from all
+ subsystems.
+
+The last point - finding the correct mailing list - is a common place for
+beginning developers to go wrong. Somebody who asks a networking-related
+question on linux-kernel will almost certainly receive a polite suggestion
+to ask on the netdev list instead, as that is the list frequented by most
+networking developers. Other lists exist for the SCSI, video4linux, IDE,
+filesystem, etc. subsystems. The best place to look for mailing lists is
+in the MAINTAINERS file packaged with the kernel source.
+
+
+2.7: GETTING STARTED WITH KERNEL DEVELOPMENT
+
+Questions about how to get started with the kernel development process are
+common - from both individuals and companies. Equally common are missteps
+which make the beginning of the relationship harder than it has to be.
+
+Companies often look to hire well-known developers to get a development
+group started. This can, in fact, be an effective technique. But it also
+tends to be expensive and does not do much to grow the pool of experienced
+kernel developers. It is possible to bring in-house developers up to speed
+on Linux kernel development, given the investment of a bit of time. Taking
+this time can endow an employer with a group of developers who understand
+the kernel and the company both, and who can help to train others as well.
+Over the medium term, this is often the more profitable approach.
+
+Individual developers are often, understandably, at a loss for a place to
+start. Beginning with a large project can be intimidating; one often wants
+to test the waters with something smaller first. This is the point where
+some developers jump into the creation of patches fixing spelling errors or
+minor coding style issues. Unfortunately, such patches create a level of
+noise which is distracting for the development community as a whole, so,
+increasingly, they are looked down upon. New developers wishing to
+introduce themselves to the community will not get the sort of reception
+they wish for by these means.
+
+Andrew Morton gives this advice for aspiring kernel developers:
+
+ The #1 project for all kernel beginners should surely be "make sure
+ that the kernel runs perfectly at all times on all machines which
+ you can lay your hands on". Usually the way to do this is to work
+ with others on getting things fixed up (this can require
+ persistence!) but that's fine - it's a part of kernel development.
+
+(http://lwn.net/Articles/283982/).
+
+In the absence of obvious problems to fix, developers are advised to look
+at the current lists of regressions and open bugs in general. There is
+never any shortage of issues in need of fixing; by addressing these issues,
+developers will gain experience with the process while, at the same time,
+building respect with the rest of the development community.
diff --git a/Documentation/development-process/3.Early-stage b/Documentation/development-process/3.Early-stage
new file mode 100644
index 00000000000..307a159a70c
--- /dev/null
+++ b/Documentation/development-process/3.Early-stage
@@ -0,0 +1,195 @@
+3: EARLY-STAGE PLANNING
+
+When contemplating a Linux kernel development project, it can be tempting
+to jump right in and start coding. As with any significant project,
+though, much of the groundwork for success is best laid before the first
+line of code is written. Some time spent in early planning and
+communication can save far more time later on.
+
+
+3.1: SPECIFYING THE PROBLEM
+
+Like any engineering project, a successful kernel enhancement starts with a
+clear description of the problem to be solved. In some cases, this step is
+easy: when a driver is needed for a specific piece of hardware, for
+example. In others, though, it is tempting to confuse the real problem
+with the proposed solution, and that can lead to difficulties.
+
+Consider an example: some years ago, developers working with Linux audio
+sought a way to run applications without dropouts or other artifacts caused
+by excessive latency in the system. The solution they arrived at was a
+kernel module intended to hook into the Linux Security Module (LSM)
+framework; this module could be configured to give specific applications
+access to the realtime scheduler. This module was implemented and sent to
+the linux-kernel mailing list, where it immediately ran into problems.
+
+To the audio developers, this security module was sufficient to solve their
+immediate problem. To the wider kernel community, though, it was seen as a
+misuse of the LSM framework (which is not intended to confer privileges
+onto processes which they would not otherwise have) and a risk to system
+stability. Their preferred solutions involved realtime scheduling access
+via the rlimit mechanism for the short term, and ongoing latency reduction
+work in the long term.
+
+The audio community, however, could not see past the particular solution
+they had implemented; they were unwilling to accept alternatives. The
+resulting disagreement left those developers feeling disillusioned with the
+entire kernel development process; one of them went back to an audio list
+and posted this:
+
+ There are a number of very good Linux kernel developers, but they
+ tend to get outshouted by a large crowd of arrogant fools. Trying
+ to communicate user requirements to these people is a waste of
+ time. They are much too "intelligent" to listen to lesser mortals.
+
+(http://lwn.net/Articles/131776/).
+
+The reality of the situation was different; the kernel developers were far
+more concerned about system stability, long-term maintenance, and finding
+the right solution to the problem than they were with a specific module.
+The moral of the story is to focus on the problem - not a specific solution
+- and to discuss it with the development community before investing in the
+creation of a body of code.
+
+So, when contemplating a kernel development project, one should obtain
+answers to a short set of questions:
+
+ - What, exactly, is the problem which needs to be solved?
+
+ - Who are the users affected by this problem? Which use cases should the
+ solution address?
+
+ - How does the kernel fall short in addressing that problem now?
+
+Only then does it make sense to start considering possible solutions.
+
+
+3.2: EARLY DISCUSSION
+
+When planning a kernel development project, it makes great sense to hold
+discussions with the community before launching into implementation. Early
+communication can save time and trouble in a number of ways:
+
+ - It may well be that the problem is addressed by the kernel in ways which
+ you have not understood. The Linux kernel is large and has a number of
+ features and capabilities which are not immediately obvious. Not all
+ kernel capabilities are documented as well as one might like, and it is
+ easy to miss things. Your author has seen the posting of a complete
+ driver which duplicated an existing driver that the new author had been
+ unaware of. Code which reinvents existing wheels is not only wasteful;
+ it will also not be accepted into the mainline kernel.
+
+ - There may be elements of the proposed solution which will not be
+ acceptable for mainline merging. It is better to find out about
+ problems like this before writing the code.
+
+ - It's entirely possible that other developers have thought about the
+ problem; they may have ideas for a better solution, and may be willing
+ to help in the creation of that solution.
+
+Years of experience with the kernel development community have taught a
+clear lesson: kernel code which is designed and developed behind closed
+doors invariably has problems which are only revealed when the code is
+released into the community. Sometimes these problems are severe,
+requiring months or years of effort before the code can be brought up to
+the kernel community's standards. Some examples include:
+
+ - The Devicescape network stack was designed and implemented for
+ single-processor systems. It could not be merged into the mainline
+ until it was made suitable for multiprocessor systems. Retrofitting
+ locking and such into code is a difficult task; as a result, the merging
+ of this code (now called mac80211) was delayed for over a year.
+
+ - The Reiser4 filesystem included a number of capabilities which, in the
+ core kernel developers' opinion, should have been implemented in the
+ virtual filesystem layer instead. It also included features which could
+ not easily be implemented without exposing the system to user-caused
+ deadlocks. The late revelation of these problems - and refusal to
+ address some of them - has caused Reiser4 to stay out of the mainline
+ kernel.
+
+ - The AppArmor security module made use of internal virtual filesystem
+ data structures in ways which were considered to be unsafe and
+ unreliable. This code has since been significantly reworked, but
+ remains outside of the mainline.
+
+In each of these cases, a great deal of pain and extra work could have been
+avoided with some early discussion with the kernel developers.
+
+
+3.3: WHO DO YOU TALK TO?
+
+When developers decide to take their plans public, the next question will
+be: where do we start? The answer is to find the right mailing list(s) and
+the right maintainer. For mailing lists, the best approach is to look in
+the MAINTAINERS file for a relevant place to post. If there is a suitable
+subsystem list, posting there is often preferable to posting on
+linux-kernel; you are more likely to reach developers with expertise in the
+relevant subsystem and the environment may be more supportive.
+
+Finding maintainers can be a bit harder. Again, the MAINTAINERS file is
+the place to start.  That file is not always up to date, though,
+and not all subsystems are represented there. The person listed in the
+MAINTAINERS file may, in fact, not be the person who is actually acting in
+that role currently. So, when there is doubt about who to contact, a
+useful trick is to use git (and "git log" in particular) to see who is
+currently active within the subsystem of interest. Look at who is writing
+patches, and who, if anybody, is attaching Signed-off-by lines to those
+patches. Those are the people who will be best placed to help with a new
+development project.
+
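+As a rough, hypothetical example (the directory name is made up), the most
+frequent signers-off in a subsystem over the last several months can be
+listed with a pipeline along these lines:
+
+    git log --since="6 months ago" -- drivers/frobnicator/ |
+            grep 'Signed-off-by:' | sort | uniq -c | sort -rn | head
+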
+If all else fails, talking to Andrew Morton can be an effective way to
+track down a maintainer for a specific piece of code.
+
+
+3.4: WHEN TO POST?
+
+If possible, posting your plans during the early stages can only be
+helpful. Describe the problem being solved and any plans that have been
+made on how the implementation will be done. Any information you can
+provide can help the development community provide useful input on the
+project.
+
+One discouraging thing which can happen at this stage is not a hostile
+reaction, but, instead, little or no reaction at all. The sad truth of the
+matter is (1) kernel developers tend to be busy, (2) there is no shortage
+of people with grand plans and little code (or even prospect of code) to
+back them up, and (3) nobody is obligated to review or comment on ideas
+posted by others. If a request-for-comments posting yields little in the
+way of comments, do not assume that it means there is no interest in the
+project. Unfortunately, you also cannot assume that there are no problems
+with your idea. The best thing to do in this situation is to proceed,
+keeping the community informed as you go.
+
+
+3.5: GETTING OFFICIAL BUY-IN
+
+If your work is being done in a corporate environment - as most Linux
+kernel work is - you must, obviously, have permission from suitably
+empowered managers before you can post your company's plans or code to a
+public mailing list. The posting of code which has not been cleared for
+release under a GPL-compatible license can be especially problematic; the
+sooner that a company's management and legal staff can agree on the posting
+of a kernel development project, the better off everybody involved will be.
+
+Some readers may be thinking at this point that their kernel work is
+intended to support a product which does not yet have an officially
+acknowledged existence. Revealing their employer's plans on a public
+mailing list may not be a viable option. In cases like this, it is worth
+considering whether the secrecy is really necessary; there is often no real
+need to keep development plans behind closed doors.
+
+That said, there are also cases where a company legitimately cannot
+disclose its plans early in the development process. Companies with
+experienced kernel developers may choose to proceed in an open-loop manner
+on the assumption that they will be able to avoid serious integration
+problems later. For companies without that sort of in-house expertise, the
+best option is often to hire an outside developer to review the plans under
+a non-disclosure agreement. The Linux Foundation operates an NDA program
+designed to help with this sort of situation; more information can be found
+at:
+
+ http://www.linuxfoundation.org/en/NDA_program
+
+This kind of review is often enough to avoid serious problems later on
+without requiring public disclosure of the project.
diff --git a/Documentation/development-process/4.Coding b/Documentation/development-process/4.Coding
new file mode 100644
index 00000000000..014aca8f14e
--- /dev/null
+++ b/Documentation/development-process/4.Coding
@@ -0,0 +1,384 @@
+4: GETTING THE CODE RIGHT
+
+While there is much to be said for a solid and community-oriented design
+process, the proof of any kernel development project is in the resulting
+code. It is the code which will be examined by other developers and merged
+(or not) into the mainline tree. So it is the quality of this code which
+will determine the ultimate success of the project.
+
+This section will examine the coding process. We'll start with a look at a
+number of ways in which kernel developers can go wrong. Then the focus
+will shift toward doing things right and the tools which can help in that
+quest.
+
+
+4.1: PITFALLS
+
+* Coding style
+
+The kernel has long had a standard coding style, described in
+Documentation/CodingStyle. For much of that time, the policies described
+in that file were taken as being, at most, advisory. As a result, there is
+a substantial amount of code in the kernel which does not meet the coding
+style guidelines. The presence of that code leads to two independent
+hazards for kernel developers.
+
+The first of these is to believe that the kernel coding standards do not
+matter and are not enforced. The truth of the matter is that adding new
+code to the kernel is very difficult if that code is not coded according to
+the standard; many developers will request that the code be reformatted
+before they will even review it. A code base as large as the kernel
+requires some uniformity of code to make it possible for developers to
+quickly understand any part of it. So there is no longer room for
+strangely-formatted code.
+
+Occasionally, the kernel's coding style will run into conflict with an
+employer's mandated style. In such cases, the kernel's style will have to
+win before the code can be merged. Putting code into the kernel means
+giving up a degree of control in a number of ways - including control over
+how the code is formatted.
+
+The other trap is to assume that code which is already in the kernel is
+urgently in need of coding style fixes. Developers may start to generate
+reformatting patches as a way of gaining familiarity with the process, or
+as a way of getting their name into the kernel changelogs - or both. But
+pure coding style fixes are seen as noise by the development community;
+they tend to get a chilly reception. So this type of patch is best
+avoided. It is natural to fix the style of a piece of code while working
+on it for other reasons, but coding style changes should not be made for
+their own sake.
+
+The coding style document also should not be read as an absolute law which
+can never be transgressed. If there is a good reason to go against the
+style (a line which becomes far less readable if split to fit within the
+80-column limit, for example), just do it.
+
+
+* Abstraction layers
+
+Computer Science professors teach students to make extensive use of
+abstraction layers in the name of flexibility and information hiding.
+Certainly the kernel makes extensive use of abstraction; no project
+involving several million lines of code could do otherwise and survive.
+But experience has shown that excessive or premature abstraction can be
+just as harmful as premature optimization. Abstraction should be used to
+the level required and no further.
+
+At a simple level, consider a function which has an argument which is
+always passed as zero by all callers. One could retain that argument just
+in case somebody eventually needs to use the extra flexibility that it
+provides. By that time, though, chances are good that the code which
+implements this extra argument has been broken in some subtle way which was
+never noticed - because it has never been used. Or, when the need for
+extra flexibility arises, it does not do so in a way which matches the
+programmer's early expectation. Kernel developers will routinely submit
+patches to remove unused arguments; they should, in general, not be added
+in the first place.
+
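+To make the point concrete, here is an invented example (the frob_* names
+do not exist in the kernel). A reset function which carries a "flags"
+argument that every caller passes as zero is better off without that
+argument; the two declarations below contrast the speculative form with
+the form reviewers will usually prefer:
+
+    struct frob_device;
+
+    /* Speculative flexibility: every caller passes 0, so the code behind
+     * "flags" is never exercised and has probably quietly broken. */
+    int frob_reset(struct frob_device *dev, unsigned long flags);
+
+    /* Preferred: add the argument back if and when a real user appears. */
+    int frob_reset(struct frob_device *dev);
+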
+Abstraction layers which hide access to hardware - often to allow the bulk
+of a driver to be used with multiple operating systems - are especially
+frowned upon. Such layers obscure the code and may impose a performance
+penalty; they do not belong in the Linux kernel.
+
+On the other hand, if you find yourself copying significant amounts of code
+from another kernel subsystem, it is time to ask whether it would, in fact,
+make sense to pull out some of that code into a separate library or to
+implement that functionality at a higher level. There is no value in
+replicating the same code throughout the kernel.
+
+
+* #ifdef and preprocessor use in general
+
+The C preprocessor seems to present a powerful temptation to some C
+programmers, who see it as a way to efficiently encode a great deal of
+flexibility into a source file. But the preprocessor is not C, and heavy
+use of it results in code which is much harder for others to read and
+harder for the compiler to check for correctness. Heavy preprocessor use
+is almost always a sign of code which needs some cleanup work.
+
+Conditional compilation with #ifdef is, indeed, a powerful feature, and it
+is used within the kernel. But there is little desire to see code which is
+sprinkled liberally with #ifdef blocks. As a general rule, #ifdef use
+should be confined to header files whenever possible.
+Conditionally-compiled code can be confined to functions which, if the code
+is not to be present, simply become empty. The compiler will then quietly
+optimize out the call to the empty function. The result is far cleaner
+code which is easier to follow.
+
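+A common pattern, sketched here with invented names, is to keep the #ifdef
+in a header file and provide an empty inline stub when the option is
+disabled; callers can then call the function unconditionally:
+
+    struct frob_device;
+
+    #ifdef CONFIG_FROB_DEBUG
+    void frob_dump_state(struct frob_device *dev);
+    #else
+    static inline void frob_dump_state(struct frob_device *dev) { }
+    #endif
+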
+C preprocessor macros present a number of hazards, including possible
+multiple evaluation of expressions with side effects and no type safety.
+If you are tempted to define a macro, consider creating an inline function
+instead. The code which results will be the same, but inline functions are
+easier to read, do not evaluate their arguments multiple times, and allow
+the compiler to perform type checking on the arguments and return value.
+
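+A minimal illustration (the names are invented): the macro below evaluates
+its argument twice, so a call like frob_square(i++) modifies i twice with
+surprising results, while the inline function evaluates its argument
+exactly once and is type-checked:
+
+    #define frob_square(x)  ((x) * (x))      /* evaluates x twice */
+
+    static inline int frob_square_fn(int x)  /* evaluates x once */
+    {
+            return x * x;
+    }
+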
+
+* Inline functions
+
+Inline functions present a hazard of their own, though. Programmers can
+become enamored of the perceived efficiency inherent in avoiding a function
+call and fill a source file with inline functions. Those functions,
+however, can actually reduce performance. Since their code is replicated
+at each call site, they end up bloating the size of the compiled kernel.
+That, in turn, creates pressure on the processor's memory caches, which can
+slow execution dramatically. Inline functions, as a rule, should be quite
+small and relatively rare. The cost of a function call, after all, is not
+that high; the creation of large numbers of inline functions is a classic
+example of premature optimization.
+
+In general, kernel programmers ignore cache effects at their peril. The
+classic time/space tradeoff taught in beginning data structures classes
+often does not apply to contemporary hardware. Space *is* time, in that a
+larger program will run slower than one which is more compact.
+
+
+* Locking
+
+In May, 2006, the "Devicescape" networking stack was, with great
+fanfare, released under the GPL and made available for inclusion in the
+mainline kernel. This donation was welcome news; support for wireless
+networking in Linux was considered substandard at best, and the Devicescape
+stack offered the promise of fixing that situation. Yet, this code did not
+actually make it into the mainline until June, 2007 (2.6.22). What
+happened?
+
+This code showed a number of signs of having been developed behind
+corporate doors. But one large problem in particular was that it was not
+designed to work on multiprocessor systems. Before this networking stack
+(now called mac80211) could be merged, a locking scheme needed to be
+retrofitted onto it.
+
+Once upon a time, Linux kernel code could be developed without thinking
+about the concurrency issues presented by multiprocessor systems. Now,
+however, this document is being written on a dual-core laptop. Even on
+single-processor systems, work being done to improve responsiveness will
+raise the level of concurrency within the kernel. The days when kernel
+code could be written without thinking about locking are long past.
+
+Any resource (data structures, hardware registers, etc.) which could be
+accessed concurrently by more than one thread must be protected by a lock.
+New code should be written with this requirement in mind; retrofitting
+locking after the fact is a rather more difficult task. Kernel developers
+should take the time to understand the available locking primitives well
+enough to pick the right tool for the job. Code which shows a lack of
+attention to concurrency will have a difficult path into the mainline.
+
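+As a minimal sketch (the structure and function here are invented for
+illustration), the usual approach is to pair the shared data with a lock
+and to take that lock around every access:
+
+    #include <linux/spinlock.h>
+
+    struct frob_stats {
+            spinlock_t lock;        /* protects "events"; initialize with
+                                       spin_lock_init() at setup time */
+            unsigned long events;
+    };
+
+    static void frob_note_event(struct frob_stats *stats)
+    {
+            spin_lock(&stats->lock);
+            stats->events++;
+            spin_unlock(&stats->lock);
+    }
+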
+
+* Regressions
+
+One final hazard worth mentioning is this: it can be tempting to make a
+change (which may bring big improvements) which causes something to break
+for existing users. This kind of change is called a "regression," and
+regressions have become most unwelcome in the mainline kernel. With few
+exceptions, changes which cause regressions will be backed out if the
+regression cannot be fixed in a timely manner. Far better to avoid the
+regression in the first place.
+
+It is often argued that a regression can be justified if it causes things
+to work for more people than it creates problems for. Why not make a
+change if it brings new functionality to ten systems for each one it
+breaks? The best answer to this question was expressed by Linus in July,
+2007:
+
+ So we don't fix bugs by introducing new problems. That way lies
+ madness, and nobody ever knows if you actually make any real
+ progress at all. Is it two steps forwards, one step back, or one
+ step forward and two steps back?
+
+(http://lwn.net/Articles/243460/).
+
+An especially unwelcome type of regression is any sort of change to the
+user-space ABI. Once an interface has been exported to user space, it must
+be supported indefinitely. This fact makes the creation of user-space
+interfaces particularly challenging: since they cannot be changed in
+incompatible ways, they must be done right the first time. For this
+reason, a great deal of thought, clear documentation, and wide review for
+user-space interfaces is always required.
+
+
+
+4.2: CODE CHECKING TOOLS
+
+For now, at least, the writing of error-free code remains an ideal that few
+of us can reach. What we can hope to do, though, is to catch and fix as
+many of those errors as possible before our code goes into the mainline
+kernel. To that end, the kernel developers have put together an impressive
+array of tools which can catch a wide variety of obscure problems in an
+automated way. Any problem caught by the computer is a problem which will
+not afflict a user later on, so it stands to reason that the automated
+tools should be used whenever possible.
+
+The first step is simply to heed the warnings produced by the compiler.
+Contemporary versions of gcc can detect (and warn about) a large number of
+potential errors. Quite often, these warnings point to real problems.
+Code submitted for review should, as a rule, not produce any compiler
+warnings. When silencing warnings, take care to understand the real cause
+and try to avoid "fixes" which make the warning go away without addressing
+its cause.
+
+Note that not all compiler warnings are enabled by default. Build the
+kernel with "make EXTRA_CFLAGS=-W" to get the full set.
+
+The kernel provides several configuration options which turn on debugging
+features; most of these are found in the "kernel hacking" submenu. Several
+of these options should be turned on for any kernel used for development or
+testing purposes. In particular, you should turn on:
+
+ - ENABLE_WARN_DEPRECATED, ENABLE_MUST_CHECK, and FRAME_WARN to get an
+ extra set of warnings for problems like the use of deprecated interfaces
+ or ignoring an important return value from a function. The output
+ generated by these warnings can be verbose, but one need not worry about
+ warnings from other parts of the kernel.
+
+ - DEBUG_OBJECTS will add code to track the lifetime of various objects
+ created by the kernel and warn when things are done out of order. If
+ you are adding a subsystem which creates (and exports) complex objects
+ of its own, consider adding support for the object debugging
+ infrastructure.
+
+ - DEBUG_SLAB can find a variety of memory allocation and use errors; it
+ should be used on most development kernels.
+
+ - DEBUG_SPINLOCK, DEBUG_SPINLOCK_SLEEP, and DEBUG_MUTEXES will find a
+ number of common locking errors.
+
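+Expressed as configuration symbols - a sketch only, since the exact names
+and the set of available options vary somewhat from one kernel version to
+the next - a development kernel's .config might contain entries like:
+
+    CONFIG_ENABLE_WARN_DEPRECATED=y
+    CONFIG_ENABLE_MUST_CHECK=y
+    CONFIG_FRAME_WARN=1024
+    CONFIG_DEBUG_OBJECTS=y
+    CONFIG_DEBUG_SLAB=y
+    CONFIG_DEBUG_SPINLOCK=y
+    CONFIG_DEBUG_SPINLOCK_SLEEP=y
+    CONFIG_DEBUG_MUTEXES=y
+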
+There are quite a few other debugging options, some of which will be
+discussed below. Some of them have a significant performance impact and
+should not be used all of the time. But some time spent learning the
+available options will likely be paid back many times over in short order.
+
+One of the heavier debugging tools is the locking checker, or "lockdep."
+This tool will track the acquisition and release of every lock (spinlock or
+mutex) in the system, the order in which locks are acquired relative to
+each other, the current interrupt environment, and more. It can then
+ensure that locks are always acquired in the same order, that the same
+interrupt assumptions apply in all situations, and so on. In other words,
+lockdep can find a number of scenarios in which the system could, on rare
+occasion, deadlock. This kind of problem can be painful (for both
+developers and users) in a deployed system; lockdep allows them to be found
+in an automated manner ahead of time. Code with any sort of non-trivial
+locking should be run with lockdep enabled before being submitted for
+inclusion.
+
+As a diligent kernel programmer, you will, beyond doubt, check the return
+status of any operation (such as a memory allocation) which can fail. The
+fact of the matter, though, is that the resulting failure recovery paths
+are, probably, completely untested. Untested code tends to be broken code;
+you could be much more confident of your code if all those error-handling
+paths had been exercised a few times.
+
+The kernel provides a fault injection framework which can do exactly that,
+especially where memory allocations are involved. With fault injection
+enabled, a configurable percentage of memory allocations will be made to
+fail; these failures can be restricted to a specific range of code.
+Running with fault injection enabled allows the programmer to see how the
+code responds when things go badly. See
+Documentation/fault-injection/fault-injection.txt for more information on
+how to use this facility.
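+
+As a rough sketch, failing one in ten slab allocations (assuming the fault
+injection options and debugfs are configured in; the numbers are arbitrary)
+might be set up like this:
+
+	mount -t debugfs none /sys/kernel/debug
+	echo 10   > /sys/kernel/debug/failslab/probability
+	echo 1000 > /sys/kernel/debug/failslab/times
+	echo 1    > /sys/kernel/debug/failslab/verbose
+
+Here "probability" is a percentage, "times" caps the total number of
+injected failures, and "verbose" causes each failure to be logged.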
+
+Other kinds of errors can be found with the "sparse" static analysis tool.
+With sparse, the programmer can be warned about confusion between
+user-space and kernel-space addresses, mixture of big-endian and
+little-endian quantities, the passing of integer values where a set of bit
+flags is expected, and so on. Sparse must be installed separately (it can
+be found at http://www.kernel.org/pub/software/devel/sparse/ if your
+distributor does not package it); it can then be run on the code by adding
+"C=1" to your make command.
+
+Other kinds of portability errors are best found by compiling your code for
+other architectures. If you do not happen to have an S/390 system or a
+Blackfin development board handy, you can still perform the compilation
+step. A large set of cross compilers for x86 systems can be found at
+
+ http://www.kernel.org/pub/tools/crosstool/
+
+Some time spent installing and using these compilers will help avoid
+embarrassment later.
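+
+Cross builds use the same kbuild front end; for example (the toolchain
+prefix depends entirely on how your cross compiler was installed):
+
+	make ARCH=s390 CROSS_COMPILE=s390x-linux- defconfig
+	make ARCH=s390 CROSS_COMPILE=s390x-linux-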
+
+
+4.3: DOCUMENTATION
+
+Documentation has often been more the exception than the rule with kernel
+development. Even so, adequate documentation will help to ease the merging
+of new code into the kernel, make life easier for other developers, and
+be helpful for your users. In many cases, the addition of
+documentation has become essentially mandatory.
+
+The first piece of documentation for any patch is its associated
+changelog. Log entries should describe the problem being solved, the form
+of the solution, the people who worked on the patch, any relevant
+effects on performance, and anything else that might be needed to
+understand the patch.
+
+Any code which adds a new user-space interface - including new sysfs or
+/proc files - should include documentation of that interface which enables
+user-space developers to know what they are working with. See
+Documentation/ABI/README for a description of how this documentation should
+be formatted and what information needs to be provided.
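+
+An ABI entry generally takes a form like the following; the sysfs file
+described here is purely hypothetical:
+
+	What:		/sys/class/foo/foo0/enable
+	Date:		October 2008
+	KernelVersion:	2.6.28
+	Contact:	Your Name <you@example.com>
+	Description:
+			Writing 1 to this file enables the (hypothetical) foo
+			device; writing 0 disables it.  Reading it returns the
+			current state.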
+
+The file Documentation/kernel-parameters.txt describes all of the kernel's
+boot-time parameters. Any patch which adds new parameters should add the
+appropriate entries to this file.
+
+Any new configuration options must be accompanied by help text which
+clearly explains the options and when the user might want to select them.
+
+Internal API information for many subsystems is documented by way of
+specially-formatted comments; these comments can be extracted and formatted
+in a number of ways by the "kernel-doc" script. If you are working within
+a subsystem which has kerneldoc comments, you should maintain them and add
+them, as appropriate, for externally-available functions. Even in areas
+which have not been so documented, there is no harm in adding kerneldoc
+comments for the future; indeed, this can be a useful activity for
+beginning kernel developers. The format of these comments, along with some
+information on how to create kerneldoc templates, can be found in the file
+Documentation/kernel-doc-nano-HOWTO.txt.
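+
+A minimal kerneldoc comment looks something like the following; the
+function and its argument are, of course, invented for illustration:
+
+	/**
+	 * foo_reset_device - return a foo device to its power-on state
+	 * @dev: the device to be reset
+	 *
+	 * Stop any I/O in progress and restore the default configuration.
+	 * Returns zero on success or a negative error code on failure.
+	 */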
+
+Anybody who reads through a significant amount of existing kernel code will
+note that, often, comments are most notable by their absence. Once again,
+the expectations for new code are higher than they were in the past;
+merging uncommented code will be harder. That said, there is little desire
+for verbosely-commented code. The code should, itself, be readable, with
+comments explaining the more subtle aspects.
+
+Certain things should always be commented. Uses of memory barriers should
+be accompanied by a line explaining why the barrier is necessary. The
+locking rules for data structures generally need to be explained somewhere.
+Major data structures need comprehensive documentation in general.
+Non-obvious dependencies between separate bits of code should be pointed
+out. Anything which might tempt a code janitor to make an incorrect
+"cleanup" needs a comment saying why it is done the way it is. And so on.
+
+
+4.4: INTERNAL API CHANGES
+
+The binary interface provided by the kernel to user space cannot be broken
+except under the most severe circumstances. The kernel's internal
+programming interfaces, instead, are highly fluid and can be changed when
+the need arises. If you find yourself having to work around a kernel API,
+or simply not using a specific functionality because it does not meet your
+needs, that may be a sign that the API needs to change. As a kernel
+developer, you are empowered to make such changes.
+
+There are, of course, some catches. API changes can be made, but they need
+to be well justified. So any patch making an internal API change should be
+accompanied by a description of what the change is and why it is
+necessary. This kind of change should also be broken out into a separate
+patch, rather than buried within a larger patch.
+
+The other catch is that a developer who changes an internal API is
+generally charged with the task of fixing any code within the kernel tree
+which is broken by the change. For a widely-used function, this duty can
+lead to literally hundreds or thousands of changes - many of which are
+likely to conflict with work being done by other developers. Needless to
+say, this can be a large job, so it is best to be sure that the
+justification is solid.
+
+When making an incompatible API change, one should, whenever possible,
+ensure that code which has not been updated is caught by the compiler.
+This will help you to be sure that you have found all in-tree uses of that
+interface. It will also alert developers of out-of-tree code that there is
+a change that they need to respond to. Supporting out-of-tree code is not
+something that kernel developers need to be worried about, but we also do
+not have to make life harder for out-of-tree developers than it needs to
+be.
diff --git a/Documentation/development-process/5.Posting b/Documentation/development-process/5.Posting
new file mode 100644
index 00000000000..dd48132a74d
--- /dev/null
+++ b/Documentation/development-process/5.Posting
@@ -0,0 +1,278 @@
+5: POSTING PATCHES
+
+Sooner or later, the time comes when your work is ready to be presented to
+the community for review and, eventually, inclusion into the mainline
+kernel. Unsurprisingly, the kernel development community has evolved a set
+of conventions and procedures which are used in the posting of patches;
+following them will make life much easier for everybody involved. This
+document will attempt to cover these expectations in reasonable detail;
+more information can also be found in the files SubmittingPatches,
+SubmittingDrivers, and SubmitChecklist in the kernel documentation
+directory.
+
+
+5.1: WHEN TO POST
+
+There is a constant temptation to avoid posting patches before they are
+completely "ready." For simple patches, that is not a problem. If the
+work being done is complex, though, there is a lot to be gained by getting
+feedback from the community before the work is complete. So you should
+consider posting in-progress work, or even making a git tree available so
+that interested developers can catch up with your work at any time.
+
+When posting code which is not yet considered ready for inclusion, it is a
+good idea to say so in the posting itself. Also mention any major work
+which remains to be done and any known problems. Fewer people will look at
+patches which are known to be half-baked, but those who do will come in
+with the idea that they can help you drive the work in the right direction.
+
+
+5.2: BEFORE CREATING PATCHES
+
+There are a number of things which should be done before you consider
+sending patches to the development community. These include:
+
+ - Test the code to the extent that you can. Make use of the kernel's
+ debugging tools, ensure that the kernel will build with all reasonable
+ combinations of configuration options, use cross-compilers to build for
+ different architectures, etc.
+
+ - Make sure your code is compliant with the kernel coding style
+ guidelines.
+
+ - Does your change have performance implications? If so, you should run
+ benchmarks showing what the impact (or benefit) of your change is; a
+ summary of the results should be included with the patch.
+
+ - Be sure that you have the right to post the code. If this work was done
+ for an employer, the employer likely has a right to the work and must be
+ agreeable with its release under the GPL.
+
+As a general rule, putting in some extra thought before posting code almost
+always pays back the effort in short order.
+
+
+5.3: PATCH PREPARATION
+
+The preparation of patches for posting can be a surprising amount of work,
+but, once again, attempting to save time here is not generally advisable
+even in the short term.
+
+Patches must be prepared against a specific version of the kernel. As a
+general rule, a patch should be based on the current mainline as found in
+Linus's git tree. It may become necessary to make versions against -mm,
+linux-next, or a subsystem tree, though, to facilitate wider testing and
+review. Depending on the area of your patch and what is going on
+elsewhere, basing a patch against these other trees can require a
+significant amount of work resolving conflicts and dealing with API
+changes.
+
+Only the most simple changes should be formatted as a single patch;
+everything else should be made as a logical series of changes. Splitting
+up patches is a bit of an art; some developers spend a long time figuring
+out how to do it in the way that the community expects. There are a few
+rules of thumb, however, which can help considerably:
+
+ - The patch series you post will almost certainly not be the series of
+ changes found in your working revision control system. Instead, the
+ changes you have made need to be considered in their final form, then
+ split apart in ways which make sense. The developers are interested in
+ discrete, self-contained changes, not the path you took to get to those
+ changes.
+
+ - Each logically independent change should be formatted as a separate
+ patch. These changes can be small ("add a field to this structure") or
+ large (adding a significant new driver, for example), but they should be
+ conceptually small and amenable to a one-line description. Each patch
+ should make a specific change which can be reviewed on its own and
+ verified to do what it says it does.
+
+ - As a way of restating the guideline above: do not mix different types of
+ changes in the same patch. If a single patch fixes a critical security
+ bug, rearranges a few structures, and reformats the code, there is a
+ good chance that it will be passed over and the important fix will be
+ lost.
+
+ - Each patch should yield a kernel which builds and runs properly; if your
+ patch series is interrupted in the middle, the result should still be a
+ working kernel. Partial application of a patch series is a common
+ scenario when the "git bisect" tool is used to find regressions; if the
+ result is a broken kernel, you will make life harder for developers and
+ users who are engaging in the noble work of tracking down problems.
+
+ - Do not overdo it, though. One developer recently posted a set of edits
+ to a single file as 500 separate patches - an act which did not make him
+ the most popular person on the kernel mailing list. A single patch can
+ be reasonably large as long as it still contains a single *logical*
+ change.
+
+ - It can be tempting to add a whole new infrastructure with a series of
+ patches, but to leave that infrastructure unused until the final patch
+ in the series enables the whole thing. This temptation should be
+ avoided if possible; if that series adds regressions, bisection will
+ finger the last patch as the one which caused the problem, even though
+ the real bug is elsewhere. Whenever possible, a patch which adds new
+ code should make that code active immediately.
+
+Working to create the perfect patch series can be a frustrating process
+which takes quite a bit of time and thought after the "real work" has been
+done. When done properly, though, it is time well spent.
+
+
+5.4: PATCH FORMATTING
+
+So now you have a perfect series of patches for posting, but the work is
+not done quite yet. Each patch needs to be formatted into a message which
+quickly and clearly communicates its purpose to the rest of the world. To
+that end, each patch will be composed of the following:
+
+ - An optional "From" line naming the author of the patch. This line is
+ only necessary if you are passing on somebody else's patch via email,
+ but it never hurts to add it when in doubt.
+
+ - A one-line description of what the patch does. This message should be
+ enough for a reader who sees it with no other context to figure out the
+ scope of the patch; it is the line that will show up in the "short form"
+ changelogs. This message is usually formatted with the relevant
+ subsystem name first, followed by the purpose of the patch. For
+ example:
+
+ gpio: fix build on CONFIG_GPIO_SYSFS=n
+
+ - A blank line followed by a detailed description of the contents of the
+ patch. This description can be as long as is required; it should say
+ what the patch does and why it should be applied to the kernel.
+
+ - One or more tag lines, with, at a minimum, one Signed-off-by: line from
+ the author of the patch. Tags will be described in more detail below.
+
+The above three items should, normally, be the text used when committing
+the change to a revision control system. They are followed by:
+
+ - The patch itself, in the unified ("-u") patch format. Using the "-p"
+ option to diff will associate function names with changes, making the
+ resulting patch easier for others to read.
+
+You should avoid including changes to irrelevant files (those generated by
+the build process, for example, or editor backup files) in the patch. The
+file "dontdiff" in the Documentation directory can help in this regard;
+pass it to diff with the "-X" option.
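+
+A typical invocation, with placeholder directory names, might be:
+
+	diff -uprN -X linux/Documentation/dontdiff linux-orig/ linux/
+
+The -r and -N options make diff descend into subdirectories and include
+newly-created files in the output.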
+
+The tags mentioned above are used to describe how various developers have
+been associated with the development of this patch. They are described in
+detail in the SubmittingPatches document; what follows here is a brief
+summary. Each of these lines has the format:
+
+ tag: Full Name <email address> optional-other-stuff
+
+The tags in common use are:
+
+ - Signed-off-by: this is a developer's certification that he or she has
+ the right to submit the patch for inclusion into the kernel. It is an
+ agreement to the Developer's Certificate of Origin, the full text of
+ which can be found in Documentation/SubmittingPatches. Code without a
+ proper signoff cannot be merged into the mainline.
+
+ - Acked-by: indicates an agreement by another developer (often a
+ maintainer of the relevant code) that the patch is appropriate for
+ inclusion into the kernel.
+
+ - Tested-by: states that the named person has tested the patch and found
+ it to work.
+
+ - Reviewed-by: the named developer has reviewed the patch for correctness;
+ see the reviewer's statement in Documentation/SubmittingPatches for more
+ detail.
+
+ - Reported-by: names a user who reported a problem which is fixed by this
+ patch; this tag is used to give credit to the (often underappreciated)
+ people who test our code and let us know when things do not work
+ correctly.
+
+ - Cc: the named person received a copy of the patch and had the
+ opportunity to comment on it.
+
+Be careful in the addition of tags to your patches: only Cc: is appropriate
+for addition without the explicit permission of the person named.
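+
+Put together, the tag block at the end of a patch changelog might read
+something like this (the names and addresses below are invented):
+
+	Signed-off-by: Some Developer <sdev@example.com>
+	Acked-by: Subsystem Maintainer <maint@example.com>
+	Cc: Interested Bystander <bystander@example.com>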
+
+
+5.5: SENDING THE PATCH
+
+Before you mail your patches, there are a couple of other things you should
+take care of:
+
+ - Are you sure that your mailer will not corrupt the patches? Patches
+ which have had gratuitous white-space changes or line wrapping performed
+ by the mail client will not apply at the other end, and often will not
+ be examined in any detail. If there is any doubt at all, mail the patch
+ to yourself and convince yourself that it shows up intact.
+
+ Documentation/email-clients.txt has some helpful hints on making
+ specific mail clients work for sending patches.
+
+ - Are you sure your patch is free of silly mistakes? You should always
+ run patches through scripts/checkpatch.pl and address the complaints it
+ comes up with. Please bear in mind that checkpatch.pl, while being the
+ embodiment of a fair amount of thought about what kernel patches should
+ look like, is not smarter than you. If fixing a checkpatch.pl complaint
+ would make the code worse, don't do it.
+
+Patches should always be sent as plain text. Please do not send them as
+attachments; that makes it much harder for reviewers to quote sections of
+the patch in their replies. Instead, just put the patch directly into your
+message.
+
+When mailing patches, it is important to send copies to anybody who might
+be interested in it. Unlike some other projects, the kernel encourages
+people to err on the side of sending too many copies; don't assume that the
+relevant people will see your posting on the mailing lists. In particular,
+copies should go to:
+
+ - The maintainer(s) of the affected subsystem(s). As described earlier,
+ the MAINTAINERS file is the first place to look for these people.
+
+ - Other developers who have been working in the same area - especially
+ those who might be working there now. Using git to see who else has
+ modified the files you are working on can be helpful.
+
+ - If you are responding to a bug report or a feature request, copy the
+ original poster as well.
+
+ - Send a copy to the relevant mailing list, or, if nothing else applies,
+ the linux-kernel list.
+
+ - If you are fixing a bug, think about whether the fix should go into the
+ next stable update. If so, stable@kernel.org should get a copy of the
+ patch. Also add a "Cc: stable@kernel.org" to the tags within the patch
+ itself; that will cause the stable team to get a notification when your
+ fix goes into the mainline.
+
+When selecting recipients for a patch, it is good to have an idea of who
+you think will eventually accept the patch and get it merged. While it
+is possible to send patches directly to Linus Torvalds and have him merge
+them, things are not normally done that way. Linus is busy, and there are
+subsystem maintainers who watch over specific parts of the kernel. Usually
+you will want that maintainer to merge your patches. If there is no
+obvious maintainer, Andrew Morton is often the patch target of last resort.
+
+Patches need good subject lines. The canonical format for a patch line is
+something like:
+
+ [PATCH nn/mm] subsys: one-line description of the patch
+
+where "nn" is the ordinal number of the patch, "mm" is the total number of
+patches in the series, and "subsys" is the name of the affected subsystem.
+Clearly, nn/mm can be omitted for a single, standalone patch.
+
+If you have a significant series of patches, it is customary to send an
+introductory description as part zero. This convention is not universally
+followed, though; if you use it, remember that information in the
+introduction does not make it into the kernel changelogs. So please ensure
+that the patches, themselves, have complete changelog information.
+
+In general, the second and following parts of a multi-part patch should be
+sent as a reply to the first part so that they all thread together at the
+receiving end. Tools like git and quilt have commands to mail out a set of
+patches with the proper threading. If you have a long series, though, and
+are using git, please provide the --no-chain-reply-to option to avoid
+creating exceptionally deep nesting.
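+
+With git, for example, a series might be prepared and mailed along these
+lines (the revision range, output directory, and address are placeholders):
+
+	git format-patch -n --cover-letter -o outgoing/ master..my-work
+	git send-email --no-chain-reply-to \
+		--to linux-kernel@vger.kernel.org outgoing/*.patch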
diff --git a/Documentation/development-process/6.Followthrough b/Documentation/development-process/6.Followthrough
new file mode 100644
index 00000000000..a8fba3d83a8
--- /dev/null
+++ b/Documentation/development-process/6.Followthrough
@@ -0,0 +1,202 @@
+6: FOLLOWTHROUGH
+
+At this point, you have followed the guidelines given so far and, with the
+addition of your own engineering skills, have posted a perfect series of
+patches. One of the biggest mistakes that even experienced kernel
+developers can make is to conclude that their work is now done. In truth,
+posting patches indicates a transition into the next stage of the process,
+with, possibly, quite a bit of work yet to be done.
+
+It is a rare patch which is so good at its first posting that there is no
+room for improvement. The kernel development process recognizes this fact,
+and, as a result, is heavily oriented toward the improvement of posted
+code. You, as the author of that code, will be expected to work with the
+kernel community to ensure that your code is up to the kernel's quality
+standards. A failure to participate in this process is quite likely to
+prevent the inclusion of your patches into the mainline.
+
+
+6.1: WORKING WITH REVIEWERS
+
+A patch of any significance will result in a number of comments from other
+developers as they review the code. Working with reviewers can be, for
+many developers, the most intimidating part of the kernel development
+process. Life can be made much easier, though, if you keep a few things in
+mind:
+
+ - If you have explained your patch well, reviewers will understand its
+ value and why you went to the trouble of writing it. But that value
+ will not keep them from asking a fundamental question: what will it be
+ like to maintain a kernel with this code in it five or ten years later?
+ Many of the changes you may be asked to make - from coding style tweaks
+ to substantial rewrites - come from the understanding that Linux will
+ still be around and under development a decade from now.
+
+ - Code review is hard work, and it is a relatively thankless occupation;
+ people remember who wrote kernel code, but there is little lasting fame
+ for those who reviewed it. So reviewers can get grumpy, especially when
+ they see the same mistakes being made over and over again. If you get a
+ review which seems angry, insulting, or outright offensive, resist the
+ impulse to respond in kind. Code review is about the code, not about
+ the people, and code reviewers are not attacking you personally.
+
+ - Similarly, code reviewers are not trying to promote their employers'
+ agendas at the expense of your own. Kernel developers often expect to
+ be working on the kernel years from now, but they understand that their
+ employer could change. They truly are, almost without exception,
+ working toward the creation of the best kernel they can; they are not
+ trying to create discomfort for their employers' competitors.
+
+What all of this comes down to is that, when reviewers send you comments,
+you need to pay attention to the technical observations that they are
+making. Do not let their form of expression or your own pride keep that
+from happening. When you get review comments on a patch, take the time to
+understand what the reviewer is trying to say. If possible, fix the things
+that the reviewer is asking you to fix. And respond back to the reviewer:
+thank them, and describe how you will answer their questions.
+
+Note that you do not have to agree with every change suggested by
+reviewers. If you believe that the reviewer has misunderstood your code,
+explain what is really going on. If you have a technical objection to a
+suggested change, describe it and justify your solution to the problem. If
+your explanations make sense, the reviewer will accept them. Should your
+explanation not prove persuasive, though, especially if others start to
+agree with the reviewer, take some time to think things over again. It can
+be easy to become blinded by your own solution to a problem to the point
+that you don't realize that something is fundamentally wrong or, perhaps,
+you're not even solving the right problem.
+
+One fatal mistake is to ignore review comments in the hope that they will
+go away. They will not go away. If you repost code without having
+responded to the comments you got the time before, you're likely to find
+that your patches go nowhere.
+
+Speaking of reposting code: please bear in mind that reviewers are not
+going to remember all the details of the code you posted the last time
+around. So it is always a good idea to remind reviewers of previously
+raised issues and how you dealt with them; the patch changelog is a good
+place for this kind of information. Reviewers should not have to search
+through list archives to familiarize themselves with what was said last
+time; if you help them get a running start, they will be in a better mood
+when they revisit your code.
+
+What if you've tried to do everything right and things still aren't going
+anywhere? Most technical disagreements can be resolved through discussion,
+but there are times when somebody simply has to make a decision. If you
+honestly believe that this decision is going against you wrongly, you can
+always try appealing to a higher power. As of this writing, that higher
+power tends to be Andrew Morton. Andrew has a great deal of respect in the
+kernel development community; he can often unjam a situation which seems to
+be hopelessly blocked. Appealing to Andrew should not be done lightly,
+though, and not before all other alternatives have been explored. And bear
+in mind, of course, that he may not agree with you either.
+
+
+6.2: WHAT HAPPENS NEXT
+
+If a patch is considered to be a good thing to add to the kernel, and once
+most of the review issues have been resolved, the next step is usually
+entry into a subsystem maintainer's tree. How that works varies from one
+subsystem to the next; each maintainer has his or her own way of doing
+things. In particular, there may be more than one tree - one, perhaps,
+dedicated to patches planned for the next merge window, and another for
+longer-term work.
+
+For patches applying to areas for which there is no obvious subsystem tree
+(memory management patches, for example), the default tree often ends up
+being -mm. Patches which affect multiple subsystems can also end up going
+through the -mm tree.
+
+Inclusion into a subsystem tree can bring a higher level of visibility to a
+patch. Now other developers working with that tree will get the patch by
+default. Subsystem trees typically feed into -mm and linux-next as well,
+making their contents visible to the development community as a whole. At
+this point, there's a good chance that you will get more comments from a
+new set of reviewers; these comments need to be answered as in the previous
+round.
+
+What may also happen at this point, depending on the nature of your patch,
+is that conflicts with work being done by others turn up. In the worst
+case, heavy patch conflicts can result in some work being put on the back
+burner so that the remaining patches can be worked into shape and merged.
+Other times, conflict resolution will involve working with the other
+developers and, possibly, moving some patches between trees to ensure that
+everything applies cleanly. This work can be a pain, but count your
+blessings: before the advent of the linux-next tree, these conflicts often
+only turned up during the merge window and had to be addressed in a hurry.
+Now they can be resolved at leisure, before the merge window opens.
+
+Some day, if all goes well, you'll log on and see that your patch has been
+merged into the mainline kernel. Congratulations! Once the celebration is
+complete (and you have added yourself to the MAINTAINERS file), though, it
+is worth remembering an important little fact: the job still is not done.
+Merging into the mainline brings its own challenges.
+
+To begin with, the visibility of your patch has increased yet again. There
+may be a new round of comments from developers who had not been aware of
+the patch before. It may be tempting to ignore them, since there is no
+longer any question of your code being merged. Resist that temptation,
+though; you still need to be responsive to developers who have questions or
+suggestions.
+
+More importantly, though: inclusion into the mainline puts your code into
+the hands of a much larger group of testers. Even if you have contributed
+a driver for hardware which is not yet available, you will be surprised by
+how many people will build your code into their kernels. And, of course,
+where there are testers, there will be bug reports.
+
+The worst sort of bug reports are regressions. If your patch causes a
+regression, you'll find an uncomfortable number of eyes upon you;
+regressions need to be fixed as soon as possible. If you are unwilling or
+unable to fix the regression (and nobody else does it for you), your patch
+will almost certainly be removed during the stabilization period. Beyond
+negating all of the work you have done to get your patch into the mainline,
+having a patch pulled as the result of a failure to fix a regression could
+well make it harder for you to get work merged in the future.
+
+After any regressions have been dealt with, there may be other, ordinary
+bugs to deal with. The stabilization period is your best opportunity to
+fix these bugs and ensure that your code's debut in a mainline kernel
+release is as solid as possible. So, please, answer bug reports, and fix
+the problems if at all possible. That's what the stabilization period is
+for; you can start creating cool new patches once any problems with the old
+ones have been taken care of.
+
+And don't forget that there are other milestones which may also create bug
+reports: the next mainline stable release, when prominent distributors pick
+up a version of the kernel containing your patch, etc. Continuing to
+respond to these reports is a matter of basic pride in your work. If that
+is insufficient motivation, though, it's also worth considering that the
+development community remembers developers who lose interest in their code
+after it's merged. The next time you post a patch, they will be evaluating
+it with the assumption that you will not be around to maintain it
+afterward.
+
+
+6.3: OTHER THINGS THAT CAN HAPPEN
+
+One day, you may open your mail client and see that somebody has mailed you
+a patch to your code. That is one of the advantages of having your code
+out there in the open, after all. If you agree with the patch, you can
+either forward it on to the subsystem maintainer (be sure to include a
+proper From: line so that the attribution is correct, and add a signoff of
+your own), or send an Acked-by: response back and let the original poster
+send it upward.
+
+If you disagree with the patch, send a polite response explaining why. If
+possible, tell the author what changes need to be made to make the patch
+acceptable to you. There is a certain resistance to merging patches which
+are opposed by the author and maintainer of the code, but it only goes so
+far. If you are seen as needlessly blocking good work, those patches will
+eventually flow around you and get into the mainline anyway. In the Linux
+kernel, nobody has absolute veto power over any code. Except maybe Linus.
+
+On very rare occasion, you may see something completely different: another
+developer posts a different solution to your problem. At that point,
+chances are that one of the two patches will not be merged, and "mine was
+here first" is not considered to be a compelling technical argument. If
+somebody else's patch displaces yours and gets into the mainline, there is
+really only one way to respond: be pleased that your problem got solved and
+get on with your work. Having one's work shoved aside in this manner can
+be hurtful and discouraging, but the community will remember your reaction
+long after they have forgotten whose patch actually got merged.
diff --git a/Documentation/development-process/7.AdvancedTopics b/Documentation/development-process/7.AdvancedTopics
new file mode 100644
index 00000000000..a2cf74093aa
--- /dev/null
+++ b/Documentation/development-process/7.AdvancedTopics
@@ -0,0 +1,173 @@
+7: ADVANCED TOPICS
+
+At this point, hopefully, you have a handle on how the development process
+works. There is still more to learn, however! This section will cover a
+number of topics which can be helpful for developers wanting to become a
+regular part of the Linux kernel development process.
+
+7.1: MANAGING PATCHES WITH GIT
+
+The use of distributed version control for the kernel began in early 2002,
+when Linus first started playing with the proprietary BitKeeper
+application. While BitKeeper was controversial, the approach to software
+version management it embodied most certainly was not. Distributed version
+control enabled an immediate acceleration of the kernel development
+project. In current times, there are several free alternatives to
+BitKeeper. For better or for worse, the kernel project has settled on git
+as its tool of choice.
+
+Managing patches with git can make life much easier for the developer,
+especially as the volume of those patches grows. Git also has its rough
+edges and poses certain hazards; it is a young and powerful tool which is
+still being civilized by its developers. This document will not attempt to
+teach the reader how to use git; that would be sufficient material for a
+long document in its own right. Instead, the focus here will be on how git
+fits into the kernel development process in particular. Developers who
+wish to come up to speed with git will find more information at:
+
+ http://git.or.cz/
+
+ http://www.kernel.org/pub/software/scm/git/docs/user-manual.html
+
+and on various tutorials found on the web.
+
+The first order of business is to read the above sites and get a solid
+understanding of how git works before trying to use it to make patches
+available to others. A git-using developer should be able to obtain a copy
+of the mainline repository, explore the revision history, commit changes to
+the tree, use branches, etc. An understanding of git's tools for the
+rewriting of history (such as rebase) is also useful. Git comes with its
+own terminology and concepts; a new user of git should know about refs,
+remote branches, the index, fast-forward merges, pushes and pulls, detached
+heads, etc. It can all be a little intimidating at the outset, but the
+concepts are not that hard to grasp with a bit of study.
+
+Using git to generate patches for submission by email can be a good
+exercise while coming up to speed.
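+
+For example, assuming your work sits on a branch on top of the mainline
+"master" branch, the series can be written out as numbered patch files
+with:
+
+	git format-patch master..HEAD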
+
+When you are ready to start putting up git trees for others to look at, you
+will, of course, need a server that can be pulled from. Setting up such a
+server with git-daemon is relatively straightforward if you have a system
+which is accessible to the Internet. Otherwise, free, public hosting sites
+(Github, for example) are starting to appear on the net. Established
+developers can get an account on kernel.org, but those are not easy to come
+by; see http://kernel.org/faq/ for more information.
+
+The normal git workflow involves the use of a lot of branches. Each line
+of development can be separated into a separate "topic branch" and
+maintained independently. Branches in git are cheap, so there is no reason
+not to make free use of them. And, in any case, you should not do your
+development in any branch which you intend to ask others to pull from.
+Publicly-available branches should be created with care; merge in patches
+from development branches when they are in complete form and ready to go -
+not before.
+
+Git provides some powerful tools which can allow you to rewrite your
+development history. An inconvenient patch (one which breaks bisection,
+say, or which has some other sort of obvious bug) can be fixed in place or
+made to disappear from the history entirely. A patch series can be
+rewritten as if it had been written on top of today's mainline, even though
+you have been working on it for months. Changes can be transparently
+shifted from one branch to another. And so on. Judicious use of git's
+ability to revise history can help in the creation of clean patch sets with
+fewer problems.
+
+Excessive use of this capability can lead to other problems, though, beyond
+a simple obsession for the creation of the perfect project history.
+Rewriting history will rewrite the changes contained in that history,
+turning a tested (hopefully) kernel tree into an untested one. But, beyond
+that, developers cannot easily collaborate if they do not have a shared
+view of the project history; if you rewrite history which other developers
+have pulled into their repositories, you will make life much more difficult
+for those developers. So a simple rule of thumb applies here: history
+which has been exported to others should generally be seen as immutable
+thereafter.
+
+So, once you push a set of changes to your publicly-available server, those
+changes should not be rewritten. Git will attempt to enforce this rule if
+you try to push changes which do not result in a fast-forward merge
+(i.e. changes which do not share the same history). It is possible to
+override this check, and there may be times when it is necessary to rewrite
+an exported tree. Moving changesets between trees to avoid conflicts in
+linux-next is one example. But such actions should be rare. This is one
+of the reasons why development should be done in private branches (which
+can be rewritten if necessary) and only moved into public branches when
+it's in a reasonably advanced state.
+
+As the mainline (or other tree upon which a set of changes is based)
+advances, it is tempting to merge with that tree to stay on the leading
+edge. For a private branch, rebasing can be an easy way to keep up with
+another tree, but rebasing is not an option once a tree is exported to the
+world. Once that happens, a full merge must be done. Merging occasionally
+makes good sense, but overly frequent merges can clutter the history
+needlessly. The suggested technique here is to merge infrequently, and
+generally only at specific release points (such as a mainline -rc
+release). If you are nervous about specific changes, you can always
+perform test merges in a private branch. The git "rerere" tool can be
+useful in such situations; it remembers how merge conflicts were resolved
+so that you don't have to do the same work twice.
+
+One of the biggest recurring complaints about tools like git is this: the
+mass movement of patches from one repository to another makes it easy to
+slip in ill-advised changes which go into the mainline below the review
+radar. Kernel developers tend to get unhappy when they see that kind of
+thing happening; putting up a git tree with unreviewed or off-topic patches
+can affect your ability to get trees pulled in the future. Quoting Linus:
+
+ You can send me patches, but for me to pull a git patch from you, I
+ need to know that you know what you're doing, and I need to be able
+ to trust things *without* then having to go and check every
+ individual change by hand.
+
+(http://lwn.net/Articles/224135/).
+
+To avoid this kind of situation, ensure that all patches within a given
+branch stick closely to the associated topic; a "driver fixes" branch
+should not be making changes to the core memory management code. And, most
+importantly, do not use a git tree to bypass the review process. Post an
+occasional summary of the tree to the relevant list, and, when the time is
+right, request that the tree be included in linux-next.
+
+If and when others start to send patches for inclusion into your tree,
+don't forget to review them. Also ensure that you maintain the correct
+authorship information; the git "am" tool does its best in this regard, but
+you may have to add a "From:" line to the patch if it has been relayed to
+you via a third party.
+
+When requesting a pull, be sure to give all the relevant information: where
+your tree is, what branch to pull, and what changes will result from the
+pull. The git request-pull command can be helpful in this regard; it will
+format the request as other developers expect, and will also check to be
+sure that you have remembered to push those changes to the public server.
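+
+For example (the starting tag, URL, and branch name are placeholders):
+
+	git request-pull v2.6.27 git://example.org/pub/scm/my-tree.git for-linus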
+
+
+7.2: REVIEWING PATCHES
+
+Some readers will certainly object to putting this section with "advanced
+topics" on the grounds that even beginning kernel developers should be
+reviewing patches. It is certainly true that there is no better way to
+learn how to program in the kernel environment than by looking at code
+posted by others. In addition, reviewers are forever in short supply; by
+looking at code you can make a significant contribution to the process as a
+whole.
+
+Reviewing code can be an intimidating prospect, especially for a new kernel
+developer who may well feel nervous about questioning code - in public -
+which has been posted by those with more experience. Even code written by
+the most experienced developers can be improved, though. Perhaps the best
+piece of advice for reviewers (all reviewers) is this: phrase review
+comments as questions rather than criticisms. Asking "how does the lock
+get released in this path?" will always work better than stating "the
+locking here is wrong."
+
+Different developers will review code from different points of view. Some
+are mostly concerned with coding style and whether code lines have trailing
+white space. Others will focus primarily on whether the change implemented
+by the patch as a whole is a good thing for the kernel or not. Yet others
+will check for problematic locking, excessive stack usage, possible
+security issues, duplication of code found elsewhere, adequate
+documentation, adverse effects on performance, user-space ABI changes, etc.
+All types of review, if they lead to better code going into the kernel, are
+welcome and worthwhile.
+
+
diff --git a/Documentation/development-process/8.Conclusion b/Documentation/development-process/8.Conclusion
new file mode 100644
index 00000000000..1990ab4b494
--- /dev/null
+++ b/Documentation/development-process/8.Conclusion
@@ -0,0 +1,74 @@
+8: FOR MORE INFORMATION
+
+There are numerous sources of information on Linux kernel development and
+related topics. First among those will always be the Documentation
+directory found in the kernel source distribution. The top-level HOWTO
+file is an important starting point; SubmittingPatches and
+SubmittingDrivers are also something which all kernel developers should
+read. Many internal kernel APIs are documented using the kerneldoc
+mechanism; "make htmldocs" or "make pdfdocs" can be used to generate those
+documents in HTML or PDF format (though the version of TeX shipped by some
+distributions runs into internal limits and fails to process the documents
+properly).
+
+Various web sites discuss kernel development at all levels of detail. Your
+author would like to humbly suggest http://lwn.net/ as a source;
+information on many specific kernel topics can be found via the LWN kernel
+index at:
+
+ http://lwn.net/Kernel/Index/
+
+Beyond that, a valuable resource for kernel developers is:
+
+ http://kernelnewbies.org/
+
+Information about the linux-next tree gathers at:
+
+ http://linux.f-seidel.de/linux-next/pmwiki/
+
+And, of course, one should not forget http://kernel.org/, the definitive
+location for kernel release information.
+
+There are a number of books on kernel development:
+
+ Linux Device Drivers, 3rd Edition (Jonathan Corbet, Alessandro
+ Rubini, and Greg Kroah-Hartman). Online at
+ http://lwn.net/Kernel/LDD3/.
+
+ Linux Kernel Development (Robert Love).
+
+ Understanding the Linux Kernel (Daniel Bovet and Marco Cesati).
+
+All of these books suffer from a common fault, though: they tend to be
+somewhat obsolete by the time they hit the shelves, and they have been on
+the shelves for a while now. Still, there is quite a bit of good
+information to be found there.
+
+Documentation for git can be found at:
+
+ http://www.kernel.org/pub/software/scm/git/docs/
+
+ http://www.kernel.org/pub/software/scm/git/docs/user-manual.html
+
+
+9: CONCLUSION
+
+Congratulations to anybody who has made it through this long-winded
+document. Hopefully it has provided a helpful understanding of how the
+Linux kernel is developed and how you can participate in that process.
+
+In the end, it's the participation that matters. Any open source software
+project is no more than the sum of what its contributors put into it. The
+Linux kernel has progressed as quickly and as well as it has because it has
+been helped by an impressively large group of developers, all of whom are
+working to make it better. The kernel is a premier example of what can be
+done when thousands of people work together toward a common goal.
+
+The kernel can always benefit from a larger developer base, though. There
+is always more work to do. But, just as importantly, most other
+participants in the Linux ecosystem can benefit through contributing to the
+kernel. Getting code into the mainline is the key to higher code quality,
+lower maintenance and distribution costs, a higher level of influence over
+the direction of kernel development, and more. It is a situation where
+everybody involved wins. Fire up your editor and come join us; you will be
+more than welcome.
diff --git a/Documentation/devices.txt b/Documentation/devices.txt
index 05c80645e4e..2be08240ee8 100644
--- a/Documentation/devices.txt
+++ b/Documentation/devices.txt
@@ -2571,6 +2571,9 @@ Your cooperation is appreciated.
160 = /dev/usb/legousbtower0 1st USB Legotower device
...
175 = /dev/usb/legousbtower15 16th USB Legotower device
+ 176 = /dev/usb/usbtmc1 First USB TMC device
+ ...
+ 192 = /dev/usb/usbtmc16 16th USB TMC device
240 = /dev/usb/dabusb0 First daubusb device
...
243 = /dev/usb/dabusb3 Fourth dabusb device
diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index 27809357da5..1e89a51ea49 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -2,11 +2,13 @@
*.aux
*.bin
*.cpio
-*.css
+*.csp
+*.dsp
*.dvi
+*.elf
*.eps
-*.fw.gen.S
*.fw
+*.gen.S
*.gif
*.grep
*.grp
@@ -30,6 +32,7 @@
*.s
*.sgml
*.so
+*.so.dbg
*.symtypes
*.tab.c
*.tab.h
@@ -38,24 +41,17 @@
*.xml
*_MODULES
*_vga16.c
-*cscope*
*~
*.9
*.9.gz
.*
-.cscope
-.gitignore
-.mailmap
.mm
53c700_d.h
-53c8xx_d.h*
-COPYING
-CREDITS
CVS
ChangeSet
Image
Kerntypes
-MODS.txt
+Module.markers
Module.symvers
PENDING
SCCS
@@ -73,7 +69,9 @@ autoconf.h*
bbootsect
bin2c
binkernel.spec
+binoffset
bootsect
+bounds.h
bsetup
btfixupprep
build
@@ -89,39 +87,36 @@ config_data.h*
config_data.gz*
conmakehash
consolemap_deftbl.c*
+cpustr.h
crc32table.h*
cscope.*
-defkeymap.c*
+defkeymap.c
devlist.h*
docproc
-dummy_sym.c*
elf2ecoff
elfconfig.h*
-filelist
fixdep
fore200e_mkfirm
fore200e_pca_fw.c*
gconf
gen-devlist
-gen-kdb_cmds.c*
gen_crc32table
gen_init_cpio
genksyms
-gentbl
*_gray256.c
+ihex2fw
ikconfig.h*
initramfs_data.cpio
initramfs_data.cpio.gz
initramfs_list
kallsyms
kconfig
-kconfig.tk
-keywords.c*
+keywords.c
ksym.c*
ksym.h*
kxgettext
lkc_defs.h
-lex.c*
+lex.c
lex.*.c
logo_*.c
logo_*_clut224.c
@@ -130,7 +125,6 @@ lxdialog
mach-types
mach-types.h
machtypes.h
-make_times_h
map
maui_boot.h
mconf
@@ -138,6 +132,7 @@ miboot*
mk_elfconfig
mkboot
mkbugboot
+mkcpustr
mkdep
mkprep
mktables
@@ -145,11 +140,12 @@ mktree
modpost
modules.order
modversions.h*
+ncscope.*
offset.h
offsets.h
oui.c*
-parse.c*
-parse.h*
+parse.c
+parse.h
patches*
pca200e.bin
pca200e_ecd.bin2
@@ -157,7 +153,7 @@ piggy.gz
piggyback
pnmtologo
ppc_defs.h*
-promcon_tbl.c*
+promcon_tbl.c
pss_boot.h
qconf
raid6altivec*.c
@@ -168,27 +164,38 @@ series
setup
setup.bin
setup.elf
-sim710_d.h*
sImage
sm_tbl*
split-include
+syscalltab.h
tags
tftpboot.img
timeconst.h
times.h*
-tkparse
trix_boot.h
utsrelease.h*
+vdso-syms.lds
vdso.lds
+vdso32-int80-syms.lds
+vdso32-syms.lds
+vdso32-syscall-syms.lds
+vdso32-sysenter-syms.lds
+vdso32.lds
+vdso32.so.dbg
+vdso64.lds
+vdso64.so.dbg
version.h*
vmlinux
vmlinux-*
vmlinux.aout
-vmlinux*.lds*
-vmlinux*.scr
+vmlinux.lds
vsyscall.lds
+vsyscall_32.lds
wanxlfw.inc
uImage
unifdef
+wakeup.bin
+wakeup.elf
+wakeup.lds
zImage*
zconf.hash.c
diff --git a/Documentation/fb/intelfb.txt b/Documentation/fb/intelfb.txt
index 27a3160650a..dd9e944ea62 100644
--- a/Documentation/fb/intelfb.txt
+++ b/Documentation/fb/intelfb.txt
@@ -14,6 +14,7 @@ graphics devices. These would include:
Intel 915GM
Intel 945G
Intel 945GM
+ Intel 945GME
Intel 965G
Intel 965GM
diff --git a/Documentation/fb/uvesafb.txt b/Documentation/fb/uvesafb.txt
index bcfc233a008..7ac3c4078ff 100644
--- a/Documentation/fb/uvesafb.txt
+++ b/Documentation/fb/uvesafb.txt
@@ -52,7 +52,7 @@ are either given on the kernel command line or as module parameters, e.g.:
video=uvesafb:1024x768-32,mtrr:3,ywrap (compiled into the kernel)
- # modprobe uvesafb mode=1024x768-32 mtrr=3 scroll=ywrap (module)
+ # modprobe uvesafb mode_option=1024x768-32 mtrr=3 scroll=ywrap (module)
Accepted options:
@@ -105,7 +105,7 @@ vtotal:n
<mode> The mode you want to set, in the standard modedb format. Refer to
modedb.txt for a detailed description. When uvesafb is compiled as
a module, the mode string should be provided as a value of the
- 'mode' option.
+ 'mode_option' option.
vbemode:x
Force the use of VBE mode x. The mode will only be set if it's
diff --git a/Documentation/fb/viafb.modes b/Documentation/fb/viafb.modes
new file mode 100644
index 00000000000..02e5b487f00
--- /dev/null
+++ b/Documentation/fb/viafb.modes
@@ -0,0 +1,870 @@
+#
+#
+# These data are based on the CRTC parameters in
+#
+# VIA Integration Graphics Chip
+# (C) 2004 VIA Technologies Inc.
+#
+
+#
+# 640x480, 60 Hz, Non-Interlaced (25.175 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 640 480
+# Scan Frequency 31.469 kHz 59.94 Hz
+# Sync Width 3.813 us 0.064 ms
+# 12 chars 2 lines
+# Front Porch 0.636 us 0.318 ms
+# 2 chars 10 lines
+# Back Porch 1.907 us 1.048 ms
+# 6 chars 33 lines
+# Active Time 25.422 us 15.253 ms
+# 80 chars 480 lines
+# Blank Time 6.356 us 1.430 ms
+# 20 chars 45 lines
+# Polarity negative negative
+#
+
+mode "640x480-60"
+# D: 25.175 MHz, H: 31.469 kHz, V: 59.94 Hz
+ geometry 640 480 640 480 32
+ timings 39722 48 16 33 10 96 2 endmode mode "480x640-60"
+# D: 24.823 MHz, H: 39.780 kHz, V: 60.00 Hz
+ geometry 480 640 480 640 32 timings 39722 72 24 19 1 48 3 endmode
+#
+# 640x480, 75 Hz, Non-Interlaced (31.50 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 640 480
+# Scan Frequency 37.500 kHz 75.00 Hz
+# Sync Width 2.032 us 0.080 ms
+# 8 chars 3 lines
+# Front Porch 0.508 us 0.027 ms
+# 2 chars 1 lines
+# Back Porch 3.810 us 0.427 ms
+# 15 chars 16 lines
+# Active Time 20.317 us 12.800 ms
+# 80 chars 480 lines
+# Blank Time 6.349 us 0.533 ms
+# 25 chars 20 lines
+# Polarity negative negative
+#
+ mode "640x480-75"
+# D: 31.50 MHz, H: 37.500 kHz, V: 75.00 Hz
+ geometry 640 480 640 480 32 timings 31747 120 16 16 1 64 3 endmode
+#
+# 640x480, 85 Hz, Non-Interlaced (36.000 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 640 480
+# Scan Frequency 43.269 kHz 85.00 Hz
+# Sync Width 1.556 us 0.069 ms
+# 7 chars 3 lines
+# Front Porch 1.556 us 0.023 ms
+# 7 chars 1 lines
+# Back Porch 2.222 us 0.578 ms
+# 10 chars 25 lines
+# Active Time 17.778 us 11.093 ms
+# 80 chars 480 lines
+# Blank Time 5.333 us 0.670 ms
+# 24 chars 29 lines
+# Polarity negative negative
+#
+ mode "640x480-85"
+# D: 36.000 MHz, H: 43.269 kHz, V: 85.00 Hz
+ geometry 640 480 640 480 32 timings 27777 80 56 25 1 56 3 endmode
+#
+# 640x480, 100 Hz, Non-Interlaced (43.163 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 640 480
+# Scan Frequency 50.900 kHz 100.00 Hz
+# Sync Width 1.483 us 0.058 ms
+# 8 chars 3 lines
+# Front Porch 0.927 us 0.019 ms
+# 5 chars 1 lines
+# Back Porch 2.409 us 0.475 ms
+# 13 chars 25 lines
+# Active Time 14.827 us 9.430 ms
+# 80 chars 480 lines
+# Blank Time 4.819 us 0.570 ms
+# 26 chars 29 lines
+# Polarity positive positive
+#
+ mode "640x480-100"
+# D: 43.163 MHz, H: 50.900 kHz, V: 100.00 Hz
+ geometry 640 480 640 480 32 timings 23168 104 40 25 1 64 3 endmode
+#
+# 640x480, 120 Hz, Non-Interlaced (52.406 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 640 480
+# Scan Frequency 61.800 kHz 120.00 Hz
+# Sync Width 1.221 us 0.048 ms
+# 8 chars 3 lines
+# Front Porch 0.763 us 0.016 ms
+# 5 chars 1 lines
+# Back Porch 1.984 us 0.496 ms
+# 13 chars 31 lines
+# Active Time 12.212 us 7.767 ms
+# 80 chars 480 lines
+# Blank Time 3.969 us 0.566 ms
+# 26 chars 35 lines
+# Polarity positive positive
+#
+ mode "640x480-120"
+# D: 52.406 MHz, H: 61.800 kHz, V: 120.00 Hz
+ geometry 640 480 640 480 32 timings 19081 104 40 31 1 64 3 endmode
+#
+# 720x480, 60 Hz, Non-Interlaced (26.880 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 720 480
+# Scan Frequency 30.000 kHz 60.241 Hz
+# Sync Width 2.679 us 0.099 ms
+# 9 chars 3 lines
+# Front Porch 0.595 us 0.033 ms
+# 2 chars 1 lines
+# Back Porch 3.274 us 0.462 ms
+# 11 chars 14 lines
+# Active Time 26.786 us 16.000 ms
+# 90 chars 480 lines
+# Blank Time 6.548 us 0.600 ms
+# 22 chars 18 lines
+# Polarity positive positive
+#
+ mode "720x480-60"
+# D: 26.880 MHz, H: 30.000 kHz, V: 60.24 Hz
+ geometry 720 480 720 480 32 timings 37202 88 16 14 1 72 3 endmode
+#
+# 800x480, 60 Hz, Non-Interlaced (29.581 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 800 480
+# Scan Frequency 29.892 kHz 60.00 Hz
+# Sync Width 2.704 us 100.604 us
+# 10 chars 3 lines
+# Front Porch 0.541 us 33.535 us
+# 2 chars 1 lines
+# Back Porch 3.245 us 435.949 us
+# 12 chars 13 lines
+# Active Time 27.044 us 16.097 ms
+# 100 chars 480 lines
+# Blank Time 6.491 us 0.570 ms
+# 24 chars 17 lines
+# Polarity positive positive
+#
+ mode "800x480-60"
+# D: 29.500 MHz, H: 29.738 kHz, V: 60.00 Hz
+ geometry 800 480 800 480 32 timings 33805 96 24 10 3 72 7 endmode
+#
+# 720x576, 60 Hz, Non-Interlaced (32.668 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 720 576
+# Scan Frequency 35.820 kHz 60.00 Hz
+# Sync Width 2.204 us 0.083 ms
+# 9 chars 3 lines
+# Front Porch 0.735 us 0.027 ms
+# 3 chars 1 lines
+# Back Porch 2.939 us 0.459 ms
+# 12 chars 17 lines
+# Active Time 22.040 us 16.080 ms
+# 90 chars 476 lines
+# Blank Time 5.877 us 0.586 ms
+# 24 chars 21 lines
+# Polarity positive positive
+#
+ mode "720x576-60"
+# D: 32.668 MHz, H: 35.820 kHz, V: 60.00 Hz
+ geometry 720 576 720 576 32 timings 30611 96 24 17 1 72 3 endmode
+#
+# 800x600, 60 Hz, Non-Interlaced (40.00 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 800 600
+# Scan Frequency 37.879 kHz 60.32 Hz
+# Sync Width 3.200 us 0.106 ms
+# 16 chars 4 lines
+# Front Porch 1.000 us 0.026 ms
+# 5 chars 1 lines
+# Back Porch 2.200 us 0.607 ms
+# 11 chars 23 lines
+# Active Time 20.000 us 15.840 ms
+# 100 chars 600 lines
+# Blank Time 6.400 us 0.739 ms
+# 32 chars 28 lines
+# Polarity positive positive
+#
+ mode "800x600-60"
+# D: 40.00 MHz, H: 37.879 kHz, V: 60.32 Hz
+ geometry 800 600 800 600 32
+ timings 25000 88 40 23 1 128 4 hsync high vsync high endmode
+#
+# 800x600, 75 Hz, Non-Interlaced (49.50 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 800 600
+# Scan Frequency 46.875 kHz 75.00 Hz
+# Sync Width 1.616 us 0.064 ms
+# 10 chars 3 lines
+# Front Porch 0.323 us 0.021 ms
+# 2 chars 1 lines
+# Back Porch 3.232 us 0.448 ms
+# 20 chars 21 lines
+# Active Time 16.162 us 12.800 ms
+# 100 chars 600 lines
+# Blank Time 5.172 us 0.533 ms
+# 32 chars 25 lines
+# Polarity positive positive
+#
+ mode "800x600-75"
+# D: 49.50 MHz, H: 46.875 kHz, V: 75.00 Hz
+ geometry 800 600 800 600 32
+ timings 20203 160 16 21 1 80 3 hsync high vsync high endmode
+#
+# 800x600, 85 Hz, Non-Interlaced (56.25 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 800 600
+# Scan Frequency 53.674 kHz 85.061 Hz
+# Sync Width 1.138 us 0.056 ms
+# 8 chars 3 lines
+# Front Porch 0.569 us 0.019 ms
+# 4 chars 1 lines
+# Back Porch 2.702 us 0.503 ms
+# 19 chars 27 lines
+# Active Time 14.222 us 11.179 ms
+# 100 chars 600 lines
+# Blank Time 4.409 us 0.578 ms
+# 31 chars 31 lines
+# Polarity positive positive
+#
+ mode "800x600-85"
+# D: 56.25 MHz, H: 53.674 kHz, V: 85.061 Hz
+ geometry 800 600 800 600 32
+ timings 17777 152 32 27 1 64 3 hsync high vsync high endmode
+#
+# 800x600, 100 Hz, Non-Interlaced (67.50 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 800 600
+# Scan Frequency 62.500 kHz 100.00 Hz
+# Sync Width 0.948 us 0.064 ms
+# 8 chars 4 lines
+# Front Porch 0.000 us 0.112 ms
+# 0 chars 7 lines
+# Back Porch 3.200 us 0.224 ms
+# 27 chars 14 lines
+# Active Time 11.852 us 9.600 ms
+# 100 chars 600 lines
+# Blank Time 4.148 us 0.400 ms
+# 35 chars 25 lines
+# Polarity positive positive
+#
+ mode "800x600-100"
+# D: 67.50 MHz, H: 62.500 kHz, V: 100.00 Hz
+ geometry 800 600 800 600 32
+ timings 14667 216 0 14 7 64 4 hsync high vsync high endmode
+#
+# 800x600, 120 Hz, Non-Interlaced (83.950 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 800 600
+# Scan Frequency 77.160 kHz 120.00 Hz
+# Sync Width 1.048 us 0.039 ms
+# 11 chars 3 lines
+# Front Porch 0.667 us 0.013 ms
+# 7 chars 1 lines
+# Back Porch 1.715 us 0.507 ms
+# 18 chars 39 lines
+# Active Time 9.529 us 7.776 ms
+# 100 chars 600 lines
+# Blank Time 3.431 us 0.557 ms
+# 36 chars 43 lines
+# Polarity positive positive
+#
+ mode "800x600-120"
+# D: 83.950 MHz, H: 77.160 kHz, V: 120.00 Hz
+ geometry 800 600 800 600 32
+ timings 11912 144 56 39 1 88 3 hsync high vsync high endmode
+#
+# 848x480, 60 Hz, Non-Interlaced (31.490 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 848 480
+# Scan Frequency 29.820 kHz 60.00 Hz
+# Sync Width 2.795 us 0.099 ms
+# 11 chars 3 lines
+# Front Porch 0.508 us 0.033 ms
+# 2 chars 1 lines
+# Back Porch 3.303 us 0.429 ms
+# 13 chars 13 lines
+# Active Time 26.929 us 16.097 ms
+# 106 chars 480 lines
+# Blank Time 6.605 us 0.570 ms
+# 26 chars 17 lines
+# Polarity positive positive
+#
+ mode "848x480-60"
+# D: 31.500 MHz, H: 29.830 kHz, V: 60.00 Hz
+ geometry 848 480 848 480 32
+ timings 31746 104 24 12 3 80 5 hsync high vsync high endmode
+#
+# 856x480, 60 Hz, Non-Interlaced (31.728 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 856 480
+# Scan Frequency 29.820 kHz 60.00 Hz
+# Sync Width 2.774 us 0.099 ms
+# 11 chars 3 lines
+# Front Porch 0.504 us 0.033 ms
+# 2 chars 1 lines
+# Back Porch 3.728 us 0.429 ms
+# 13 chars 13 lines
+# Active Time 26.979 us 16.097 ms
+# 107 chars 480 lines
+# Blank Time 6.556 us 0.570 ms
+# 26 chars 17 lines
+# Polarity positive positive
+#
+ mode "856x480-60"
+# D: 31.728 MHz, H: 29.820 kHz, V: 60.00 Hz
+ geometry 856 480 856 480 32
+ timings 31518 104 16 13 1 88 3
+ hsync high vsync high endmode mode "960x600-60"
+# D: 45.250 MHz, H: 37.212 kHz, V: 60.00 Hz
+ geometry 960 600 960 600 32 timings 22099 128 32 15 3 96 6 endmode
+#
+# 1000x600, 60 Hz, Non-Interlaced (48.068 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 1000 600
+# Scan Frequency 37.320 kHz 60.00 Hz
+# Sync Width 2.164 us 0.080 ms
+# 13 chars 3 lines
+# Front Porch 0.832 us 0.027 ms
+# 5 chars 1 lines
+# Back Porch 2.996 us 0.483 ms
+# 18 chars 18 lines
+# Active Time 20.804 us 16.077 ms
+# 125 chars 600 lines
+# Blank Time 5.991 us 0.589 ms
+# 36 chars 22 lines
+# Polarity negative positive
+#
+ mode "1000x600-60"
+# D: 48.068 MHz, H: 37.320 kHz, V: 60.00 Hz
+ geometry 1000 600 1000 600 32
+ timings 20834 144 40 18 1 104 3 endmode mode "1024x576-60"
+# D: 46.996 MHz, H: 35.820 kHz, V: 60.00 Hz
+ geometry 1024 576 1024 576 32
+ timings 21278 144 40 17 1 104 3 endmode mode "1024x600-60"
+# D: 48.964 MHz, H: 37.320 kHz, V: 60.00 Hz
+ geometry 1024 600 1024 600 32
+ timings 20461 144 40 18 1 104 3 endmode mode "1088x612-60"
+# D: 52.952 MHz, H: 38.040 kHz, V: 60.00 Hz
+ geometry 1088 612 1088 612 32 timings 18877 152 48 16 3 104 5 endmode
+#
+# 1024x512, 60 Hz, Non-Interlaced (41.291 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 1024 512
+# Scan Frequency 31.860 kHz 60.00 Hz
+# Sync Width 2.519 us 0.094 ms
+# 13 chars 3 lines
+# Front Porch 0.775 us 0.031 ms
+# 4 chars 1 lines
+# Back Porch 3.294 us 0.465 ms
+# 17 chars 15 lines
+# Active Time 24.800 us 16.070 ms
+# 128 chars 512 lines
+# Blank Time 6.587 us 0.596 ms
+# 34 chars 19 lines
+# Polarity positive positive
+#
+ mode "1024x512-60"
+# D: 41.291 MHz, H: 31.860 kHz, V: 60.00 Hz
+ geometry 1024 512 1024 512 32
+ timings 24218 126 32 15 1 104 3 hsync high vsync high endmode
+#
+# 1024x600, 60 Hz, Non-Interlaced (48.875 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 1024 600
+# Scan Frequency 37.252 kHz 60.00 Hz
+# Sync Width 2.128 us 80.532 us
+# 13 chars 3 lines
+# Front Porch 0.818 us 26.844 us
+# 5 chars 1 lines
+# Back Porch 2.946 us 483.192 us
+# 18 chars 18 lines
+# Active Time 20.951 us 16.697 ms
+# 128 chars 622 lines
+# Blank Time 5.893 us 0.591 ms
+# 36 chars 22 lines
+# Polarity negative positive
+#
+#mode "1024x600-60"
+# # D: 48.875 MHz, H: 37.252 kHz, V: 60.00 Hz
+# geometry 1024 600 1024 600 32
+# timings 20460 144 40 18 1 104 3
+# endmode
+#
+# 1024x768, 60 Hz, Non-Interlaced (65.00 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 1024 768
+# Scan Frequency 48.363 kHz 60.00 Hz
+# Sync Width 2.092 us 0.124 ms
+# 17 chars 6 lines
+# Front Porch 0.369 us 0.062 ms
+# 3 chars 3 lines
+# Back Porch 2.462 us 0.601 ms
+# 20 chars 29 lines
+# Active Time 15.754 us 15.880 ms
+# 128 chars 768 lines
+# Blank Time 4.923 us 0.786 ms
+# 40 chars 38 lines
+# Polarity negative negative
+#
+ mode "1024x768-60"
+# D: 65.00 MHz, H: 48.363 kHz, V: 60.00 Hz
+ geometry 1024 768 1024 768 32 timings 15385 160 24 29 3 136 6 endmode
+#
+# 1024x768, 75 Hz, Non-Interlaced (78.75 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 1024 768
+# Scan Frequency 60.023 kHz 75.03 Hz
+# Sync Width 1.219 us 0.050 ms
+# 12 chars 3 lines
+# Front Porch 0.203 us 0.017 ms
+# 2 chars 1 lines
+# Back Porch 2.235 us 0.466 ms
+# 22 chars 28 lines
+# Active Time 13.003 us 12.795 ms
+# 128 chars 768 lines
+# Blank Time 3.657 us 0.533 ms
+# 36 chars 32 lines
+# Polarity positive positive
+#
+ mode "1024x768-75"
+# D: 78.75 MHz, H: 60.023 kHz, V: 75.03 Hz
+ geometry 1024 768 1024 768 32
+ timings 12699 176 16 28 1 96 3 hsync high vsync high endmode
+#
+# 1024x768, 85 Hz, Non-Interlaced (94.50 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 1024 768
+# Scan Frequency 68.677 kHz 85.00 Hz
+# Sync Width 1.016 us 0.044 ms
+# 12 chars 3 lines
+# Front Porch 0.508 us 0.015 ms
+# 6 chars 1 lines
+# Back Porch 2.201 us 0.524 ms
+# 26 chars 36 lines
+# Active Time 10.836 us 11.183 ms
+# 128 chars 768 lines
+# Blank Time 3.725 us 0.582 ms
+# 44 chars 40 lines
+# Polarity positive positive
+#
+ mode "1024x768-85"
+# D: 94.50 MHz, H: 68.677 kHz, V: 85.00 Hz
+ geometry 1024 768 1024 768 32
+ timings 10582 208 48 36 1 96 3 hsync high vsync high endmode
+#
+# 1024x768, 100 Hz, Non-Interlaced (110.0 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 1024 768
+# Scan Frequency 79.023 kHz 99.78 Hz
+# Sync Width 0.800 us 0.101 ms
+# 11 chars 8 lines
+# Front Porch 0.000 us 0.000 ms
+# 0 chars 0 lines
+# Back Porch 2.545 us 0.202 ms
+# 35 chars 16 lines
+# Active Time 9.309 us 9.719 ms
+# 128 chars 768 lines
+# Blank Time 3.345 us 0.304 ms
+# 46 chars 24 lines
+# Polarity negative negative
+#
+ mode "1024x768-100"
+# D: 113.3 MHz, H: 79.023 kHz, V: 99.78 Hz
+ geometry 1024 768 1024 768 32
+ timings 8825 280 0 16 0 88 8 endmode mode "1152x720-60"
+# D: 66.750 MHz, H: 44.859 kHz, V: 60.00 Hz
+ geometry 1152 720 1152 720 32 timings 14981 168 56 19 3 112 6 endmode
+#
+# 1152x864, 75 Hz, Non-Interlaced (110.0 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 1152 864
+# Scan Frequency 75.137 kHz 74.99 Hz
+# Sync Width 1.309 us 0.106 ms
+# 18 chars 8 lines
+# Front Porch 0.245 us 0.599 ms
+# 3 chars 45 lines
+# Back Porch 1.282 us 1.132 ms
+# 18 chars 85 lines
+# Active Time 10.473 us 11.499 ms
+# 144 chars 864 lines
+# Blank Time 2.836 us 1.837 ms
+# 39 chars 138 lines
+# Polarity positive positive
+#
+ mode "1152x864-75"
+# D: 110.0 MHz, H: 75.137 kHz, V: 74.99 Hz
+ geometry 1152 864 1152 864 32
+ timings 9259 144 24 85 45 144 8
+ hsync high vsync high endmode mode "1200x720-60"
+# D: 70.184 MHz, H: 44.760 kHz, V: 60.00 Hz
+ geometry 1200 720 1200 720 32
+ timings 14253 184 28 22 1 128 3 endmode mode "1280x600-60"
+# D: 61.503 MHz, H: 37.320 kHz, V: 60.00 Hz
+ geometry 1280 600 1280 600 32
+ timings 16260 184 28 18 1 128 3 endmode mode "1280x720-50"
+# D: 60.466 MHz, H: 37.050 kHz, V: 50.00 Hz
+ geometry 1280 720 1280 720 32
+ timings 16538 176 48 17 1 128 3 endmode mode "1280x768-50"
+# D: 65.178 MHz, H: 39.550 kHz, V: 50.00 Hz
+ geometry 1280 768 1280 768 32 timings 15342 184 28 19 1 128 3 endmode
+#
+# 1280x768, 60 Hz, Non-Interlaced (80.136 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 1280 768
+# Scan Frequency 47.700 kHz 60.00 Hz
+# Sync Width 1.697 us 0.063 ms
+# 17 chars 3 lines
+# Front Porch 0.799 us 0.021 ms
+# 8 chars 1 lines
+# Back Porch 2.496 us 0.483 ms
+# 25 chars 23 lines
+# Active Time 15.973 us 16.101 ms
+# 160 chars 768 lines
+# Blank Time 4.992 us 0.566 ms
+# 50 chars 27 lines
+# Polarity positive positive
+#
+ mode "1280x768-60"
+# D: 80.13 MHz, H: 47.700 kHz, V: 60.00 Hz
+ geometry 1280 768 1280 768 32
+ timings 12480 200 48 23 1 126 3 hsync high vsync high endmode
+#
+# 1280x800, 60 Hz, Non-Interlaced (83.375 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 1280 800
+# Scan Frequency 49.628 kHz 60.00 Hz
+# Sync Width 1.631 us 60.450 us
+# 17 chars 3 lines
+# Front Porch 0.768 us 20.15 us
+# 8 chars 1 lines
+# Back Porch 2.399 us 0.483 ms
+# 25 chars 24 lines
+# Active Time 15.352 us 16.120 ms
+# 160 chars 800 lines
+# Blank Time 4.798 us 0.564 ms
+# 50 chars 28 lines
+# Polarity negative positive
+#
+ mode "1280x800-60"
+# D: 83.500 MHz, H: 49.702 kHz, V: 60.00 Hz
+ geometry 1280 800 1280 800 32 timings 11994 200 72 22 3 128 6 endmode
+#
+# 1280x960, 60 Hz, Non-Interlaced (108.00 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 1280 960
+# Scan Frequency 60.000 kHz 60.00 Hz
+# Sync Width 1.037 us 0.050 ms
+# 14 chars 3 lines
+# Front Porch 0.889 us 0.017 ms
+# 12 chars 1 lines
+# Back Porch 2.889 us 0.600 ms
+# 39 chars 36 lines
+# Active Time 11.852 us 16.000 ms
+# 160 chars 960 lines
+# Blank Time 4.815 us 0.667 ms
+# 65 chars 40 lines
+# Polarity positive positive
+#
+ mode "1280x960-60"
+# D: 108.00 MHz, H: 60.000 kHz, V: 60.00 Hz
+ geometry 1280 960 1280 960 32
+ timings 9259 312 96 36 1 112 3 hsync high vsync high endmode
+#
+# 1280x1024, 60 Hz, Non-Interlaced (108.00 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 1280 1024
+# Scan Frequency 63.981 kHz 60.02 Hz
+# Sync Width 1.037 us 0.047 ms
+# 14 chars 3 lines
+# Front Porch 0.444 us 0.015 ms
+# 6 chars 1 lines
+# Back Porch 2.297 us 0.594 ms
+# 31 chars 38 lines
+# Active Time 11.852 us 16.005 ms
+# 160 chars 1024 lines
+# Blank Time 3.778 us 0.656 ms
+# 51 chars 42 lines
+# Polarity positive positive
+#
+ mode "1280x1024-60"
+# D: 108.00 MHz, H: 63.981 kHz, V: 60.02 Hz
+ geometry 1280 1024 1280 1024 32
+ timings 9260 248 48 38 1 112 3 hsync high vsync high endmode
+#
+# 1280x1024, 75 Hz, Non-Interlaced (135.00 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 1280 1024
+# Scan Frequency 79.976 kHz 75.02 Hz
+# Sync Width 1.067 us 0.038 ms
+# 18 chars 3 lines
+# Front Porch 0.119 us 0.012 ms
+# 2 chars 1 lines
+# Back Porch 1.837 us 0.475 ms
+# 31 chars 38 lines
+# Active Time 9.481 us 12.804 ms
+# 160 chars 1024 lines
+# Blank Time 3.022 us 0.525 ms
+# 51 chars 42 lines
+# Polarity positive positive
+#
+ mode "1280x1024-75"
+# D: 135.00 MHz, H: 79.976 kHz, V: 75.02 Hz
+ geometry 1280 1024 1280 1024 32
+ timings 7408 248 16 38 1 144 3 hsync high vsync high endmode
+#
+# 1280x1024, 85 Hz, Non-Interlaced (157.50 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 1280 1024
+# Scan Frequency 91.146 kHz 85.02 Hz
+# Sync Width 1.016 us 0.033 ms
+# 20 chars 3 lines
+# Front Porch 0.406 us 0.011 ms
+# 8 chars 1 lines
+# Back Porch 1.422 us 0.483 ms
+# 28 chars 44 lines
+# Active Time 8.127 us 11.235 ms
+# 160 chars 1024 lines
+# Blank Time 2.844 us 0.527 ms
+# 56 chars 48 lines
+# Polarity positive positive
+#
+ mode "1280x1024-85"
+# D: 157.50 MHz, H: 91.146 kHz, V: 85.02 Hz
+ geometry 1280 1024 1280 1024 32
+ timings 6349 224 64 44 1 160 3
+ hsync high vsync high endmode mode "1440x900-60"
+# D: 106.500 MHz, H: 55.935 kHz, V: 60.00 Hz
+ geometry 1440 900 1440 900 32
+ timings 9390 232 80 25 3 152 6
+ hsync high vsync high endmode mode "1440x900-75"
+# D: 136.750 MHz, H: 70.635 kHz, V: 75.00 Hz
+ geometry 1440 900 1440 900 32
+ timings 7315 248 96 33 3 152 6 hsync high vsync high endmode
+#
+# 1440x1050, 60 Hz, Non-Interlaced (125.10 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 1440 1050
+# Scan Frequency 65.220 kHz 60.00 Hz
+# Sync Width 1.204 us 0.046 ms
+# 19 chars 3 lines
+# Front Porch 0.760 us 0.015 ms
+# 12 chars 1 lines
+# Back Porch 1.964 us 0.495 ms
+# 31 chars 33 lines
+# Active Time 11.405 us 16.099 ms
+# 180 chars 1050 lines
+# Blank Time 3.928 us 0.567 ms
+# 62 chars 37 lines
+# Polarity positive positive
+#
+ mode "1440x1050-60"
+# D: 125.10 MHz, H: 65.220 kHz, V: 60.00 Hz
+ geometry 1440 1050 1440 1050 32
+ timings 7993 248 96 33 1 152 3
+ hsync high vsync high endmode mode "1600x900-60"
+# D: 118.250 MHz, H: 55.990 kHz, V: 60.00 Hz
+ geometry 1600 900 1600 900 32
+ timings 8415 256 88 26 3 168 5 endmode mode "1600x1024-60"
+# D: 136.358 MHz, H: 63.600 kHz, V: 60.00 Hz
+ geometry 1600 1024 1600 1024 32 timings 7315 272 104 32 1 168 3 endmode
+#
+# 1600x1200, 60 Hz, Non-Interlaced (156.00 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 1600 1200
+# Scan Frequency 76.200 kHz 60.00 Hz
+# Sync Width 1.026 us 0.105 ms
+# 20 chars 8 lines
+# Front Porch 0.205 us 0.131 ms
+# 4 chars 10 lines
+# Back Porch 1.636 us 0.682 ms
+# 32 chars 52 lines
+# Active Time 10.256 us 15.748 ms
+# 200 chars 1200 lines
+# Blank Time 2.872 us 0.866 ms
+# 56 chars 66 lines
+# Polarity negative negative
+#
+ mode "1600x1200-60"
+# D: 156.00 MHz, H: 76.200 kHz, V: 60.00 Hz
+ geometry 1600 1200 1600 1200 32 timings 6172 256 32 52 10 160 8 endmode
+#
+# 1600x1200, 75 Hz, Non-Interlaced (202.50 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 1600 1200
+# Scan Frequency 93.750 kHz 75.00 Hz
+# Sync Width 0.948 us 0.032 ms
+# 24 chars 3 lines
+# Front Porch 0.316 us 0.011 ms
+# 8 chars 1 lines
+# Back Porch 1.501 us 0.491 ms
+# 38 chars 46 lines
+# Active Time 7.901 us 12.800 ms
+# 200 chars 1200 lines
+# Blank Time 2.765 us 0.533 ms
+# 70 chars 50 lines
+# Polarity positive positive
+#
+ mode "1600x1200-75"
+# D: 202.50 MHz, H: 93.750 kHz, V: 75.00 Hz
+ geometry 1600 1200 1600 1200 32
+ timings 4938 304 64 46 1 192 3
+ hsync high vsync high endmode mode "1680x1050-60"
+# D: 146.250 MHz, H: 65.290 kHz, V: 59.954 Hz
+ geometry 1680 1050 1680 1050 32
+ timings 6814 280 104 30 3 176 6
+ hsync high vsync high endmode mode "1680x1050-75"
+# D: 187.000 MHz, H: 82.306 kHz, V: 74.892 Hz
+ geometry 1680 1050 1680 1050 32
+ timings 5348 296 120 40 3 176 6
+ hsync high vsync high endmode mode "1792x1344-60"
+# D: 202.975 MHz, H: 83.460 kHz, V: 60.00 Hz
+ geometry 1792 1344 1792 1344 32
+ timings 4902 320 128 43 1 192 3
+ hsync high vsync high endmode mode "1856x1392-60"
+# D: 218.571 MHz, H: 86.460 kHz, V: 60.00 Hz
+ geometry 1856 1392 1856 1392 32
+ timings 4577 336 136 45 1 200 3
+ hsync high vsync high endmode mode "1920x1200-60"
+# D: 193.250 MHz, H: 74.556 kHz, V: 60.00 Hz
+ geometry 1920 1200 1920 1200 32
+ timings 5173 336 136 36 3 200 6
+ hsync high vsync high endmode mode "1920x1440-60"
+# D: 234.000 MHz, H:90.000 kHz, V: 60.00 Hz
+ geometry 1920 1440 1920 1440 32
+ timings 4274 344 128 56 1 208 3
+ hsync high vsync high endmode mode "1920x1440-75"
+# D: 297.000 MHz, H:112.500 kHz, V: 75.00 Hz
+ geometry 1920 1440 1920 1440 32
+ timings 3367 352 144 56 1 224 3
+ hsync high vsync high endmode mode "2048x1536-60"
+# D: 267.250 MHz, H: 95.446 kHz, V: 60.00 Hz
+ geometry 2048 1536 2048 1536 32
+ timings 3742 376 152 49 3 224 4 hsync high vsync high endmode
+#
+# 1280x720, 60 Hz, Non-Interlaced (74.481 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 1280 720
+# Scan Frequency 44.760 kHz 60.00 Hz
+# Sync Width 1.826 us 67.024 us
+# 17 chars 3 lines
+# Front Porch 0.752 us 22.341 us
+# 7 chars 1 lines
+# Back Porch 2.578 us 491.510 us
+# 24 chars 22 lines
+# Active Time 17.186 us 16.086 ms
+# 160 chars 720 lines
+# Blank Time 5.156 us 0.581 ms
+# 48 chars 26 lines
+# Polarity negative negative
+#
+ mode "1280x720-60"
+# D: 74.481 MHz, H: 44.760 kHz, V: 60.00 Hz
+ geometry 1280 720 1280 720 32 timings 13426 192 64 22 1 136 3 endmode
+#
+# 1920x1080, 60 Hz, Non-Interlaced (172.798 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 1920 1080
+# Scan Frequency 67.080 kHz 60.00 Hz
+# Sync Width 1.204 us 44.723 us
+# 26 chars 3 lines
+# Front Porch 0.694 us 14.908 us
+# 15 chars 1 lines
+# Back Porch 1.898 us 506.857 us
+# 41 chars 34 lines
+# Active Time 11.111 us 16.100 ms
+# 240 chars 1080 lines
+# Blank Time 3.796 us 0.566 ms
+# 82 chars 38 lines
+# Polarity negative negative
+#
+ mode "1920x1080-60"
+# D: 172.798 MHz, H: 67.080 kHz, V: 60.00 Hz
+ geometry 1920 1080 1920 1080 32 timings 5787 328 120 34 1 208 3 endmode
+#
+# 1400x1050, 60 Hz, Non-Interlaced (122.61 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 1400 1050
+# Scan Frequency 65.218 kHz 59.99 Hz
+# Sync Width 1.037 us 0.047 ms
+# 19 chars 3 lines
+# Front Porch 0.444 us 0.015 ms
+# 11 chars 1 lines
+# Back Porch 1.185 us 0.188 ms
+# 30 chars 33 lines
+# Active Time 12.963 us 16.411 ms
+# 175 chars 1050 lines
+# Blank Time 2.667 us 0.250 ms
+# 60 chars 37 lines
+# Polarity negative positive
+#
+ mode "1400x1050-60"
+# D: 122.750 MHz, H: 65.317 kHz, V: 59.99 Hz
+ geometry 1400 1050 1408 1050 32
+ timings 8214 232 88 32 3 144 4 endmode mode "1400x1050-75"
+# D: 156.000 MHz, H: 82.278 kHz, V: 74.867 Hz
+ geometry 1400 1050 1408 1050 32 timings 6410 248 104 42 3 144 4 endmode
+#
+# 1366x768, 60 Hz, Non-Interlaced (85.86 MHz dotclock)
+#
+# Horizontal Vertical
+# Resolution 1366 768
+# Scan Frequency 47.700 kHz 60.00 Hz
+# Sync Width 1.677 us 0.063 ms
+# 18 chars 3 lines
+# Front Porch 0.839 us 0.021 ms
+# 9 chars 1 lines
+# Back Porch 2.516 us 0.482 ms
+# 27 chars 23 lines
+# Active Time 15.933 us 16.101 ms
+# 171 chars 768 lines
+# Blank Time 5.031 us 0.566 ms
+# 54 chars 27 lines
+# Polarity negative positive
+#
+ mode "1360x768-60"
+# D: 84.750 MHz, H: 47.720 kHz, V: 60.00 Hz
+ geometry 1360 768 1360 768 32
+ timings 11799 208 72 22 3 136 5 endmode mode "1366x768-60"
+# D: 85.86 MHz, H: 47.700 kHz, V: 60.00 Hz
+ geometry 1366 768 1366 768 32
+ timings 11647 216 72 23 1 144 3 endmode mode "1366x768-50"
+# D: 69.924 MHz, H: 39.550 kHz, V: 50.00 Hz
+ geometry 1366 768 1366 768 32 timings 14301 200 56 19 1 144 3 endmode
diff --git a/Documentation/fb/viafb.txt b/Documentation/fb/viafb.txt
new file mode 100644
index 00000000000..67dbf442b0b
--- /dev/null
+++ b/Documentation/fb/viafb.txt
@@ -0,0 +1,214 @@
+
+ VIA Integration Graphic Chip Console Framebuffer Driver
+
+[Platform]
+-----------------------
+ The console framebuffer driver is for graphics chips of
+ VIA UniChrome Family (CLE266, PM800 / CN400 / CN300,
+ P4M800CE / P4M800Pro / CN700 / VN800,
+ CX700 / VX700, K8M890, P4M890,
+ CN896 / P4M900, VX800)
+
+[Driver features]
+------------------------
+ Device: CRT, LCD, DVI
+
+ Support viafb_mode:
+ CRT:
+ 640x480(60, 75, 85, 100, 120 Hz), 720x480(60 Hz),
+ 720x576(60 Hz), 800x600(60, 75, 85, 100, 120 Hz),
+ 848x480(60 Hz), 856x480(60 Hz), 1024x512(60 Hz),
+ 1024x768(60, 75, 85, 100 Hz), 1152x864(75 Hz),
+ 1280x768(60 Hz), 1280x960(60 Hz), 1280x1024(60, 75, 85 Hz),
+ 1440x1050(60 Hz), 1600x1200(60, 75 Hz), 1280x720(60 Hz),
+ 1920x1080(60 Hz), 1400x1050(60 Hz), 800x480(60 Hz)
+
+ Supported color depths: 8 bpp, 16 bpp, 32 bpp.
+
+ Supports the 2D hardware accelerator.
+
+[Using the viafb module]
+------------------------
+ Start viafb with default settings:
+ #modprobe viafb
+
+ Start viafb with user options:
+ #modprobe viafb viafb_mode=800x600 viafb_bpp=16 viafb_refresh=60
+ viafb_active_dev=CRT+DVI viafb_dvi_port=DVP1
+ viafb_mode1=1024x768 viafb_bpp=16 viafb_refresh1=60
+ viafb_SAMM_ON=1
+
+ viafb_mode:
+ 640x480 (default)
+ 720x480
+ 800x600
+ 1024x768
+ ......
+
+ viafb_bpp:
+ 8, 16, 32 (default:32)
+
+ viafb_refresh:
+ 60, 75, 85, 100, 120 (default:60)
+
+ viafb_lcd_dsp_method:
+ 0 : expansion (default)
+ 1 : centering
+
+ viafb_lcd_mode:
+ 0 : LCD panel with LSB data format input (default)
+ 1 : LCD panel with MSB data format input
+
+ viafb_lcd_panel_id:
+ 0 : Resolution: 640x480, Channel: single, Dithering: Enable
+ 1 : Resolution: 800x600, Channel: single, Dithering: Enable
+ 2 : Resolution: 1024x768, Channel: single, Dithering: Enable (default)
+ 3 : Resolution: 1280x768, Channel: single, Dithering: Enable
+ 4 : Resolution: 1280x1024, Channel: dual, Dithering: Enable
+ 5 : Resolution: 1400x1050, Channel: dual, Dithering: Enable
+ 6 : Resolution: 1600x1200, Channel: dual, Dithering: Enable
+
+ 8 : Resolution: 800x480, Channel: single, Dithering: Enable
+ 9 : Resolution: 1024x768, Channel: dual, Dithering: Enable
+ 10: Resolution: 1024x768, Channel: single, Dithering: Disable
+ 11: Resolution: 1024x768, Channel: dual, Dithering: Disable
+ 12: Resolution: 1280x768, Channel: single, Dithering: Disable
+ 13: Resolution: 1280x1024, Channel: dual, Dithering: Disable
+ 14: Resolution: 1400x1050, Channel: dual, Dithering: Disable
+ 15: Resolution: 1600x1200, Channel: dual, Dithering: Disable
+ 16: Resolution: 1366x768, Channel: single, Dithering: Disable
+ 17: Resolution: 1024x600, Channel: single, Dithering: Enable
+ 18: Resolution: 1280x768, Channel: dual, Dithering: Enable
+ 19: Resolution: 1280x800, Channel: single, Dithering: Enable
+
+ viafb_accel:
+ 0 : No 2D Hardware Acceleration
+ 1 : 2D Hardware Acceleration (default)
+
+ viafb_SAMM_ON:
+ 0 : viafb_SAMM_ON disable (default)
+ 1 : viafb_SAMM_ON enable
+
+ viafb_mode1: (secondary display device)
+ 640x480 (default)
+ 720x480
+ 800x600
+ 1024x768
+ ... ...
+
+ viafb_bpp1: (secondary display device)
+ 8, 16, 32 (default:32)
+
+ viafb_refresh1: (secondary display device)
+ 60, 75, 85, 100, 120 (default:60)
+
+ viafb_active_dev:
+ This option is used to specify the active devices (CRT, DVI,
+ CRT+LCD, ...). DVI stands for DVI or HDMI; e.g., if you want to
+ enable HDMI, set viafb_active_dev=DVI. In the SAMM case, the first
+ device listed in viafb_active_dev is the primary device and the
+ second is the secondary device.
+
+ For example:
+ To enable one device, such as DVI only, we can use:
+ modprobe viafb viafb_active_dev=DVI
+ To enable two devices, such as CRT+DVI:
+ modprobe viafb viafb_active_dev=CRT+DVI;
+
+ For DuoView case, we can use:
+ modprobe viafb viafb_active_dev=CRT+DVI
+ OR
+ modprobe viafb viafb_active_dev=DVI+CRT...
+
+ For SAMM case:
+ If CRT is primary and DVI is secondary, we should use:
+ modprobe viafb viafb_active_dev=CRT+DVI viafb_SAMM_ON=1...
+ If DVI is primary and CRT is secondary, we should use:
+ modprobe viafb viafb_active_dev=DVI+CRT viafb_SAMM_ON=1...
+
+ viafb_display_hardware_layout:
+ This option is used to specify display hardware layout for CX700 chip.
+ 1 : LCD only
+ 2 : DVI only
+ 3 : LCD+DVI (default)
+ 4 : LCD1+LCD2 (internal + internal)
+ 16: LCD1+ExternalLCD2 (internal + external)
+
+ viafb_second_size:
+ This option is used to set the secondary device memory size (in MB)
+ in the SAMM case. The minimum size is 16.
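+ For example (an illustrative invocation; 32 is just a sample value,
+ any size of 16 or more is accepted):
+ modprobe viafb viafb_SAMM_ON=1 viafb_second_size=32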
+
+ viafb_platform_epia_dvi:
+ This option is used to enable DVI on EPIA-M.
+ 0 : No DVI on EPIA-M (default)
+ 1 : DVI on EPIA-M
+
+ viafb_bus_width:
+ When using a 24-bit bus width digital interface,
+ this option should be set.
+ 12: 12-Bit LVDS or 12-Bit TMDS (default)
+ 24: 24-Bit LVDS or 24-Bit TMDS
+
+ viafb_device_lcd_dualedge:
+ When using Dual Edge Panel, this option should be set.
+ 0 : No Dual Edge Panel (default)
+ 1 : Dual Edge Panel
+
+ viafb_video_dev:
+ This option is used to specify the video output devices (CRT, DVI, LCD)
+ for the DuoView case.
+ For example:
+ To output video on DVI, we should use:
+ modprobe viafb viafb_video_dev=DVI...
+
+ viafb_lcd_port:
+ This option is used to specify the LCD output port; available values
+ are "DVP0", "DVP1", "DFP_HIGHLOW", "DFP_HIGH" and "DFP_LOW".
+ For an external LCD + external DVI on CX700 (the external LCD is on
+ DVP0), we should use:
+ modprobe viafb viafb_lcd_port=DVP0...
+
+Notes:
+ 1. The CRT may not display properly in the DuoView CRT & DVI configuration
+ at the "640x480" PAL mode with DVI overscan enabled.
+ 2. SAMM stands for single adapter, multiple monitors. It differs from
+ multi-head in that SAMM supports multiple monitors at the driver layer,
+ so the fbcon layer does not even know about them; SAMM's second screen
+ does not have a device node file, so a user mode application cannot
+ access it directly.
+ When SAMM is enabled, viafb_mode and viafb_mode1, viafb_bpp and
+ viafb_bpp1, and viafb_refresh and viafb_refresh1 may differ.
+ 3. When the console is bound to viafbinfo1, changing the resolution and
+ bpp dynamically requires the viafb-specific ioctl VIAFB_SET_DEVICE
+ instead of the common ioctl FBIOPUT_VSCREENINFO, since viafb does not
+ support multi-head well; otherwise the screen may become corrupted.
+ 4. The VX800 2D accelerator is not yet supported by this driver. When the
+ driver is used on VX800, it disables the acceleration function by default.
+
+
+[Configure viafb with "fbset" tool]
+-----------------------------------
+ "fbset" is an inbox utility of Linux.
+ 1. Inquire current viafb information, type,
+ # fbset -i
+
+ 2. Set various resolutions and viafb_refresh rates,
+ # fbset <resolution-vertical_sync>
+
+ example,
+ # fbset "1024x768-75"
+ or
+ # fbset -g 1024 768 1024 768 32
+ Check the file "/etc/fb.modes" to find display modes available.
+
+ 3. Set the color depth,
+ # fbset -depth <value>
+
+ example,
+ # fbset -depth 16
+
+[Bootup with viafb]:
+--------------------
+ Add the following line to your grub.conf:
+ append = "video=viafb:viafb_mode=1024x768,viafb_bpp=32,viafb_refresh=85"
+
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 83c88cae1ed..f5f812daf9f 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -6,6 +6,24 @@ be removed from this file.
---------------------------
+What: old static regulatory information and ieee80211_regdom module parameter
+When: 2.6.29
+Why: The old regulatory infrastructure has been replaced with a new one
+ which does not require statically defined regulatory domains. We do
+ not want to keep static regulatory domains in the kernel due to
+ the dynamic nature of regulatory law and localization. We kept around
+ the old static definitions for the regulatory domains of:
+ * US
+ * JP
+ * EU
+ and used the US domain by default when CONFIG_WIRELESS_OLD_REGULATORY was
+ set. We also kept around the ieee80211_regdom module parameter in case
+ some applications were relying on it. Changing regulatory domains
+ can now be done instead by using nl80211, as is done with iw.
+Who: Luis R. Rodriguez <lrodriguez@atheros.com>
+
+---------------------------
+
What: dev->power.power_state
When: July 2007
Why: Broken design for runtime control over driver power states, confusing
@@ -232,6 +250,9 @@ What (Why):
- xt_mark match revision 0
(superseded by xt_mark match revision 1)
+ - xt_recent: the old ipt_recent proc dir
+ (superseded by /proc/net/xt_recent)
+
When: January 2009 or Linux 2.7.0, whichever comes first
Why: Superseded by newer revisions or modules
Who: Jan Engelhardt <jengelh@computergmbh.de>
@@ -266,11 +287,19 @@ Who: Glauber Costa <gcosta@redhat.com>
---------------------------
-What: old style serial driver for ColdFire (CONFIG_SERIAL_COLDFIRE)
-When: 2.6.28
-Why: This driver still uses the old interface and has been replaced
- by CONFIG_SERIAL_MCF.
-Who: Sebastian Siewior <sebastian@breakpoint.cc>
+What: remove HID compat support
+When: 2.6.29
+Why: needed only as a temporary solution until distros fix themselves up
+Who: Jiri Slaby <jirislaby@gmail.com>
+
+---------------------------
+
+What: print_fn_descriptor_symbol()
+When: October 2009
+Why: The %pF vsprintf format provides the same functionality in a
+ simpler way. print_fn_descriptor_symbol() is deprecated but
+ still present to give out-of-tree modules time to change.
+Who: Bjorn Helgaas <bjorn.helgaas@hp.com>
---------------------------
diff --git a/Documentation/filesystems/autofs4-mount-control.txt b/Documentation/filesystems/autofs4-mount-control.txt
new file mode 100644
index 00000000000..c6341745df3
--- /dev/null
+++ b/Documentation/filesystems/autofs4-mount-control.txt
@@ -0,0 +1,393 @@
+
+Miscellaneous Device control operations for the autofs4 kernel module
+====================================================================
+
+The problem
+===========
+
+There is a problem with active restarts in autofs (that is to say
+restarting autofs when there are busy mounts).
+
+During normal operation autofs uses a file descriptor opened on the
+directory that is being managed in order to be able to issue control
+operations. Using a file descriptor gives ioctl operations access to
+autofs specific information stored in the super block. The operations
+are things such as setting an autofs mount catatonic, setting the
+expire timeout and requesting expire checks. As is explained below,
+certain types of autofs triggered mounts can end up covering an autofs
+mount itself which prevents us being able to use open(2) to obtain a
+file descriptor for these operations if we don't already have one open.
+
+Currently autofs uses "umount -l" (lazy umount) to clear active mounts
+at restart. While using lazy umount works for most cases, anything that
+needs to walk back up the mount tree to construct a path, such as
+getcwd(2) and the proc file system /proc/<pid>/cwd, no longer works
+because the point from which the path is constructed has been detached
+from the mount tree.
+
+The actual problem with autofs is that it can't reconnect to existing
+mounts. One immediately thinks that just adding the ability to remount
+autofs file systems would solve it, but alas, that can't work. This is
+because autofs direct mounts and the implementation of "on demand mount
+and expire" of nested mount trees have the file system mounted directly
+on top of the mount trigger directory dentry.
+
+For example, there are two types of automount maps, direct (in the kernel
+module source you will see a third type called an offset, which is just
+a direct mount in disguise) and indirect.
+
+Here is a master map with direct and indirect map entries:
+
+/- /etc/auto.direct
+/test /etc/auto.indirect
+
+and the corresponding map files:
+
+/etc/auto.direct:
+
+/automount/dparse/g6 budgie:/autofs/export1
+/automount/dparse/g1 shark:/autofs/export1
+and so on.
+
+/etc/auto.indirect:
+
+g1 shark:/autofs/export1
+g6 budgie:/autofs/export1
+and so on.
+
+For the above indirect map an autofs file system is mounted on /test and
+mounts are triggered for each sub-directory key by the inode lookup
+operation. So we see a mount of shark:/autofs/export1 on /test/g1, for
+example.
+
+The way that direct mounts are handled is by making an autofs mount on
+each full path, such as /automount/dparse/g1, and using it as a mount
+trigger. So when we walk on the path we mount shark:/autofs/export1 "on
+top of this mount point". Since these are always directories we can
+use the follow_link inode operation to trigger the mount.
+
+But, each entry in direct and indirect maps can have offsets (making
+them multi-mount map entries).
+
+For example, an indirect mount map entry could also be:
+
+g1 \
+ / shark:/autofs/export5/testing/test \
+ /s1 shark:/autofs/export/testing/test/s1 \
+ /s2 shark:/autofs/export5/testing/test/s2 \
+ /s1/ss1 shark:/autofs/export1 \
+ /s2/ss2 shark:/autofs/export2
+
+and similarly a direct mount map entry could also be:
+
+/automount/dparse/g1 \
+ / shark:/autofs/export5/testing/test \
+ /s1 shark:/autofs/export/testing/test/s1 \
+ /s2 shark:/autofs/export5/testing/test/s2 \
+ /s1/ss1 shark:/autofs/export2 \
+ /s2/ss2 shark:/autofs/export2
+
+One of the issues with version 4 of autofs was that, when mounting an
+entry with a large number of offsets, possibly with nesting, we needed
+to mount and umount all of the offsets as a single unit. Not really a
+problem, except for people with a large number of offsets in map entries.
+This mechanism is used for the well known "hosts" map and we have seen
+cases (in 2.4) where the available number of mounts is exhausted or
+where the number of privileged ports available is exhausted.
+
+In version 5 we mount only as we go down the tree of offsets and
+similarly for expiring them which resolves the above problem. There is
+somewhat more detail to the implementation but it isn't needed for the
+sake of the problem explanation. The one important detail is that these
+offsets are implemented using the same mechanism as the direct mounts
+above and so the mount points can be covered by a mount.
+
+The current autofs implementation uses an ioctl file descriptor opened
+on the mount point for control operations. The references held by the
+descriptor are accounted for in checks made to determine if a mount is
+in use and is also used to access autofs file system information held
+in the mount super block. So the use of a file handle needs to be
+retained.
+
+
+The Solution
+============
+
+To be able to restart autofs leaving existing direct, indirect and
+offset mounts in place we need to be able to obtain a file handle
+for these potentially covered autofs mount points. Rather than just
+implement an isolated operation it was decided to re-implement the
+existing ioctl interface and add new operations to provide this
+functionality.
+
+In addition, to be able to reconstruct a mount tree that has busy mounts,
+the uid and gid of the last user that triggered the mount needs to be
+available because these can be used as macro substitution variables in
+autofs maps. They are recorded at mount request time and an operation
+has been added to retrieve them.
+
+Since we're re-implementing the control interface, a couple of other
+problems with the existing interface have been addressed. First, when
+a mount or expire operation completes a status is returned to the
+kernel by either a "send ready" or a "send fail" operation. The
+"send fail" operation of the ioctl interface could only ever send
+ENOENT so the re-implementation allows user space to send an actual
+status. Another expensive operation in user space, for those using
+very large maps, is discovering if a mount is present. Usually this
+involves scanning /proc/mounts and since it needs to be done quite
+often it can introduce significant overhead when there are many entries
+in the mount table. An operation to lookup the mount status of a mount
+point dentry (covered or not) has also been added.
+
+Current kernel development policy recommends avoiding the use of the
+ioctl mechanism in favor of systems such as Netlink. An implementation
+using this system was attempted to evaluate its suitability and it was
+found to be inadequate, in this case. The Generic Netlink system was
+used for this as raw Netlink would lead to a significant increase in
+complexity. There's no question that the Generic Netlink system is an
+elegant solution for common case ioctl functions but it's not a complete
+replacement, probably because its primary purpose in life is to be a
+message bus implementation rather than specifically an ioctl replacement.
+While it would be possible to work around this, there is one concern
+that led to the decision not to use it. This is that the autofs
+expire in the daemon has become far too complex because umount
+candidates are enumerated, almost for no other reason than to "count"
+the number of times to call the expire ioctl. This involves scanning
+the mount table which has proved to be a big overhead for users with
+large maps. The best way to improve this is to try to get back to the
+way the expire was done long ago. That is, when an expire request is
+issued for a mount (file handle) we should continually call back to
+the daemon until we can't umount any more mounts, then return the
+appropriate status to the daemon. At the moment we just expire one
+mount at a time. A Generic Netlink implementation would exclude this
+possibility for future development due to the requirements of the
+message bus architecture.
+
+
+autofs4 Miscellaneous Device mount control interface
+====================================================
+
+The control interface is accessed by opening a device node, typically
+/dev/autofs.
+
+All the ioctls use a common structure to pass the needed parameter
+information and return operation results:
+
+struct autofs_dev_ioctl {
+ __u32 ver_major;
+ __u32 ver_minor;
+ __u32 size; /* total size of data passed in
+ * including this struct */
+ __s32 ioctlfd; /* automount command fd */
+
+ __u32 arg1; /* Command parameters */
+ __u32 arg2;
+
+ char path[0];
+};
+
+The ioctlfd field is a mount point file descriptor of an autofs mount
+point. It is returned by the open call and is used by all calls except
+the check for whether a given path is a mount point, where it may
+optionally be used to check a specific mount corresponding to a given
+mount point file descriptor, and when requesting the uid and gid of the
+last successful mount on a directory within the autofs file system.
+
+The fields arg1 and arg2 are used to communicate parameters and results of
+calls made as described below.
+
+The path field is used to pass a path where it is needed and the size field
+is used to account for the increased structure length when translating the
+structure sent from user space.
+
+This structure can be initialized before setting specific fields by using
+the void function call init_autofs_dev_ioctl(struct autofs_dev_ioctl *).
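+
+For illustration only, a minimal user space sketch of preparing this
+structure for an ioctl that takes a path might look like the following
+(the header name linux/auto_dev-ioctl.h is an assumption here; use
+whatever header exports these declarations on your system):
+
+#include <stdlib.h>
+#include <string.h>
+#include <linux/auto_dev-ioctl.h>	/* assumed location of the declarations */
+
+/* Allocate an autofs_dev_ioctl large enough to carry "path" and set it up. */
+static struct autofs_dev_ioctl *alloc_dev_ioctl(const char *path)
+{
+	size_t size = sizeof(struct autofs_dev_ioctl) + strlen(path) + 1;
+	struct autofs_dev_ioctl *param = calloc(1, size);
+
+	if (!param)
+		return NULL;
+	init_autofs_dev_ioctl(param);	/* sets the version and size fields as described above */
+	param->size = size;		/* account for the appended path */
+	strcpy(param->path, path);
+	return param;
+}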
+
+All of the ioctls perform a copy of this structure from user space to
+kernel space and return -EINVAL if the size parameter is smaller than
+the structure size itself, -ENOMEM if the kernel memory allocation fails
+or -EFAULT if the copy itself fails. Other checks include a version check
+of the compiled in user space version against the module version and a
+mismatch results in a -EINVAL return. If the size field is greater than
+the structure size then a path is assumed to be present and is checked to
+ensure it begins with a "/" and is NULL terminated, otherwise -EINVAL is
+returned. Following these checks, for all ioctl commands except
+AUTOFS_DEV_IOCTL_VERSION_CMD, AUTOFS_DEV_IOCTL_OPENMOUNT_CMD and
+AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD the ioctlfd is validated and if it is
+not a valid descriptor or doesn't correspond to an autofs mount point
+an error of -EBADF, -ENOTTY or -EINVAL (not an autofs descriptor) is
+returned.
+
+
+The ioctls
+==========
+
+An example of an implementation which uses this interface can be seen
+in autofs version 5.0.4 and later in file lib/dev-ioctl-lib.c of the
+distribution tar available for download from kernel.org in directory
+/pub/linux/daemons/autofs/v5.
+
+The device node ioctl operations implemented by this interface are:
+
+
+AUTOFS_DEV_IOCTL_VERSION
+------------------------
+
+Get the major and minor version of the autofs4 device ioctl kernel module
+implementation. It requires an initialized struct autofs_dev_ioctl as an
+input parameter and sets the version information in the passed in structure.
+It returns 0 on success or the error -EINVAL if a version mismatch is
+detected.
+
+
+AUTOFS_DEV_IOCTL_PROTOVER_CMD and AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD
+------------------------------------------------------------------
+
+Get the major and minor version of the autofs4 protocol version understood
+by loaded module. This call requires an initialized struct autofs_dev_ioctl
+with the ioctlfd field set to a valid autofs mount point descriptor
+and sets the requested version number in structure field arg1. These
+commands return 0 on success or one of the negative error codes if
+validation fails.
+
+
+AUTOFS_DEV_IOCTL_OPENMOUNT and AUTOFS_DEV_IOCTL_CLOSEMOUNT
+----------------------------------------------------------
+
+Obtain and release a file descriptor for an autofs managed mount point
+path. The open call requires an initialized struct autofs_dev_ioctl with
+the path field set and the size field adjusted appropriately, as well
+as the arg1 field set to the device number of the autofs mount. The
+device number can be obtained from the mount options shown in
+/proc/mounts. The close call requires an initialized struct
+autofs_dev_ioctl with the ioctlfd field set to the descriptor obtained
+from the open call. The release of the file descriptor can also be done
+with close(2) so any open descriptors will also be closed at process exit.
+The close call is included in the implemented operations largely for
+completeness and to provide for a consistent user space implementation.
+
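+As a rough sketch only (the reference implementation is the daemon's
+lib/dev-ioctl-lib.c mentioned above), obtaining and releasing a descriptor
+could look like this. It reuses the alloc_dev_ioctl() helper sketched
+earlier, the helper names here are made up for the example, devfd is
+assumed to be an open file descriptor on /dev/autofs, and the new
+descriptor is assumed to come back in the ioctlfd field:
+
+#include <stdlib.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+#include <linux/auto_dev-ioctl.h>	/* assumed location of the declarations */
+
+/* Obtain an ioctlfd for a (possibly covered) autofs mount point. */
+static int get_ioctlfd(int devfd, const char *mnt_path, unsigned int devid)
+{
+	struct autofs_dev_ioctl *param = alloc_dev_ioctl(mnt_path);
+	int ioctlfd = -1;
+
+	if (!param)
+		return -1;
+	param->arg1 = devid;		/* device number from /proc/mounts */
+	if (ioctl(devfd, AUTOFS_DEV_IOCTL_OPENMOUNT, param) >= 0)
+		ioctlfd = param->ioctlfd;
+	free(param);
+	return ioctlfd;
+}
+
+/* Release it again; a plain close(2) on ioctlfd would do the same job. */
+static void put_ioctlfd(int devfd, int ioctlfd)
+{
+	struct autofs_dev_ioctl param;
+
+	init_autofs_dev_ioctl(&param);
+	param.ioctlfd = ioctlfd;
+	ioctl(devfd, AUTOFS_DEV_IOCTL_CLOSEMOUNT, &param);
+}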
+
+AUTOFS_DEV_IOCTL_READY_CMD and AUTOFS_DEV_IOCTL_FAIL_CMD
+--------------------------------------------------------
+
+Return mount and expire result status from user space to the kernel.
+Both of these calls require an initialized struct autofs_dev_ioctl
+with the ioctlfd field set to the descriptor obtained from the open
+call and the arg1 field set to the wait queue token number, received
+by user space in the foregoing mount or expire request. The arg2 field
+is set to the status to be returned. For the ready call this is always
+0 and for the fail call it is set to the errno of the operation.
+
+
+AUTOFS_DEV_IOCTL_SETPIPEFD_CMD
+------------------------------
+
+Set the pipe file descriptor used for kernel communication to the daemon.
+Normally this is set at mount time using an option but when reconnecting
+to an existing mount we need to use this to tell the autofs mount about
+the new kernel pipe descriptor. In order to protect mounts against
+incorrectly setting the pipe descriptor we also require that the autofs
+mount be catatonic (see next call).
+
+The call requires an initialized struct autofs_dev_ioctl with the
+ioctlfd field set to the descriptor obtained from the open call and
+the arg1 field set to descriptor of the pipe. On success the call
+also sets the process group id used to identify the controlling process
+(e.g. the owning automount(8) daemon) to the process group of the caller.
+
+
+AUTOFS_DEV_IOCTL_CATATONIC_CMD
+------------------------------
+
+Make the autofs mount point catatonic. The autofs mount will no longer
+issue mount requests, the kernel communication pipe descriptor is released
+and any remaining waits in the queue released.
+
+The call requires an initialized struct autofs_dev_ioctl with the
+ioctlfd field set to the descriptor obtained from the open call.
+
+
+AUTOFS_DEV_IOCTL_TIMEOUT_CMD
+----------------------------
+
+Set the expire timeout for mounts within an autofs mount point.
+
+The call requires an initialized struct autofs_dev_ioctl with the
+ioctlfd field set to the descriptor obtained from the open call.
+
+
+AUTOFS_DEV_IOCTL_REQUESTER_CMD
+------------------------------
+
+Return the uid and gid of the last process to successfully trigger the
+mount on the given path dentry.
+
+The call requires an initialized struct autofs_dev_ioctl with the path
+field set to the mount point in question and the size field adjusted
+appropriately as well as the arg1 field set to the device number of the
+containing autofs mount. Upon return the struct field arg1 contains the
+uid and arg2 the gid.
+
+When reconstructing an autofs mount tree with active mounts we need to
+re-connect to mounts that may have used the original process uid and
+gid (or string variations of them) for mount lookups within the map entry.
+This call provides the ability to obtain this uid and gid so they may be
+used by user space for the mount map lookups.
+
+
+AUTOFS_DEV_IOCTL_EXPIRE_CMD
+---------------------------
+
+Issue an expire request to the kernel for an autofs mount. Typically
+this ioctl is called until no further expire candidates are found.
+
+The call requires an initialized struct autofs_dev_ioctl with the
+ioctlfd field set to the descriptor obtained from the open call. In
+addition an immediate expire, independent of the mount timeout, can be
+requested by setting the arg1 field to 1. If no expire candidates can
+be found the ioctl returns -1 with errno set to EAGAIN.
+
+This call causes the kernel module to check the mount corresponding
+to the given ioctlfd for mounts that can be expired, issues an expire
+request back to the daemon and waits for completion.
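+
+A minimal sketch of the resulting daemon-side loop follows (error handling
+trimmed; devfd is an open descriptor on /dev/autofs and, in a real daemon,
+this would run separately from the code answering the kernel's mount and
+expire requests; AUTOFS_DEV_IOCTL_EXPIRE is assumed to be the ioctl request
+macro for this command, following the OPENMOUNT/CLOSEMOUNT naming above):
+
+#include <errno.h>
+#include <stdio.h>
+#include <sys/ioctl.h>
+#include <linux/auto_dev-ioctl.h>	/* assumed location of the declarations */
+
+/* Expire mounts under ioctlfd until the kernel reports no more candidates. */
+static void expire_all(int devfd, int ioctlfd, int immediate)
+{
+	struct autofs_dev_ioctl param;
+
+	for (;;) {
+		init_autofs_dev_ioctl(&param);
+		param.ioctlfd = ioctlfd;
+		param.arg1 = immediate ? 1 : 0;	/* 1 == ignore the mount timeout */
+
+		if (ioctl(devfd, AUTOFS_DEV_IOCTL_EXPIRE, &param) < 0) {
+			/* EAGAIN means no more expire candidates were found. */
+			if (errno != EAGAIN)
+				perror("expire");
+			break;
+		}
+	}
+}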
+
+AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD
+------------------------------
+
+Checks if an autofs mount point is in use.
+
+The call requires an initialized struct autofs_dev_ioctl with the
+ioctlfd field set to the descriptor obtained from the open call and
+it returns the result in the arg1 field, 1 for busy and 0 otherwise.
+
+
+AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD
+---------------------------------
+
+Check if the given path is a mountpoint.
+
+The call requires an initialized struct autofs_dev_ioctl. There are two
+possible variations. Both use the path field set to the path of the mount
+point to check and the size field adjusted appropriately. One uses the
+ioctlfd field to identify a specific mount point to check while the other
+variation uses the path and optionally arg1 set to an autofs mount type.
+The call returns 1 if this is a mount point and sets arg1 to the device
+number of the mount and field arg2 to the relevant super block magic
+number (described below) or 0 if it isn't a mountpoint. In both cases
+the device number (as returned by new_encode_dev()) is returned
+in field arg1.
+
+If supplied with a file descriptor we're looking for a specific mount,
+not necessarily at the top of the mounted stack. In this case the path
+the descriptor corresponds to is considered a mountpoint if it is itself
+a mountpoint or contains a mount, such as a multi-mount without a root
+mount. In this case we return 1 if the descriptor corresponds to a mount
+point and also return the super magic of the covering mount if there
+is one or 0 if it isn't a mountpoint.
+
+If a path is supplied (and the ioctlfd field is set to -1) then the path
+is looked up and is checked to see if it is the root of a mount. If a
+type is also given we are looking for a particular autofs mount and if
+a match isn't found a fail is returned. If the located path is the
+root of a mount 1 is returned along with the super magic of the mount
+or 0 otherwise.
+
diff --git a/Documentation/filesystems/ext3.txt b/Documentation/filesystems/ext3.txt
index b45f3c1b8b4..9dd2a3bb2ac 100644
--- a/Documentation/filesystems/ext3.txt
+++ b/Documentation/filesystems/ext3.txt
@@ -96,6 +96,11 @@ errors=remount-ro(*) Remount the filesystem read-only on an error.
errors=continue Keep going on a filesystem error.
errors=panic Panic and halt the machine if an error occurs.
+data_err=ignore(*) Just print an error message if an error occurs
+ in a file data buffer in ordered mode.
+data_err=abort Abort the journal if an error occurs in a file
+ data buffer in ordered mode.
+
grpid Give objects the same group ID as their creator.
bsdgroups
@@ -193,6 +198,5 @@ kernel source: <file:fs/ext3/>
programs: http://e2fsprogs.sourceforge.net/
http://ext2resize.sourceforge.net
-useful links: http://www.zip.com.au/~akpm/linux/ext3/ext3-usage.html
- http://www-106.ibm.com/developerworks/linux/library/l-fs7/
+useful links: http://www-106.ibm.com/developerworks/linux/library/l-fs7/
http://www-106.ibm.com/developerworks/linux/library/l-fs8/
diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
index 0d5394920a3..174eaff7ded 100644
--- a/Documentation/filesystems/ext4.txt
+++ b/Documentation/filesystems/ext4.txt
@@ -2,19 +2,24 @@
Ext4 Filesystem
===============
-This is a development version of the ext4 filesystem, an advanced level
-of the ext3 filesystem which incorporates scalability and reliability
-enhancements for supporting large filesystems (64 bit) in keeping with
-increasing disk capacities and state-of-the-art feature requirements.
+Ext4 is an advanced level of the ext3 filesystem which incorporates
+scalability and reliability enhancements for supporting large filesystems
+(64 bit) in keeping with increasing disk capacities and state-of-the-art
+feature requirements.
-Mailing list: linux-ext4@vger.kernel.org
+Mailing list: linux-ext4@vger.kernel.org
+Web site: http://ext4.wiki.kernel.org
1. Quick usage instructions:
===========================
+Note: More extensive information for getting started with ext4 can be
+ found at the ext4 wiki site at the URL:
+ http://ext4.wiki.kernel.org/index.php/Ext4_Howto
+
- Compile and install the latest version of e2fsprogs (as of this
- writing version 1.41) from:
+ writing version 1.41.3) from:
http://sourceforge.net/project/showfiles.php?group_id=2406
@@ -32,28 +37,26 @@ Mailing list: linux-ext4@vger.kernel.org
you will need to merge your changes with the version from e2fsprogs
1.41.x.
- - Create a new filesystem using the ext4dev filesystem type:
+ - Create a new filesystem using the ext4 filesystem type:
- # mke2fs -t ext4dev /dev/hda1
+ # mke2fs -t ext4 /dev/hda1
- Or configure an existing ext3 filesystem to support extents and set
- the test_fs flag to indicate that it's ok for an in-development
- filesystem to touch this filesystem:
+ Or to configure an existing ext3 filesystem to support extents:
- # tune2fs -O extents -E test_fs /dev/hda1
+ # tune2fs -O extents /dev/hda1
If the filesystem was created with 128 byte inodes, it can be
converted to use 256 byte for greater efficiency via:
# tune2fs -I 256 /dev/hda1
- (Note: we currently do not have tools to convert an ext4dev
+ (Note: we currently do not have tools to convert an ext4
filesystem back to ext3; so please do not try this on production
filesystems.)
- Mounting:
- # mount -t ext4dev /dev/hda1 /wherever
+ # mount -t ext4 /dev/hda1 /wherever
- When comparing performance with other filesystems, remember that
ext3/4 by default offers higher data integrity guarantees than most.
@@ -104,8 +107,8 @@ exist yet so I'm not sure they're in the near-term roadmap.
The big performance win will come with mballoc, delalloc and flex_bg
grouping of bitmaps and inode tables. Some test results available here:
- - http://www.bullopensource.org/ext4/20080530/ffsb-write-2.6.26-rc2.html
- - http://www.bullopensource.org/ext4/20080530/ffsb-readwrite-2.6.26-rc2.html
+ - http://www.bullopensource.org/ext4/20080818-ffsb/ffsb-write-2.6.27-rc1.html
+ - http://www.bullopensource.org/ext4/20080818-ffsb/ffsb-readwrite-2.6.27-rc1.html
3. Options
==========
@@ -177,6 +180,11 @@ barrier=<0|1(*)> This enables/disables the use of write barriers in
your disks are battery-backed in one way or another,
disabling barriers may safely improve performance.
+inode_readahead=n This tuning parameter controls the maximum
+ number of inode table blocks that ext4's inode
+ table readahead algorithm will pre-read into
+ the buffer cache. The default value is 32 blocks.
+
orlov (*) This enables the new Orlov block allocator. It is
enabled by default.
@@ -209,15 +217,17 @@ noreservation
bsddf (*) Make 'df' act like BSD.
minixdf Make 'df' act like Minix.
-check=none Don't do extra checking of bitmaps on mount.
-nocheck
-
debug Extra debugging information is sent to syslog.
errors=remount-ro(*) Remount the filesystem read-only on an error.
errors=continue Keep going on a filesystem error.
errors=panic Panic and halt the machine if an error occurs.
+data_err=ignore(*) Just print an error message if an error occurs
+ in a file data buffer in ordered mode.
+data_err=abort Abort the journal if an error occurs in a file
+ data buffer in ordered mode.
+
grpid Give objects the same group ID as their creator.
bsdgroups
@@ -243,8 +253,6 @@ nobh (a) cache disk block mapping information
"nobh" option tries to avoid associating buffer
heads (supported only for "writeback" mode).
-mballoc (*) Use the multiple block allocator for block allocation
-nomballoc disabled multiple block allocator for block allocation.
stripe=n Number of filesystem blocks that mballoc will try
to use for allocation size and alignment. For RAID5/6
systems this should be the number of data
@@ -252,6 +260,7 @@ stripe=n Number of filesystem blocks that mballoc will try
delalloc (*) Deferring block allocation until write-out time.
nodelalloc Disable delayed allocation. Blocks are allocated
when data is copied from user to page cache.
+
Data Mode
=========
There are 3 different data modes:
diff --git a/Documentation/filesystems/fiemap.txt b/Documentation/filesystems/fiemap.txt
new file mode 100644
index 00000000000..1e3defcfe50
--- /dev/null
+++ b/Documentation/filesystems/fiemap.txt
@@ -0,0 +1,228 @@
+============
+Fiemap Ioctl
+============
+
+The fiemap ioctl is an efficient method for userspace to get file
+extent mappings. Instead of block-by-block mapping (such as bmap), fiemap
+returns a list of extents.
+
+
+Request Basics
+--------------
+
+A fiemap request is encoded within struct fiemap:
+
+struct fiemap {
+ __u64 fm_start; /* logical offset (inclusive) at
+ * which to start mapping (in) */
+ __u64 fm_length; /* logical length of mapping which
+ * userspace cares about (in) */
+ __u32 fm_flags; /* FIEMAP_FLAG_* flags for request (in/out) */
+ __u32 fm_mapped_extents; /* number of extents that were
+ * mapped (out) */
+ __u32 fm_extent_count; /* size of fm_extents array (in) */
+ __u32 fm_reserved;
+ struct fiemap_extent fm_extents[0]; /* array of mapped extents (out) */
+};
+
+
+fm_start and fm_length specify the logical range within the file
+which the process would like mappings for. Extents returned mirror
+those on disk - that is, the logical offset of the 1st returned extent
+may start before fm_start, and the range covered by the last returned
+extent may end after fm_length. All offsets and lengths are in bytes.
+
+Certain flags to modify the way in which mappings are looked up can be
+set in fm_flags. If the kernel doesn't understand some particular
+flags, it will return EBADR and the contents of fm_flags will contain
+the set of flags which caused the error. If the kernel is compatible
+with all flags passed, the contents of fm_flags will be unmodified.
+It is up to userspace to determine whether rejection of a particular
+flag is fatal to its operation. This scheme is intended to allow the
+fiemap interface to grow in the future but without losing
+compatibility with old software.
+
+fm_extent_count specifies the number of elements in the fm_extents[] array
+that can be used to return extents. If fm_extent_count is zero, then the
+fm_extents[] array is ignored (no extents will be returned), and the
+fm_mapped_extents count will hold the number of extents needed in
+fm_extents[] to hold the file's current mapping. Note that there is
+nothing to prevent the file from changing between calls to FIEMAP.
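+
+To make the two-call pattern concrete, here is a minimal user space sketch
+(illustrative only; FS_IOC_FIEMAP is assumed to come from <linux/fs.h> and
+the structures from <linux/fiemap.h>; the layout of each returned extent is
+described under "Extent Mapping" below):
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+#include <linux/fs.h>
+#include <linux/fiemap.h>
+
+int main(int argc, char **argv)
+{
+	struct fiemap probe, *fm;
+	__u32 i, n;
+	int fd;
+
+	if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
+		return 1;
+
+	/* First call: fm_extent_count == 0 only asks how many extents exist. */
+	memset(&probe, 0, sizeof(probe));
+	probe.fm_length = ~0ULL;		/* map the whole file */
+	if (ioctl(fd, FS_IOC_FIEMAP, &probe) < 0)
+		return 1;
+	n = probe.fm_mapped_extents;
+
+	/* Second call: provide room for that many extents and fetch them.
+	 * The file may have changed in between, as the text above warns. */
+	fm = calloc(1, sizeof(*fm) + n * sizeof(struct fiemap_extent));
+	if (!fm)
+		return 1;
+	fm->fm_length = ~0ULL;
+	fm->fm_extent_count = n;
+	if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0)
+		return 1;
+
+	for (i = 0; i < fm->fm_mapped_extents; i++)
+		printf("extent %u: logical %llu physical %llu length %llu flags 0x%x\n",
+		       i,
+		       (unsigned long long)fm->fm_extents[i].fe_logical,
+		       (unsigned long long)fm->fm_extents[i].fe_physical,
+		       (unsigned long long)fm->fm_extents[i].fe_length,
+		       fm->fm_extents[i].fe_flags);
+
+	free(fm);
+	close(fd);
+	return 0;
+}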
+
+The following flags can be set in fm_flags:
+
+* FIEMAP_FLAG_SYNC
+If this flag is set, the kernel will sync the file before mapping extents.
+
+* FIEMAP_FLAG_XATTR
+If this flag is set, the extents returned will describe the inode's
+extended attribute lookup tree, instead of its data tree.
+
+
+Extent Mapping
+--------------
+
+Extent information is returned within the embedded fm_extents array
+which userspace must allocate along with the fiemap structure. The
+number of elements in the fiemap_extents[] array should be passed via
+fm_extent_count. The number of extents mapped by kernel will be
+returned via fm_mapped_extents. If the number of fiemap_extents
+allocated is less than would be required to map the requested range,
+the maximum number of extents that can be mapped in the fm_extents[]
+array will be returned and fm_mapped_extents will be equal to
+fm_extent_count. In that case, the last extent in the array will not
+complete the requested range and will not have the FIEMAP_EXTENT_LAST
+flag set (see the next section on extent flags).
+
+Each extent is described by a single fiemap_extent structure as
+returned in fm_extents.
+
+struct fiemap_extent {
+ __u64 fe_logical; /* logical offset in bytes for the start of
+ * the extent */
+ __u64 fe_physical; /* physical offset in bytes for the start
+ * of the extent */
+ __u64 fe_length; /* length in bytes for the extent */
+ __u64 fe_reserved64[2];
+ __u32 fe_flags; /* FIEMAP_EXTENT_* flags for this extent */
+ __u32 fe_reserved[3];
+};
+
+All offsets and lengths are in bytes and mirror those on disk. It is valid
+for an extent's logical offset to start before the request or its logical
+length to extend past the request. Unless FIEMAP_EXTENT_NOT_ALIGNED is
+returned, fe_logical, fe_physical, and fe_length will be aligned to the
+block size of the file system. With the exception of extents flagged as
+FIEMAP_EXTENT_MERGED, adjacent extents will not be merged.
+
+The fe_flags field contains flags which describe the extent returned.
+A special flag, FIEMAP_EXTENT_LAST is always set on the last extent in
+the file so that the process making fiemap calls can determine when no
+more extents are available, without having to call the ioctl again.
+
+Some flags are intentionally vague and will always be set in the
+presence of other more specific flags. This way a program looking for
+a general property does not have to know all existing and future flags
+which imply that property.
+
+For example, if FIEMAP_EXTENT_DATA_INLINE or FIEMAP_EXTENT_DATA_TAIL
+are set, FIEMAP_EXTENT_NOT_ALIGNED will also be set. A program looking
+for inline or tail-packed data can key on the specific flag. Software
+which simply wants to avoid operating on non-aligned extents,
+however, can just key on FIEMAP_EXTENT_NOT_ALIGNED, and not have to
+worry about all present and future flags which might imply unaligned
+data. Note that the opposite is not true - it would be valid for
+FIEMAP_EXTENT_NOT_ALIGNED to appear alone.
+
+* FIEMAP_EXTENT_LAST
+This is the last extent in the file. A mapping attempt past this
+extent will return nothing.
+
+* FIEMAP_EXTENT_UNKNOWN
+The location of this extent is currently unknown. This may indicate
+the data is stored on an inaccessible volume or that no storage has
+been allocated for the file yet.
+
+* FIEMAP_EXTENT_DELALLOC
+ - This will also set FIEMAP_EXTENT_UNKNOWN.
+Delayed allocation - while there is data for this extent, its
+physical location has not been allocated yet.
+
+* FIEMAP_EXTENT_ENCODED
+This extent does not consist of plain filesystem blocks but is
+encoded (e.g. encrypted or compressed). Reading the data in this
+extent via I/O to the block device will have undefined results.
+
+Note that it is *always* undefined to try to update the data
+in-place by writing to the indicated location without the
+assistance of the filesystem, or to access the data using the
+information returned by the FIEMAP interface while the filesystem
+is mounted. In other words, user applications may only read the
+extent data via I/O to the block device while the filesystem is
+unmounted, and then only if the FIEMAP_EXTENT_ENCODED flag is
+clear; user applications must not try reading or writing to the
+filesystem via the block device under any other circumstances.
+
+* FIEMAP_EXTENT_DATA_ENCRYPTED
+ - This will also set FIEMAP_EXTENT_ENCODED
+The data in this extent has been encrypted by the file system.
+
+* FIEMAP_EXTENT_NOT_ALIGNED
+Extent offsets and length are not guaranteed to be block aligned.
+
+* FIEMAP_EXTENT_DATA_INLINE
+  - This will also set FIEMAP_EXTENT_NOT_ALIGNED
+Data is located within a metadata block.
+
+* FIEMAP_EXTENT_DATA_TAIL
+  - This will also set FIEMAP_EXTENT_NOT_ALIGNED
+Data is packed into a block with data from other files.
+
+* FIEMAP_EXTENT_UNWRITTEN
+Unwritten extent - the extent is allocated but its data has not been
+initialized. This indicates the extent's data will be all zero if read
+through the filesystem but the contents are undefined if read directly from
+the device.
+
+* FIEMAP_EXTENT_MERGED
+This will be set when a file does not support extents, i.e., it uses a block
+based addressing scheme. Since returning an extent for each block back to
+userspace would be highly inefficient, the kernel will try to merge most
+adjacent blocks into 'extents'.
+
+
+VFS -> File System Implementation
+---------------------------------
+
+File systems wishing to support fiemap must implement a ->fiemap callback on
+their inode_operations structure. The fs ->fiemap call is responsible for
+defining its set of supported fiemap flags, and calling a helper function on
+each discovered extent:
+
+struct inode_operations {
+ ...
+
+	int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
+		      u64 len);
+};
+
+->fiemap is passed struct fiemap_extent_info which describes the
+fiemap request:
+
+struct fiemap_extent_info {
+ unsigned int fi_flags; /* Flags as passed from user */
+ unsigned int fi_extents_mapped; /* Number of mapped extents */
+ unsigned int fi_extents_max; /* Size of fiemap_extent array */
+ struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent array */
+};
+
+It is intended that the file system should not need to access any of this
+structure directly.
+
+
+Flag checking should be done at the beginning of the ->fiemap callback via the
+fiemap_check_flags() helper:
+
+int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags);
+
+The struct fieinfo should be passed in as received from ioctl_fiemap(). The
+set of fiemap flags which the fs understands should be passed via fs_flags. If
+fiemap_check_flags finds invalid user flags, it will place the bad values in
+fieinfo->fi_flags and return -EBADR. If the file system gets -EBADR from
+fiemap_check_flags(), it should immediately exit, returning that error back to
+ioctl_fiemap().
+
+
+For each extent in the request range, the file system should call
+the helper function, fiemap_fill_next_extent():
+
+int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
+ u64 phys, u64 len, u32 flags, u32 dev);
+
+fiemap_fill_next_extent() will use the passed values to populate the
+next free extent in the fm_extents array. 'General' extent flags will
+automatically be set from specific flags on behalf of the calling file
+system so that the userspace API is not broken.
+
+fiemap_fill_next_extent() returns 0 on success, and 1 when the
+user-supplied fm_extents array is full. If an error is encountered
+while copying the extent to user memory, -EFAULT will be returned.
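+
+Putting the pieces together, a minimal ->fiemap callback might look like the
+sketch below. It follows the prototypes quoted above; the extent-walking
+helper myfs_next_extent() is hypothetical and stands in for whatever
+block/extent lookup the file system already has:
+
+	static int myfs_fiemap(struct inode *inode,
+			       struct fiemap_extent_info *fieinfo,
+			       u64 start, u64 len)
+	{
+		u64 logical, phys, elen;
+		u32 flags;
+		int ret;
+
+		/* Reject any user flags this file system does not understand. */
+		ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
+		if (ret)
+			return ret;	/* typically -EBADR */
+
+		/* Walk the extents overlapping [start, start + len). */
+		while (!myfs_next_extent(inode, start, len, &logical, &phys,
+					 &elen, &flags)) {
+			ret = fiemap_fill_next_extent(fieinfo, logical, phys,
+						      elen, flags, 0);
+			if (ret)
+				break;	/* 1: extent array full, < 0: error */
+		}
+
+		return ret < 0 ? ret : 0;
+	}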
diff --git a/Documentation/filesystems/nfsroot.txt b/Documentation/filesystems/nfsroot.txt
index 31b32917234..68baddf3c3e 100644
--- a/Documentation/filesystems/nfsroot.txt
+++ b/Documentation/filesystems/nfsroot.txt
@@ -169,7 +169,7 @@ They depend on various facilities being available:
3.1) Booting from a floppy using syslinux
When building kernels, an easy way to create a boot floppy that uses
- syslinux is to use the zdisk or bzdisk make targets which use
+ syslinux is to use the zdisk or bzdisk make targets which use zimage
and bzimage images respectively. Both targets accept the
FDARGS parameter which can be used to set the kernel command line.
diff --git a/Documentation/filesystems/ocfs2.txt b/Documentation/filesystems/ocfs2.txt
index c318a8bbb1e..4340cc82579 100644
--- a/Documentation/filesystems/ocfs2.txt
+++ b/Documentation/filesystems/ocfs2.txt
@@ -76,3 +76,9 @@ localalloc=8(*) Allows custom localalloc size in MB. If the value is too
large, the fs will silently revert it to the default.
Localalloc is not enabled for local mounts.
localflocks This disables cluster aware flock.
+inode64 Indicates that Ocfs2 is allowed to create inodes at
+ any location in the filesystem, including those which
+ will result in inode numbers occupying more than 32
+ bits of significance.
+user_xattr (*) Enables Extended User Attributes.
+nouser_xattr Disables Extended User Attributes.
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index f566ad9bcb7..bcceb99b81d 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -923,45 +923,44 @@ CPUs.
The "procs_blocked" line gives the number of processes currently blocked,
waiting for I/O to complete.
+
1.9 Ext4 file system parameters
------------------------------
-Ext4 file system have one directory per partition under /proc/fs/ext4/
-# ls /proc/fs/ext4/hdc/
-group_prealloc max_to_scan mb_groups mb_history min_to_scan order2_req
-stats stream_req
-
-mb_groups:
-This file gives the details of multiblock allocator buddy cache of free blocks
-
-mb_history:
-Multiblock allocation history.
-
-stats:
-This file indicate whether the multiblock allocator should start collecting
-statistics. The statistics are shown during unmount
-group_prealloc:
-The multiblock allocator normalize the block allocation request to
-group_prealloc filesystem blocks if we don't have strip value set.
-The stripe value can be specified at mount time or during mke2fs.
+Information about mounted ext4 file systems can be found in
+/proc/fs/ext4. Each mounted filesystem will have a directory in
+/proc/fs/ext4 based on its device name (e.g., /proc/fs/ext4/hdc or
+/proc/fs/ext4/dm-0). The files in each per-device directory are shown
+in Table 1-10, below.
-max_to_scan:
-How long multiblock allocator can look for a best extent (in found extents)
-
-min_to_scan:
-How long multiblock allocator must look for a best extent
-
-order2_req:
-Multiblock allocator use 2^N search using buddies only for requests greater
-than or equal to order2_req. The request size is specfied in file system
-blocks. A value of 2 indicate only if the requests are greater than or equal
-to 4 blocks.
+Table 1-10: Files in /proc/fs/ext4/<devname>
+..............................................................................
+ File Content
+ mb_groups details of multiblock allocator buddy cache of free blocks
+ mb_history multiblock allocation history
+ stats controls whether the multiblock allocator should start
+ collecting statistics, which are shown during the unmount
+ group_prealloc the multiblock allocator will round up allocation
+ requests to a multiple of this tuning parameter if the
+ stripe size is not set in the ext4 superblock
+ max_to_scan The maximum number of extents the multiblock allocator
+ will search to find the best extent
+ min_to_scan The minimum number of extents the multiblock allocator
+ will search to find the best extent
+ order2_req Tuning parameter which controls the minimum size for
+ requests (as a power of 2) where the buddy cache is
+ used
+ stream_req Files which have fewer blocks than this tunable
+ parameter will have their blocks allocated out of a
+ block group specific preallocation pool, so that small
+ files are packed closely together. Each large file
+ will have its blocks allocated out of its own unique
+ preallocation pool.
+inode_readahead Tuning parameter which controls the maximum number of
+ inode table blocks that ext4's inode table readahead
+ algorithm will pre-read into the buffer cache
+..............................................................................
-stream_req:
-Files smaller than stream_req are served by the stream allocator, whose
-purpose is to pack requests as close each to other as possible to
-produce smooth I/O traffic. Avalue of 16 indicate that file smaller than 16
-filesystem block size will use group based preallocation.
------------------------------------------------------------------------------
Summary
@@ -1322,6 +1321,18 @@ debugging information is displayed on console.
The NMI switch that most IA32 servers have fires an unknown NMI, for example.
If a system hangs up, try pressing the NMI switch.
+panic_on_unrecovered_nmi
+------------------------
+
+The default Linux behaviour on an NMI of either memory or unknown type is to
+continue operation. For many environments, such as scientific computing, it
+is preferable that the box is taken out and the error dealt with rather than
+an uncorrected parity/ECC error getting propagated.
+
+A small number of systems do generate NMIs for bizarre, random reasons such as
+power management, so the default is off. This sysctl works like the existing
+panic controls already in that directory.
+
nmi_watchdog
------------
@@ -1332,13 +1343,6 @@ determine whether or not they are still functioning properly.
Because the NMI watchdog shares registers with oprofile, by disabling the NMI
watchdog, oprofile may have more registers to utilize.
-maps_protect
-------------
-
-Enables/Disables the protection of the per-process proc entries "maps" and
-"smaps". When enabled, the contents of these files are visible only to
-readers that are allowed to ptrace() the given process.
-
msgmni
------
@@ -1380,15 +1384,18 @@ causes the kernel to prefer to reclaim dentries and inodes.
dirty_background_ratio
----------------------
-Contains, as a percentage of total system memory, the number of pages at which
-the pdflush background writeback daemon will start writing out dirty data.
+Contains, as a percentage of the dirtyable system memory (free pages + mapped
+pages + file cache, not including locked pages and HugePages), the number of
+pages at which the pdflush background writeback daemon will start writing out
+dirty data.
dirty_ratio
-----------------
-Contains, as a percentage of total system memory, the number of pages at which
-a process which is generating disk writes will itself start writing out dirty
-data.
+Contains, as a percentage of the dirtyable system memory (free pages + mapped
+pages + file cache, not including locked pages and HugePages), the number of
+pages at which a process which is generating disk writes will itself start
+writing out dirty data.
dirty_writeback_centisecs
-------------------------
@@ -2408,24 +2415,29 @@ will be dumped when the <pid> process is dumped. coredump_filter is a bitmask
of memory types. If a bit of the bitmask is set, memory segments of the
corresponding memory type are dumped, otherwise they are not dumped.
-The following 4 memory types are supported:
+The following 7 memory types are supported:
- (bit 0) anonymous private memory
- (bit 1) anonymous shared memory
- (bit 2) file-backed private memory
- (bit 3) file-backed shared memory
- (bit 4) ELF header pages in file-backed private memory areas (it is
effective only if the bit 2 is cleared)
+ - (bit 5) hugetlb private memory
+ - (bit 6) hugetlb shared memory
Note that MMIO pages such as frame buffer are never dumped and vDSO pages
are always dumped regardless of the bitmask status.
-Default value of coredump_filter is 0x3; this means all anonymous memory
-segments are dumped.
+  Note that bits 0-4 don't affect hugetlb memory; hugetlb memory is only
+  affected by bits 5-6.
+
+Default value of coredump_filter is 0x23; this means all anonymous memory
+segments and hugetlb private memory are dumped.
If you don't want to dump all shared memory segments attached to pid 1234,
-write 1 to the process's proc file.
+write 0x21 to the process's proc file.
- $ echo 0x1 > /proc/1234/coredump_filter
+ $ echo 0x21 > /proc/1234/coredump_filter
When a new process is created, the process inherits the bitmask status from its
parent. It is useful to set up coredump_filter before the program runs.
diff --git a/Documentation/filesystems/ramfs-rootfs-initramfs.txt b/Documentation/filesystems/ramfs-rootfs-initramfs.txt
index 7be232b44ee..62fe9b1e089 100644
--- a/Documentation/filesystems/ramfs-rootfs-initramfs.txt
+++ b/Documentation/filesystems/ramfs-rootfs-initramfs.txt
@@ -263,7 +263,7 @@ User Mode Linux, like so:
sleep(999999999);
}
EOF
- gcc -static hello2.c -o init
+ gcc -static hello.c -o init
echo init | cpio -o -H newc | gzip > test.cpio.gz
# Testing external initramfs using the initrd loading mechanism.
qemu -kernel /boot/vmlinuz -initrd test.cpio.gz /dev/zero
diff --git a/Documentation/filesystems/ubifs.txt b/Documentation/filesystems/ubifs.txt
index 6a0d70a22f0..dd84ea3c10d 100644
--- a/Documentation/filesystems/ubifs.txt
+++ b/Documentation/filesystems/ubifs.txt
@@ -86,6 +86,15 @@ norm_unmount (*) commit on unmount; the journal is committed
fast_unmount do not commit on unmount; this option makes
unmount faster, but the next mount slower
because of the need to replay the journal.
+bulk_read read more in one go to take advantage of flash
+ media that read faster sequentially
+no_bulk_read (*) do not bulk-read
+no_chk_data_crc skip checking of CRCs on data nodes in order to
+ improve read performance. Use this option only
+ if the flash media is highly reliable. The effect
+ of this option is that corruption of the contents
+ of a file can go unnoticed.
+chk_data_crc (*) do not skip checking CRCs on data nodes
Quick usage instructions
diff --git a/Documentation/gpio.txt b/Documentation/gpio.txt
index 18022e249c5..b1b98870124 100644
--- a/Documentation/gpio.txt
+++ b/Documentation/gpio.txt
@@ -240,6 +240,10 @@ signal, or (b) something wrongly believes it's safe to remove drivers
needed to manage a signal that's in active use. That is, requesting a
GPIO can serve as a kind of lock.
+Some platforms may also use knowledge about what GPIOs are active for
+power management, such as by powering down unused chip sectors and, more
+easily, gating off unused clocks.
+
These two calls are optional because not all current Linux platforms
offer such functionality in their GPIO support; a valid implementation
could return success for all gpio_request() calls. Unlike the other calls,
@@ -264,7 +268,7 @@ map between them using calls like:
/* map GPIO numbers to IRQ numbers */
int gpio_to_irq(unsigned gpio);
- /* map IRQ numbers to GPIO numbers */
+ /* map IRQ numbers to GPIO numbers (avoid using this) */
int irq_to_gpio(unsigned irq);
Those return either the corresponding number in the other namespace, or
@@ -284,7 +288,8 @@ system wakeup capabilities.
Non-error values returned from irq_to_gpio() would most commonly be used
with gpio_get_value(), for example to initialize or update driver state
-when the IRQ is edge-triggered.
+when the IRQ is edge-triggered. Note that some platforms don't support
+this reverse mapping, so you should avoid using it.
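+
+As a minimal sketch of the common direction (GPIO number to IRQ number),
+a driver that owns a "button" GPIO and wants edge interrupts from it might
+do something like the following (the names button_isr() and button_setup()
+are made up for the example):
+
+	#include <linux/gpio.h>
+	#include <linux/interrupt.h>
+
+	static irqreturn_t button_isr(int irq, void *dev_id)
+	{
+		/* ... react to the edge ... */
+		return IRQ_HANDLED;
+	}
+
+	static int button_setup(unsigned gpio)
+	{
+		int irq, err;
+
+		err = gpio_request(gpio, "button");
+		if (err)
+			return err;
+		gpio_direction_input(gpio);
+
+		irq = gpio_to_irq(gpio);	/* map GPIO -> IRQ */
+		if (irq < 0)
+			return irq;
+
+		return request_irq(irq, button_isr, IRQF_TRIGGER_FALLING,
+				   "button", NULL);
+	}
+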
Emulating Open Drain Signals
diff --git a/Documentation/hwmon/adt7470 b/Documentation/hwmon/adt7470
new file mode 100644
index 00000000000..75d13ca147c
--- /dev/null
+++ b/Documentation/hwmon/adt7470
@@ -0,0 +1,76 @@
+Kernel driver adt7470
+=====================
+
+Supported chips:
+ * Analog Devices ADT7470
+ Prefix: 'adt7470'
+ Addresses scanned: I2C 0x2C, 0x2E, 0x2F
+ Datasheet: Publicly available at the Analog Devices website
+
+Author: Darrick J. Wong
+
+Description
+-----------
+
+This driver implements support for the Analog Devices ADT7470 chip. There may
+be other chips that implement this interface.
+
+The ADT7470 uses the 2-wire interface compatible with the SMBus 2.0
+specification. Using an analog to digital converter it measures up to ten (10)
+external temperatures. It has four (4) 16-bit counters for measuring fan speed.
+There are four (4) PWM outputs that can be used to control fan speed.
+
+A sophisticated control system for the PWM outputs is designed into the ADT7470
+that allows fan speed to be adjusted automatically based on any of the ten
+temperature sensors. Each PWM output is individually adjustable and
+programmable. Once configured, the ADT7470 will adjust the PWM outputs in
+response to the measured temperatures without further host intervention. This
+feature can also be disabled for manual control of the PWM outputs.
+
+Each of the measured inputs (temperature, fan speed) has corresponding high/low
+limit values. The ADT7470 will signal an ALARM if any measured value exceeds
+either limit.
+
+The ADT7470 DOES NOT sample all inputs continuously. A single pin on the
+ADT7470 is connected to a multitude of thermal diodes, but the chip must be
+instructed explicitly to read the multitude of diodes. If you want to use
+automatic fan control mode, you must manually read any of the temperature
+sensors or the fan control algorithm will not run. The chip WILL NOT DO THIS
+AUTOMATICALLY; this must be done from userspace. This may be a bug in the chip
+design, given that many other AD chips take care of this. The driver will not
+read the registers more often than once every 5 seconds. Further,
+configuration data is only read once per minute.
+
+Special Features
+----------------
+
+The ADT7470 has an 8-bit ADC and is capable of measuring temperatures with 1
+degC resolution.
+
+The Analog Devices datasheet is very detailed and describes a procedure for
+determining an optimal configuration for the automatic PWM control.
+
+Configuration Notes
+-------------------
+
+Besides the standard interfaces, this driver adds the following:
+
+* PWM Control
+
+* pwm#_auto_point1_pwm and pwm#_auto_point1_temp and
+* pwm#_auto_point2_pwm and pwm#_auto_point2_temp -
+
+point1: Set the pwm speed at a lower temperature bound.
+point2: Set the pwm speed at a higher temperature bound.
+
+The ADT7470 will scale the pwm between the lower and higher pwm speed when
+the temperature is between the two temperature boundaries. PWM values range
+from 0 (off) to 255 (full speed). Fan speed will be set to maximum when the
+temperature sensor associated with the PWM control exceeds
+pwm#_auto_point2_temp.
+
+Notes
+-----
+
+As stated above, the temperature inputs must be read periodically from
+userspace in order for the automatic pwm algorithm to run.
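+
+For example, a trivial poller along these lines is enough (the hwmon sysfs
+path below is an assumption; it depends on which hwmon device number the
+driver was bound to on your system):
+
+	#include <stdio.h>
+	#include <unistd.h>
+
+	int main(void)
+	{
+		long temp;
+		FILE *f;
+
+		for (;;) {
+			/* Reading any temperature input triggers the chip to
+			 * sample its sensors, which the automatic fan control
+			 * algorithm needs. */
+			f = fopen("/sys/class/hwmon/hwmon0/device/temp1_input", "r");
+			if (f && fscanf(f, "%ld", &temp) == 1)
+				printf("temp1: %ld millidegrees C\n", temp);
+			if (f)
+				fclose(f);
+			sleep(10);
+		}
+		return 0;
+	}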
diff --git a/Documentation/hwmon/it87 b/Documentation/hwmon/it87
index 3496b7020e7..042c0415140 100644
--- a/Documentation/hwmon/it87
+++ b/Documentation/hwmon/it87
@@ -136,10 +136,10 @@ once-only alarms.
The IT87xx only updates its values each 1.5 seconds; reading it more often
will do no harm, but will return 'old' values.
-To change sensor N to a thermistor, 'echo 2 > tempN_type' where N is 1, 2,
+To change sensor N to a thermistor, 'echo 4 > tempN_type' where N is 1, 2,
or 3. To change sensor N to a thermal diode, 'echo 3 > tempN_type'.
Give 0 for unused sensor. Any other value is invalid. To configure this at
-startup, consult lm_sensors's /etc/sensors.conf. (2 = thermistor;
+startup, consult lm_sensors's /etc/sensors.conf. (4 = thermistor;
3 = thermal diode)
diff --git a/Documentation/hwmon/lm85 b/Documentation/hwmon/lm85
index 6d41db7f17f..40062074129 100644
--- a/Documentation/hwmon/lm85
+++ b/Documentation/hwmon/lm85
@@ -163,16 +163,6 @@ configured individually according to the following options.
* pwm#_auto_pwm_min - this specifies the PWM value for temp#_auto_temp_off
temperature. (PWM value from 0 to 255)
-* pwm#_auto_pwm_freq - select base frequency of PWM output. You can select
- in range of 10.0 to 94.0 Hz in .1 Hz units.
- (Values 100 to 940).
-
-The pwm#_auto_pwm_freq can be set to one of the following 8 values. Setting the
-frequency to a value not on this list, will result in the next higher frequency
-being selected. The actual device frequency may vary slightly from this
-specification as designed by the manufacturer. Consult the datasheet for more
-details. (PWM Frequency values: 100, 150, 230, 300, 380, 470, 620, 940)
-
* pwm#_auto_pwm_minctl - this flag selects for temp#_auto_temp_off temperature
			 the behaviour of fans. Write 1 to let fans spin at
			 pwm#_auto_pwm_min or write 0 to switch them off.
diff --git a/Documentation/hwmon/lm87 b/Documentation/hwmon/lm87
index ec27aa1b94c..6b47b67fd96 100644
--- a/Documentation/hwmon/lm87
+++ b/Documentation/hwmon/lm87
@@ -65,11 +65,10 @@ The LM87 has four pins which can serve one of two possible functions,
depending on the hardware configuration.
Some functions share pins, so not all functions are available at the same
-time. Which are depends on the hardware setup. This driver assumes that
-the BIOS configured the chip correctly. In that respect, it differs from
-the original driver (from lm_sensors for Linux 2.4), which would force the
-LM87 to an arbitrary, compile-time chosen mode, regardless of the actual
-chipset wiring.
+time. Which are available depends on the hardware setup. This driver normally
+assumes that firmware configured the chip correctly. Where this is not
+the case, platform code must set the I2C client's platform_data to point
+to a u8 value to be written to the channel register.
For reference, here is the list of exclusive functions:
- in0+in5 (default) or temp3
diff --git a/Documentation/hwmon/lm90 b/Documentation/hwmon/lm90
index aa4a0ec2008..e0d5206d1de 100644
--- a/Documentation/hwmon/lm90
+++ b/Documentation/hwmon/lm90
@@ -11,7 +11,7 @@ Supported chips:
Prefix: 'lm99'
Addresses scanned: I2C 0x4c and 0x4d
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/pf/LM/LM89.html
+ http://www.national.com/mpf/LM/LM89.html
* National Semiconductor LM99
Prefix: 'lm99'
Addresses scanned: I2C 0x4c and 0x4d
@@ -21,18 +21,32 @@ Supported chips:
Prefix: 'lm86'
Addresses scanned: I2C 0x4c
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/pf/LM/LM86.html
+ http://www.national.com/mpf/LM/LM86.html
* Analog Devices ADM1032
Prefix: 'adm1032'
Addresses scanned: I2C 0x4c and 0x4d
- Datasheet: Publicly available at the Analog Devices website
- http://www.analog.com/en/prod/0,2877,ADM1032,00.html
+ Datasheet: Publicly available at the ON Semiconductor website
+ http://www.onsemi.com/PowerSolutions/product.do?id=ADM1032
* Analog Devices ADT7461
Prefix: 'adt7461'
Addresses scanned: I2C 0x4c and 0x4d
- Datasheet: Publicly available at the Analog Devices website
- http://www.analog.com/en/prod/0,2877,ADT7461,00.html
- Note: Only if in ADM1032 compatibility mode
+ Datasheet: Publicly available at the ON Semiconductor website
+ http://www.onsemi.com/PowerSolutions/product.do?id=ADT7461
+ * Maxim MAX6646
+ Prefix: 'max6646'
+ Addresses scanned: I2C 0x4d
+ Datasheet: Publicly available at the Maxim website
+ http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3497
+ * Maxim MAX6647
+ Prefix: 'max6646'
+ Addresses scanned: I2C 0x4e
+ Datasheet: Publicly available at the Maxim website
+ http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3497
+ * Maxim MAX6649
+ Prefix: 'max6646'
+ Addresses scanned: I2C 0x4c
+ Datasheet: Publicly available at the Maxim website
+ http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3497
* Maxim MAX6657
Prefix: 'max6657'
Addresses scanned: I2C 0x4c
@@ -70,25 +84,21 @@ Description
The LM90 is a digital temperature sensor. It senses its own temperature as
well as the temperature of up to one external diode. It is compatible
-with many other devices such as the LM86, the LM89, the LM99, the ADM1032,
-the MAX6657, MAX6658, MAX6659, MAX6680 and the MAX6681 all of which are
-supported by this driver.
+with many other devices, many of which are supported by this driver.
Note that there is no easy way to differentiate between the MAX6657,
MAX6658 and MAX6659 variants. The extra address and features of the
MAX6659 are not supported by this driver. The MAX6680 and MAX6681 only
differ in their pinout, therefore they obviously can't (and don't need to)
-be distinguished. Additionally, the ADT7461 is supported if found in
-ADM1032 compatibility mode.
+be distinguished.
The specificity of this family of chipsets over the ADM1021/LM84
family is that it features critical limits with hysteresis, and an
increased resolution of the remote temperature measurement.
The different chipsets of the family are not strictly identical, although
-very similar. This driver doesn't handle any specific feature for now,
-with the exception of SMBus PEC. For reference, here comes a non-exhaustive
-list of specific features:
+very similar. For reference, here comes a non-exhaustive list of specific
+features:
LM90:
* Filter and alert configuration register at 0xBF.
@@ -114,9 +124,11 @@ ADT7461:
* Lower resolution for remote temperature
MAX6657 and MAX6658:
+ * Better local resolution
* Remote sensor type selection
MAX6659:
+ * Better local resolution
* Selectable address
* Second critical temperature limit
* Remote sensor type selection
@@ -127,7 +139,8 @@ MAX6680 and MAX6681:
All temperature values are given in degrees Celsius. Resolution
is 1.0 degree for the local temperature, 0.125 degree for the remote
-temperature.
+temperature, except for the MAX6657, MAX6658 and MAX6659 which have a
+resolution of 0.125 degree for both temperatures.
Each sensor has its own high and low limits, plus a critical limit.
Additionally, there is a relative hysteresis value common to both critical
diff --git a/Documentation/hwmon/pc87360 b/Documentation/hwmon/pc87360
index 89a8fcfa78d..cbac32b59c8 100644
--- a/Documentation/hwmon/pc87360
+++ b/Documentation/hwmon/pc87360
@@ -5,12 +5,7 @@ Supported chips:
* National Semiconductor PC87360, PC87363, PC87364, PC87365 and PC87366
Prefixes: 'pc87360', 'pc87363', 'pc87364', 'pc87365', 'pc87366'
Addresses scanned: none, address read from Super I/O config space
- Datasheets:
- http://www.national.com/pf/PC/PC87360.html
- http://www.national.com/pf/PC/PC87363.html
- http://www.national.com/pf/PC/PC87364.html
- http://www.national.com/pf/PC/PC87365.html
- http://www.national.com/pf/PC/PC87366.html
+ Datasheets: No longer available
Authors: Jean Delvare <khali@linux-fr.org>
diff --git a/Documentation/hwmon/pc87427 b/Documentation/hwmon/pc87427
index 9a0708f9f49..d1ebbe510f3 100644
--- a/Documentation/hwmon/pc87427
+++ b/Documentation/hwmon/pc87427
@@ -5,7 +5,7 @@ Supported chips:
* National Semiconductor PC87427
Prefix: 'pc87427'
Addresses scanned: none, address read from Super I/O config space
- Datasheet: http://www.winbond.com.tw/E-WINBONDHTM/partner/apc_007.html
+ Datasheet: No longer available
Author: Jean Delvare <khali@linux-fr.org>
diff --git a/Documentation/hwmon/w83781d b/Documentation/hwmon/w83781d
index 6f800a0283e..c91e0b63ea1 100644
--- a/Documentation/hwmon/w83781d
+++ b/Documentation/hwmon/w83781d
@@ -353,7 +353,7 @@ in6=255
# PWM
-Additional info about PWM on the AS99127F (may apply to other Asus
+* Additional info about PWM on the AS99127F (may apply to other Asus
chips as well) by Jean Delvare as of 2004-04-09:
AS99127F revision 2 seems to have two PWM registers at 0x59 and 0x5A,
@@ -396,7 +396,7 @@ Please contact us if you can figure out how it is supposed to work. As
long as we don't know more, the w83781d driver doesn't handle PWM on
AS99127F chips at all.
-Additional info about PWM on the AS99127F rev.1 by Hector Martin:
+* Additional info about PWM on the AS99127F rev.1 by Hector Martin:
I've been fiddling around with the (in)famous 0x59 register and
found out the following values do work as a form of coarse pwm:
@@ -418,3 +418,36 @@ change.
My mobo is an ASUS A7V266-E. This behavior is similar to what I got
with speedfan under Windows, where 0-15% would be off, 15-2x% (can't
remember the exact value) would be 70% and higher would be full on.
+
+* Additional info about PWM on the AS99127F rev.1 from lm-sensors
+ ticket #2350:
+
+I conducted some experiment on Asus P3B-F motherboard with AS99127F
+(Ver. 1).
+
+I confirm that 0x59 register control the CPU_Fan Header on this
+motherboard, and 0x5a register control PWR_Fan.
+
+In order to reduce the dependency of specific fan, the measurement is
+conducted with a digital scope without fan connected. I found out that
+P3B-F actually output variable DC voltage on fan header center pin,
+looks like PWM is filtered on this motherboard.
+
+Here are some of measurements:
+
+0x80 20 mV
+0x81 20 mV
+0x82 232 mV
+0x83 1.2 V
+0x84 2.31 V
+0x85 3.44 V
+0x86 4.62 V
+0x87 5.81 V
+0x88 7.01 V
+0x89    8.22 V
+0x8a 9.42 V
+0x8b 10.6 V
+0x8c 11.9 V
+0x8d 12.4 V
+0x8e 12.4 V
+0x8f 12.4 V
diff --git a/Documentation/hwmon/w83791d b/Documentation/hwmon/w83791d
index a67d3b7a709..5663e491655 100644
--- a/Documentation/hwmon/w83791d
+++ b/Documentation/hwmon/w83791d
@@ -58,29 +58,35 @@ internal state that allows no clean access (Bank with ID register is not
currently selected). If you know the address of the chip, use a 'force'
parameter; this will put it into a more well-behaved state first.
-The driver implements three temperature sensors, five fan rotation speed
-sensors, and ten voltage sensors.
+The driver implements three temperature sensors, ten voltage sensors,
+five fan rotation speed sensors and manual PWM control of each fan.
Temperatures are measured in degrees Celsius and measurement resolution is 1
degC for temp1 and 0.5 degC for temp2 and temp3. An alarm is triggered when
the temperature gets higher than the Overtemperature Shutdown value; it stays
on until the temperature falls below the Hysteresis value.
+Voltage sensors (also known as IN sensors) report their values in millivolts.
+An alarm is triggered if the voltage has crossed a programmable minimum
+or maximum limit.
+
Fan rotation speeds are reported in RPM (rotations per minute). An alarm is
triggered if the rotation speed has dropped below a programmable limit. Fan
readings can be divided by a programmable divider (1, 2, 4, 8, 16,
32, 64 or 128 for all fans) to give the readings more range or accuracy.
-Voltage sensors (also known as IN sensors) report their values in millivolts.
-An alarm is triggered if the voltage has crossed a programmable minimum
-or maximum limit.
+Each fan is controlled by PWM. The PWM duty cycle can be read and
+set for each fan separately. Valid values range from 0 (stop) to 255 (full).
+PWM 1-3 support Thermal Cruise mode, in which the PWMs are automatically
+regulated to keep temp 1-3, respectively, at a certain target temperature.
+See below for the description of the sysfs-interface.
The w83791d has a global bit used to enable beeping from the speaker when an
alarm is triggered as well as a bitmask to enable or disable the beep for
specific alarms. You need both the global beep enable bit and the
corresponding beep bit to be on for a triggered alarm to sound a beep.
-The sysfs interface to the gloabal enable is via the sysfs beep_enable file.
+The sysfs interface to the global enable is via the sysfs beep_enable file.
This file is used for both legacy and new code.
The sysfs interface to the beep bitmask has migrated from the original legacy
@@ -105,6 +111,27 @@ going forward.
The driver reads the hardware chip values at most once every three seconds.
User mode code requesting values more often will receive cached values.
+/sys files
+----------
+The sysfs-interface is documented in the 'sysfs-interface' file. Only
+chip-specific options are documented here.
+
+pwm[1-3]_enable - this file controls the mode of fan/temperature control for
+ fan 1-3. Fan/PWM 4-5 only support manual mode.
+ * 1 Manual mode
+ * 2 Thermal Cruise mode
+ * 3 Fan Speed Cruise mode (no further support)
+
+temp[1-3]_target - defines the target temperature for Thermal Cruise mode.
+ Unit: millidegree Celsius
+ RW
+
+temp[1-3]_tolerance - temperature tolerance for Thermal Cruise mode.
+ Specifies an interval around the target temperature
+ in which the fan speed is not changed.
+ Unit: millidegree Celsius
+ RW
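+
+For example, to put fan 1 under Thermal Cruise control at 45 degrees C with
+a 2 degree tolerance, using the files above (the hwmon path is an assumption
+and depends on how the driver was bound on your system):
+
+	#include <stdio.h>
+
+	/* tiny helper: write one string to a sysfs attribute */
+	static int sysfs_write(const char *path, const char *val)
+	{
+		FILE *f = fopen(path, "w");
+
+		if (!f)
+			return -1;
+		fprintf(f, "%s\n", val);
+		return fclose(f);
+	}
+
+	int main(void)
+	{
+		const char *dir = "/sys/class/hwmon/hwmon0/device";
+		char path[128];
+
+		snprintf(path, sizeof(path), "%s/temp1_target", dir);
+		sysfs_write(path, "45000");	/* millidegree Celsius */
+		snprintf(path, sizeof(path), "%s/temp1_tolerance", dir);
+		sysfs_write(path, "2000");
+		snprintf(path, sizeof(path), "%s/pwm1_enable", dir);
+		sysfs_write(path, "2");		/* 2 = Thermal Cruise mode */
+		return 0;
+	}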
+
Alarms bitmap vs. beep_mask bitmask
------------------------------------
For legacy code using the alarms and beep_mask files:
@@ -132,7 +159,3 @@ tart2 : alarms: 0x020000 beep_mask: 0x080000 <== mismatch
tart3 : alarms: 0x040000 beep_mask: 0x100000 <== mismatch
case_open : alarms: 0x001000 beep_mask: 0x001000
global_enable: alarms: -------- beep_mask: 0x800000 (modified via beep_enable)
-
-W83791D TODO:
----------------
-Provide a patch for smart-fan control (still need appropriate motherboard/fans)
diff --git a/Documentation/i2c/busses/i2c-viapro b/Documentation/i2c/busses/i2c-viapro
index 1405fb69984..22efedf60c8 100644
--- a/Documentation/i2c/busses/i2c-viapro
+++ b/Documentation/i2c/busses/i2c-viapro
@@ -16,6 +16,9 @@ Supported adapters:
* VIA Technologies, Inc. CX700
Datasheet: available on request and under NDA from VIA
+ * VIA Technologies, Inc. VX800/VX820
+ Datasheet: available on http://linux.via.com.tw
+
Authors:
Kyösti Mälkki <kmalkki@cc.hut.fi>,
Mark D. Studebaker <mdsxyz123@yahoo.com>,
@@ -49,6 +52,7 @@ Your lspci -n listing must show one of these :
device 1106:3372 (VT8237S)
device 1106:3287 (VT8251)
device 1106:8324 (CX700)
+ device 1106:8353 (VX800/VX820)
If none of these show up, you should look in the BIOS for settings like
enable ACPI / SMBus or even USB.
@@ -57,5 +61,5 @@ Except for the oldest chips (VT82C596A/B, VT82C686A and most probably
VT8231), this driver supports I2C block transactions. Such transactions
are mainly useful to read from and write to EEPROMs.
-The CX700 additionally appears to support SMBus PEC, although this driver
-doesn't implement it yet.
+The CX700/VX800/VX820 additionally appear to support SMBus PEC, although
+this driver doesn't implement it yet.
diff --git a/Documentation/i2c/dev-interface b/Documentation/i2c/dev-interface
index 9dd79123ddd..3e742ba2553 100644
--- a/Documentation/i2c/dev-interface
+++ b/Documentation/i2c/dev-interface
@@ -4,6 +4,10 @@ the /dev interface. You need to load module i2c-dev for this.
Each registered i2c adapter gets a number, counting from 0. You can
examine /sys/class/i2c-dev/ to see what number corresponds to which adapter.
+Alternatively, you can run "i2cdetect -l" to obtain a formatted list of all
+i2c adapters present on your system at a given time. i2cdetect is part of
+the i2c-tools package.
+
I2C device files are character device files with major device number 89
and a minor device number corresponding to the number assigned as
explained above. They should be called "i2c-%d" (i2c-0, i2c-1, ...,
@@ -17,30 +21,34 @@ So let's say you want to access an i2c adapter from a C program. The
first thing to do is "#include <linux/i2c-dev.h>". Please note that
there are two files named "i2c-dev.h" out there, one is distributed
with the Linux kernel and is meant to be included from kernel
-driver code, the other one is distributed with lm_sensors and is
+driver code, the other one is distributed with i2c-tools and is
meant to be included from user-space programs. You obviously want
the second one here.
Now, you have to decide which adapter you want to access. You should
-inspect /sys/class/i2c-dev/ to decide this. Adapter numbers are assigned
-somewhat dynamically, so you can not even assume /dev/i2c-0 is the
-first adapter.
+inspect /sys/class/i2c-dev/ or run "i2cdetect -l" to decide this.
+Adapter numbers are assigned somewhat dynamically, so you can not
+assume much about them. They can even change from one boot to the next.
Next thing, open the device file, as follows:
+
int file;
int adapter_nr = 2; /* probably dynamically determined */
char filename[20];
- sprintf(filename,"/dev/i2c-%d",adapter_nr);
- if ((file = open(filename,O_RDWR)) < 0) {
+ snprintf(filename, 19, "/dev/i2c-%d", adapter_nr);
+ file = open(filename, O_RDWR);
+ if (file < 0) {
/* ERROR HANDLING; you can check errno to see what went wrong */
exit(1);
}
When you have opened the device, you must specify with what device
address you want to communicate:
+
int addr = 0x40; /* The I2C address */
- if (ioctl(file,I2C_SLAVE,addr) < 0) {
+
+ if (ioctl(file, I2C_SLAVE, addr) < 0) {
/* ERROR HANDLING; you can check errno to see what went wrong */
exit(1);
}
@@ -48,31 +56,41 @@ address you want to communicate:
Well, you are all set up now. You can now use SMBus commands or plain
I2C to communicate with your device. SMBus commands are preferred if
the device supports them. Both are illustrated below.
+
__u8 reg = 0x10; /* Device register to access */
__s32 res;
char buf[10];
+
/* Using SMBus commands */
- res = i2c_smbus_read_word_data(file,register);
+  res = i2c_smbus_read_word_data(file, reg);
if (res < 0) {
/* ERROR HANDLING: i2c transaction failed */
} else {
/* res contains the read word */
}
+
/* Using I2C Write, equivalent of
- i2c_smbus_write_word_data(file,register,0x6543) */
+     i2c_smbus_write_word_data(file, reg, 0x6543) */
buf[0] = reg;
buf[1] = 0x43;
buf[2] = 0x65;
- if ( write(file,buf,3) != 3) {
+  if (write(file, buf, 3) != 3) {
/* ERROR HANDLING: i2c transaction failed */
}
+
/* Using I2C Read, equivalent of i2c_smbus_read_byte(file) */
- if (read(file,buf,1) != 1) {
+ if (read(file, buf, 1) != 1) {
/* ERROR HANDLING: i2c transaction failed */
} else {
/* buf[0] contains the read byte */
}
+Note that only a subset of the I2C and SMBus protocols can be achieved by
+means of read() and write() calls. In particular, so-called combined
+transactions (mixing read and write messages in the same transaction)
+aren't supported. For this reason, this interface is almost never used by
+user-space programs.
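+
+Combined transactions can instead be performed with the I2C_RDWR ioctl
+described below. As a minimal sketch, reusing the file descriptor opened
+above (the 0x50 slave address and 1-byte register layout are assumptions;
+depending on your i2c-dev.h you may also need <linux/i2c.h> for struct
+i2c_msg and I2C_M_RD):
+
+	__u8 out = 0x00;			/* register to read from */
+	__u8 in[2];
+	struct i2c_msg msgs[2] = {
+		{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &out },
+		{ .addr = 0x50, .flags = I2C_M_RD, .len = 2, .buf = in   },
+	};
+	struct i2c_rdwr_ioctl_data msgset = {
+		.msgs  = msgs,
+		.nmsgs = 2,
+	};
+
+	if (ioctl(file, I2C_RDWR, &msgset) < 0) {
+		/* ERROR HANDLING: combined transaction failed */
+	}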
+
IMPORTANT: because of the use of inline functions, you *have* to use
'-O' or some variation when you compile your program!
@@ -80,31 +98,29 @@ IMPORTANT: because of the use of inline functions, you *have* to use
Full interface description
==========================
-The following IOCTLs are defined and fully supported
-(see also i2c-dev.h):
+The following IOCTLs are defined:
-ioctl(file,I2C_SLAVE,long addr)
+ioctl(file, I2C_SLAVE, long addr)
Change slave address. The address is passed in the 7 lower bits of the
argument (except for 10 bit addresses, passed in the 10 lower bits in this
case).
-ioctl(file,I2C_TENBIT,long select)
+ioctl(file, I2C_TENBIT, long select)
Selects ten bit addresses if select not equals 0, selects normal 7 bit
addresses if select equals 0. Default 0. This request is only valid
if the adapter has I2C_FUNC_10BIT_ADDR.
-ioctl(file,I2C_PEC,long select)
+ioctl(file, I2C_PEC, long select)
Selects SMBus PEC (packet error checking) generation and verification
if select not equals 0, disables if select equals 0. Default 0.
Used only for SMBus transactions. This request only has an effect if the
adapter has I2C_FUNC_SMBUS_PEC; it is still safe if not, it just
doesn't have any effect.
-ioctl(file,I2C_FUNCS,unsigned long *funcs)
+ioctl(file, I2C_FUNCS, unsigned long *funcs)
Gets the adapter functionality and puts it in *funcs.
-ioctl(file,I2C_RDWR,struct i2c_rdwr_ioctl_data *msgset)
-
+ioctl(file, I2C_RDWR, struct i2c_rdwr_ioctl_data *msgset)
Do combined read/write transaction without stop in between.
Only valid if the adapter has I2C_FUNC_I2C. The argument is
a pointer to a
@@ -120,10 +136,9 @@ ioctl(file,I2C_RDWR,struct i2c_rdwr_ioctl_data *msgset)
The slave address and whether to use ten bit address mode has to be
set in each message, overriding the values set with the above ioctl's.
-
-Other values are NOT supported at this moment, except for I2C_SMBUS,
-which you should never directly call; instead, use the access functions
-below.
+ioctl(file, I2C_SMBUS, struct i2c_smbus_ioctl_data *args)
+ Not meant to be called directly; instead, use the access functions
+ below.
You can do plain i2c transactions by using read(2) and write(2) calls.
You do not need to pass the address byte; instead, set it through
@@ -148,7 +163,52 @@ what happened. The 'write' transactions return 0 on success; the
returns the number of values read. The block buffers need not be longer
than 32 bytes.
-The above functions are all macros, that resolve to calls to the
-i2c_smbus_access function, that on its turn calls a specific ioctl
+The above functions are all inline functions that resolve to calls to
+the i2c_smbus_access function, which in turn calls a specific ioctl
with the data in a specific format. Read the source code if you
want to know what happens behind the scenes.
+
+
+Implementation details
+======================
+
+For the interested, here's the code flow which happens inside the kernel
+when you use the /dev interface to I2C:
+
+1* Your program opens /dev/i2c-N and calls ioctl() on it, as described in
+section "C example" above.
+
+2* These open() and ioctl() calls are handled by the i2c-dev kernel
+driver: see i2c-dev.c:i2cdev_open() and i2c-dev.c:i2cdev_ioctl(),
+respectively. You can think of i2c-dev as a generic I2C chip driver
+that can be programmed from user-space.
+
+3* Some ioctl() calls are for administrative tasks and are handled by
+i2c-dev directly. Examples include I2C_SLAVE (set the address of the
+device you want to access) and I2C_PEC (enable or disable SMBus error
+checking on future transactions.)
+
+4* Other ioctl() calls are converted to in-kernel function calls by
+i2c-dev. Examples include I2C_FUNCS, which queries the I2C adapter
+functionality using i2c.h:i2c_get_functionality(), and I2C_SMBUS, which
+performs an SMBus transaction using i2c-core.c:i2c_smbus_xfer().
+
+The i2c-dev driver is responsible for checking all the parameters that
+come from user-space for validity. After this point, there is no
+difference between these calls that came from user-space through i2c-dev
+and calls that would have been performed by kernel I2C chip drivers
+directly. This means that I2C bus drivers don't need to implement
+anything special to support access from user-space.
+
+5* These i2c-core.c/i2c.h functions are wrappers to the actual
+implementation of your I2C bus driver. Each adapter must declare
+callback functions implementing these standard calls.
+i2c.h:i2c_get_functionality() calls i2c_adapter.algo->functionality(),
+while i2c-core.c:i2c_smbus_xfer() calls either
+adapter.algo->smbus_xfer() if it is implemented, or if not,
+i2c-core.c:i2c_smbus_xfer_emulated() which in turn calls
+i2c_adapter.algo->master_xfer().
+
+After your I2C bus driver has processed these requests, execution runs
+back up the call chain, with almost no processing done, except by i2c-dev
+to package the returned data, if any, in a suitable format for the ioctl.
diff --git a/Documentation/i2c/smbus-protocol b/Documentation/i2c/smbus-protocol
index 24bfb65da17..9df47441f0e 100644
--- a/Documentation/i2c/smbus-protocol
+++ b/Documentation/i2c/smbus-protocol
@@ -109,8 +109,8 @@ specified through the Comm byte.
S Addr Wr [A] Comm [A] DataLow [A] DataHigh [A] P
-SMBus Process Call
-==================
+SMBus Process Call: i2c_smbus_process_call()
+=============================================
This command selects a device register (through the Comm byte), sends
16 bits of data to it, and reads 16 bits of data in return.
diff --git a/Documentation/i2c/writing-clients b/Documentation/i2c/writing-clients
index 6b61b3a2e90..d73ee117a8c 100644
--- a/Documentation/i2c/writing-clients
+++ b/Documentation/i2c/writing-clients
@@ -606,6 +606,8 @@ SMBus communication
extern s32 i2c_smbus_read_word_data(struct i2c_client * client, u8 command);
extern s32 i2c_smbus_write_word_data(struct i2c_client * client,
u8 command, u16 value);
+ extern s32 i2c_smbus_process_call(struct i2c_client *client,
+ u8 command, u16 value);
extern s32 i2c_smbus_read_block_data(struct i2c_client * client,
u8 command, u8 *values);
extern s32 i2c_smbus_write_block_data(struct i2c_client * client,
@@ -621,8 +623,6 @@ These ones were removed from i2c-core because they had no users, but could
be added back later if needed:
extern s32 i2c_smbus_write_quick(struct i2c_client * client, u8 value);
- extern s32 i2c_smbus_process_call(struct i2c_client * client,
- u8 command, u16 value);
extern s32 i2c_smbus_block_process_call(struct i2c_client *client,
u8 command, u8 length,
u8 *values)
diff --git a/Documentation/ia64/kvm.txt b/Documentation/ia64/kvm.txt
index 914d07f4926..84f7cb3d5be 100644
--- a/Documentation/ia64/kvm.txt
+++ b/Documentation/ia64/kvm.txt
@@ -1,7 +1,8 @@
-Currently, kvm module in EXPERIMENTAL stage on IA64. This means that
-interfaces are not stable enough to use. So, plase had better don't run
-critical applications in virtual machine. We will try our best to make it
-strong in future versions!
+Currently, the kvm module is in EXPERIMENTAL stage on IA64. This means that
+interfaces are not stable enough to use. So, please don't run critical
+applications in a virtual machine.
+We will try our best to improve it in future versions!
+
Guide: How to boot up guests on kvm/ia64
This guide is to describe how to enable kvm support for IA-64 systems.
diff --git a/Documentation/ioctl-number.txt b/Documentation/ioctl-number.txt
index 1c6b545635a..b880ce5dbd3 100644
--- a/Documentation/ioctl-number.txt
+++ b/Documentation/ioctl-number.txt
@@ -92,6 +92,7 @@ Code Seq# Include File Comments
'J' 00-1F drivers/scsi/gdth_ioctl.h
'K' all linux/kd.h
'L' 00-1F linux/loop.h
+'L'	20-2F	drivers/usb/misc/vstusb.h
'L' E0-FF linux/ppdd.h encrypted disk device driver
<http://linux01.gwdg.de/~alatham/ppdd.html>
'M' all linux/soundcard.h
@@ -110,6 +111,8 @@ Code Seq# Include File Comments
'W' 00-1F linux/wanrouter.h conflict!
'X' all linux/xfs_fs.h
'Y' all linux/cyclades.h
+'[' 00-07 linux/usb/usbtmc.h USB Test and Measurement Devices
+ <mailto:gregkh@suse.de>
'a' all ATM on linux
<http://lrcwww.epfl.ch/linux-atm/magic.html>
'b' 00-FF bit3 vme host bridge
diff --git a/Documentation/kernel-doc-nano-HOWTO.txt b/Documentation/kernel-doc-nano-HOWTO.txt
index 0bd32748a46..c6841eee959 100644
--- a/Documentation/kernel-doc-nano-HOWTO.txt
+++ b/Documentation/kernel-doc-nano-HOWTO.txt
@@ -168,10 +168,10 @@ if ($#ARGV < 0) {
mkdir $ARGV[0],0777;
$state = 0;
while (<STDIN>) {
- if (/^\.TH \"[^\"]*\" 4 \"([^\"]*)\"/) {
+ if (/^\.TH \"[^\"]*\" 9 \"([^\"]*)\"/) {
if ($state == 1) { close OUT }
$state = 1;
- $fn = "$ARGV[0]/$1.4";
+ $fn = "$ARGV[0]/$1.9";
print STDERR "Creating $fn\n";
open OUT, ">$fn" or die "can't open $fn: $!\n";
print OUT $_;
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 533199bdb92..343e0f0f84b 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -101,6 +101,7 @@ parameter is applicable:
X86-64 X86-64 architecture is enabled.
More X86-64 boot options can be found in
Documentation/x86_64/boot-options.txt .
+ X86 Either 32bit or 64bit x86 (same as X86-32+X86-64)
In addition, the following text indicates that the option:
@@ -311,6 +312,11 @@ and is between 256 and 4096 characters. It is defined in the file
isolate - enable device isolation (each device, as far
as possible, will get its own protection
domain)
+			fullflush - enable flushing of IO/TLB entries when
+				    they are unmapped. Otherwise they are
+				    flushed before they are reused, which
+				    is a lot faster
+
amd_iommu_size= [HW,X86-64]
Define the size of the aperture for the AMD IOMMU
driver. Possible values are:
@@ -490,12 +496,6 @@ and is between 256 and 4096 characters. It is defined in the file
Range: 0 - 8192
Default: 64
- disable_8254_timer
- enable_8254_timer
- [IA32/X86_64] Disable/Enable interrupt 0 timer routing
- over the 8254 in addition to over the IO-APIC. The
- kernel tries to set a sensible default.
-
hpet= [X86-32,HPET] option to control HPET usage
Format: { enable (default) | disable | force }
disable: disable HPET and use PIT instead
@@ -686,11 +686,12 @@ and is between 256 and 4096 characters. It is defined in the file
earlyprintk= [X86-32,X86-64,SH,BLACKFIN]
earlyprintk=vga
earlyprintk=serial[,ttySn[,baudrate]]
+ earlyprintk=dbgp
Append ",keep" to not disable it when the real console
takes over.
- Only vga or serial at a time, not both.
+ Only vga or serial or usb debug port at a time.
Currently only ttyS0 and ttyS1 are supported.
@@ -717,7 +718,7 @@ and is between 256 and 4096 characters. It is defined in the file
See Documentation/block/as-iosched.txt and
Documentation/block/deadline-iosched.txt for details.
- elfcorehdr= [X86-32, X86_64]
+ elfcorehdr= [IA64,PPC,SH,X86-32,X86_64]
Specifies physical address of start of kernel core
image elf header. Generally kexec loader will
pass this option to capture kernel.
@@ -823,6 +824,9 @@ and is between 256 and 4096 characters. It is defined in the file
Defaults to the default architecture's huge page size
if not specified.
+ hlt [BUGS=ARM,SH]
+
+ i8042.debug [HW] Toggle i8042 debug mode
i8042.direct [HW] Put keyboard port into non-translated mode
i8042.dumbkbd [HW] Pretend that controller can only read data from
keyboard and cannot control its state
@@ -1047,6 +1051,10 @@ and is between 256 and 4096 characters. It is defined in the file
(only serial supported for now)
Format: <serial_device>[,baud]
+ kmac= [MIPS] korina ethernet MAC address.
+ Configure the RouterBoard 532 series on-chip
+ Ethernet adapter MAC address.
+
l2cr= [PPC]
l3cr= [PPC]
@@ -1233,6 +1241,10 @@ and is between 256 and 4096 characters. It is defined in the file
mem=nopentium [BUGS=X86-32] Disable usage of 4MB pages for kernel
memory.
+ memchunk=nn[KMG]
+ [KNL,SH] Allow user to override the default size for
+ per-device physically contiguous DMA buffers.
+
memmap=exactmap [KNL,X86-32,X86_64] Enable setting of an exact
E820 memory map, as specified by the user.
Such memmap=exactmap lines can be constructed based on
@@ -1255,6 +1267,29 @@ and is between 256 and 4096 characters. It is defined in the file
or
memmap=0x10000$0x18690000
+ memory_corruption_check=0/1 [X86]
+ Some BIOSes seem to corrupt the first 64k of
+ memory when doing things like suspend/resume.
+ Setting this option will scan the memory
+ looking for corruption. Enabling this will
+ both detect corruption and prevent the kernel
+			However, it is intended as a diagnostic tool; if
+ However, its intended as a diagnostic tool; if
+ repeatable BIOS-originated corruption always
+ affects the same memory, you can use memmap=
+ to prevent the kernel from using that memory.
+
+ memory_corruption_check_size=size [X86]
+ By default it checks for corruption in the low
+ 64k, making this memory unavailable for normal
+ use. Use this parameter to scan for
+ corruption in more or less memory.
+
+ memory_corruption_check_period=seconds [X86]
+ By default it checks for corruption every 60
+ seconds. Use this parameter to check at some
+ other rate. 0 disables periodic checking.
+
memtest= [KNL,X86] Enable memtest
Format: <integer>
range: 0,4 : pattern number
@@ -1392,6 +1427,8 @@ and is between 256 and 4096 characters. It is defined in the file
nodisconnect [HW,SCSI,M68K] Disables SCSI disconnects.
+ nodsp [SH] Disable hardware DSP at boot time.
+
noefi [X86-32,X86-64] Disable EFI runtime services support.
noexec [IA-64]
@@ -1408,13 +1445,15 @@ and is between 256 and 4096 characters. It is defined in the file
noexec32=off: disable non-executable mappings
read implies executable mappings
+ nofpu [SH] Disable hardware FPU at boot time.
+
nofxsr [BUGS=X86-32] Disables x86 floating point extended
register save and restore. The kernel will only save
legacy floating-point registers on task switch.
noclflush [BUGS=X86] Don't use the CLFLUSH instruction
- nohlt [BUGS=ARM]
+ nohlt [BUGS=ARM,SH]
no-hlt [BUGS=X86-32] Tells the kernel that the hlt
instruction doesn't work correctly and not to
@@ -1452,6 +1491,12 @@ and is between 256 and 4096 characters. It is defined in the file
nolapic_timer [X86-32,APIC] Do not use the local APIC timer.
+ nox2apic [X86-64,APIC] Do not enable x2APIC mode.
+
+ x2apic_phys [X86-64,APIC] Use x2apic physical mode instead of
+ default x2apic cluster mode on platforms
+ supporting x2apic.
+
noltlbs [PPC] Do not use large page/tlb entries for kernel
lowmem mapping on PPC40x.
@@ -1571,7 +1616,7 @@ and is between 256 and 4096 characters. It is defined in the file
See also Documentation/paride.txt.
pci=option[,option...] [PCI] various PCI subsystem options:
- off [X86-32] don't probe for the PCI bus
+ off [X86] don't probe for the PCI bus
bios [X86-32] force use of PCI BIOS, don't access
the hardware directly. Use this if your machine
has a non-standard PCI host bridge.
@@ -1579,9 +1624,9 @@ and is between 256 and 4096 characters. It is defined in the file
hardware access methods are allowed. Use this
if you experience crashes upon bootup and you
suspect they are caused by the BIOS.
- conf1 [X86-32] Force use of PCI Configuration
+ conf1 [X86] Force use of PCI Configuration
Mechanism 1.
- conf2 [X86-32] Force use of PCI Configuration
+ conf2 [X86] Force use of PCI Configuration
Mechanism 2.
noaer [PCIE] If the PCIEAER kernel config parameter is
enabled, this kernel boot option can be used to
@@ -1601,37 +1646,37 @@ and is between 256 and 4096 characters. It is defined in the file
this option if the kernel is unable to allocate
IRQs or discover secondary PCI buses on your
motherboard.
- rom [X86-32] Assign address space to expansion ROMs.
+ rom [X86] Assign address space to expansion ROMs.
Use with caution as certain devices share
address decoders between ROMs and other
resources.
- norom [X86-32,X86_64] Do not assign address space to
+ norom [X86] Do not assign address space to
expansion ROMs that do not already have
BIOS assigned address ranges.
- irqmask=0xMMMM [X86-32] Set a bit mask of IRQs allowed to be
+ irqmask=0xMMMM [X86] Set a bit mask of IRQs allowed to be
assigned automatically to PCI devices. You can
make the kernel exclude IRQs of your ISA cards
this way.
- pirqaddr=0xAAAAA [X86-32] Specify the physical address
+ pirqaddr=0xAAAAA [X86] Specify the physical address
of the PIRQ table (normally generated
by the BIOS) if it is outside the
F0000h-100000h range.
- lastbus=N [X86-32] Scan all buses thru bus #N. Can be
+ lastbus=N [X86] Scan all buses thru bus #N. Can be
useful if the kernel is unable to find your
secondary buses and you want to tell it
explicitly which ones they are.
- assign-busses [X86-32] Always assign all PCI bus
+ assign-busses [X86] Always assign all PCI bus
numbers ourselves, overriding
whatever the firmware may have done.
- usepirqmask [X86-32] Honor the possible IRQ mask stored
+ usepirqmask [X86] Honor the possible IRQ mask stored
in the BIOS $PIR table. This is needed on
some systems with broken BIOSes, notably
some HP Pavilion N5400 and Omnibook XE3
notebooks. This will have no effect if ACPI
IRQ routing is enabled.
- noacpi [X86-32] Do not use ACPI for IRQ routing
+ noacpi [X86] Do not use ACPI for IRQ routing
or for PCI scanning.
- use_crs [X86-32] Use _CRS for PCI resource
+ use_crs [X86] Use _CRS for PCI resource
allocation.
routeirq Do IRQ routing for all PCI devices.
This is normally done in pci_enable_device(),
@@ -1660,6 +1705,12 @@ and is between 256 and 4096 characters. It is defined in the file
reserved for the CardBus bridge's memory
window. The default value is 64 megabytes.
+ pcie_aspm= [PCIE] Forcibly enable or disable PCIe Active State Power
+ Management.
+ off Disable ASPM.
+ force Enable ASPM even on devices that claim not to support it.
+ WARNING: Forcing ASPM on may cause system lockups.
+
pcmv= [HW,PCMCIA] BadgePAD 4
pd. [PARIDE]
@@ -1711,6 +1762,11 @@ and is between 256 and 4096 characters. It is defined in the file
autoconfiguration.
Ranges are in pairs (memory base and size).
+ dynamic_printk
+ Enables pr_debug()/dev_dbg() calls if
+ CONFIG_DYNAMIC_PRINTK_DEBUG has been enabled. These can also
+			be switched on/off via <debugfs>/dynamic_printk/modules.
+
print-fatal-signals=
[KNL] debug: print fatal signals
print-fatal-signals=1: print segfault info to
@@ -1913,6 +1969,12 @@ and is between 256 and 4096 characters. It is defined in the file
shapers= [NET]
Maximal number of shapers.
+ show_msr= [x86] show boot-time MSR settings
+ Format: { <integer> }
+ Show boot-time (BIOS-initialized) MSR settings.
+			The parameter is the number of CPUs to show;
+			for example, 1 means the boot CPU only.
+
sim710= [SCSI,HW]
See header of drivers/scsi/sim710.c.
@@ -2239,6 +2301,25 @@ and is between 256 and 4096 characters. It is defined in the file
autosuspended. Devices for which the delay is set
to a negative value won't be autosuspended at all.
+ usbcore.usbfs_snoop=
+ [USB] Set to log all usbfs traffic (default 0 = off).
+
+ usbcore.blinkenlights=
+			[USB] Set to cycle LEDs on hubs (default 0 = off).
+
+ usbcore.old_scheme_first=
+ [USB] Start with the old device initialization
+ scheme (default 0 = off).
+
+ usbcore.use_both_schemes=
+ [USB] Try the other device initialization scheme
+ if the first one fails (default 1 = enabled).
+
+ usbcore.initial_descriptor_timeout=
+ [USB] Specifies timeout for the initial 64-byte
+ USB_REQ_GET_DESCRIPTOR request in milliseconds
+ (default 5000 = 5.0 seconds).
+
usbhid.mousepoll=
[USBHID] The interval which mice are to be polled at.
diff --git a/Documentation/kobject.txt b/Documentation/kobject.txt
index 51a8021ee53..f5d2aad65a6 100644
--- a/Documentation/kobject.txt
+++ b/Documentation/kobject.txt
@@ -118,6 +118,10 @@ the name of the kobject, call kobject_rename():
int kobject_rename(struct kobject *kobj, const char *new_name);
+Note that kobject_rename() does not perform any locking and has no solid
+notion of what names are valid, so the caller must provide its own sanity
+checking and serialization.
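+
+For illustration only, a hedged sketch of caller-side serialization: the
+lock, the name check and the wrapper below are inventions of this example,
+not part of the kobject API.
+
+    /* assumes <linux/kobject.h> and <linux/mutex.h> */
+    static DEFINE_MUTEX(my_rename_lock);             /* caller-provided lock */
+
+    static int my_kobject_rename(struct kobject *kobj, const char *new_name)
+    {
+            int ret;
+
+            if (!new_name || !*new_name)              /* caller-provided check */
+                    return -EINVAL;
+
+            mutex_lock(&my_rename_lock);
+            ret = kobject_rename(kobj, new_name);
+            mutex_unlock(&my_rename_lock);
+            return ret;
+    }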
+
There is a function called kobject_set_name() but that is legacy cruft and
is being removed. If your code needs to call this function, it is
incorrect and needs to be fixed.
diff --git a/Documentation/laptops/disk-shock-protection.txt b/Documentation/laptops/disk-shock-protection.txt
new file mode 100644
index 00000000000..0e6ba266383
--- /dev/null
+++ b/Documentation/laptops/disk-shock-protection.txt
@@ -0,0 +1,149 @@
+Hard disk shock protection
+==========================
+
+Author: Elias Oltmanns <eo@nebensachen.de>
+Last modified: 2008-10-03
+
+
+0. Contents
+-----------
+
+1. Intro
+2. The interface
+3. References
+4. CREDITS
+
+
+1. Intro
+--------
+
+ATA/ATAPI-7 specifies the IDLE IMMEDIATE command with unload feature.
+Issuing this command should cause the drive to switch to idle mode and
+unload disk heads. This feature is being used in modern laptops in
+conjunction with accelerometers and appropriate software to implement
+a shock protection facility. The idea is to stop all I/O operations on
+the internal hard drive and park its heads on the ramp when critical
+situations are anticipated. The desire to have such a feature
+available on GNU/Linux systems has been the original motivation to
+implement a generic disk head parking interface in the Linux kernel.
+Please note, however, that other components have to be set up on your
+system in order to get disk shock protection working (see
+section 3. References below for pointers to more information about
+that).
+
+
+2. The interface
+----------------
+
+For each ATA device, the kernel exports the file
+block/*/device/unload_heads in sysfs (here assumed to be mounted under
+/sys). Access to /sys/block/*/device/unload_heads is denied with
+-EOPNOTSUPP if the device does not support the unload feature.
+Otherwise, writing an integer value to this file will take the heads
+of the respective drive off the platter and block all I/O operations
+for the specified number of milliseconds. When the timeout expires and
+no further disk head park request has been issued in the meantime,
+normal operation will be resumed. The maximal value accepted for a
+timeout is 30000 milliseconds. Exceeding this limit will return
+-EOVERFLOW, but heads will be parked anyway and the timeout will be
+set to 30 seconds. However, you can always change a timeout to any
+value between 0 and 30000 by issuing a subsequent head park request
+before the timeout of the previous one has expired. In particular, the
+total timeout can exceed 30 seconds and, more importantly, you can
+cancel a previously set timeout and resume normal operation
+immediately by specifying a timeout of 0. Values below -2 are rejected
+with -EINVAL (see below for the special meaning of -1 and -2). If the
+timeout specified for a recent head park request has not yet expired,
+reading from /sys/block/*/device/unload_heads will report the number
+of milliseconds remaining until normal operation will be resumed;
+otherwise, reading the unload_heads attribute will return 0.
+
+For example, do the following in order to park the heads of drive
+/dev/sda and stop all I/O operations for five seconds:
+
+# echo 5000 > /sys/block/sda/device/unload_heads
+
+A simple
+
+# cat /sys/block/sda/device/unload_heads
+
+will show you how many milliseconds are left before normal operation
+will be resumed.
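+
+The same request can be issued from C. A minimal hedged sketch (the device
+path is just the example used above, and error handling is reduced to the
+bare minimum):
+
+    #include <stdio.h>
+    #include <stdlib.h>
+
+    int main(void)
+    {
+            /* park the heads of /dev/sda for five seconds */
+            FILE *f = fopen("/sys/block/sda/device/unload_heads", "w");
+
+            if (!f)
+                    return EXIT_FAILURE;
+            fprintf(f, "5000\n");
+            /* errors such as -EOPNOTSUPP or -EOVERFLOW surface on close,
+             * because stdio buffers the write */
+            if (fclose(f) != 0)
+                    return EXIT_FAILURE;
+            return EXIT_SUCCESS;
+    }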
+
+A word of caution: The fact that the interface operates on a basis of
+milliseconds may raise expectations that cannot be satisfied in
+reality. In fact, the ATA specs clearly state that the time for an
+unload operation to complete is vendor specific. The hint in ATA-7
+that this will typically be within 500 milliseconds apparently has
+been dropped in ATA-8.
+
+There is a technical detail of this implementation that may cause some
+confusion and should be discussed here. When a head park request has
+been issued to a device successfully, all I/O operations on the
+controller port this device is attached to will be deferred. That is
+to say, any other device that may be connected to the same port will
+be affected too. The only exception is that a subsequent head unload
+request to that other device will be executed immediately. Further
+operations on that port will be deferred until the timeout specified
+for either device on the port has expired. As far as PATA (old style
+IDE) configurations are concerned, there can only be two devices
+attached to any single port. In the SATA world we have port multipliers
+which means that a user-issued head parking request to one device may
+actually result in stopping I/O to a whole bunch of devices. However,
+since this feature is supposed to be used on laptops and does not seem
+to be very useful in any other environment, there will be mostly one
+device per port. Even if the CD/DVD writer happens to be connected to
+the same port as the hard drive, it generally *should* recover just
+fine from the occasional buffer under-run incurred by a head park
+request to the HD. Actually, when you are using an ide driver rather
+than its libata counterpart (i.e. your disk is called /dev/hda
+instead of /dev/sda), then parking the heads of one drive (drive X)
+will generally not affect the mode of operation of another drive
+(drive Y) on the same port as described above. It is only when a port
+reset is required to recover from an exception on drive Y that further
+I/O operations on that drive (and the reset itself) will be delayed
+until drive X is no longer in the parked state.
+
+Finally, there are some hard drives that only comply with an earlier
+version of the ATA standard than ATA-7, but do support the unload
+feature nonetheless. Unfortunately, there is no safe way Linux can
+detect these devices, so you won't be able to write to the
+unload_heads attribute. If you know that your device really does
+support the unload feature (for instance, because the vendor of your
+laptop or the hard drive itself told you so), then you can tell the
+kernel to enable the usage of this feature for that drive by writing
+the special value -1 to the unload_heads attribute:
+
+# echo -1 > /sys/block/sda/device/unload_heads
+
+will enable the feature for /dev/sda, and giving -2 instead of -1 will
+disable it again.
+
+
+3. References
+-------------
+
+There are several laptops from different vendors featuring shock
+protection capabilities. As manufacturers have refused to support open
+source development of the required software components so far, Linux
+support for shock protection varies considerably between different
+hardware implementations. Ideally, this section should contain a list
+of pointers at different projects aiming at an implementation of shock
+protection on different systems. Unfortunately, I only know of a
+single project which, although still considered experimental, is fit
+for use. Please feel free to add projects that have been the victims
+of my ignorance.
+
+- http://www.thinkwiki.org/wiki/HDAPS
+ See this page for information about Linux support of the hard disk
+ active protection system as implemented in IBM/Lenovo Thinkpads.
+
+
+4. CREDITS
+----------
+
+This implementation of disk head parking has been inspired by a patch
+originally published by Jon Escombe <lists@dresco.co.uk>. My efforts
+to develop an implementation of this feature that is fit to be merged
+into mainline have been aided by various kernel developers, in
+particular by Tejun Heo and Bartlomiej Zolnierkiewicz.
diff --git a/Documentation/markers.txt b/Documentation/markers.txt
index d9f50a19fa0..089f6138fcd 100644
--- a/Documentation/markers.txt
+++ b/Documentation/markers.txt
@@ -50,10 +50,12 @@ Connecting a function (probe) to a marker is done by providing a probe (function
to call) for the specific marker through marker_probe_register() and can be
activated by calling marker_arm(). Marker deactivation can be done by calling
marker_disarm() as many times as marker_arm() has been called. Removing a probe
-is done through marker_probe_unregister(); it will disarm the probe and make
-sure there is no caller left using the probe when it returns. Probe removal is
-preempt-safe because preemption is disabled around the probe call. See the
-"Probe example" section below for a sample probe module.
+is done through marker_probe_unregister(); it will disarm the probe.
+marker_synchronize_unregister() must be called before the end of the module exit
+function to make sure there is no caller left using the probe. This, and the
+fact that preemption is disabled around the probe call, make sure that probe
+removal and module unload are safe. See the "Probe example" section below for a
+sample probe module.
The marker mechanism supports inserting multiple instances of the same marker.
Markers can be put in inline functions, inlined static functions, and
diff --git a/Documentation/mtd/nand_ecc.txt b/Documentation/mtd/nand_ecc.txt
new file mode 100644
index 00000000000..bdf93b7f0f2
--- /dev/null
+++ b/Documentation/mtd/nand_ecc.txt
@@ -0,0 +1,714 @@
+Introduction
+============
+
+Having looked at the linux mtd/nand driver and more specifically at nand_ecc.c
+I felt there was room for optimisation. I bashed the code for a few hours
+performing tricks like table lookup, removing superfluous code, etc.
+After that the speed was increased by 35-40%.
+Still I was not too happy as I felt there was additional room for improvement.
+
+Bad! I was hooked.
+I decided to annotate my steps in this file. Perhaps it is useful to someone
+or someone learns something from it.
+
+
+The problem
+===========
+
+NAND flash (at least SLC one) typically has sectors of 256 bytes.
+However NAND flash is not extremely reliable so some error detection
+(and sometimes correction) is needed.
+
+This is done by means of a Hamming code. I'll try to explain it in
+layman's terms (and apologies to all the pros in the field in case I do
+not use the right terminology, my coding theory class was almost 30
+years ago, and I must admit it was not one of my favourites).
+
+As I said before the ecc calculation is performed on sectors of 256
+bytes. This is done by calculating several parity bits over the rows and
+columns. The parity used is even parity, which means that the parity bit is 1
+if the number of 1 bits in the data over which the parity is calculated is odd,
+and 0 if that number is even. So the total number of 1 bits in the data over
+which the parity is calculated plus the parity bit is always even. (See
+wikipedia if you can't follow this.)
+Parity is often calculated by means of an exclusive or operation,
+sometimes also referred to as xor. In C the operator for xor is ^
+
+Back to ecc.
+Let's give a small figure:
+
+byte 0: bit7 bit6 bit5 bit4 bit3 bit2 bit1 bit0 rp0 rp2 rp4 ... rp14
+byte 1: bit7 bit6 bit5 bit4 bit3 bit2 bit1 bit0 rp1 rp2 rp4 ... rp14
+byte 2: bit7 bit6 bit5 bit4 bit3 bit2 bit1 bit0 rp0 rp3 rp4 ... rp14
+byte 3: bit7 bit6 bit5 bit4 bit3 bit2 bit1 bit0 rp1 rp3 rp4 ... rp14
+byte 4: bit7 bit6 bit5 bit4 bit3 bit2 bit1 bit0 rp0 rp2 rp5 ... rp14
+....
+byte 254: bit7 bit6 bit5 bit4 bit3 bit2 bit1 bit0 rp0 rp3 rp5 ... rp15
+byte 255: bit7 bit6 bit5 bit4 bit3 bit2 bit1 bit0 rp1 rp3 rp5 ... rp15
+ cp1 cp0 cp1 cp0 cp1 cp0 cp1 cp0
+ cp3 cp3 cp2 cp2 cp3 cp3 cp2 cp2
+ cp5 cp5 cp5 cp5 cp4 cp4 cp4 cp4
+
+This figure represents a sector of 256 bytes.
+cp is my abbreviation for column parity, rp for row parity.
+
+Let's start to explain column parity.
+cp0 is the parity that belongs to all bit0, bit2, bit4, bit6.
+so the sum of all bit0, bit2, bit4 and bit6 values + cp0 itself is even.
+Similarly cp1 is the sum of all bit1, bit3, bit5 and bit7.
+cp2 is the parity over bit0, bit1, bit4 and bit5
+cp3 is the parity over bit2, bit3, bit6 and bit7.
+cp4 is the parity over bit0, bit1, bit2 and bit3.
+cp5 is the parity over bit4, bit5, bit6 and bit7.
+Note that each of cp0 .. cp5 is exactly one bit.
+
+Row parity actually works almost the same.
+rp0 is the parity of all even bytes (0, 2, 4, 6, ... 252, 254)
+rp1 is the parity of all odd bytes (1, 3, 5, 7, ..., 253, 255)
+rp2 is the parity of all bytes 0, 1, 4, 5, 8, 9, ...
+(so handle two bytes, then skip 2 bytes).
+rp3 covers the half rp2 does not cover (bytes 2, 3, 6, 7, 10, 11, ...)
+for rp4 the rule is cover 4 bytes, skip 4 bytes, cover 4 bytes, skip 4 etc.
+so rp4 calculates parity over bytes 0, 1, 2, 3, 8, 9, 10, 11, 16, ...
+and rp5 covers the other half, so bytes 4, 5, 6, 7, 12, 13, 14, 15, 20, ..
+The story now becomes quite boring. I guess you get the idea.
+rp6 covers 8 bytes then skips 8 etc
+rp7 skips 8 bytes then covers 8 etc
+rp8 covers 16 bytes then skips 16 etc
+rp9 skips 16 bytes then covers 16 etc
+rp10 covers 32 bytes then skips 32 etc
+rp11 skips 32 bytes then covers 32 etc
+rp12 covers 64 bytes then skips 64 etc
+rp13 skips 64 bytes then covers 64 etc
+rp14 covers 128 bytes then skips 128
+rp15 skips 128 bytes then covers 128
+
+In the end the parity bits are grouped together in three bytes as
+follows:
+ECC Bit 7 Bit 6 Bit 5 Bit 4 Bit 3 Bit 2 Bit 1 Bit 0
+ECC 0 rp07 rp06 rp05 rp04 rp03 rp02 rp01 rp00
+ECC 1 rp15 rp14 rp13 rp12 rp11 rp10 rp09 rp08
+ECC 2 cp5 cp4 cp3 cp2 cp1 cp0 1 1
+
+I detected after writing this that ST application note AN1823
+(http://www.st.com/stonline/books/pdf/docs/10123.pdf) gives a much
+nicer picture (but they use the term line parity where I use row parity).
+Oh well, I'm graphically challenged, so suffer with me for a moment :-)
+And I could not reuse the ST picture anyway for copyright reasons.
+
+
+Attempt 0
+=========
+
+Implementing the parity calculation is pretty simple.
+In C pseudocode:
+for (i = 0; i < 256; i++)
+{
+ if (i & 0x01)
+ rp1 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp1;
+ else
+ rp0 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp0;
+ if (i & 0x02)
+ rp3 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp3;
+ else
+ rp2 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp2;
+ if (i & 0x04)
+ rp5 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp5;
+ else
+ rp4 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp4;
+ if (i & 0x08)
+ rp7 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp7;
+ else
+ rp6 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp6;
+ if (i & 0x10)
+ rp9 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp9;
+ else
+ rp8 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp8;
+ if (i & 0x20)
+ rp11 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp11;
+ else
+ rp10 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp10;
+ if (i & 0x40)
+ rp13 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp13;
+ else
+ rp12 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp12;
+ if (i & 0x80)
+ rp15 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp15;
+ else
+ rp14 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp14;
+ cp0 = bit6 ^ bit4 ^ bit2 ^ bit0 ^ cp0;
+ cp1 = bit7 ^ bit5 ^ bit3 ^ bit1 ^ cp1;
+ cp2 = bit5 ^ bit4 ^ bit1 ^ bit0 ^ cp2;
+ cp3 = bit7 ^ bit6 ^ bit3 ^ bit2 ^ cp3;
+ cp4 = bit3 ^ bit2 ^ bit1 ^ bit0 ^ cp4;
+ cp5 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ cp5;
+}
+
+
+Analysis 0
+==========
+
+C does have bitwise operators but not really operators to do the above
+efficiently (and most hardware has no such instructions either).
+Therefore without implementing this it was clear that the code above was
+not going to bring me a Nobel prize :-)
+
+Fortunately the exclusive or operation is commutative, so we can combine
+the values in any order. So instead of calculating all the bits
+individually, let us try to rearrange things.
+For the column parity this is easy. We can just xor the bytes and in the
+end filter out the relevant bits. This is pretty nice as it will bring
+all cp calculation out of the loop.
+
+Similarly we can first xor the bytes for the various rows.
+This leads to:
+
+
+Attempt 1
+=========
+
+const char parity[256] = {
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0
+};
+
+void ecc1(const unsigned char *buf, unsigned char *code)
+{
+ int i;
+ const unsigned char *bp = buf;
+ unsigned char cur;
+ unsigned char rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7;
+ unsigned char rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15;
+ unsigned char par;
+
+ par = 0;
+ rp0 = 0; rp1 = 0; rp2 = 0; rp3 = 0;
+ rp4 = 0; rp5 = 0; rp6 = 0; rp7 = 0;
+ rp8 = 0; rp9 = 0; rp10 = 0; rp11 = 0;
+ rp12 = 0; rp13 = 0; rp14 = 0; rp15 = 0;
+
+ for (i = 0; i < 256; i++)
+ {
+ cur = *bp++;
+ par ^= cur;
+ if (i & 0x01) rp1 ^= cur; else rp0 ^= cur;
+ if (i & 0x02) rp3 ^= cur; else rp2 ^= cur;
+ if (i & 0x04) rp5 ^= cur; else rp4 ^= cur;
+ if (i & 0x08) rp7 ^= cur; else rp6 ^= cur;
+ if (i & 0x10) rp9 ^= cur; else rp8 ^= cur;
+ if (i & 0x20) rp11 ^= cur; else rp10 ^= cur;
+ if (i & 0x40) rp13 ^= cur; else rp12 ^= cur;
+ if (i & 0x80) rp15 ^= cur; else rp14 ^= cur;
+ }
+ code[0] =
+ (parity[rp7] << 7) |
+ (parity[rp6] << 6) |
+ (parity[rp5] << 5) |
+ (parity[rp4] << 4) |
+ (parity[rp3] << 3) |
+ (parity[rp2] << 2) |
+ (parity[rp1] << 1) |
+ (parity[rp0]);
+ code[1] =
+ (parity[rp15] << 7) |
+ (parity[rp14] << 6) |
+ (parity[rp13] << 5) |
+ (parity[rp12] << 4) |
+ (parity[rp11] << 3) |
+ (parity[rp10] << 2) |
+ (parity[rp9] << 1) |
+ (parity[rp8]);
+ code[2] =
+ (parity[par & 0xf0] << 7) |
+ (parity[par & 0x0f] << 6) |
+ (parity[par & 0xcc] << 5) |
+ (parity[par & 0x33] << 4) |
+ (parity[par & 0xaa] << 3) |
+ (parity[par & 0x55] << 2);
+ code[0] = ~code[0];
+ code[1] = ~code[1];
+ code[2] = ~code[2];
+}
+
+Still pretty straightforward. The last three invert statements are there to
+give a checksum of 0xff 0xff 0xff for an empty flash. In an empty flash
+all data is 0xff, so the checksum then matches.
+
+I also introduced the parity lookup. I expected this to be the fastest
+way to calculate the parity, but I will investigate alternatives later
+on.
+
+
+Analysis 1
+==========
+
+The code works, but is not terribly efficient. On my system it took
+almost 4 times as much time as the linux driver code. But hey, if it was
+*that* easy this would have been done long before.
+No pain, no gain.
+
+Fortunately there is plenty of room for improvement.
+
+In step 1 we moved from bit-wise calculation to byte-wise calculation.
+However in C we can also use the unsigned long data type and virtually
+every modern microprocessor supports 32 bit operations, so why not try
+to write our code in such a way that we process data in 32 bit chunks.
+
+Of course this means some modification as the row parity is byte by
+byte. A quick analysis:
+for the column parity we use the par variable. When extending to 32 bits
+we can in the end easily calculate rp0 and rp1 from it.
+(because par now consists of 4 bytes, contributing to rp1, rp0, rp1, rp0
+respectively)
+also rp2 and rp3 can be easily retrieved from par as rp3 covers the
+first two bytes and rp2 the last two bytes.
+
+Note that of course now the loop is executed only 64 times (256/4).
+And note that care must be taken wrt byte ordering. The way bytes are
+ordered in a long is machine dependent, and might affect us.
+Anyway, if there is an issue: this code is developed on x86 (to be
+precise: a DELL PC with a D920 Intel CPU)
+
+And of course the performance might depend on alignment, but I expect
+that the I/O buffers in the nand driver are aligned properly (and
+otherwise that should be fixed to get maximum performance).
+
+Let's give it a try...
+
+
+Attempt 2
+=========
+
+extern const char parity[256];
+
+void ecc2(const unsigned char *buf, unsigned char *code)
+{
+ int i;
+ const unsigned long *bp = (unsigned long *)buf;
+ unsigned long cur;
+ unsigned long rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7;
+ unsigned long rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15;
+ unsigned long par;
+
+ par = 0;
+ rp0 = 0; rp1 = 0; rp2 = 0; rp3 = 0;
+ rp4 = 0; rp5 = 0; rp6 = 0; rp7 = 0;
+ rp8 = 0; rp9 = 0; rp10 = 0; rp11 = 0;
+ rp12 = 0; rp13 = 0; rp14 = 0; rp15 = 0;
+
+ for (i = 0; i < 64; i++)
+ {
+ cur = *bp++;
+ par ^= cur;
+ if (i & 0x01) rp5 ^= cur; else rp4 ^= cur;
+ if (i & 0x02) rp7 ^= cur; else rp6 ^= cur;
+ if (i & 0x04) rp9 ^= cur; else rp8 ^= cur;
+ if (i & 0x08) rp11 ^= cur; else rp10 ^= cur;
+ if (i & 0x10) rp13 ^= cur; else rp12 ^= cur;
+ if (i & 0x20) rp15 ^= cur; else rp14 ^= cur;
+ }
+ /*
+ we need to adapt the code generation for the fact that rp vars are now
+ long; also the column parity calculation needs to be changed.
+ we'll bring rp4 to 15 back to single byte entities by shifting and
+ xoring
+ */
+ rp4 ^= (rp4 >> 16); rp4 ^= (rp4 >> 8); rp4 &= 0xff;
+ rp5 ^= (rp5 >> 16); rp5 ^= (rp5 >> 8); rp5 &= 0xff;
+ rp6 ^= (rp6 >> 16); rp6 ^= (rp6 >> 8); rp6 &= 0xff;
+ rp7 ^= (rp7 >> 16); rp7 ^= (rp7 >> 8); rp7 &= 0xff;
+ rp8 ^= (rp8 >> 16); rp8 ^= (rp8 >> 8); rp8 &= 0xff;
+ rp9 ^= (rp9 >> 16); rp9 ^= (rp9 >> 8); rp9 &= 0xff;
+ rp10 ^= (rp10 >> 16); rp10 ^= (rp10 >> 8); rp10 &= 0xff;
+ rp11 ^= (rp11 >> 16); rp11 ^= (rp11 >> 8); rp11 &= 0xff;
+ rp12 ^= (rp12 >> 16); rp12 ^= (rp12 >> 8); rp12 &= 0xff;
+ rp13 ^= (rp13 >> 16); rp13 ^= (rp13 >> 8); rp13 &= 0xff;
+ rp14 ^= (rp14 >> 16); rp14 ^= (rp14 >> 8); rp14 &= 0xff;
+ rp15 ^= (rp15 >> 16); rp15 ^= (rp15 >> 8); rp15 &= 0xff;
+ rp3 = (par >> 16); rp3 ^= (rp3 >> 8); rp3 &= 0xff;
+ rp2 = par & 0xffff; rp2 ^= (rp2 >> 8); rp2 &= 0xff;
+ par ^= (par >> 16);
+ rp1 = (par >> 8); rp1 &= 0xff;
+ rp0 = (par & 0xff);
+ par ^= (par >> 8); par &= 0xff;
+
+ code[0] =
+ (parity[rp7] << 7) |
+ (parity[rp6] << 6) |
+ (parity[rp5] << 5) |
+ (parity[rp4] << 4) |
+ (parity[rp3] << 3) |
+ (parity[rp2] << 2) |
+ (parity[rp1] << 1) |
+ (parity[rp0]);
+ code[1] =
+ (parity[rp15] << 7) |
+ (parity[rp14] << 6) |
+ (parity[rp13] << 5) |
+ (parity[rp12] << 4) |
+ (parity[rp11] << 3) |
+ (parity[rp10] << 2) |
+ (parity[rp9] << 1) |
+ (parity[rp8]);
+ code[2] =
+ (parity[par & 0xf0] << 7) |
+ (parity[par & 0x0f] << 6) |
+ (parity[par & 0xcc] << 5) |
+ (parity[par & 0x33] << 4) |
+ (parity[par & 0xaa] << 3) |
+ (parity[par & 0x55] << 2);
+ code[0] = ~code[0];
+ code[1] = ~code[1];
+ code[2] = ~code[2];
+}
+
+The parity array is not shown any more. Note also that for these
+examples I kinda deviated from my regular programming style by allowing
+multiple statements on a line, not using { } in then and else blocks
+with only a single statement and by using operators like ^=
+
+
+Analysis 2
+==========
+
+The code (of course) works, and hurray: we are a little bit faster than
+the linux driver code (about 15%). But wait, don't cheer too quickly.
+There is more to be gained.
+If we look at e.g. rp14 and rp15 we see that we either xor our data with
+rp14 or with rp15. However we also have par which goes over all data.
+This means there is no need to calculate rp14 as it can be calculated from
+rp15 through rp14 = par ^ rp15;
+(or if desired we can avoid calculating rp15 and calculate it from
+rp14). That is why some places refer to inverse parity.
+Of course the same thing holds for rp4/5, rp6/7, rp8/9, rp10/11 and rp12/13.
+Effectively this means we can eliminate the else clause from the if
+statements. Also we can optimise the calculation in the end a little bit
+by going from long to byte first. Actually we can even avoid the table
+lookups.
+
+Attempt 3
+=========
+
+Odd replaced:
+ if (i & 0x01) rp5 ^= cur; else rp4 ^= cur;
+ if (i & 0x02) rp7 ^= cur; else rp6 ^= cur;
+ if (i & 0x04) rp9 ^= cur; else rp8 ^= cur;
+ if (i & 0x08) rp11 ^= cur; else rp10 ^= cur;
+ if (i & 0x10) rp13 ^= cur; else rp12 ^= cur;
+ if (i & 0x20) rp15 ^= cur; else rp14 ^= cur;
+with
+ if (i & 0x01) rp5 ^= cur;
+ if (i & 0x02) rp7 ^= cur;
+ if (i & 0x04) rp9 ^= cur;
+ if (i & 0x08) rp11 ^= cur;
+ if (i & 0x10) rp13 ^= cur;
+ if (i & 0x20) rp15 ^= cur;
+
+ and outside the loop added:
+ rp4 = par ^ rp5;
+ rp6 = par ^ rp7;
+ rp8 = par ^ rp9;
+ rp10 = par ^ rp11;
+ rp12 = par ^ rp13;
+ rp14 = par ^ rp15;
+
+And after that the code takes about 30% more time, although the number of
+statements is reduced. This is also reflected in the assembly code.
+
+
+Analysis 3
+==========
+
+Very weird. Guess it has to do with caching or instruction parallelism
+or so. I also tried on an eeePC (Celeron, clocked at 900 MHz). An interesting
+observation was that this one is only 30% slower (according to time) at
+executing the code than my 3 GHz D920 processor.
+
+Well, it was expected not to be easy so maybe instead move to a
+different track: let's move back to the code from attempt2 and do some
+loop unrolling. This will eliminate a few if statements. I'll try
+different amounts of unrolling to see what works best.
+
+
+Attempt 4
+=========
+
+Unrolled the loop 1, 2, 3 and 4 times.
+For 4 the code starts with:
+
+ for (i = 0; i < 4; i++)
+ {
+ cur = *bp++;
+ par ^= cur;
+ rp4 ^= cur;
+ rp6 ^= cur;
+ rp8 ^= cur;
+ rp10 ^= cur;
+ if (i & 0x1) rp13 ^= cur; else rp12 ^= cur;
+ if (i & 0x2) rp15 ^= cur; else rp14 ^= cur;
+ cur = *bp++;
+ par ^= cur;
+ rp5 ^= cur;
+ rp6 ^= cur;
+ ...
+
+
+Analysis 4
+==========
+
+Unrolling once gains about 15%
+Unrolling twice keeps the gain at about 15%
+Unrolling three times gives a gain of 30% compared to attempt 2.
+Unrolling four times gives a marginal improvement compared to unrolling
+three times.
+
+I decided to proceed with a four time unrolled loop anyway. It was my gut
+feeling that in the next steps I would obtain additional gain from it.
+
+The next step was triggered by the fact that par contains the xor of all
+bytes and rp4 and rp5 each contain the xor of half of the bytes.
+So in effect par = rp4 ^ rp5. But as xor is commutative we can also say
+that rp5 = par ^ rp4. So no need to keep both rp4 and rp5 around. We can
+eliminate rp5 (or rp4, but I already foresaw another optimisation).
+The same holds for rp6/7, rp8/9, rp10/11 rp12/13 and rp14/15.
+
+
+Attempt 5
+=========
+
+Effectively all odd-numbered rp assignments in the loop were removed.
+This included the else clause of the if statements.
+Of course after the loop we need to correct things by adding code like:
+ rp5 = par ^ rp4;
+Also the initial assignments (rp5 = 0; etc) could be removed.
+Along the line I also removed the initialisation of rp0/1/2/3.
+
+
+Analysis 5
+==========
+
+Measurements showed this was a good move. The run-time roughly halved
+compared with attempt 4 with 4 times unrolled, and we only require 1/3rd
+of the processor time compared to the current code in the linux kernel.
+
+However, still I thought there was more. I didn't like all the if
+statements. Why not keep a running parity and only keep the last if
+statement. Time for yet another version!
+
+
+Attempt 6
+=========
+
+The code within the for loop was changed to:
+
+ for (i = 0; i < 4; i++)
+ {
+ cur = *bp++; tmppar = cur; rp4 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp6 ^= tmppar;
+ cur = *bp++; tmppar ^= cur; rp4 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp8 ^= tmppar;
+
+ cur = *bp++; tmppar ^= cur; rp4 ^= cur; rp6 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp6 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp4 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp10 ^= tmppar;
+
+ cur = *bp++; tmppar ^= cur; rp4 ^= cur; rp6 ^= cur; rp8 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp6 ^= cur; rp8 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp4 ^= cur; rp8 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp8 ^= cur;
+
+ cur = *bp++; tmppar ^= cur; rp4 ^= cur; rp6 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp6 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp4 ^= cur;
+ cur = *bp++; tmppar ^= cur;
+
+ par ^= tmppar;
+ if ((i & 0x1) == 0) rp12 ^= tmppar;
+ if ((i & 0x2) == 0) rp14 ^= tmppar;
+ }
+
+As you can see tmppar is used to accumulate the parity within a for
+iteration. In the last 3 statements it is added to par and, if needed,
+to rp12 and rp14.
+
+While making the changes I also found that I could exploit that tmppar
+contains the running parity for this iteration. So instead of having:
+rp4 ^= cur; rp6 ^= cur;
+I removed the rp6 ^= cur; statement and did rp6 ^= tmppar; in the next
+statement. A similar change was done for rp8 and rp10.
+
+
+Analysis 6
+==========
+
+Measuring this code again showed big gain. When executing the original
+linux code 1 million times, this took about 1 second on my system
+(using time to measure the performance). After this iteration I was back
+to 0.075 sec. Actually I had to decide to start measuring over 10
+million iterations in order not to lose too much accuracy. This one
+definitely seemed to be the jackpot!
+
+There is a little bit more room for improvement though. There are three
+places with statements:
+rp4 ^= cur; rp6 ^= cur;
+It seems more efficient to also maintain a variable rp4_6 in the
+loop; this eliminates 3 statements per iteration. Of course after the loop we
+need to correct by adding:
+ rp4 ^= rp4_6;
+ rp6 ^= rp4_6;
+Furthermore there are 4 sequential assignments to rp8. This can be
+encoded slightly more efficiently by saving tmppar before those 4 lines
+and later doing rp8 = rp8 ^ tmppar ^ notrp8;
+(where notrp8 is the value of rp8 before those 4 lines).
+Again a use of the commutative property of xor.
+Time for a new test!
+
+
+Attempt 7
+=========
+
+The new code now looks like:
+
+ for (i = 0; i < 4; i++)
+ {
+ cur = *bp++; tmppar = cur; rp4 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp6 ^= tmppar;
+ cur = *bp++; tmppar ^= cur; rp4 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp8 ^= tmppar;
+
+ cur = *bp++; tmppar ^= cur; rp4_6 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp6 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp4 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp10 ^= tmppar;
+
+ notrp8 = tmppar;
+ cur = *bp++; tmppar ^= cur; rp4_6 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp6 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp4 ^= cur;
+ cur = *bp++; tmppar ^= cur;
+ rp8 = rp8 ^ tmppar ^ notrp8;
+
+ cur = *bp++; tmppar ^= cur; rp4_6 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp6 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp4 ^= cur;
+ cur = *bp++; tmppar ^= cur;
+
+ par ^= tmppar;
+ if ((i & 0x1) == 0) rp12 ^= tmppar;
+ if ((i & 0x2) == 0) rp14 ^= tmppar;
+ }
+ rp4 ^= rp4_6;
+ rp6 ^= rp4_6;
+
+
+Not a big change, but every penny counts :-)
+
+
+Analysis 7
+==========
+
+Actually this made things worse. Not very much, but I don't want to move
+in the wrong direction. Maybe something to investigate later. Could
+have to do with caching again.
+
+Guess that is all there is to win within the loop. Maybe unrolling one
+more time will help. I'll keep the optimisations from 7 for now.
+
+
+Attempt 8
+=========
+
+Unrolled the loop one more time.
+
+
+Analysis 8
+==========
+
+This makes things worse. Let's stick with attempt 6 and continue from there.
+Although it seems that the code within the loop cannot be optimised
+further there is still room to optimize the generation of the ecc codes.
+We can simply calculate the total parity. If this is 0 then rp4 = rp5
+etc. If the parity is 1, then rp4 = !rp5;
+But if rp4 = rp5 we do not need rp5 etc. We can just write the even bits
+in the result byte and then do something like
+ code[0] |= (code[0] << 1);
+Lets test this.
+
+
+Attempt 9
+=========
+
+Changed the code but again this slightly degrades performance. Tried all
+kinds of other things, like having dedicated parity arrays to avoid the
+shift after parity[rp7] << 7; No gain.
+Changed the lookup using the parity array into shift operators (e.g.
+replaced parity[rp7] << 7 with:
+rp7 ^= (rp7 << 4);
+rp7 ^= (rp7 << 2);
+rp7 ^= (rp7 << 1);
+rp7 &= 0x80;
+No gain.
+
+The only marginal change was inverting the parity bits, so we can remove
+the last three invert statements.
+
+Ah well, pity this does not deliver more. Then again 10 million
+iterations using the linux driver code takes between 13 and 13.5
+seconds, whereas my code now takes about 0.73 seconds for those 10
+million iterations. So basically I've improved the performance by a
+factor 18 on my system. Not that bad. Of course on different hardware
+you will get different results. No warranties!
+
+But of course there is no such thing as a free lunch. The codesize almost
+tripled (from 562 bytes to 1434 bytes). Then again, it is not that much.
+
+
+Correcting errors
+=================
+
+For correcting errors I again used the ST application note as a starter,
+but I also peeked at the existing code.
+The algorithm itself is pretty straightforward. Just xor the given and
+the calculated ecc. If all bytes are 0 there is no problem. If 11 bits
+are 1 we have one correctable bit error. If there is 1 bit 1, we have an
+error in the given ecc code.
+It proved to be fastest to do some table lookups. Performance gain
+introduced by this is about a factor 2 on my system when a repair had to
+be done, and 1% or so if no repair had to be done.
+Code size increased from 330 bytes to 686 bytes for this function.
+(gcc 4.2, -O3)
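+
+To make the classification step concrete, here is a hedged sketch (the
+function name is mine; the actual repair, i.e. deriving the faulty byte and
+bit from the syndrome, and the table lookups mentioned above are left out):
+
+    /* returns 0: no error, 1: single correctable bit error,
+     *         2: error in the given ecc code, -1: uncorrectable */
+    static int ecc_classify(const unsigned char *read_ecc,
+                            const unsigned char *calc_ecc)
+    {
+            unsigned long syndrome;
+            int i, ones = 0;
+
+            /* xor the given and the calculated ecc (3 bytes each) */
+            syndrome = (read_ecc[0] ^ calc_ecc[0]) |
+                       ((read_ecc[1] ^ calc_ecc[1]) << 8) |
+                       ((read_ecc[2] ^ calc_ecc[2]) << 16);
+
+            for (i = 0; i < 24; i++)        /* count the 1 bits */
+                    ones += (syndrome >> i) & 1;
+
+            if (ones == 0)
+                    return 0;
+            if (ones == 11)
+                    return 1;
+            if (ones == 1)
+                    return 2;
+            return -1;
+    }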
+
+
+Conclusion
+==========
+
+The gain when calculating the ecc is tremendous. On my development hardware
+a speedup of a factor of 18 for ecc calculation was achieved. On a test on an
+embedded system with a MIPS core a factor 7 was obtained.
+On a test with a Linksys NSLU2 (ARMv5TE processor) the speedup was a factor
+5 (big endian mode, gcc 4.1.2, -O3).
+For correction not much gain could be obtained (as bitflips are rare). Then
+again there are also much less cycles spent there.
+
+It seems there is not much more gain possible in this, at least when
+programmed in C. Of course it might be possible to squeeze something more
+out of it with an assembler program, but due to pipeline behaviour etc.
+this is very tricky (at least for intel hw).
+
+Author: Frans Meulenbroeks
+Copyright (C) 2008 Koninklijke Philips Electronics NV.
diff --git a/Documentation/networking/LICENSE.qlge b/Documentation/networking/LICENSE.qlge
new file mode 100644
index 00000000000..123b6edd7f1
--- /dev/null
+++ b/Documentation/networking/LICENSE.qlge
@@ -0,0 +1,46 @@
+Copyright (c) 2003-2008 QLogic Corporation
+QLogic Linux Networking HBA Driver
+
+This program includes a device driver for Linux 2.6 that may be
+distributed with QLogic hardware specific firmware binary file.
+You may modify and redistribute the device driver code under the
+GNU General Public License as published by the Free Software
+Foundation (version 2 or a later version).
+
+You may redistribute the hardware specific firmware binary file
+under the following terms:
+
+ 1. Redistribution of source code (only if applicable),
+ must retain the above copyright notice, this list of
+ conditions and the following disclaimer.
+
+ 2. Redistribution in binary form must reproduce the above
+ copyright notice, this list of conditions and the
+ following disclaimer in the documentation and/or other
+ materials provided with the distribution.
+
+ 3. The name of QLogic Corporation may not be used to
+ endorse or promote products derived from this software
+ without specific prior written permission
+
+REGARDLESS OF WHAT LICENSING MECHANISM IS USED OR APPLICABLE,
+THIS PROGRAM IS PROVIDED BY QLOGIC CORPORATION "AS IS'' AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
+BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+USER ACKNOWLEDGES AND AGREES THAT USE OF THIS PROGRAM WILL NOT
+CREATE OR GIVE GROUNDS FOR A LICENSE BY IMPLICATION, ESTOPPEL, OR
+OTHERWISE IN ANY INTELLECTUAL PROPERTY RIGHTS (PATENT, COPYRIGHT,
+TRADE SECRET, MASK WORK, OR OTHER PROPRIETARY RIGHT) EMBODIED IN
+ANY OTHER QLOGIC HARDWARE OR SOFTWARE EITHER SOLELY OR IN
+COMBINATION WITH THIS PROGRAM.
+
diff --git a/Documentation/networking/can.txt b/Documentation/networking/can.txt
index 297ba7b1cca..2035bc4932f 100644
--- a/Documentation/networking/can.txt
+++ b/Documentation/networking/can.txt
@@ -35,8 +35,9 @@ This file contains
6.1 general settings
6.2 local loopback of sent frames
6.3 CAN controller hardware filters
- 6.4 currently supported CAN hardware
- 6.5 todo
+ 6.4 The virtual CAN driver (vcan)
+ 6.5 currently supported CAN hardware
+ 6.6 todo
7 Credits
@@ -584,7 +585,42 @@ solution for a couple of reasons:
@133MHz with four SJA1000 CAN controllers from 2002 under heavy bus
load without any problems ...
- 6.4 currently supported CAN hardware (September 2007)
+ 6.4 The virtual CAN driver (vcan)
+
+ Similar to the network loopback devices, vcan offers a virtual local
+  CAN interface. A fully qualified address on CAN consists of
+
+ - a unique CAN Identifier (CAN ID)
+ - the CAN bus this CAN ID is transmitted on (e.g. can0)
+
+ so in common use cases more than one virtual CAN interface is needed.
+
+ The virtual CAN interfaces allow the transmission and reception of CAN
+ frames without real CAN controller hardware. Virtual CAN network
+ devices are usually named 'vcanX', like vcan0 vcan1 vcan2 ...
+  When compiled as a module, the virtual CAN driver is called vcan.ko.
+
+ Since Linux Kernel version 2.6.24 the vcan driver supports the Kernel
+ netlink interface to create vcan network devices. The creation and
+ removal of vcan network devices can be managed with the ip(8) tool:
+
+ - Create a virtual CAN network interface:
+ ip link add type vcan
+
+ - Create a virtual CAN network interface with a specific name 'vcan42':
+ ip link add dev vcan42 type vcan
+
+ - Remove a (virtual CAN) network interface 'vcan42':
+ ip link del vcan42
+
+ The tool 'vcan' from the SocketCAN SVN repository on BerliOS is obsolete.
+
+ Virtual CAN network device creation in older Kernels:
+ In Linux Kernel versions < 2.6.24 the vcan driver creates 4 vcan
+ netdevices at module load time by default. This value can be changed
+ with the module parameter 'numdev'. E.g. 'modprobe vcan numdev=8'
+
+ 6.5 currently supported CAN hardware
On the project website http://developer.berlios.de/projects/socketcan
there are different drivers available:
@@ -603,7 +639,7 @@ solution for a couple of reasons:
Please check the Mailing Lists on the berlios OSS project website.
- 6.5 todo (September 2007)
+ 6.6 todo
The configuration interface for CAN network drivers is still an open
issue that has not been finalized in the socketcan project. Also the
diff --git a/Documentation/networking/cs89x0.txt b/Documentation/networking/cs89x0.txt
index 6387d3decf8..c725d33b316 100644
--- a/Documentation/networking/cs89x0.txt
+++ b/Documentation/networking/cs89x0.txt
@@ -3,7 +3,7 @@ NOTE
----
This document was contributed by Cirrus Logic for kernel 2.2.5. This version
-has been updated for 2.3.48 by Andrew Morton <andrewm@uow.edu.au>
+has been updated for 2.3.48 by Andrew Morton.
Cirrus make a copy of this driver available at their website, as
described below. In general, you should use the driver version which
@@ -690,7 +690,7 @@ latest drivers and technical publications.
6.4 Current maintainer
In February 2000 the maintenance of this driver was assumed by Andrew
-Morton <akpm@zip.com.au>
+Morton.
6.5 Kernel module parameters
diff --git a/Documentation/networking/multiqueue.txt b/Documentation/networking/multiqueue.txt
index d391ea63114..4caa0e314cc 100644
--- a/Documentation/networking/multiqueue.txt
+++ b/Documentation/networking/multiqueue.txt
@@ -24,4 +24,56 @@ netif_{start|stop|wake}_subqueue() functions to manage each queue while the
device is still operational. netdev->queue_lock is still used when the device
comes online or when it's completely shut down (unregister_netdev(), etc.).
-Author: Peter P. Waskiewicz Jr. <peter.p.waskiewicz.jr@intel.com>
+
+Section 2: Qdisc support for multiqueue devices
+
+-----------------------------------------------
+
+Currently two qdiscs are optimized for multiqueue devices. The first is the
+default pfifo_fast qdisc. This qdisc supports one qdisc per hardware queue.
+A new round-robin qdisc, sch_multiq, also supports multiple hardware queues. The
+qdisc is responsible for classifying the skb's and then directing the skb's to
+bands and queues based on the value in skb->queue_mapping. Use this field in
+the base driver to determine which queue to send the skb to.
+
+sch_multiq has been added for hardware that wishes to avoid head-of-line
+blocking. It will cycle through the bands and verify that the hardware queue
+associated with the band is not stopped prior to dequeuing a packet.
+
+On qdisc load, the number of bands is based on the number of queues on the
+hardware. Once the association is made, any skb with skb->queue_mapping set
+will be queued to the band associated with the hardware queue.
+
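+As a hedged sketch of the driver side (struct my_priv and my_post_to_ring()
+are made-up placeholders, not part of any real driver), the base driver
+simply reads the mapping chosen by the qdisc in its transmit routine:
+
+    /* assumes <linux/netdevice.h> and <linux/skbuff.h> */
+    static int my_start_xmit(struct sk_buff *skb, struct net_device *dev)
+    {
+            struct my_priv *priv = netdev_priv(dev);
+            u16 ring = skb_get_queue_mapping(skb); /* band chosen by the qdisc */
+
+            /* post the skb on the hardware Tx ring matching that band */
+            return my_post_to_ring(priv, ring, skb);
+    }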
+
+Section 3: Brief howto using MULTIQ for multiqueue devices
+---------------------------------------------------------------
+
+The userspace command 'tc', part of the iproute2 package, is used to configure
+qdiscs. To add the MULTIQ qdisc to your network device, assuming the device
+is called eth0, run the following command:
+
+# tc qdisc add dev eth0 root handle 1: multiq
+
+The qdisc will allocate the number of bands to equal the number of queues that
+the device reports, and bring the qdisc online. Assuming eth0 has 4 Tx
+queues, the band mapping would look like:
+
+band 0 => queue 0
+band 1 => queue 1
+band 2 => queue 2
+band 3 => queue 3
+
+Traffic will begin flowing through each queue based on either the simple_tx_hash
+function or on netdev->select_queue() if you have it defined.
+
+The behavior of tc filters remains the same. However a new tc action,
+skbedit, has been added. Assuming you wanted to route all traffic to a
+specific host, for example 192.168.0.3, through a specific queue you could use
+this action and establish a filter such as:
+
+tc filter add dev eth0 parent 1: protocol ip prio 1 u32 \
+ match ip dst 192.168.0.3 \
+ action skbedit queue_mapping 3
+
+Author: Alexander Duyck <alexander.h.duyck@intel.com>
+Original Author: Peter P. Waskiewicz Jr. <peter.p.waskiewicz.jr@intel.com>
diff --git a/Documentation/networking/phonet.txt b/Documentation/networking/phonet.txt
new file mode 100644
index 00000000000..6a07e45d4a9
--- /dev/null
+++ b/Documentation/networking/phonet.txt
@@ -0,0 +1,175 @@
+Linux Phonet protocol family
+============================
+
+Introduction
+------------
+
+Phonet is a packet protocol used by Nokia cellular modems for both IPC
+and RPC. With the Linux Phonet socket family, Linux host processes can
+receive and send messages from/to the modem, or any other external
+device attached to the modem. The modem takes care of routing.
+
+Phonet packets can be exchanged through various hardware connections
+depending on the device, such as:
+ - USB with the CDC Phonet interface,
+ - infrared,
+ - Bluetooth,
+ - an RS232 serial port (with a dedicated "FBUS" line discipline),
+ - the SSI bus with some TI OMAP processors.
+
+
+Packets format
+--------------
+
+Phonet packets have a common header as follows:
+
+ struct phonethdr {
+ uint8_t pn_media; /* Media type (link-layer identifier) */
+ uint8_t pn_rdev; /* Receiver device ID */
+ uint8_t pn_sdev; /* Sender device ID */
+ uint8_t pn_res; /* Resource ID or function */
+ uint16_t pn_length; /* Big-endian message byte length (minus 6) */
+ uint8_t pn_robj; /* Receiver object ID */
+ uint8_t pn_sobj; /* Sender object ID */
+ };
+
+On Linux, the link-layer header includes the pn_media byte (see below).
+The next 7 bytes are part of the network-layer header.
+
+The device ID is split: the 6 higher-order bits constitute the device
+address, while the 2 lower-order bits are used for multiplexing, as are
+the 8-bit object identifiers. As such, Phonet can be considered as a
+network layer with 6 bits of address space and 10 bits for transport
+protocol (much like port numbers in the IP world).
+
+The modem always has address number zero. All other devices have their
+own 6-bit address.
+
+
+Link layer
+----------
+
+Phonet links are always point-to-point links. The link layer header
+consists of a single Phonet media type byte. It uniquely identifies the
+link through which the packet is transmitted, from the modem's
+perspective. Each Phonet network device shall prepend and set the media
+type byte as appropriate. For convenience, a common phonet_header_ops
+link-layer header operations structure is provided. It sets the
+media type according to the network device hardware address.
+
+Linux Phonet network interfaces support a dedicated link layer packet
+type (ETH_P_PHONET), which is out of the Ethernet type range. They can
+only send and receive Phonet packets.
+
+The virtual TUN tunnel device driver can also be used for Phonet. This
+requires IFF_TUN mode, _without_ the IFF_NO_PI flag. In this case,
+there is no link-layer header, so there is no Phonet media type byte.
+
+Note that Phonet interfaces are not allowed to re-order packets, so
+only the (default) Linux FIFO qdisc should be used with them.
+
+
+Network layer
+-------------
+
+The Phonet socket address family maps the Phonet packet header:
+
+ struct sockaddr_pn {
+ sa_family_t spn_family; /* AF_PHONET */
+ uint8_t spn_obj; /* Object ID */
+ uint8_t spn_dev; /* Device ID */
+ uint8_t spn_resource; /* Resource or function */
+ uint8_t spn_zero[...]; /* Padding */
+ };
+
+The resource field is only used when sending and receiving;
+it is ignored by bind() and getsockname().
+
+
+Low-level datagram protocol
+---------------------------
+
+Applications can send Phonet messages using the Phonet datagram socket
+protocol from the PF_PHONET family. Each socket is bound to one of the
+2^10 object IDs available, and can send and receive packets with any
+other peer.
+
+ struct sockaddr_pn addr = { .spn_family = AF_PHONET, };
+ ssize_t len;
+ socklen_t addrlen = sizeof(addr);
+ int fd;
+
+ fd = socket(PF_PHONET, SOCK_DGRAM, 0);
+ bind(fd, (struct sockaddr *)&addr, sizeof(addr));
+ /* ... */
+
+ sendto(fd, msg, msglen, 0, (struct sockaddr *)&addr, sizeof(addr));
+ len = recvfrom(fd, buf, sizeof(buf), 0,
+ (struct sockaddr *)&addr, &addrlen);
+
+This protocol follows the SOCK_DGRAM connection-less semantics.
+However, connect() and getpeername() are not supported, as they did
+not seem useful with Phonet usages (could be added easily).
+
+
+Phonet Pipe protocol
+--------------------
+
+The Phonet Pipe protocol is a simple sequenced packets protocol
+with end-to-end congestion control. It uses the passive listening
+socket paradigm. The listening socket is bound to a unique free object
+ID. Each listening socket can handle up to 255 simultaneous
+connections, one per accept()'d socket.
+
+ int lfd, cfd;
+
+ lfd = socket(PF_PHONET, SOCK_SEQPACKET, PN_PROTO_PIPE);
+ listen (lfd, INT_MAX);
+
+ /* ... */
+ cfd = accept(lfd, NULL, NULL);
+ for (;;)
+ {
+ char buf[...];
+ ssize_t len = read(cfd, buf, sizeof(buf));
+
+ /* ... */
+
+ write(cfd, msg, msglen);
+ }
+
+Connections are established between two endpoints by a "third party"
+application. This means that both endpoints are passive; so connect()
+is not possible.
+
+WARNING:
+When polling a connected pipe socket for writability, there is an
+intrinsic race condition whereby writability might be lost between the
+polling and the writing system calls. In this case, the socket will
+block until write becomes possible again, unless non-blocking mode
+is enabled.
+
+
+The pipe protocol provides two socket options at the SOL_PNPIPE level:
+
+ PNPIPE_ENCAP accepts one integer value (int) of:
+
+ PNPIPE_ENCAP_NONE: The socket operates normally (default).
+
+ PNPIPE_ENCAP_IP: The socket is used as a backend for a virtual IP
+ interface. This requires CAP_NET_ADMIN capability. GPRS data
+ support on Nokia modems can use this. Note that the socket cannot
+ be reliably poll()'d or read() from while in this mode.
+
+ PNPIPE_IFINDEX is a read-only integer value. It contains the
+ interface index of the network interface created by PNPIPE_ENCAP,
+ or zero if encapsulation is off.
+
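+For illustration, a hedged sketch of switching a connected pipe socket into
+IP encapsulation mode and reading back the interface index (the header name
+is an assumption; the constants are the ones described above):
+
+    #include <sys/socket.h>
+    #include <linux/phonet.h>  /* SOL_PNPIPE, PNPIPE_ENCAP*, PNPIPE_IFINDEX */
+
+    static int enable_ip_encap(int cfd)
+    {
+            int encap = PNPIPE_ENCAP_IP;
+            int ifindex;
+            socklen_t len = sizeof(ifindex);
+
+            if (setsockopt(cfd, SOL_PNPIPE, PNPIPE_ENCAP, &encap, sizeof(encap)))
+                    return -1;
+            if (getsockopt(cfd, SOL_PNPIPE, PNPIPE_IFINDEX, &ifindex, &len))
+                    return -1;
+            return ifindex;    /* index of the network interface just created */
+    }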
+
+Authors
+-------
+
+Linux Phonet was initially written by Sakari Ailus.
+Other contributors include Mikä Liljeberg, Andras Domokos,
+Carlos Chinea and Rémi Denis-Courmont.
+Copyright (C) 2008 Nokia Corporation.
diff --git a/Documentation/networking/regulatory.txt b/Documentation/networking/regulatory.txt
new file mode 100644
index 00000000000..a96989a8ff3
--- /dev/null
+++ b/Documentation/networking/regulatory.txt
@@ -0,0 +1,194 @@
+Linux wireless regulatory documentation
+---------------------------------------
+
+This document gives a brief review over how the Linux wireless
+regulatory infrastructure works.
+
+More up to date information can be obtained at the project's web page:
+
+http://wireless.kernel.org/en/developers/Regulatory
+
+Keeping regulatory domains in userspace
+---------------------------------------
+
+Due to the dynamic nature of regulatory domains we keep them
+in userspace and provide a framework for userspace to upload
+to the kernel one regulatory domain to be used as the central
+core regulatory domain all wireless devices should adhere to.
+
+How to get regulatory domains to the kernel
+-------------------------------------------
+
+Userspace gets a regulatory domain in the kernel by having
+a userspace agent build it and send it via nl80211. Only
+expected regulatory domains will be respected by the kernel.
+
+A currently available userspace agent which can accomplish this
+is CRDA - central regulatory domain agent. It is documented here:
+
+http://wireless.kernel.org/en/developers/Regulatory/CRDA
+
+Essentially the kernel will send a udev event when it knows
+it needs a new regulatory domain. A udev rule can be put in place
+to trigger crda to send the respective regulatory domain for a
+specific ISO/IEC 3166 alpha2.
+
+Below is an example udev rule which can be used:
+
+# Example file, should be put in /etc/udev/rules.d/regulatory.rules
+KERNEL=="regulatory*", ACTION=="change", SUBSYSTEM=="platform", RUN+="/sbin/crda"
+
+The alpha2 is passed as an environment variable under the variable COUNTRY.
+
+Who asks for regulatory domains?
+--------------------------------
+
+* Users
+
+Users can use iw:
+
+http://wireless.kernel.org/en/users/Documentation/iw
+
+An example:
+
+ # set regulatory domain to "Costa Rica"
+ iw reg set CR
+
+This will request the kernel to set the regulatory domain to
+the specified alpha2. The kernel in turn will then ask userspace
+to provide a regulatory domain for the alpha2 specified by the user
+by sending a uevent.
+
+* Wireless subsystems for Country Information elements
+
+The kernel will send a uevent to inform userspace a new
+regulatory domain is required. More on this to be added
+as its integration is added.
+
+* Drivers
+
+If drivers determine they need a specific regulatory domain
+set they can inform the wireless core using regulatory_hint().
+They have two options -- they either provide an alpha2 so that
+crda can provide back a regulatory domain for that country or
+they can build their own regulatory domain based on internal
+custom knowledge so the wireless core can respect it.
+
+*Most* drivers will rely on the first mechanism of providing a
+regulatory hint with an alpha2. For these drivers there is an additional
+check that can be used to ensure compliance based on custom EEPROM
+regulatory data. This additional check can be used by drivers by
+registering on its struct wiphy a reg_notifier() callback. This notifier
+is called when the core's regulatory domain has been changed. The driver
+can use this to review the changes made and also review who made them
+(driver, user, country IE) and determine what to allow based on its
+internal EEPROM data. Device drivers wishing to be capable of world
+roaming should use this callback. More on world roaming will be
+added to this document when its support is enabled.
+
+Device drivers that provide their own built-in regulatory domain
+do not need a callback as the channels registered by them are
+the only ones that will be allowed and therefore *additional*
+channels cannot be enabled.
+
+Example code - drivers hinting an alpha2:
+------------------------------------------
+
+This example comes from the zd1211rw device driver. You can start
+by having a mapping of your device's EEPROM country/regulatory
+domain value to a specific alpha2 as follows:
+
+static struct zd_reg_alpha2_map reg_alpha2_map[] = {
+ { ZD_REGDOMAIN_FCC, "US" },
+ { ZD_REGDOMAIN_IC, "CA" },
+ { ZD_REGDOMAIN_ETSI, "DE" }, /* Generic ETSI, use most restrictive */
+ { ZD_REGDOMAIN_JAPAN, "JP" },
+ { ZD_REGDOMAIN_JAPAN_ADD, "JP" },
+ { ZD_REGDOMAIN_SPAIN, "ES" },
+ { ZD_REGDOMAIN_FRANCE, "FR" },
+};
+
+Then you can define a routine to map your read EEPROM value to an alpha2,
+as follows:
+
+static int zd_reg2alpha2(u8 regdomain, char *alpha2)
+{
+ unsigned int i;
+ struct zd_reg_alpha2_map *reg_map;
+ for (i = 0; i < ARRAY_SIZE(reg_alpha2_map); i++) {
+ reg_map = &reg_alpha2_map[i];
+ if (regdomain == reg_map->reg) {
+ alpha2[0] = reg_map->alpha2[0];
+ alpha2[1] = reg_map->alpha2[1];
+ return 0;
+ }
+ }
+ return 1;
+}
+
+Lastly, if a match was found, you can pass the discovered alpha2 to the
+core as a hint. You need to do this after you have registered your wiphy,
+and you are expected to do this during initialization.
+
+ r = zd_reg2alpha2(mac->regdomain, alpha2);
+ if (!r)
+ regulatory_hint(hw->wiphy, alpha2, NULL);
+
+Example code - drivers providing a built in regulatory domain:
+--------------------------------------------------------------
+
+If you have regulatory information you can obtain from your
+driver and you *need* to use it, you can build a regulatory domain
+structure and pass it to the wireless core. To do this you should
+kmalloc() a structure big enough to hold your regulatory domain
+structure, fill it with your data, and finally call regulatory_hint()
+with the regulatory domain structure in it.
+
+Below is a simple example, with a regulatory domain cached in a static
+structure. Your implementation may vary (you may read from an EEPROM
+cache instead, for example).
+
+Example cache of some regulatory domain
+
+struct ieee80211_regdomain mydriver_jp_regdom = {
+ .n_reg_rules = 3,
+ .alpha2 = "JP",
+ //.alpha2 = "99", /* If I have no alpha2 to map it to */
+ .reg_rules = {
+ /* IEEE 802.11b/g, channels 1..14 */
+ REG_RULE(2412-20, 2484+20, 40, 6, 20, 0),
+ /* IEEE 802.11a, channels 34..48 */
+ REG_RULE(5170-20, 5240+20, 40, 6, 20,
+ NL80211_RRF_PASSIVE_SCAN),
+ /* IEEE 802.11a, channels 52..64 */
+ REG_RULE(5260-20, 5320+20, 40, 6, 20,
+ NL80211_RRF_NO_IBSS |
+ NL80211_RRF_DFS),
+ }
+};
+
+Then in some part of your code after your wiphy has been registered:
+
+ int r;
+ struct ieee80211_regdomain *rd;
+ int size_of_regd;
+ int num_rules = mydriver_jp_regdom.n_reg_rules;
+ unsigned int i;
+
+ size_of_regd = sizeof(struct ieee80211_regdomain) +
+ (num_rules * sizeof(struct ieee80211_reg_rule));
+
+ rd = kzalloc(size_of_regd, GFP_KERNEL);
+ if (!rd)
+ return -ENOMEM;
+
+ memcpy(rd, &mydriver_jp_regdom, sizeof(struct ieee80211_regdomain));
+
+	for (i = 0; i < num_rules; i++) {
+ memcpy(&rd->reg_rules[i], &mydriver_jp_regdom.reg_rules[i],
+ sizeof(struct ieee80211_reg_rule));
+ }
+ r = regulatory_hint(hw->wiphy, NULL, rd);
+ if (r) {
+ kfree(rd);
+ return r;
+ }
+
diff --git a/Documentation/networking/tproxy.txt b/Documentation/networking/tproxy.txt
new file mode 100644
index 00000000000..7b5996d9357
--- /dev/null
+++ b/Documentation/networking/tproxy.txt
@@ -0,0 +1,85 @@
+Transparent proxy support
+=========================
+
+This feature adds Linux 2.2-like transparent proxy support to current kernels.
+To use it, enable NETFILTER_TPROXY, the socket match and the TPROXY target in
+your kernel config. You will need policy routing too, so be sure to enable that
+as well.
+
+
+1. Making non-local sockets work
+================================
+
+The idea is that you identify packets with destination address matching a local
+socket on your box, set the packet mark to a certain value, and then match on that
+value using policy routing to have those packets delivered locally:
+
+# iptables -t mangle -N DIVERT
+# iptables -t mangle -A PREROUTING -p tcp -m socket -j DIVERT
+# iptables -t mangle -A DIVERT -j MARK --set-mark 1
+# iptables -t mangle -A DIVERT -j ACCEPT
+
+# ip rule add fwmark 1 lookup 100
+# ip route add local 0.0.0.0/0 dev lo table 100
+
+Because of certain restrictions in the IPv4 routing output code, you'll have
+to modify your application to allow it to send datagrams _from_ non-local IP
+addresses. All you have to do is enable the (SOL_IP, IP_TRANSPARENT) socket
+option before calling bind:
+
+fd = socket(AF_INET, SOCK_STREAM, 0);
+/* - 8< -*/
+int value = 1;
+setsockopt(fd, SOL_IP, IP_TRANSPARENT, &value, sizeof(value));
+/* - 8< -*/
+name.sin_family = AF_INET;
+name.sin_port = htons(0xCAFE);
+name.sin_addr.s_addr = htonl(0xDEADBEEF);
+bind(fd, (struct sockaddr *) &name, sizeof(name));
+
+A trivial patch for netcat is available here:
+http://people.netfilter.org/hidden/tproxy/netcat-ip_transparent-support.patch
+
+
+2. Redirecting traffic
+======================
+
+Transparent proxying often involves "intercepting" traffic on a router. This is
+usually done with the iptables REDIRECT target; however, there are serious
+limitations of that method. One of the major issues is that it actually
+modifies the packets to change the destination address -- which might not be
+acceptable in certain situations. (Think of proxying UDP for example: you won't
+be able to find out the original destination address. Even in the case of
+TCP, getting the original destination address is racy.)
+
+The 'TPROXY' target provides similar functionality without relying on NAT. Simply
+add rules like this to the iptables ruleset above:
+
+# iptables -t mangle -A PREROUTING -p tcp --dport 80 -j TPROXY \
+ --tproxy-mark 0x1/0x1 --on-port 50080
+
+Note that for this to work you'll have to modify the proxy to enable (SOL_IP,
+IP_TRANSPARENT) for the listening socket.
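+
+For illustration, a minimal sketch of such a listening socket follows
+(error handling omitted; the port number simply matches the --on-port
+value used above):
+
+int fd = socket(AF_INET, SOCK_STREAM, 0);
+int one = 1;
+struct sockaddr_in addr = {
+	.sin_family = AF_INET,
+	.sin_addr.s_addr = htonl(INADDR_ANY),
+	.sin_port = htons(50080),
+};
+struct sockaddr_in dst;
+socklen_t dst_len = sizeof(dst);
+int cfd;
+
+setsockopt(fd, SOL_IP, IP_TRANSPARENT, &one, sizeof(one));
+bind(fd, (struct sockaddr *) &addr, sizeof(addr));
+listen(fd, 128);
+
+/* The original destination of an accepted connection is simply the
+ * local address of the accepted socket: */
+cfd = accept(fd, NULL, NULL);
+getsockname(cfd, (struct sockaddr *) &dst, &dst_len);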
+
+
+3. Iptables extensions
+======================
+
+To use tproxy you'll need to have the 'socket' and 'TPROXY' modules
+compiled for iptables. A patched version of iptables is available
+here: http://git.balabit.hu/?p=bazsi/iptables-tproxy.git
+
+
+4. Application support
+======================
+
+4.1. Squid
+----------
+
+Squid 3.HEAD has support built-in. To use it, pass
+'--enable-linux-netfilter' to configure and set the 'tproxy' option on
+the HTTP listener you redirect traffic to with the TPROXY iptables
+target.
+
+For more information please consult the following page on the Squid
+wiki: http://wiki.squid-cache.org/Features/Tproxy4
diff --git a/Documentation/networking/vortex.txt b/Documentation/networking/vortex.txt
index 6356d3faed3..bd70976b816 100644
--- a/Documentation/networking/vortex.txt
+++ b/Documentation/networking/vortex.txt
@@ -1,5 +1,5 @@
Documentation/networking/vortex.txt
-Andrew Morton <andrewm@uow.edu.au>
+Andrew Morton
30 April 2000
@@ -11,7 +11,7 @@ The driver was written by Donald Becker <becker@scyld.com>
Don is no longer the prime maintainer of this version of the driver.
Please report problems to one or more of:
- Andrew Morton <akpm@osdl.org>
+ Andrew Morton
Netdev mailing list <netdev@vger.kernel.org>
Linux kernel mailing list <linux-kernel@vger.kernel.org>
@@ -305,11 +305,6 @@ Donald's wake-on-LAN page:
ftp://ftp.3com.com/pub/nic/3c90x/3c90xx2.exe
-Driver updates and a detailed changelog for the modifications which
-were made for the 2.3/2,4 series kernel is available at
-
- http://www.zip.com.au/~akpm/linux/#3c59x-bc
-
Autonegotiation notes
---------------------
diff --git a/Documentation/pcmcia/driver-changes.txt b/Documentation/pcmcia/driver-changes.txt
index 96f155e6875..059934363ca 100644
--- a/Documentation/pcmcia/driver-changes.txt
+++ b/Documentation/pcmcia/driver-changes.txt
@@ -1,5 +1,11 @@
This file details changes in 2.6 which affect PCMCIA card driver authors:
+* New configuration loop helper (as of 2.6.28)
+ By calling pcmcia_loop_config(), a driver can iterate over all available
+ configuration options. During a driver's probe() phase, one doesn't need
+ to use pcmcia_get_{first,next}_tuple, pcmcia_get_tuple_data and
+ pcmcia_parse_tuple directly in most if not all cases.
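+
+  For orientation only, a probe() might use it roughly as sketched below;
+  the callback body and names are illustrative assumptions, so check
+  <pcmcia/ds.h> for the authoritative prototype:
+
+	static int mydrv_conf_check(struct pcmcia_device *p_dev,
+				    cistpl_cftable_entry_t *cfg,
+				    cistpl_cftable_entry_t *dflt,
+				    unsigned int vcc,
+				    void *priv_data)
+	{
+		/* Inspect cfg/dflt and program p_dev as needed; returning
+		 * 0 accepts this configuration option and stops the loop. */
+		return 0;
+	}
+
+	ret = pcmcia_loop_config(link, mydrv_conf_check, NULL);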
+
* New release helper (as of 2.6.17)
Instead of calling pcmcia_release_{configuration,io,irq,win}, all that's
necessary now is calling pcmcia_disable_device. As there is no valid
diff --git a/Documentation/power/regulator/machine.txt b/Documentation/power/regulator/machine.txt
index c9a35665cf7..ce3487d99ab 100644
--- a/Documentation/power/regulator/machine.txt
+++ b/Documentation/power/regulator/machine.txt
@@ -2,17 +2,8 @@ Regulator Machine Driver Interface
===================================
The regulator machine driver interface is intended for board/machine specific
-initialisation code to configure the regulator subsystem. Typical things that
-machine drivers would do are :-
+initialisation code to configure the regulator subsystem.
- 1. Regulator -> Device mapping.
- 2. Regulator supply configuration.
- 3. Power Domain constraint setting.
-
-
-
-1. Regulator -> device mapping
-==============================
Consider the following machine :-
Regulator-1 -+-> Regulator-2 --> [Consumer A @ 1.8 - 2.0V]
@@ -21,81 +12,82 @@ Consider the following machine :-
The drivers for consumers A & B must be mapped to the correct regulator in
order to control their power supply. This mapping can be achieved in machine
-initialisation code by calling :-
+initialisation code by creating a struct regulator_consumer_supply for
+each regulator.
+
+struct regulator_consumer_supply {
+ struct device *dev; /* consumer */
+ const char *supply; /* consumer supply - e.g. "vcc" */
+};
-int regulator_set_device_supply(const char *regulator, struct device *dev,
- const char *supply);
+e.g. for the machine above
-and is shown with the following code :-
+static struct regulator_consumer_supply regulator1_consumers[] = {
+	{
+		.dev	= &platform_consumerB_device.dev,
+		.supply	= "Vcc",
+	},
+};
-regulator_set_device_supply("Regulator-1", devB, "Vcc");
-regulator_set_device_supply("Regulator-2", devA, "Vcc");
+static struct regulator_consumer_supply regulator2_consumers[] = {
+	{
+		.dev	= &platform_consumerA_device.dev,
+		.supply	= "Vcc",
+	},
+};
This maps Regulator-1 to the 'Vcc' supply for Consumer B and maps Regulator-2
to the 'Vcc' supply for Consumer A.
-
-2. Regulator supply configuration.
-==================================
-Consider the following machine (again) :-
-
- Regulator-1 -+-> Regulator-2 --> [Consumer A @ 1.8 - 2.0V]
- |
- +-> [Consumer B @ 3.3V]
+Constraints can now be registered by defining a struct regulator_init_data
+for each regulator power domain. This structure also maps the consumers
+to their supply regulator :-
+
+static struct regulator_init_data regulator1_data = {
+ .constraints = {
+ .min_uV = 3300000,
+ .max_uV = 3300000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(regulator1_consumers),
+ .consumer_supplies = regulator1_consumers,
+};
Regulator-1 supplies power to Regulator-2. This relationship must be registered
with the core so that Regulator-1 is also enabled when Consumer A enables it's
-supply (Regulator-2).
-
-This relationship can be register with the core via :-
-
-int regulator_set_supply(const char *regulator, const char *regulator_supply);
-
-In this example we would use the following code :-
-
-regulator_set_supply("Regulator-2", "Regulator-1");
-
-Relationships can be queried by calling :-
-
-const char *regulator_get_supply(const char *regulator);
-
-
-3. Power Domain constraint setting.
-===================================
-Each power domain within a system has physical constraints on voltage and
-current. This must be defined in software so that the power domain is always
-operated within specifications.
-
-Consider the following machine (again) :-
-
- Regulator-1 -+-> Regulator-2 --> [Consumer A @ 1.8 - 2.0V]
- |
- +-> [Consumer B @ 3.3V]
-
-This gives us two regulators and two power domains:
-
- Domain 1: Regulator-2, Consumer B.
- Domain 2: Consumer A.
-
-Constraints can be registered by calling :-
-
-int regulator_set_platform_constraints(const char *regulator,
- struct regulation_constraints *constraints);
-
-The example is defined as follows :-
-
-struct regulation_constraints domain_1 = {
- .min_uV = 3300000,
- .max_uV = 3300000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL,
+supply (Regulator-2). The supply regulator is set by the supply_regulator_dev
+field below:-
+
+static struct regulator_init_data regulator2_data = {
+ .supply_regulator_dev = &platform_regulator1_device.dev,
+ .constraints = {
+ .min_uV = 1800000,
+ .max_uV = 2000000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(regulator2_consumers),
+ .consumer_supplies = regulator2_consumers,
};
-struct regulation_constraints domain_2 = {
- .min_uV = 1800000,
- .max_uV = 2000000,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
- .valid_modes_mask = REGULATOR_MODE_NORMAL,
+Finally, the regulator devices must be registered in the usual manner.
+
+static struct platform_device regulator_devices[] = {
+{
+ .name = "regulator",
+ .id = DCDC_1,
+ .dev = {
+ .platform_data = &regulator1_data,
+ },
+},
+{
+ .name = "regulator",
+ .id = DCDC_2,
+ .dev = {
+ .platform_data = &regulator2_data,
+ },
+},
};
+/* register regulator 1 device */
+platform_device_register(&regulator_devices[0]);
-regulator_set_platform_constraints("Regulator-1", &domain_1);
-regulator_set_platform_constraints("Regulator-2", &domain_2);
+/* register regulator 2 device */
+platform_device_register(&regulator_devices[1]);
diff --git a/Documentation/power/regulator/regulator.txt b/Documentation/power/regulator/regulator.txt
index a6905014359..4200accb9bb 100644
--- a/Documentation/power/regulator/regulator.txt
+++ b/Documentation/power/regulator/regulator.txt
@@ -10,11 +10,11 @@ Registration
Drivers can register a regulator by calling :-
-struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
- void *reg_data);
+struct regulator_dev *regulator_register(struct device *dev,
+ struct regulator_desc *regulator_desc);
-This will register the regulators capabilities and operations the regulator
-core. The core does not touch reg_data (private to regulator driver).
+This will register the regulator's capabilities and operations with the
+regulator core.
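+
+For orientation only, a hedged sketch of a driver calling this during
+probe() is shown below (the descriptor contents, names and error handling
+are illustrative, not taken from a real driver):
+
+static struct regulator_desc my_desc = {
+	.name	= "LDO1",
+	.id	= 0,
+	.ops	= &my_regulator_ops,
+	.type	= REGULATOR_VOLTAGE,
+	.owner	= THIS_MODULE,
+};
+
+	rdev = regulator_register(&pdev->dev, &my_desc);
+	if (IS_ERR(rdev))
+		return PTR_ERR(rdev);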
Regulators can be unregistered by calling :-
diff --git a/Documentation/power/s2ram.txt b/Documentation/power/s2ram.txt
index b05f512130e..2ebdc6091ce 100644
--- a/Documentation/power/s2ram.txt
+++ b/Documentation/power/s2ram.txt
@@ -54,3 +54,21 @@ used to run with "radeonfb" (it's an ATI Radeon mobility). It turns out
that "radeonfb" simply cannot resume that device - it tries to set the
PLL's, and it just _hangs_. Using the regular VGA console and letting X
resume it instead works fine.
+
+NOTE
+====
+pm_trace uses the system's Real Time Clock (RTC) to save the magic number.
+The reason for this is that the RTC is the only reliably available piece of
+hardware during resume operations where a value can be set that will
+survive a reboot.
+
+The consequence is that, after a resume (even if it is successful), your
+system clock will have a value corresponding to the magic number instead of
+the correct date/time! It is therefore advisable to use a program like
+ntpdate or rdate to reset the correct date/time from an external time
+source when using this trace option.
+
+As the clock keeps ticking, it is also essential that the reboot be done
+quickly after the resume failure. The trace option does not use the seconds
+or the low-order bits of the minutes of the RTC, but too long a delay will
+corrupt the magic value.
diff --git a/Documentation/powerpc/00-INDEX b/Documentation/powerpc/00-INDEX
index 29d839ce732..e3960b8c868 100644
--- a/Documentation/powerpc/00-INDEX
+++ b/Documentation/powerpc/00-INDEX
@@ -18,10 +18,6 @@ mpc52xx.txt
- Linux 2.6.x on MPC52xx family
mpc52xx-device-tree-bindings.txt
- MPC5200 Device Tree Bindings
-ppc_htab.txt
- - info about the Linux/PPC /proc/ppc_htab entry
-smp.txt
- - use and state info about Linux/PPC on MP machines
sound.txt
- info on sound support under Linux/PPC
zImage_layout.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/83xx-512x-pci.txt b/Documentation/powerpc/dts-bindings/fsl/83xx-512x-pci.txt
new file mode 100644
index 00000000000..35a46536240
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/fsl/83xx-512x-pci.txt
@@ -0,0 +1,40 @@
+* Freescale 83xx and 512x PCI bridges
+
+Freescale 83xx and 512x SoCs include the same PCI bridge core.
+
+83xx/512x specific notes:
+- reg: should contain two address/length tuples.
+  The first is for the internal PCI bridge registers.
+  The second is for the PCI config space access registers.
+
+Example (MPC8313ERDB)
+ pci0: pci@e0008500 {
+ cell-index = <1>;
+ interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ interrupt-map = <
+ /* IDSEL 0x0E -mini PCI */
+ 0x7000 0x0 0x0 0x1 &ipic 18 0x8
+ 0x7000 0x0 0x0 0x2 &ipic 18 0x8
+ 0x7000 0x0 0x0 0x3 &ipic 18 0x8
+ 0x7000 0x0 0x0 0x4 &ipic 18 0x8
+
+ /* IDSEL 0x0F - PCI slot */
+ 0x7800 0x0 0x0 0x1 &ipic 17 0x8
+ 0x7800 0x0 0x0 0x2 &ipic 18 0x8
+ 0x7800 0x0 0x0 0x3 &ipic 17 0x8
+ 0x7800 0x0 0x0 0x4 &ipic 18 0x8>;
+ interrupt-parent = <&ipic>;
+ interrupts = <66 0x8>;
+ bus-range = <0x0 0x0>;
+ ranges = <0x02000000 0x0 0x90000000 0x90000000 0x0 0x10000000
+ 0x42000000 0x0 0x80000000 0x80000000 0x0 0x10000000
+ 0x01000000 0x0 0x00000000 0xe2000000 0x0 0x00100000>;
+ clock-frequency = <66666666>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <0xe0008500 0x100 /* internal registers */
+ 0xe0008300 0x8>; /* config space access registers */
+ compatible = "fsl,mpc8349-pci";
+ device_type = "pci";
+ };
diff --git a/Documentation/powerpc/dts-bindings/fsl/8xxx_gpio.txt b/Documentation/powerpc/dts-bindings/fsl/8xxx_gpio.txt
new file mode 100644
index 00000000000..d015dcec401
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/fsl/8xxx_gpio.txt
@@ -0,0 +1,40 @@
+GPIO controllers on MPC8xxx SoCs
+
+This is for the non-QE/CPM/GUTs GPIO controllers as found on
+8349, 8572, 8610 and compatible.
+
+Every GPIO controller node must have the #gpio-cells property defined;
+this information will be used to translate gpio-specifiers.
+
+Required properties:
+- compatible : "fsl,<CHIP>-gpio" followed by "fsl,mpc8349-gpio" for
+ 83xx, "fsl,mpc8572-gpio" for 85xx and "fsl,mpc8610-gpio" for 86xx.
+- #gpio-cells : Should be two. The first cell is the pin number and the
+ second cell is used to specify optional parameters (currently unused).
+ - interrupts : Interrupt mapping for GPIO IRQ (currently unused).
+ - interrupt-parent : Phandle for the interrupt controller that
+ services interrupts for this device.
+- gpio-controller : Marks the port as GPIO controller.
+
+Example of gpio-controller nodes for a MPC8347 SoC:
+
+ gpio1: gpio-controller@c00 {
+ #gpio-cells = <2>;
+ compatible = "fsl,mpc8347-gpio", "fsl,mpc8349-gpio";
+ reg = <0xc00 0x100>;
+ interrupts = <74 0x8>;
+ interrupt-parent = <&ipic>;
+ gpio-controller;
+ };
+
+ gpio2: gpio-controller@d00 {
+ #gpio-cells = <2>;
+ compatible = "fsl,mpc8347-gpio", "fsl,mpc8349-gpio";
+ reg = <0xd00 0x100>;
+ interrupts = <75 0x8>;
+ interrupt-parent = <&ipic>;
+ gpio-controller;
+ };
+
+See booting-without-of.txt for details of how to specify GPIO
+information for devices.
diff --git a/Documentation/powerpc/dts-bindings/fsl/dma.txt b/Documentation/powerpc/dts-bindings/fsl/dma.txt
index 86826df00e6..cc453110fc4 100644
--- a/Documentation/powerpc/dts-bindings/fsl/dma.txt
+++ b/Documentation/powerpc/dts-bindings/fsl/dma.txt
@@ -20,7 +20,7 @@ Required properties:
- compatible : compatible list, contains 2 entries, first is
"fsl,CHIP-dma-channel", where CHIP is the processor
(mpc8349, mpc8350, etc.) and the second is
- "fsl,elo-dma-channel"
+ "fsl,elo-dma-channel". However, see note below.
- reg : <registers mapping for channel>
- cell-index : dma channel index starts at 0.
@@ -82,7 +82,7 @@ Required properties:
- compatible : compatible list, contains 2 entries, first is
"fsl,CHIP-dma-channel", where CHIP is the processor
(mpc8540, mpc8560, etc.) and the second is
- "fsl,eloplus-dma-channel"
+ "fsl,eloplus-dma-channel". However, see note below.
- cell-index : dma channel index starts at 0.
- reg : <registers mapping for channel>
- interrupts : <interrupt mapping for DMA channel IRQ>
@@ -125,3 +125,12 @@ Example:
interrupts = <17 2>;
};
};
+
+Note on DMA channel compatible properties: The compatible property must say
+"fsl,elo-dma-channel" or "fsl,eloplus-dma-channel" to be used by the Elo DMA
+driver (fsldma). Any DMA channel used by fsldma cannot be used by another
+DMA driver, such as the SSI sound drivers for the MPC8610. Therefore, any DMA
+channel that should be used for another driver should not use
+"fsl,elo-dma-channel" or "fsl,eloplus-dma-channel". For the SSI drivers, for
+example, the compatible property should be "fsl,ssi-dma-channel". See ssi.txt
+for more information.
diff --git a/Documentation/powerpc/dts-bindings/fsl/ssi.txt b/Documentation/powerpc/dts-bindings/fsl/ssi.txt
index d100555d488..a2d963998a6 100644
--- a/Documentation/powerpc/dts-bindings/fsl/ssi.txt
+++ b/Documentation/powerpc/dts-bindings/fsl/ssi.txt
@@ -24,6 +24,12 @@ Required properties:
"rj-master" - r.j., SSI is clock master
"ac97-slave" - AC97 mode, SSI is clock slave
"ac97-master" - AC97 mode, SSI is clock master
+- fsl,playback-dma: phandle to a node for the DMA channel to use for
+ playback of audio. This is typically dictated by SOC
+ design. See the notes below.
+- fsl,capture-dma: phandle to a node for the DMA channel to use for
+ capture (recording) of audio. This is typically dictated
+ by SOC design. See the notes below.
Optional properties:
- codec-handle : phandle to a 'codec' node that defines an audio
@@ -36,3 +42,20 @@ Child 'codec' node required properties:
Child 'codec' node optional properties:
- clock-frequency : The frequency of the input clock, which typically
comes from an on-board dedicated oscillator.
+
+Notes on fsl,playback-dma and fsl,capture-dma:
+
+On SOCs that have an SSI, specific DMA channels are hard-wired for playback
+and capture. On the MPC8610, for example, SSI1 must use DMA channel 0 for
+playback and DMA channel 1 for capture. SSI2 must use DMA channel 2 for
+playback and DMA channel 3 for capture. The developer can choose which
+DMA controller to use, but the channels themselves are hard-wired. The
+purpose of these two properties is to represent this hardware design.
+
+The device tree nodes for the DMA channels that are referenced by
+"fsl,playback-dma" and "fsl,capture-dma" must be marked as compatible with
+"fsl,ssi-dma-channel". The SOC-specific compatible string (e.g.
+"fsl,mpc8610-dma-channel") can remain. If these nodes are left as
+"fsl,elo-dma-channel" or "fsl,eloplus-dma-channel", then the generic Elo DMA
+drivers (fsldma) will attempt to use them, and it will conflict with the
+sound drivers.
diff --git a/Documentation/powerpc/ppc_htab.txt b/Documentation/powerpc/ppc_htab.txt
deleted file mode 100644
index 8b8c7df29fa..00000000000
--- a/Documentation/powerpc/ppc_htab.txt
+++ /dev/null
@@ -1,118 +0,0 @@
- Information about /proc/ppc_htab
-=====================================================================
-
-This document and the related code was written by me (Cort Dougan), please
-email me (cort@fsmlabs.com) if you have questions, comments or corrections.
-
-Last Change: 2.16.98
-
-This entry in the proc directory is readable by all users but only
-writable by root.
-
-The ppc_htab interface is a user level way of accessing the
-performance monitoring registers as well as providing information
-about the PTE hash table.
-
-1. Reading
-
- Reading this file will give you information about the memory management
- hash table that serves as an extended tlb for page translation on the
- powerpc. It will also give you information about performance measurement
- specific to the cpu that you are using.
-
- Explanation of the 604 Performance Monitoring Fields:
- MMCR0 - the current value of the MMCR0 register
- PMC1
- PMC2 - the value of the performance counters and a
- description of what events they are counting
- which are based on MMCR0 bit settings.
- Explanation of the PTE Hash Table fields:
-
- Size - hash table size in Kb.
- Buckets - number of buckets in the table.
- Address - the virtual kernel address of the hash table base.
- Entries - the number of ptes that can be stored in the hash table.
- User/Kernel - how many pte's are in use by the kernel or user at that time.
- Overflows - How many of the entries are in their secondary hash location.
- Percent full - ratio of free pte entries to in use entries.
- Reloads - Count of how many hash table misses have occurred
- that were fixed with a reload from the linux tables.
- Should always be 0 on 603 based machines.
- Non-error Misses - Count of how many hash table misses have occurred
- that were completed with the creation of a pte in the linux
- tables with a call to do_page_fault().
- Error Misses - Number of misses due to errors such as bad address
- and permission violations. This includes kernel access of
- bad user addresses that are fixed up by the trap handler.
-
- Note that calculation of the data displayed from /proc/ppc_htab takes
- a long time and spends a great deal of time in the kernel. It would
- be quite hard on performance to read this file constantly. In time
- there may be a counter in the kernel that allows successive reads from
- this file only after a given amount of time has passed to reduce the
- possibility of a user slowing the system by reading this file.
-
-2. Writing
-
- Writing to the ppc_htab allows you to change the characteristics of
- the powerpc PTE hash table and setup performance monitoring.
-
- Resizing the PTE hash table is not enabled right now due to many
- complications with moving the hash table, rehashing the entries
- and many many SMP issues that would have to be dealt with.
-
- Write options to ppc_htab:
-
- - To set the size of the hash table to 64Kb:
-
- echo 'size 64' > /proc/ppc_htab
-
- The size must be a multiple of 64 and must be greater than or equal to
- 64.
-
- - To turn off performance monitoring:
-
- echo 'off' > /proc/ppc_htab
-
- - To reset the counters without changing what they're counting:
-
- echo 'reset' > /proc/ppc_htab
-
- Note that counting will continue after the reset if it is enabled.
-
- - To count only events in user mode or only in kernel mode:
-
- echo 'user' > /proc/ppc_htab
- ...or...
- echo 'kernel' > /proc/ppc_htab
-
- Note that these two options are exclusive of one another and the
- lack of either of these options counts user and kernel.
- Using 'reset' and 'off' reset these flags.
-
- - The 604 has 2 performance counters which can each count events from
- a specific set of events. These sets are disjoint so it is not
- possible to count _any_ combination of 2 events. One event can
- be counted by PMC1 and one by PMC2.
-
- To start counting a particular event use:
-
- echo 'event' > /proc/ppc_htab
-
- and choose from these events:
-
- PMC1
- ----
- 'ic miss' - instruction cache misses
- 'dtlb' - data tlb misses (not hash table misses)
-
- PMC2
- ----
- 'dc miss' - data cache misses
- 'itlb' - instruction tlb misses (not hash table misses)
- 'load miss time' - cycles to complete a load miss
-
-3. Bugs
-
- The PMC1 and PMC2 counters can overflow and give no indication of that
- in /proc/ppc_htab.
diff --git a/Documentation/powerpc/smp.txt b/Documentation/powerpc/smp.txt
deleted file mode 100644
index 5b581b849ff..00000000000
--- a/Documentation/powerpc/smp.txt
+++ /dev/null
@@ -1,34 +0,0 @@
- Information about Linux/PPC SMP mode
-=====================================================================
-
-This document and the related code was written by me
-(Cort Dougan, cort@fsmlabs.com) please email me if you have questions,
-comments or corrections.
-
-Last Change: 3.31.99
-
-If you want to help by writing code or testing different hardware please
-email me!
-
-1. State of Supported Hardware
-
- PowerSurge Architecture - tested on UMAX s900, Apple 9600
- The second processor on this machine boots up just fine and
- enters its idle loop. Hopefully a completely working SMP kernel
- on this machine will be done shortly.
-
- The code makes the assumption of only two processors. The changes
- necessary to work with any number would not be overly difficult but
- I don't have any machines with >2 processors so it's not high on my
- list of priorities. If anyone else would like do to the work email
- me and I can point out the places that need changed. If you have >2
- processors and don't want to add support yourself let me know and I
- can take a look into it.
-
- BeBox
- BeBox support hasn't been added to the 2.1.X kernels from 2.0.X
- but work is being done and SMP support for BeBox is in the works.
-
- CHRP
- CHRP SMP works and is fairly solid. It's been tested on the IBM F50
- with 4 processors for quite some time now.
diff --git a/Documentation/rfkill.txt b/Documentation/rfkill.txt
index 6fcb3060dec..b65f0799df4 100644
--- a/Documentation/rfkill.txt
+++ b/Documentation/rfkill.txt
@@ -341,6 +341,8 @@ key that does nothing by itself, as well as any hot key that is type-specific
3.1 Guidelines for wireless device drivers
------------------------------------------
+(in this text, rfkill->foo means the foo field of struct rfkill).
+
1. Each independent transmitter in a wireless device (usually there is only one
transmitter per device) should have a SINGLE rfkill class attached to it.
@@ -363,10 +365,32 @@ This rule exists because users of the rfkill subsystem expect to get (and set,
when possible) the overall transmitter rfkill state, not of a particular rfkill
line.
-5. During suspend, the rfkill class will attempt to soft-block the radio
-through a call to rfkill->toggle_radio, and will try to restore its previous
-state during resume. After a rfkill class is suspended, it will *not* call
-rfkill->toggle_radio until it is resumed.
+5. The wireless device driver MUST NOT leave the transmitter enabled during
+suspend and hibernation unless:
+
+ 5.1. The transmitter has to be enabled for some sort of functionality
+ like wake-on-wireless-packet or autonomous packet forwarding in a mesh
+ network, and that functionality is enabled for this suspend/hibernation
+ cycle.
+
+AND
+
+ 5.2. The device was not in a user-requested BLOCKED state before
+ the suspend (i.e. the driver must NOT unblock a device, not even
+ to support wake-on-wireless-packet or remain in the mesh).
+
+In other words, there is absolutely no allowed scenario where a driver can
+automatically take action to unblock an rfkill controller (obviously, this deals
+with scenarios where soft-blocking or both soft and hard blocking is happening.
+Scenarios where hardware rfkill lines are the only ones blocking the
+transmitter are outside of this rule, since the wireless device driver does not
+control its input hardware rfkill lines in the first place).
+
+6. During resume, rfkill will try to restore its previous state.
+
+7. After a rfkill class is suspended, it will *not* call rfkill->toggle_radio
+until it is resumed.
+
Example of a WLAN wireless driver connected to the rfkill subsystem:
--------------------------------------------------------------------
diff --git a/Documentation/s390/CommonIO b/Documentation/s390/CommonIO
index bf0baa19ec2..339207d11d9 100644
--- a/Documentation/s390/CommonIO
+++ b/Documentation/s390/CommonIO
@@ -70,13 +70,19 @@ Command line parameters
Note: While already known devices can be added to the list of devices to be
ignored, there will be no effect on then. However, if such a device
- disappears and then reappears, it will then be ignored.
+ disappears and then reappears, it will then be ignored. To make
+ known devices go away, you need the "purge" command (see below).
For example,
"echo add 0.0.a000-0.0.accc, 0.0.af00-0.0.afff > /proc/cio_ignore"
will add 0.0.a000-0.0.accc and 0.0.af00-0.0.afff to the list of ignored
devices.
+ You can remove already known but now ignored devices via
+ "echo purge > /proc/cio_ignore"
+ All devices ignored but still registered and not online (= not in use)
+ will be deregistered and thus removed from the system.
+
The devices can be specified either by bus id (0.x.abcd) or, for 2.4 backward
compatibility, by the device number in hexadecimal (0xabcd or abcd). Device
numbers given as 0xabcd will be interpreted as 0.0.abcd.
@@ -98,8 +104,7 @@ debugfs entries
handling).
- /sys/kernel/debug/s390dbf/cio_msg/sprintf
- Various debug messages from the common I/O-layer, including messages
- printed when cio_msg=yes.
+ Various debug messages from the common I/O-layer.
- /sys/kernel/debug/s390dbf/cio_trace/hex_ascii
Logs the calling of functions in the common I/O-layer and, if applicable,
diff --git a/Documentation/scheduler/sched-design-CFS.txt b/Documentation/scheduler/sched-design-CFS.txt
index 88bcb876733..9d8eb553884 100644
--- a/Documentation/scheduler/sched-design-CFS.txt
+++ b/Documentation/scheduler/sched-design-CFS.txt
@@ -1,151 +1,242 @@
+ =============
+ CFS Scheduler
+ =============
-This is the CFS scheduler.
-
-80% of CFS's design can be summed up in a single sentence: CFS basically
-models an "ideal, precise multi-tasking CPU" on real hardware.
-
-"Ideal multi-tasking CPU" is a (non-existent :-)) CPU that has 100%
-physical power and which can run each task at precise equal speed, in
-parallel, each at 1/nr_running speed. For example: if there are 2 tasks
-running then it runs each at 50% physical power - totally in parallel.
-
-On real hardware, we can run only a single task at once, so while that
-one task runs, the other tasks that are waiting for the CPU are at a
-disadvantage - the current task gets an unfair amount of CPU time. In
-CFS this fairness imbalance is expressed and tracked via the per-task
-p->wait_runtime (nanosec-unit) value. "wait_runtime" is the amount of
-time the task should now run on the CPU for it to become completely fair
-and balanced.
-
-( small detail: on 'ideal' hardware, the p->wait_runtime value would
- always be zero - no task would ever get 'out of balance' from the
- 'ideal' share of CPU time. )
-
-CFS's task picking logic is based on this p->wait_runtime value and it
-is thus very simple: it always tries to run the task with the largest
-p->wait_runtime value. In other words, CFS tries to run the task with
-the 'gravest need' for more CPU time. So CFS always tries to split up
-CPU time between runnable tasks as close to 'ideal multitasking
-hardware' as possible.
-
-Most of the rest of CFS's design just falls out of this really simple
-concept, with a few add-on embellishments like nice levels,
-multiprocessing and various algorithm variants to recognize sleepers.
-
-In practice it works like this: the system runs a task a bit, and when
-the task schedules (or a scheduler tick happens) the task's CPU usage is
-'accounted for': the (small) time it just spent using the physical CPU
-is deducted from p->wait_runtime. [minus the 'fair share' it would have
-gotten anyway]. Once p->wait_runtime gets low enough so that another
-task becomes the 'leftmost task' of the time-ordered rbtree it maintains
-(plus a small amount of 'granularity' distance relative to the leftmost
-task so that we do not over-schedule tasks and trash the cache) then the
-new leftmost task is picked and the current task is preempted.
-
-The rq->fair_clock value tracks the 'CPU time a runnable task would have
-fairly gotten, had it been runnable during that time'. So by using
-rq->fair_clock values we can accurately timestamp and measure the
-'expected CPU time' a task should have gotten. All runnable tasks are
-sorted in the rbtree by the "rq->fair_clock - p->wait_runtime" key, and
-CFS picks the 'leftmost' task and sticks to it. As the system progresses
-forwards, newly woken tasks are put into the tree more and more to the
-right - slowly but surely giving a chance for every task to become the
-'leftmost task' and thus get on the CPU within a deterministic amount of
-time.
-
-Some implementation details:
-
- - the introduction of Scheduling Classes: an extensible hierarchy of
- scheduler modules. These modules encapsulate scheduling policy
- details and are handled by the scheduler core without the core
- code assuming about them too much.
-
- - sched_fair.c implements the 'CFS desktop scheduler': it is a
- replacement for the vanilla scheduler's SCHED_OTHER interactivity
- code.
-
- I'd like to give credit to Con Kolivas for the general approach here:
- he has proven via RSDL/SD that 'fair scheduling' is possible and that
- it results in better desktop scheduling. Kudos Con!
-
- The CFS patch uses a completely different approach and implementation
- from RSDL/SD. My goal was to make CFS's interactivity quality exceed
- that of RSDL/SD, which is a high standard to meet :-) Testing
- feedback is welcome to decide this one way or another. [ and, in any
- case, all of SD's logic could be added via a kernel/sched_sd.c module
- as well, if Con is interested in such an approach. ]
-
- CFS's design is quite radical: it does not use runqueues, it uses a
- time-ordered rbtree to build a 'timeline' of future task execution,
- and thus has no 'array switch' artifacts (by which both the vanilla
- scheduler and RSDL/SD are affected).
-
- CFS uses nanosecond granularity accounting and does not rely on any
- jiffies or other HZ detail. Thus the CFS scheduler has no notion of
- 'timeslices' and has no heuristics whatsoever. There is only one
- central tunable (you have to switch on CONFIG_SCHED_DEBUG):
-
- /proc/sys/kernel/sched_granularity_ns
-
- which can be used to tune the scheduler from 'desktop' (low
- latencies) to 'server' (good batching) workloads. It defaults to a
- setting suitable for desktop workloads. SCHED_BATCH is handled by the
- CFS scheduler module too.
-
- Due to its design, the CFS scheduler is not prone to any of the
- 'attacks' that exist today against the heuristics of the stock
- scheduler: fiftyp.c, thud.c, chew.c, ring-test.c, massive_intr.c all
- work fine and do not impact interactivity and produce the expected
- behavior.
-
- the CFS scheduler has a much stronger handling of nice levels and
- SCHED_BATCH: both types of workloads should be isolated much more
- agressively than under the vanilla scheduler.
-
- ( another detail: due to nanosec accounting and timeline sorting,
- sched_yield() support is very simple under CFS, and in fact under
- CFS sched_yield() behaves much better than under any other
- scheduler i have tested so far. )
-
- - sched_rt.c implements SCHED_FIFO and SCHED_RR semantics, in a simpler
- way than the vanilla scheduler does. It uses 100 runqueues (for all
- 100 RT priority levels, instead of 140 in the vanilla scheduler)
- and it needs no expired array.
-
- - reworked/sanitized SMP load-balancing: the runqueue-walking
- assumptions are gone from the load-balancing code now, and
- iterators of the scheduling modules are used. The balancing code got
- quite a bit simpler as a result.
-
-
-Group scheduler extension to CFS
-================================
-
-Normally the scheduler operates on individual tasks and strives to provide
-fair CPU time to each task. Sometimes, it may be desirable to group tasks
-and provide fair CPU time to each such task group. For example, it may
-be desirable to first provide fair CPU time to each user on the system
-and then to each task belonging to a user.
-
-CONFIG_FAIR_GROUP_SCHED strives to achieve exactly that. It lets
-SCHED_NORMAL/BATCH tasks be be grouped and divides CPU time fairly among such
-groups. At present, there are two (mutually exclusive) mechanisms to group
-tasks for CPU bandwidth control purpose:
-
- - Based on user id (CONFIG_FAIR_USER_SCHED)
- In this option, tasks are grouped according to their user id.
- - Based on "cgroup" pseudo filesystem (CONFIG_FAIR_CGROUP_SCHED)
- This options lets the administrator create arbitrary groups
- of tasks, using the "cgroup" pseudo filesystem. See
- Documentation/cgroups.txt for more information about this
- filesystem.
-Only one of these options to group tasks can be chosen and not both.
+1. OVERVIEW
+
+CFS stands for "Completely Fair Scheduler," and is the new "desktop" process
+scheduler implemented by Ingo Molnar and merged in Linux 2.6.23. It is the
+replacement for the previous vanilla scheduler's SCHED_OTHER interactivity
+code.
+
+80% of CFS's design can be summed up in a single sentence: CFS basically models
+an "ideal, precise multi-tasking CPU" on real hardware.
+
+"Ideal multi-tasking CPU" is a (non-existent :-)) CPU that has 100% physical
+power and which can run each task at precise equal speed, in parallel, each at
+1/nr_running speed. For example: if there are 2 tasks running, then it runs
+each at 50% physical power --- i.e., actually in parallel.
+
+On real hardware, we can run only a single task at once, so we have to
+introduce the concept of "virtual runtime." The virtual runtime of a task
+specifies when its next timeslice would start execution on the ideal
+multi-tasking CPU described above. In practice, the virtual runtime of a task
+is its actual runtime normalized to the total number of running tasks.
+
+
+
+2. A FEW IMPLEMENTATION DETAILS
+
+In CFS the virtual runtime is expressed and tracked via the per-task
+p->se.vruntime (nanosec-unit) value. This way, it's possible to accurately
+timestamp and measure the "expected CPU time" a task should have gotten.
+
+[ small detail: on "ideal" hardware, at any time all tasks would have the same
+ p->se.vruntime value --- i.e., tasks would execute simultaneously and no task
+ would ever get "out of balance" from the "ideal" share of CPU time. ]
+
+CFS's task picking logic is based on this p->se.vruntime value and it is thus
+very simple: it always tries to run the task with the smallest p->se.vruntime
+value (i.e., the task which executed least so far). CFS always tries to split
+up CPU time between runnable tasks as close to "ideal multitasking hardware" as
+possible.
+
+Most of the rest of CFS's design just falls out of this really simple concept,
+with a few add-on embellishments like nice levels, multiprocessing and various
+algorithm variants to recognize sleepers.
+
+
+
+3. THE RBTREE
+
+CFS's design is quite radical: it does not use the old data structures for the
+runqueues, but it uses a time-ordered rbtree to build a "timeline" of future
+task execution, and thus has no "array switch" artifacts (by which both the
+previous vanilla scheduler and RSDL/SD are affected).
+
+CFS also maintains the rq->cfs.min_vruntime value, which is a monotonic
+increasing value tracking the smallest vruntime among all tasks in the
+runqueue. The total amount of work done by the system is tracked using
+min_vruntime; that value is used to place newly activated entities on the left
+side of the tree as much as possible.
+
+The total number of running tasks in the runqueue is accounted through the
+rq->cfs.load value, which is the sum of the weights of the tasks queued on the
+runqueue.
+
+CFS maintains a time-ordered rbtree, where all runnable tasks are sorted by the
+p->se.vruntime key (there is a subtraction using rq->cfs.min_vruntime to
+account for possible wraparounds). CFS picks the "leftmost" task from this
+tree and sticks to it.
+As the system progresses forwards, the executed tasks are put into the tree
+more and more to the right --- slowly but surely giving a chance for every task
+to become the "leftmost task" and thus get on the CPU within a deterministic
+amount of time.
+
+Summing up, CFS works like this: it runs a task a bit, and when the task
+schedules (or a scheduler tick happens) the task's CPU usage is "accounted
+for": the (small) time it just spent using the physical CPU is added to
+p->se.vruntime. Once p->se.vruntime gets high enough so that another task
+becomes the "leftmost task" of the time-ordered rbtree it maintains (plus a
+small amount of "granularity" distance relative to the leftmost task so that we
+do not over-schedule tasks and trash the cache), then the new leftmost task is
+picked and the current task is preempted.
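+
+For orientation only, the accounting just described amounts roughly to the
+following (conceptually what update_curr() in kernel/sched_fair.c does;
+nice-level weighting is simplified here):
+
+	delta_exec = now - curr->exec_start;
+	curr->sum_exec_runtime += delta_exec;
+	/* heavier (higher-priority) tasks accrue vruntime more slowly */
+	curr->vruntime += delta_exec * NICE_0_LOAD / curr->load.weight;
+	curr->exec_start = now;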
+
+
+
+4. SOME FEATURES OF CFS
+
+CFS uses nanosecond granularity accounting and does not rely on any jiffies or
+other HZ detail. Thus the CFS scheduler has no notion of "timeslices" in the
+way the previous scheduler had, and has no heuristics whatsoever. There is
+only one central tunable (you have to switch on CONFIG_SCHED_DEBUG):
+
+ /proc/sys/kernel/sched_granularity_ns
+
+which can be used to tune the scheduler from "desktop" (i.e., low latencies) to
+"server" (i.e., good batching) workloads. It defaults to a setting suitable
+for desktop workloads. SCHED_BATCH is handled by the CFS scheduler module too.
+
+Due to its design, the CFS scheduler is not prone to any of the "attacks" that
+exist today against the heuristics of the stock scheduler: fiftyp.c, thud.c,
+chew.c, ring-test.c, massive_intr.c all work fine and do not impact
+interactivity and produce the expected behavior.
+
+The CFS scheduler has a much stronger handling of nice levels and SCHED_BATCH
+than the previous vanilla scheduler: both types of workloads are isolated much
+more aggressively.
+
+SMP load-balancing has been reworked/sanitized: the runqueue-walking
+assumptions are gone from the load-balancing code now, and iterators of the
+scheduling modules are used. The balancing code got quite a bit simpler as a
+result.
+
+
+
+5. SCHEDULING POLICIES
+
+CFS implements three scheduling policies:
+
+ - SCHED_NORMAL (traditionally called SCHED_OTHER): The scheduling
+ policy that is used for regular tasks.
+
+ - SCHED_BATCH: Does not preempt nearly as often as regular tasks
+ would, thereby allowing tasks to run longer and make better use of
+ caches but at the cost of interactivity. This is well suited for
+ batch jobs.
+
+ - SCHED_IDLE: This is even weaker than nice 19, but it's not a true
+ idle timer scheduler in order to avoid getting into priority
+ inversion problems which would deadlock the machine.
+
+SCHED_FIFO/_RR are implemented in sched_rt.c and are as specified by
+POSIX.
+
+The command chrt from util-linux-ng 2.13.1.1 can set all of these except
+SCHED_IDLE.
-Group scheduler tunables:
-When CONFIG_FAIR_USER_SCHED is defined, a directory is created in sysfs for
-each new user and a "cpu_share" file is added in that directory.
+
+6. SCHEDULING CLASSES
+
+The new CFS scheduler has been designed in such a way as to introduce "Scheduling
+Classes," an extensible hierarchy of scheduler modules. These modules
+encapsulate scheduling policy details and are handled by the scheduler core
+without the core code assuming too much about them.
+
+sched_fair.c implements the CFS scheduler described above.
+
+sched_rt.c implements SCHED_FIFO and SCHED_RR semantics, in a simpler way than
+the previous vanilla scheduler did. It uses 100 runqueues (for all 100 RT
+priority levels, instead of 140 in the previous scheduler) and it needs no
+expired array.
+
+Scheduling classes are implemented through the sched_class structure, which
+contains hooks to functions that must be called whenever an interesting event
+occurs.
+
+This is the (partial) list of the hooks:
+
+ - enqueue_task(...)
+
+ Called when a task enters a runnable state.
+ It puts the scheduling entity (task) into the red-black tree and
+ increments the nr_running variable.
+
+ - dequeue_task(...)
+
+ When a task is no longer runnable, this function is called to remove the
+ corresponding scheduling entity from the red-black tree. It decrements
+ the nr_running variable.
+
+ - yield_task(...)
+
+ This function is basically just a dequeue followed by an enqueue, unless the
+ compat_yield sysctl is turned on; in that case, it places the scheduling
+ entity at the right-most end of the red-black tree.
+
+ - check_preempt_curr(...)
+
+ This function checks if a task that entered the runnable state should
+ preempt the currently running task.
+
+ - pick_next_task(...)
+
+ This function chooses the most appropriate task eligible to run next.
+
+ - set_curr_task(...)
+
+ This function is called when a task changes its scheduling class or changes
+ its task group.
+
+ - task_tick(...)
+
+ This function is mostly called from time tick functions; it might lead to
+ a process switch. This drives the running preemption.
+
+ - task_new(...)
+
+ The core scheduler gives the scheduling module an opportunity to manage new
+ task startup. The CFS scheduling module uses it for group scheduling, while
+ the scheduling module for a real-time task does not use it.
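+
+For orientation, this is roughly how the CFS module wires these hooks up
+in kernel/sched_fair.c (abridged; exact field and function names can
+differ between kernel versions):
+
+	static const struct sched_class fair_sched_class = {
+		.enqueue_task		= enqueue_task_fair,
+		.dequeue_task		= dequeue_task_fair,
+		.yield_task		= yield_task_fair,
+		.check_preempt_curr	= check_preempt_wakeup,
+		.pick_next_task		= pick_next_task_fair,
+		.task_tick		= task_tick_fair,
+		.task_new		= task_new_fair,
+		/* ... */
+	};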
+
+
+
+7. GROUP SCHEDULER EXTENSIONS TO CFS
+
+Normally, the scheduler operates on individual tasks and strives to provide
+fair CPU time to each task. Sometimes, it may be desirable to group tasks and
+provide fair CPU time to each such task group. For example, it may be
+desirable to first provide fair CPU time to each user on the system and then to
+each task belonging to a user.
+
+CONFIG_GROUP_SCHED strives to achieve exactly that. It lets tasks be
+grouped and divides CPU time fairly among such groups.
+
+CONFIG_RT_GROUP_SCHED permits grouping of real-time (i.e., SCHED_FIFO and
+SCHED_RR) tasks.
+
+CONFIG_FAIR_GROUP_SCHED permits grouping of CFS (i.e., SCHED_NORMAL and
+SCHED_BATCH) tasks.
+
+At present, there are two (mutually exclusive) mechanisms to group tasks for
+CPU bandwidth control purposes:
+
+ - Based on user id (CONFIG_USER_SCHED)
+
+ With this option, tasks are grouped according to their user id.
+
+ - Based on "cgroup" pseudo filesystem (CONFIG_CGROUP_SCHED)
+
+ This option needs CONFIG_CGROUPS to be defined, and lets the administrator
+ create arbitrary groups of tasks, using the "cgroup" pseudo filesystem. See
+ Documentation/cgroups.txt for more information about this filesystem.
+
+Only one of these options to group tasks can be chosen and not both.
+
+When CONFIG_USER_SCHED is defined, a directory is created in sysfs for each new
+user and a "cpu_share" file is added in that directory.
# cd /sys/kernel/uids
# cat 512/cpu_share # Display user 512's CPU share
@@ -155,16 +246,14 @@ each new user and a "cpu_share" file is added in that directory.
2048
#
-CPU bandwidth between two users are divided in the ratio of their CPU shares.
-For ex: if you would like user "root" to get twice the bandwidth of user
-"guest", then set the cpu_share for both the users such that "root"'s
-cpu_share is twice "guest"'s cpu_share
-
+CPU bandwidth between two users is divided in the ratio of their CPU shares.
+For example: if you would like user "root" to get twice the bandwidth of user
+"guest," then set the cpu_share for both the users such that "root"'s cpu_share
+is twice "guest"'s cpu_share.
-When CONFIG_FAIR_CGROUP_SCHED is defined, a "cpu.shares" file is created
-for each group created using the pseudo filesystem. See example steps
-below to create task groups and modify their CPU share using the "cgroups"
-pseudo filesystem
+When CONFIG_CGROUP_SCHED is defined, a "cpu.shares" file is created for each
+group created using the pseudo filesystem. See example steps below to create
+task groups and modify their CPU share using the "cgroups" pseudo filesystem.
# mkdir /dev/cpuctl
# mount -t cgroup -ocpu none /dev/cpuctl
diff --git a/Documentation/scsi/ChangeLog.megaraid b/Documentation/scsi/ChangeLog.megaraid
index 37796fe45bd..eaa4801f2ce 100644
--- a/Documentation/scsi/ChangeLog.megaraid
+++ b/Documentation/scsi/ChangeLog.megaraid
@@ -409,7 +409,7 @@ i. Function reordering so that inline functions are defined before they
megaraid_mbox_prepare_pthru, megaraid_mbox_prepare_epthru,
megaraid_busywait_mbox
- - Andrew Morton <akpm@osdl.org>, 08.19.2004
+ - Andrew Morton, 08.19.2004
linux-scsi mailing list
"Something else to clean up after inclusion: every instance of an
@@ -471,13 +471,13 @@ vi. Add support for 64-bit applications. Current drivers assume only
vii. Move the function declarations for the management module from
megaraid_mm.h to megaraid_mm.c
- - Andrew Morton <akpm@osdl.org>, 08.19.2004
+ - Andrew Morton, 08.19.2004
linux-scsi mailing list
viii. Change default values for MEGARAID_NEWGEN, MEGARAID_MM, and
MEGARAID_MAILBOX to 'n' in Kconfig.megaraid
- - Andrew Morton <akpm@osdl.org>, 08.19.2004
+ - Andrew Morton, 08.19.2004
linux-scsi mailing list
ix. replace udelay with msleep
diff --git a/Documentation/scsi/scsi_fc_transport.txt b/Documentation/scsi/scsi_fc_transport.txt
index 75143f0c23b..38d324d62b2 100644
--- a/Documentation/scsi/scsi_fc_transport.txt
+++ b/Documentation/scsi/scsi_fc_transport.txt
@@ -436,6 +436,42 @@ Other:
was updated to remove all vports for the fc_host as well.
+Transport supplied functions
+----------------------------
+
+The following functions are supplied by the FC-transport for use by LLDs.
+
+ fc_vport_create - create a vport
+ fc_vport_terminate - detach and remove a vport
+
+Details:
+
+/**
+ * fc_vport_create - Admin App or LLDD requests creation of a vport
+ * @shost: scsi host the virtual port is connected to.
+ * @ids: The world wide names, FC4 port roles, etc for
+ * the virtual port.
+ *
+ * Notes:
+ * This routine assumes no locks are held on entry.
+ */
+struct fc_vport *
+fc_vport_create(struct Scsi_Host *shost, struct fc_vport_identifiers *ids)
+
+/**
+ * fc_vport_terminate - Admin App or LLDD requests termination of a vport
+ * @vport: fc_vport to be terminated
+ *
+ * Calls the LLDD vport_delete() function, then deallocates and removes
+ * the vport from the shost and object tree.
+ *
+ * Notes:
+ * This routine assumes no locks are held on entry.
+ */
+int
+fc_vport_terminate(struct fc_vport *vport)
+
+
Credits
=======
The following people have contributed to this document:
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
index b117e42a616..e0e54a27fc1 100644
--- a/Documentation/sound/alsa/ALSA-Configuration.txt
+++ b/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -746,8 +746,10 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
Module snd-hda-intel
--------------------
- Module for Intel HD Audio (ICH6, ICH6M, ESB2, ICH7, ICH8),
- ATI SB450, SB600, RS600,
+ Module for Intel HD Audio (ICH6, ICH6M, ESB2, ICH7, ICH8, ICH9, ICH10,
+ PCH, SCH),
+ ATI SB450, SB600, R600, RS600, RS690, RS780, RV610, RV620,
+ RV630, RV635, RV670, RV770,
VIA VT8251/VT8237A,
SIS966, ULI M5461
@@ -807,6 +809,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
ALC260
hp HP machines
hp-3013 HP machines (3013-variant)
+ hp-dc7600 HP DC7600
fujitsu Fujitsu S7020
acer Acer TravelMate
will Will laptops (PB V7900)
@@ -828,8 +831,11 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
hippo Hippo (ATI) with jack detection, Sony UX-90s
hippo_1 Hippo (Benq) with jack detection
sony-assamd Sony ASSAMD
+ toshiba-s06 Toshiba S06
+ toshiba-rx1 Toshiba RX1
ultra Samsung Q1 Ultra Vista model
lenovo-3000 Lenovo 3000 y410
+ nec NEC Versa S9100
basic fixed pin assignment w/o SPDIF
auto auto-config reading BIOS (default)
@@ -838,6 +844,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
3stack 3-stack model
toshiba Toshiba A205
acer Acer laptops
+ acer-aspire Acer Aspire One
dell Dell OEM laptops (Vostro 1200)
zepto Zepto laptops
test for testing/debugging purpose, almost all controls can
@@ -847,6 +854,9 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
ALC269
basic Basic preset
+ quanta Quanta FL1
+ eeepc-p703 ASUS Eeepc P703 P900A
+ eeepc-p901 ASUS Eeepc P901 S101
ALC662/663
3stack-dig 3-stack (2-channel) with SPDIF
@@ -856,10 +866,17 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
lenovo-101e Lenovo laptop
eeepc-p701 ASUS Eeepc P701
eeepc-ep20 ASUS Eeepc EP20
+ ecs ECS/Foxconn mobo
m51va ASUS M51VA
g71v ASUS G71V
h13 ASUS H13
g50v ASUS G50V
+ asus-mode1 ASUS
+ asus-mode2 ASUS
+ asus-mode3 ASUS
+ asus-mode4 ASUS
+ asus-mode5 ASUS
+ asus-mode6 ASUS
auto auto-config reading BIOS (default)
ALC882/885
@@ -891,12 +908,14 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
lenovo-101e Lenovo 101E
lenovo-nb0763 Lenovo NB0763
lenovo-ms7195-dig Lenovo MS7195
+ lenovo-sky Lenovo Sky
haier-w66 Haier W66
3stack-hp HP machines with 3stack (Lucknow, Samba boards)
6stack-dell Dell machines with 6stack (Inspiron 530)
mitac Mitac 8252D
clevo-m720 Clevo M720 laptop series
fujitsu-pi2515 Fujitsu AMILO Pi2515
+ 3stack-6ch-intel Intel DG33* boards
auto auto-config reading BIOS (default)
ALC861/660
@@ -929,7 +948,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
allout 5-jack in back, 2-jack in front, SPDIF out
auto auto-config reading BIOS (default)
- AD1882
+ AD1882 / AD1882A
3stack 3-stack mode (default)
6stack 6-stack mode
@@ -1079,7 +1098,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
register value without FIFO size correction as the current
DMA pointer. position_fix=2 will make the driver to use
the position buffer instead of reading SD_LPIB register.
- (Usually SD_LPLIB register is more accurate than the
+ (Usually SD_LPIB register is more accurate than the
position buffer.)
NB: If you get many "azx_get_response timeout" messages at
@@ -1166,6 +1185,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
* Event Electronics, EZ8
* Digigram VX442
* Lionstracs, Mediastaton
+ * Terrasoniq TS 88
model - Use the given board model, one of the following:
delta1010, dio2496, delta66, delta44, audiophile, delta410,
@@ -1200,7 +1220,10 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
* TerraTec Phase 22
* TerraTec Phase 28
* AudioTrak Prodigy 7.1
- * AudioTrak Prodigy 7.1LT
+ * AudioTrak Prodigy 7.1 LT
+ * AudioTrak Prodigy 7.1 XT
+ * AudioTrak Prodigy 7.1 HIFI
+ * AudioTrak Prodigy 7.1 HD2
* AudioTrak Prodigy 192
* Pontis MS300
* Albatron K8X800 Pro II
@@ -1211,12 +1234,16 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
* Shuttle SN25P
* Onkyo SE-90PCI
* Onkyo SE-200PCI
+ * ESI Juli@
+ * Hercules Fortissimo IV
+ * EGO-SYS WaveTerminal 192M
model - Use the given board model, one of the following:
revo51, revo71, amp2000, prodigy71, prodigy71lt,
- prodigy192, aureon51, aureon71, universe, ap192,
- k8x800, phase22, phase28, ms300, av710, se200pci,
- se90pci
+ prodigy71xt, prodigy71hifi, prodigyhd2, prodigy192,
+ juli, aureon51, aureon71, universe, ap192, k8x800,
+ phase22, phase28, ms300, av710, se200pci, se90pci,
+ fortissimo4, sn25p, WT192M
This module supports multiple cards and autoprobe.
@@ -1255,7 +1282,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
Module for AC'97 motherboards from Intel and compatibles.
* Intel i810/810E, i815, i820, i830, i84x, MX440
- ICH5, ICH6, ICH7, ESB2
+ ICH5, ICH6, ICH7, 6300ESB, ESB2
* SiS 7012 (SiS 735)
* NVidia NForce, NForce2, NForce3, MCP04, CK804
CK8, CK8S, MCP501
@@ -1951,6 +1978,8 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
* CHIC True Sound 4Dwave
* Shark Predator4D-PCI
* Jaton SonicWave 4D
+ * SiS SI7018 PCI Audio
+ * Hoontech SoundTrack Digital 4DWave NX
pcm_channels - max channels (voices) reserved for PCM
wavetable_size - max wavetable size in kB (4-?kb)
@@ -1966,12 +1995,25 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
vid - Vendor ID for the device (optional)
pid - Product ID for the device (optional)
+ nrpacks - Max. number of packets per URB (default: 8)
+ async_unlink - Use async unlink mode (default: yes)
device_setup - Device specific magic number (optional)
- Influence depends on the device
- Default: 0x0000
+ ignore_ctl_error - Ignore any errors from the USB controller regarding
+ the mixer interface (default: no)
This module supports multiple devices, autoprobe and hotplugging.
+ NB: The nrpacks parameter can be modified dynamically via sysfs.
+ Don't set the value above 20; changes made via sysfs are not
+ sanity-checked.
+ NB: async_unlink=0 can cause an Oops. It remains only for
+ debugging purposes (if any).
+ NB: ignore_ctl_error=1 may help when you get an error while accessing
+ a mixer element, such as URB error -22. This happens with some
+ buggy USB devices or controllers.
+
Module snd-usb-caiaq
--------------------
@@ -2078,7 +2120,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
-------------------
Module for sound cards based on the Asus AV100/AV200 chips,
- i.e., Xonar D1, DX, D2 and D2X.
+ i.e., Xonar D1, DX, D2, D2X and HDAV1.3 (Deluxe).
This module supports autoprobe and multiple cards.
diff --git a/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl b/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl
index e13c4e67029..87a7c07ab65 100644
--- a/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl
+++ b/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl
@@ -5073,8 +5073,7 @@ struct _snd_pcm_runtime {
with <constant>SNDRV_DMA_TYPE_CONTINUOUS</constant> type and the
<function>snd_dma_continuous_data(GFP_KERNEL)</function> device pointer,
where <constant>GFP_KERNEL</constant> is the kernel allocation flag to
- use. For the SBUS, <constant>SNDRV_DMA_TYPE_SBUS</constant> and
- <function>snd_dma_sbus_data(sbus_dev)</function> are used instead.
+ use.
For the PCI scatter-gather buffers, use
<constant>SNDRV_DMA_TYPE_DEV_SG</constant> with
<function>snd_dma_pci_data(pci)</function>
@@ -6135,44 +6134,58 @@ struct _snd_pcm_runtime {
</para>
</section>
- <section id="useful-functions-snd-assert">
- <title><function>snd_assert()</function></title>
+ <section id="useful-functions-snd-bug">
+ <title><function>snd_BUG()</function></title>
<para>
- <function>snd_assert()</function> macro is similar with the
- normal <function>assert()</function> macro. For example,
+ It shows the <computeroutput>BUG?</computeroutput> message and
+ stack trace at that point, just like <function>snd_BUG_ON()</function> does.
+ It's useful to show that a fatal error happened there.
+ </para>
+ <para>
+ When no debug flag is set, this macro is ignored.
+ </para>
+ </section>
+
+ <section id="useful-functions-snd-bug-on">
+ <title><function>snd_BUG_ON()</function></title>
+ <para>
+ The <function>snd_BUG_ON()</function> macro is similar to the
+ <function>WARN_ON()</function> macro. For example,
<informalexample>
<programlisting>
<![CDATA[
- snd_assert(pointer != NULL, return -EINVAL);
+ snd_BUG_ON(!pointer);
]]>
</programlisting>
</informalexample>
- </para>
- <para>
- The first argument is the expression to evaluate, and the
- second argument is the action if it fails. When
- <constant>CONFIG_SND_DEBUG</constant>, is set, it will show an
- error message such as <computeroutput>BUG? (xxx)</computeroutput>
- together with stack trace.
- </para>
- <para>
- When no debug flag is set, this macro is ignored.
- </para>
- </section>
+ or it can be used as the condition,
+ <informalexample>
+ <programlisting>
+<![CDATA[
+ if (snd_BUG_ON(non_zero_is_bug))
+ return -EINVAL;
+]]>
+ </programlisting>
+ </informalexample>
- <section id="useful-functions-snd-bug">
- <title><function>snd_BUG()</function></title>
- <para>
- It shows the <computeroutput>BUG?</computeroutput> message and
- stack trace as well as <function>snd_assert</function> at the point.
- It's useful to show that a fatal error happens there.
</para>
+
<para>
- When no debug flag is set, this macro is ignored.
+ The macro takes a conditional expression to evaluate.
+ When <constant>CONFIG_SND_DEBUG</constant> is set, the
+ expression is actually evaluated. If it's non-zero, it shows
+ a warning message such as
+ <computeroutput>BUG? (xxx)</computeroutput>,
+ normally followed by a stack trace. It returns the evaluated
+ value.
+ When <constant>CONFIG_SND_DEBUG</constant> is not set, this
+ macro always returns zero.
</para>
+
</section>
+
</chapter>
diff --git a/Documentation/sound/alsa/soc/dapm.txt b/Documentation/sound/alsa/soc/dapm.txt
index b2ed6983f40..46f9684d0b2 100644
--- a/Documentation/sound/alsa/soc/dapm.txt
+++ b/Documentation/sound/alsa/soc/dapm.txt
@@ -135,11 +135,7 @@ when the Mic is inserted:-
static int spitz_mic_bias(struct snd_soc_dapm_widget* w, int event)
{
- if(SND_SOC_DAPM_EVENT_ON(event))
- set_scoop_gpio(&spitzscoop2_device.dev, SPITZ_SCP2_MIC_BIAS);
- else
- reset_scoop_gpio(&spitzscoop2_device.dev, SPITZ_SCP2_MIC_BIAS);
-
+ gpio_set_value(SPITZ_GPIO_MIC_BIAS, SND_SOC_DAPM_EVENT_ON(event));
return 0;
}
@@ -269,11 +265,7 @@ powered only when the spk is in use.
/* turn speaker amplifier on/off depending on use */
static int corgi_amp_event(struct snd_soc_dapm_widget *w, int event)
{
- if (SND_SOC_DAPM_EVENT_ON(event))
- set_scoop_gpio(&corgiscoop_device.dev, CORGI_SCP_APM_ON);
- else
- reset_scoop_gpio(&corgiscoop_device.dev, CORGI_SCP_APM_ON);
-
+ gpio_set_value(CORGI_GPIO_APM_ON, SND_SOC_DAPM_EVENT_ON(event));
return 0;
}
diff --git a/Documentation/sparc/sbus_drivers.txt b/Documentation/sparc/sbus_drivers.txt
deleted file mode 100644
index eb1e28ad882..00000000000
--- a/Documentation/sparc/sbus_drivers.txt
+++ /dev/null
@@ -1,309 +0,0 @@
-
- Writing SBUS Drivers
-
- David S. Miller (davem@redhat.com)
-
- The SBUS driver interfaces of the Linux kernel have been
-revamped completely for 2.4.x for several reasons. Foremost were
-performance and complexity concerns. This document details these
-new interfaces and how they are used to write an SBUS device driver.
-
- SBUS drivers need to include <asm/sbus.h> to get access
-to functions and structures described here.
-
- Probing and Detection
-
- Each SBUS device inside the machine is described by a
-structure called "struct sbus_dev". Likewise, each SBUS bus
-found in the system is described by a "struct sbus_bus". For
-each SBUS bus, the devices underneath are hung in a tree-like
-fashion off of the bus structure.
-
- The SBUS device structure contains enough information
-for you to implement your device probing algorithm and obtain
-the bits necessary to run your device. The most commonly
-used members of this structure, and their typical usage,
-will be detailed below.
-
- Here is a piece of skeleton code for performing a device
-probe in an SBUS driver under Linux:
-
- static int __devinit mydevice_probe_one(struct sbus_dev *sdev)
- {
- struct mysdevice *mp = kzalloc(sizeof(*mp), GFP_KERNEL);
-
- if (!mp)
- return -ENODEV;
-
- ...
- dev_set_drvdata(&sdev->ofdev.dev, mp);
- return 0;
- ...
- }
-
- static int __devinit mydevice_probe(struct of_device *dev,
- const struct of_device_id *match)
- {
- struct sbus_dev *sdev = to_sbus_device(&dev->dev);
-
- return mydevice_probe_one(sdev);
- }
-
- static int __devexit mydevice_remove(struct of_device *dev)
- {
- struct sbus_dev *sdev = to_sbus_device(&dev->dev);
- struct mydevice *mp = dev_get_drvdata(&dev->dev);
-
- return mydevice_remove_one(sdev, mp);
- }
-
- static struct of_device_id mydevice_match[] = {
- {
- .name = "mydevice",
- },
- {},
- };
-
- MODULE_DEVICE_TABLE(of, mydevice_match);
-
- static struct of_platform_driver mydevice_driver = {
- .match_table = mydevice_match,
- .probe = mydevice_probe,
- .remove = __devexit_p(mydevice_remove),
- .driver = {
- .name = "mydevice",
- },
- };
-
- static int __init mydevice_init(void)
- {
- return of_register_driver(&mydevice_driver, &sbus_bus_type);
- }
-
- static void __exit mydevice_exit(void)
- {
- of_unregister_driver(&mydevice_driver);
- }
-
- module_init(mydevice_init);
- module_exit(mydevice_exit);
-
- The mydevice_match table is a series of entries which
-describes what SBUS devices your driver is meant for. In the
-simplest case you specify a string for the 'name' field. Every
-SBUS device with a 'name' property matching your string will
-be passed one-by-one to your .probe method.
-
- You should store away your device private state structure
-pointer in the drvdata area so that you can retrieve it later on
-in your .remove method.
-
- Any memory allocated, registers mapped, IRQs registered,
-etc. must be undone by your .remove method so that all resources
-of your device are released by the time it returns.
-
- You should _NOT_ use the for_each_sbus(), for_each_sbusdev(),
-and for_all_sbusdev() interfaces. They are deprecated, will be
-removed, and no new driver should reference them ever.
-
- Mapping and Accessing I/O Registers
-
- Each SBUS device structure contains an array of descriptors
-which describe each register set. We abuse struct resource for that.
-They each correspond to the "reg" properties provided by the OBP firmware.
-
- Before you can access your device's registers you must map
-them. And later if you wish to shutdown your driver (for module
-unload or similar) you must unmap them. You must treat them as
-a resource, which you allocate (map) before using and free up
-(unmap) when you are done with it.
-
- The mapping information is stored in an opaque value
-typed as an "unsigned long". This is the type of the return value
-of the mapping interface, and the arguments to the unmapping
-interface. Let's say you want to map the first set of registers.
-Perhaps part of your driver software state structure looks like:
-
- struct mydevice {
- unsigned long control_regs;
- ...
- struct sbus_dev *sdev;
- ...
- };
-
- At initialization time you then use the sbus_ioremap
-interface to map in your registers, like so:
-
- static void init_one_mydevice(struct sbus_dev *sdev)
- {
- struct mydevice *mp;
- ...
-
- mp->control_regs = sbus_ioremap(&sdev->resource[0], 0,
- CONTROL_REGS_SIZE, "mydevice regs");
- if (!mp->control_regs) {
- /* Failure, cleanup and return. */
- }
- }
-
- Second argument to sbus_ioremap is an offset for
-cranky devices with broken OBP PROM. The sbus_ioremap uses only
-a start address and flags from the resource structure.
-Therefore it is possible to use the same resource to map
-several sets of registers or even to fabricate a resource
-structure if driver gets physical address from some private place.
-This practice is discouraged though. Use whatever OBP PROM
-provided to you.
-
- And here is how you might unmap these registers later at
-driver shutdown or module unload time, using the sbus_iounmap
-interface:
-
- static void mydevice_unmap_regs(struct mydevice *mp)
- {
- sbus_iounmap(mp->control_regs, CONTROL_REGS_SIZE);
- }
-
- Finally, to actually access your registers there are 6
-interface routines at your disposal. Accesses are byte (8 bit),
-word (16 bit), or longword (32 bit) sized. Here they are:
-
- u8 sbus_readb(unsigned long reg) /* read byte */
- u16 sbus_readw(unsigned long reg) /* read word */
- u32 sbus_readl(unsigned long reg) /* read longword */
- void sbus_writeb(u8 value, unsigned long reg) /* write byte */
- void sbus_writew(u16 value, unsigned long reg) /* write word */
- void sbus_writel(u32 value, unsigned long reg) /* write longword */
-
- So, let's say your device has a control register of some sort
-at offset zero. The following might implement resetting your device:
-
- #define CONTROL 0x00UL
-
- #define CONTROL_RESET 0x00000001 /* Reset hardware */
-
- static void mydevice_reset(struct mydevice *mp)
- {
- sbus_writel(CONTROL_RESET, mp->regs + CONTROL);
- }
-
- Or perhaps there is a data port register at an offset of
-16 bytes which allows you to read bytes from a fifo in the device:
-
- #define DATA 0x10UL
-
- static u8 mydevice_get_byte(struct mydevice *mp)
- {
- return sbus_readb(mp->regs + DATA);
- }
-
- It's pretty straightforward, and clueful readers may have
-noticed that these interfaces mimick the PCI interfaces of the
-Linux kernel. This was not by accident.
-
- WARNING:
-
- DO NOT try to treat these opaque register mapping
- values as a memory mapped pointer to some structure
- which you can dereference.
-
- It may be memory mapped, it may not be. In fact it
- could be a physical address, or it could be the time
- of day xor'd with 0xdeadbeef. :-)
-
- Whatever it is, it's an implementation detail. The
- interface was done this way to shield the driver
- author from such complexities.
-
- Doing DVMA
-
- SBUS devices can perform DMA transactions in a way similar
-to PCI but dissimilar to ISA, e.g. DMA masters supply address.
-In contrast to PCI, however, that address (a bus address) is
-translated by IOMMU before a memory access is performed and therefore
-it is virtual. Sun calls this procedure DVMA.
-
- Linux supports two styles of using SBUS DVMA: "consistent memory"
-and "streaming DVMA". CPU view of consistent memory chunk is, well,
-consistent with a view of a device. Think of it as an uncached memory.
-Typically this way of doing DVMA is not very fast and drivers use it
-mostly for control blocks or queues. On some CPUs we cannot flush or
-invalidate individual pages or cache lines and doing explicit flushing
-over ever little byte in every control block would be wasteful.
-
-Streaming DVMA is a preferred way to transfer large amounts of data.
-This process works in the following way:
-1. a CPU stops accessing a certain part of memory,
- flushes its caches covering that memory;
-2. a device does DVMA accesses, then posts an interrupt;
-3. CPU invalidates its caches and starts to access the memory.
-
-A single streaming DVMA operation can touch several discontiguous
-regions of a virtual bus address space. This is called a scatter-gather
-DVMA.
-
-[TBD: Why do not we neither Solaris attempt to map disjoint pages
-into a single virtual chunk with the help of IOMMU, so that non SG
-DVMA masters would do SG? It'd be very helpful for RAID.]
-
- In order to perform a consistent DVMA a driver does something
-like the following:
-
- char *mem; /* Address in the CPU space */
- u32 busa; /* Address in the SBus space */
-
- mem = (char *) sbus_alloc_consistent(sdev, MYMEMSIZE, &busa);
-
- Then mem is used when CPU accesses this memory and u32
-is fed to the device so that it can do DVMA. This is typically
-done with an sbus_writel() into some device register.
-
- Do not forget to free the DVMA resources once you are done:
-
- sbus_free_consistent(sdev, MYMEMSIZE, mem, busa);
-
- Streaming DVMA is more interesting. First you allocate some
-memory suitable for it or pin down some user pages. Then it all works
-like this:
-
- char *mem = argumen1;
- unsigned int size = argument2;
- u32 busa; /* Address in the SBus space */
-
- *mem = 1; /* CPU can access */
- busa = sbus_map_single(sdev, mem, size);
- if (busa == 0) .......
-
- /* Tell the device to use busa here */
- /* CPU cannot access the memory without sbus_dma_sync_single() */
-
- sbus_unmap_single(sdev, busa, size);
- if (*mem == 0) .... /* CPU can access again */
-
- It is possible to retain mappings and ask the device to
-access data again and again without calling sbus_unmap_single.
-However, CPU caches must be invalidated with sbus_dma_sync_single
-before such access.
-
-[TBD but what about writeback caches here... do we have any?]
-
- There is an equivalent set of functions doing the same thing
-only with several memory segments at once for devices capable of
-scatter-gather transfers. Use the Source, Luke.
-
- Examples
-
- drivers/net/sunhme.c
- This is a complicated driver which illustrates many concepts
-discussed above and plus it handles both PCI and SBUS boards.
-
- drivers/scsi/esp.c
- Check it out for scatter-gather DVMA.
-
- drivers/sbus/char/bpp.c
- A non-DVMA device.
-
- drivers/net/sunlance.c
- Lance driver abuses consistent mappings for data transfer.
-It is a nifty trick which we do not particularly recommend...
-Just check it out and know that it's legal.
diff --git a/Documentation/spi/pxa2xx b/Documentation/spi/pxa2xx
index bbe8dee681a..6bb916d57c9 100644
--- a/Documentation/spi/pxa2xx
+++ b/Documentation/spi/pxa2xx
@@ -96,7 +96,7 @@ Each slave device attached to the PXA must provide slave specific configuration
information via the structure "pxa2xx_spi_chip" found in
"arch/arm/mach-pxa/include/mach/pxa2xx_spi.h". The pxa2xx_spi master controller driver
will use the configuration whenever the driver communicates with the slave
-device.
+device. All fields are optional.
struct pxa2xx_spi_chip {
u8 tx_threshold;
@@ -112,14 +112,17 @@ used to configure the SSP hardware fifo. These fields are critical to the
performance of pxa2xx_spi driver and misconfiguration will result in rx
fifo overruns (especially in PIO mode transfers). Good default values are
- .tx_threshold = 12,
- .rx_threshold = 4,
+ .tx_threshold = 8,
+ .rx_threshold = 8,
+
+The range is 1 to 16 where zero indicates "use default".
The "pxa2xx_spi_chip.dma_burst_size" field is used to configure PXA2xx DMA
engine and is related to the "spi_device.bits_per_word" field. Read and understand
the PXA2xx "Developer Manual" sections on the DMA controller and SSP Controllers
to determine the correct value. An SSP configured for byte-wide transfers would
-use a value of 8.
+use a value of 8. The driver will determine a reasonable default if
+dma_burst_size == 0.
The "pxa2xx_spi_chip.timeout" fields is used to efficiently handle
trailing bytes in the SSP receiver fifo. The correct value for this field is
@@ -137,7 +140,13 @@ function for asserting/deasserting a slave device chip select. If the field is
NULL, the pxa2xx_spi master controller driver assumes that the SSP port is
configured to use SSPFRM instead.
-NSSP SALVE SAMPLE
+NOTE: the SPI driver cannot control the chip select if SSPFRM is used, so the
+chip select is dropped after each spi_transfer. Most devices need the chip
+select asserted around the complete message. Use SSPFRM as a GPIO (through
+cs_control) to accommodate these chips, as in the sketch below.
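+
+For illustration only (not taken from the kernel tree), a cs_control callback
+along these lines could drive such a GPIO. The GPIO number and polarity here
+are hypothetical; the NSSP SLAVE SAMPLE below shows how cs_control is hooked
+up through pxa2xx_spi_chip:
+
+#include <linux/gpio.h>
+#include <mach/pxa2xx_spi.h>	/* PXA2XX_CS_ASSERT, struct pxa2xx_spi_chip */
+
+#define MYDEV_CS_GPIO	14	/* hypothetical chip-select GPIO */
+
+static void mydev_cs_control(u32 command)
+{
+	/* Assume an active-low chip select wired to a GPIO. */
+	gpio_set_value(MYDEV_CS_GPIO, !(command & PXA2XX_CS_ASSERT));
+}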
+
+
+NSSP SLAVE SAMPLE
-----------------
The pxa2xx_spi_chip structure is passed to the pxa2xx_spi driver in the
"spi_board_info.controller_data" field. Below is a sample configuration using
@@ -206,18 +215,21 @@ static void __init streetracer_init(void)
DMA and PIO I/O Support
-----------------------
-The pxa2xx_spi driver support both DMA and interrupt driven PIO message
-transfers. The driver defaults to PIO mode and DMA transfers must enabled by
-setting the "enable_dma" flag in the "pxa2xx_spi_master" structure and
-ensuring that the "pxa2xx_spi_chip.dma_burst_size" field is non-zero. The DMA
-mode support both coherent and stream based DMA mappings.
+The pxa2xx_spi driver supports both DMA and interrupt driven PIO message
+transfers. The driver defaults to PIO mode and DMA transfers must be enabled
+by setting the "enable_dma" flag in the "pxa2xx_spi_master" structure. The DMA
+mode supports both coherent and stream based DMA mappings.
The following logic is used to determine the type of I/O to be used on
a per "spi_transfer" basis:
-if !enable_dma or dma_burst_size == 0 then
+if !enable_dma then
always use PIO transfers
+if spi_message.len > 8191 then
+ print "rate limited" warning
+ use PIO transfers
+
if spi_message.is_dma_mapped and rx_dma_buf != 0 and tx_dma_buf != 0 then
use coherent DMA mode
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index e1ff0d920a5..bde799e0659 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -369,4 +369,5 @@ can be ORed together:
2 - A module was force loaded by insmod -f.
Set by modutils >= 2.4.9 and module-init-tools.
4 - Unsafe SMP processors: SMP with CPUs not designed for SMP.
+ 64 - A module from drivers/staging was loaded.
diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt
index 5ce0952aa06..10a0263ebb3 100644
--- a/Documentation/sysrq.txt
+++ b/Documentation/sysrq.txt
@@ -95,7 +95,9 @@ On all - write a character to /proc/sysrq-trigger. e.g.:
'p' - Will dump the current registers and flags to your console.
-'q' - Will dump a list of all running timers.
+'q' - Will dump per CPU lists of all armed hrtimers (but NOT regular
+ timer_list timers) and detailed information about all
+ clockevent devices.
'r' - Turns off keyboard raw mode and sets it to XLATE.
diff --git a/Documentation/timers/00-INDEX b/Documentation/timers/00-INDEX
new file mode 100644
index 00000000000..397dc35e132
--- /dev/null
+++ b/Documentation/timers/00-INDEX
@@ -0,0 +1,10 @@
+00-INDEX
+ - this file
+highres.txt
+ - High resolution timers and dynamic ticks design notes
+hpet.txt
+ - High Precision Event Timer Driver for Linux
+hrtimers.txt
+ - subsystem for high-resolution kernel timers
+timer_stats.txt
+ - timer usage statistics
diff --git a/Documentation/hpet.txt b/Documentation/timers/hpet.txt
index 6ad52d9dad6..e7c09abcfab 100644
--- a/Documentation/hpet.txt
+++ b/Documentation/timers/hpet.txt
@@ -1,21 +1,32 @@
High Precision Event Timer Driver for Linux
-The High Precision Event Timer (HPET) hardware is the future replacement
-for the 8254 and Real Time Clock (RTC) periodic timer functionality.
-Each HPET can have up to 32 timers. It is possible to configure the
-first two timers as legacy replacements for 8254 and RTC periodic timers.
-A specification done by Intel and Microsoft can be found at
-<http://www.intel.com/technology/architecture/hpetspec.htm>.
+The High Precision Event Timer (HPET) hardware follows a specification
+by Intel and Microsoft which can be found at
+
+ http://www.intel.com/technology/architecture/hpetspec.htm
+
+Each HPET has one fixed-rate counter (at 10+ MHz, hence "High Precision")
+and up to 32 comparators. Normally three or more comparators are provided,
+each of which can generate oneshot interrupts and at least one of which has
+additional hardware to support periodic interrupts. The comparators are
+also called "timers", which can be misleading since usually timers are
+independent of each other ... these share a counter, complicating resets.
+
+HPET devices can support two interrupt routing modes. In one mode, the
+comparators are additional interrupt sources with no particular system
+role. Many x86 BIOS writers don't route HPET interrupts at all, which
+prevents use of that mode. They support the other "legacy replacement"
+mode where the first two comparators block interrupts from 8254 timers
+and from the RTC.
The driver supports detection of HPET driver allocation and initialization
of the HPET before the driver module_init routine is called. This enables
platform code which uses timer 0 or 1 as the main timer to intercept HPET
initialization. An example of this initialization can be found in
-arch/i386/kernel/time_hpet.c.
+arch/x86/kernel/hpet.c.
-The driver provides two APIs which are very similar to the API found in
-the rtc.c driver. There is a user space API and a kernel space API.
-An example user space program is provided below.
+The driver provides a userspace API which resembles the API found in the
+RTC driver framework. An example user space program is provided below.
#include <stdio.h>
#include <stdlib.h>
@@ -286,15 +297,3 @@ out:
return;
}
-
-The kernel API has three interfaces exported from the driver:
-
- hpet_register(struct hpet_task *tp, int periodic)
- hpet_unregister(struct hpet_task *tp)
- hpet_control(struct hpet_task *tp, unsigned int cmd, unsigned long arg)
-
-The kernel module using this interface fills in the ht_func and ht_data
-members of the hpet_task structure before calling hpet_register.
-hpet_control simply vectors to the hpet_ioctl routine and has the same
-commands and respective arguments as the user API. hpet_unregister
-is used to terminate usage of the HPET timer reserved by hpet_register.
diff --git a/Documentation/tracepoints.txt b/Documentation/tracepoints.txt
new file mode 100644
index 00000000000..5d354e16749
--- /dev/null
+++ b/Documentation/tracepoints.txt
@@ -0,0 +1,101 @@
+ Using the Linux Kernel Tracepoints
+
+ Mathieu Desnoyers
+
+
+This document introduces Linux Kernel Tracepoints and their use. It provides
+examples of how to insert tracepoints in the kernel and connect probe functions
+to them and provides some examples of probe functions.
+
+
+* Purpose of tracepoints
+
+A tracepoint placed in code provides a hook to call a function (probe) that you
+can provide at runtime. A tracepoint can be "on" (a probe is connected to it) or
+"off" (no probe is attached). When a tracepoint is "off" it has no effect,
+except for adding a tiny time penalty (checking a condition for a branch) and
+space penalty (adding a few bytes for the function call at the end of the
+instrumented function and adding a data structure in a separate section). When a
+tracepoint is "on", the function you provide is called each time the tracepoint
+is executed, in the execution context of the caller. When the function provided
+ends its execution, it returns to the caller (continuing from the tracepoint
+site).
+
+You can put tracepoints at important locations in the code. They are
+lightweight hooks that can pass an arbitrary number of parameters,
+whose prototypes are described in a tracepoint declaration placed in a header
+file.
+
+They can be used for tracing and performance accounting.
+
+
+* Usage
+
+Two elements are required for tracepoints :
+
+- A tracepoint definition, placed in a header file.
+- The tracepoint statement, in C code.
+
+In order to use tracepoints, you should include linux/tracepoint.h.
+
+In include/trace/subsys.h :
+
+#include <linux/tracepoint.h>
+
+DEFINE_TRACE(subsys_eventname,
+ TPPROTO(int firstarg, struct task_struct *p),
+ TPARGS(firstarg, p));
+
+In subsys/file.c (where the tracing statement must be added) :
+
+#include <trace/subsys.h>
+
+void somefct(void)
+{
+ ...
+ trace_subsys_eventname(arg, task);
+ ...
+}
+
+Where :
+- subsys_eventname is an identifier unique to your event
+ - subsys is the name of your subsystem.
+ - eventname is the name of the event to trace.
+- TPPROTO(int firstarg, struct task_struct *p) is the prototype of the function
+ called by this tracepoint.
+- TPARGS(firstarg, p) are the parameter names, same as found in the prototype.
+
+Connecting a function (probe) to a tracepoint is done by providing a probe
+(function to call) for the specific tracepoint through
+register_trace_subsys_eventname(). Removing a probe is done through
+unregister_trace_subsys_eventname(); it will remove the probe, making sure there is no
+caller left using the probe when it returns. Probe removal is preempt-safe
+because preemption is disabled around the probe call. See the "Probe example"
+section below for a sample probe module.
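+
+For illustration, a minimal probe module might look roughly like the sketch
+below, assuming the subsys_eventname tracepoint declared above; the complete,
+authoritative examples live under samples/tracepoints/.
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <trace/subsys.h>
+
+/* Called in the execution context of the tracepoint caller. */
+static void probe_subsys_eventname(int firstarg, struct task_struct *p)
+{
+	printk(KERN_INFO "subsys_eventname: arg %d from %s\n",
+	       firstarg, p->comm);
+}
+
+static int __init probe_init(void)
+{
+	/* Fails if this probe is already registered. */
+	return register_trace_subsys_eventname(probe_subsys_eventname);
+}
+
+static void __exit probe_exit(void)
+{
+	unregister_trace_subsys_eventname(probe_subsys_eventname);
+}
+
+module_init(probe_init);
+module_exit(probe_exit);
+MODULE_LICENSE("GPL");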
+
+The tracepoint mechanism supports inserting multiple instances of the same
+tracepoint, but a single definition must be made of a given tracepoint name
+across the whole kernel to make sure no type conflict will occur. Name mangling
+of the tracepoints is done using the prototypes to make sure typing is correct.
+Verification of probe type correctness is done at the registration site by the
+compiler. Tracepoints can be put in inline functions, inlined static functions,
+and unrolled loops as well as regular functions.
+
+The naming scheme "subsys_event" is suggested here as a convention intended
+to limit collisions. Tracepoint names are global to the kernel: they are
+considered as being the same whether they are in the core kernel image or in
+modules.
+
+
+* Probe / tracepoint example
+
+See the example provided in samples/tracepoints/src
+
+Compile them with your kernel.
+
+Run, as root :
+modprobe tracepoint-example (insmod order is not important)
+modprobe tracepoint-probe-example
+cat /proc/tracepoint-example (returns an expected error)
+rmmod tracepoint-example tracepoint-probe-example
+dmesg
diff --git a/Documentation/tracers/mmiotrace.txt b/Documentation/tracers/mmiotrace.txt
index a4afb560a45..5bbbe209622 100644
--- a/Documentation/tracers/mmiotrace.txt
+++ b/Documentation/tracers/mmiotrace.txt
@@ -36,7 +36,7 @@ $ mount -t debugfs debugfs /debug
$ echo mmiotrace > /debug/tracing/current_tracer
$ cat /debug/tracing/trace_pipe > mydump.txt &
Start X or whatever.
-$ echo "X is up" > /debug/tracing/marker
+$ echo "X is up" > /debug/tracing/trace_marker
$ echo none > /debug/tracing/current_tracer
Check for lost events.
@@ -59,9 +59,8 @@ The 'cat' process should stay running (sleeping) in the background.
Load the driver you want to trace and use it. Mmiotrace will only catch MMIO
accesses to areas that are ioremapped while mmiotrace is active.
-[Unimplemented feature:]
During tracing you can place comments (markers) into the trace by
-$ echo "X is up" > /debug/tracing/marker
+$ echo "X is up" > /debug/tracing/trace_marker
This makes it easier to see which part of the (huge) trace corresponds to
which action. It is recommended to place descriptive markers about what you
do.
diff --git a/Documentation/usb/anchors.txt b/Documentation/usb/anchors.txt
index 5e6b64c20d2..6f24f566955 100644
--- a/Documentation/usb/anchors.txt
+++ b/Documentation/usb/anchors.txt
@@ -52,6 +52,11 @@ Therefore no guarantee is made that the URBs have been unlinked when
the call returns. They may be unlinked later but will be unlinked in
finite time.
+usb_scuttle_anchored_urbs()
+---------------------------
+
+All URBs of an anchor are unanchored en masse.
+
usb_wait_anchor_empty_timeout()
-------------------------------
@@ -59,4 +64,16 @@ This function waits for all URBs associated with an anchor to finish
or a timeout, whichever comes first. Its return value will tell you
whether the timeout was reached.
+usb_anchor_empty()
+------------------
+
+Returns true if no URBs are associated with an anchor. Locking
+is the caller's responsibility.
+
+usb_get_from_anchor()
+---------------------
+Returns the oldest anchored URB of an anchor. The URB is unanchored
+and returned with a reference. As you may mix URBs to several
+destinations in one anchor, you have no guarantee that the chronologically
+first submitted URB is returned. \ No newline at end of file
diff --git a/Documentation/usb/misc_usbsevseg.txt b/Documentation/usb/misc_usbsevseg.txt
new file mode 100644
index 00000000000..0f6be4f9930
--- /dev/null
+++ b/Documentation/usb/misc_usbsevseg.txt
@@ -0,0 +1,46 @@
+USB 7-Segment Numeric Display
+Manufactured by Delcom Engineering
+
+Device Information
+------------------
+USB VENDOR_ID 0x0fc5
+USB PRODUCT_ID 0x1227
+Both the 6 character and 8 character displays have the same PRODUCT_ID,
+and according to Delcom Engineering no queryable information
+can be obtained from the device to tell them apart.
+
+Device Modes
+------------
+By default, the driver assumes the display is only 6 characters.
+The mode for 6 characters is:
+ MSB 0x06; LSB 0x3f
+For the 8 character display:
+ MSB 0x08; LSB 0xff
+The device can accept "text" either in raw, hex, or ascii textmode.
+raw controls each segment manually,
+hex expects a value between 0-15 per character,
+ascii expects a value between '0'-'9' and 'A'-'F'.
+The default is ascii.
+
+Device Operation
+----------------
+1. Turn on the device:
+ echo 1 > /sys/bus/usb/.../powered
+2. Set the device's mode:
+ echo $mode_msb > /sys/bus/usb/.../mode_msb
+ echo $mode_lsb > /sys/bus/usb/.../mode_lsb
+3. Set the textmode:
+ echo $textmode > /sys/bus/usb/.../textmode
+4. Set the text (for example):
+ echo "123ABC" > /sys/bus/usb/.../text (ascii)
+ echo "A1B2" > /sys/bus/usb/.../text (ascii)
+ echo -ne "\x01\x02\x03" > /sys/bus/usb/.../text (hex)
+5. Set the decimal places.
+ The device has either 6 or 8 decimal points.
+ To set the nth decimal place, calculate 10 ** n
+ and echo it into /sys/bus/usb/.../decimals
+ To set multiple decimal points, sum up each power.
+ For example, to set the 0th and 3rd decimal place
+ echo 1001 > /sys/bus/usb/.../decimals
+
+
diff --git a/Documentation/usb/power-management.txt b/Documentation/usb/power-management.txt
index 9d31140e3f5..e48ea1d5101 100644
--- a/Documentation/usb/power-management.txt
+++ b/Documentation/usb/power-management.txt
@@ -350,12 +350,12 @@ without holding the mutex.
There also are a couple of utility routines drivers can use:
- usb_autopm_enable() sets pm_usage_cnt to 1 and then calls
- usb_autopm_set_interface(), which will attempt an autoresume.
-
- usb_autopm_disable() sets pm_usage_cnt to 0 and then calls
+ usb_autopm_enable() sets pm_usage_cnt to 0 and then calls
usb_autopm_set_interface(), which will attempt an autosuspend.
+ usb_autopm_disable() sets pm_usage_cnt to 1 and then calls
+ usb_autopm_set_interface(), which will attempt an autoresume.
+
The conventional usage pattern is that a driver calls
usb_autopm_get_interface() in its open routine and
usb_autopm_put_interface() in its close or release routine. But
diff --git a/Documentation/video4linux/CARDLIST.au0828 b/Documentation/video4linux/CARDLIST.au0828
index aa05e5bb22f..d5cb4ea287b 100644
--- a/Documentation/video4linux/CARDLIST.au0828
+++ b/Documentation/video4linux/CARDLIST.au0828
@@ -1,5 +1,5 @@
0 -> Unknown board (au0828)
- 1 -> Hauppauge HVR950Q (au0828) [2040:7200,2040:7210,2040:7217,2040:721b,2040:721f,2040:7280,0fd9:0008]
+ 1 -> Hauppauge HVR950Q (au0828) [2040:7200,2040:7210,2040:7217,2040:721b,2040:721e,2040:721f,2040:7280,0fd9:0008]
2 -> Hauppauge HVR850 (au0828) [2040:7240]
3 -> DViCO FusionHDTV USB (au0828) [0fe9:d620]
4 -> Hauppauge HVR950Q rev xxF8 (au0828) [2040:7201,2040:7211,2040:7281]
diff --git a/Documentation/video4linux/CARDLIST.bttv b/Documentation/video4linux/CARDLIST.bttv
index f32efb6fb12..60ba6683603 100644
--- a/Documentation/video4linux/CARDLIST.bttv
+++ b/Documentation/video4linux/CARDLIST.bttv
@@ -150,3 +150,4 @@
149 -> Typhoon TV-Tuner PCI (50684)
150 -> Geovision GV-600 [008a:763c]
151 -> Kozumi KTV-01C
+152 -> Encore ENL TV-FM-2 [1000:1801]
diff --git a/Documentation/video4linux/CARDLIST.cx23885 b/Documentation/video4linux/CARDLIST.cx23885
index f0e613ba55b..64823ccacd6 100644
--- a/Documentation/video4linux/CARDLIST.cx23885
+++ b/Documentation/video4linux/CARDLIST.cx23885
@@ -9,3 +9,5 @@
8 -> Hauppauge WinTV-HVR1700 [0070:8101]
9 -> Hauppauge WinTV-HVR1400 [0070:8010]
10 -> DViCO FusionHDTV7 Dual Express [18ac:d618]
+ 11 -> DViCO FusionHDTV DVB-T Dual Express [18ac:db78]
+ 12 -> Leadtek Winfast PxDVR3200 H [107d:6681]
diff --git a/Documentation/video4linux/CARDLIST.cx88 b/Documentation/video4linux/CARDLIST.cx88
index 7cf5685d364..a5227e308f4 100644
--- a/Documentation/video4linux/CARDLIST.cx88
+++ b/Documentation/video4linux/CARDLIST.cx88
@@ -66,3 +66,11 @@
65 -> DViCO FusionHDTV 7 Gold [18ac:d610]
66 -> Prolink Pixelview MPEG 8000GT [1554:4935]
67 -> Kworld PlusTV HD PCI 120 (ATSC 120) [17de:08c1]
+ 68 -> Hauppauge WinTV-HVR4000 DVB-S/S2/T/Hybrid [0070:6900,0070:6904,0070:6902]
+ 69 -> Hauppauge WinTV-HVR4000(Lite) DVB-S/S2 [0070:6905,0070:6906]
+ 70 -> TeVii S460 DVB-S/S2 [d460:9022]
+ 71 -> Omicom SS4 DVB-S/S2 PCI [A044:2011]
+ 72 -> TBS 8920 DVB-S/S2 [8920:8888]
+ 73 -> TeVii S420 DVB-S [d420:9022]
+ 74 -> Prolink Pixelview Global Extreme [1554:4976]
+ 75 -> PROF 7300 DVB-S/S2 [B033:3033]
diff --git a/Documentation/video4linux/CARDLIST.em28xx b/Documentation/video4linux/CARDLIST.em28xx
index 53449cb99b1..187cc48d092 100644
--- a/Documentation/video4linux/CARDLIST.em28xx
+++ b/Documentation/video4linux/CARDLIST.em28xx
@@ -1,5 +1,5 @@
0 -> Unknown EM2800 video grabber (em2800) [eb1a:2800]
- 1 -> Unknown EM2750/28xx video grabber (em2820/em2840) [eb1a:2820,eb1a:2821,eb1a:2860,eb1a:2861,eb1a:2870,eb1a:2881,eb1a:2883]
+ 1 -> Unknown EM2750/28xx video grabber (em2820/em2840) [eb1a:2820,eb1a:2860,eb1a:2861,eb1a:2870,eb1a:2881,eb1a:2883]
2 -> Terratec Cinergy 250 USB (em2820/em2840) [0ccd:0036]
3 -> Pinnacle PCTV USB 2 (em2820/em2840) [2304:0208]
4 -> Hauppauge WinTV USB 2 (em2820/em2840) [2040:4200,2040:4201]
@@ -12,7 +12,7 @@
11 -> Terratec Hybrid XS (em2880) [0ccd:0042]
12 -> Kworld PVR TV 2800 RF (em2820/em2840)
13 -> Terratec Prodigy XS (em2880) [0ccd:0047]
- 14 -> Pixelview Prolink PlayTV USB 2.0 (em2820/em2840)
+ 14 -> Pixelview Prolink PlayTV USB 2.0 (em2820/em2840) [eb1a:2821]
15 -> V-Gear PocketTV (em2800)
16 -> Hauppauge WinTV HVR 950 (em2883) [2040:6513,2040:6517,2040:651b,2040:651f]
17 -> Pinnacle PCTV HD Pro Stick (em2880) [2304:0227]
diff --git a/Documentation/video4linux/CARDLIST.saa7134 b/Documentation/video4linux/CARDLIST.saa7134
index 39868af9cf9..dc67eef38ff 100644
--- a/Documentation/video4linux/CARDLIST.saa7134
+++ b/Documentation/video4linux/CARDLIST.saa7134
@@ -76,7 +76,7 @@
75 -> AVerMedia AVerTVHD MCE A180 [1461:1044]
76 -> SKNet MonsterTV Mobile [1131:4ee9]
77 -> Pinnacle PCTV 40i/50i/110i (saa7133) [11bd:002e]
- 78 -> ASUSTeK P7131 Dual [1043:4862,1043:4857]
+ 78 -> ASUSTeK P7131 Dual [1043:4862]
79 -> Sedna/MuchTV PC TV Cardbus TV/Radio (ITO25 Rev:2B)
80 -> ASUS Digimatrix TV [1043:0210]
81 -> Philips Tiger reference design [1131:2018]
@@ -145,3 +145,9 @@
144 -> Beholder BeholdTV M6 Extra [5ace:6193]
145 -> AVerMedia MiniPCI DVB-T Hybrid M103 [1461:f636]
146 -> ASUSTeK P7131 Analog
+147 -> Asus Tiger 3in1 [1043:4878]
+148 -> Encore ENLTV-FM v5.3 [1a7f:2008]
+149 -> Avermedia PCI pure analog (M135A) [1461:f11d]
+150 -> Zogis Real Angel 220
+151 -> ADS Tech Instant HDTV [1421:0380]
+152 -> Asus Tiger Rev:1.00 [1043:4857]
diff --git a/Documentation/video4linux/CARDLIST.tuner b/Documentation/video4linux/CARDLIST.tuner
index 0e2394695bb..691d2f37dc5 100644
--- a/Documentation/video4linux/CARDLIST.tuner
+++ b/Documentation/video4linux/CARDLIST.tuner
@@ -74,3 +74,5 @@ tuner=72 - Thomson FE6600
tuner=73 - Samsung TCPG 6121P30A
tuner=75 - Philips TEA5761 FM Radio
tuner=76 - Xceive 5000 tuner
+tuner=77 - TCL tuner MF02GIP-5N-E
+tuner=78 - Philips FMD1216MEX MK3 Hybrid Tuner
diff --git a/Documentation/video4linux/gspca.txt b/Documentation/video4linux/gspca.txt
index 9a3e4d797fa..004818fab04 100644
--- a/Documentation/video4linux/gspca.txt
+++ b/Documentation/video4linux/gspca.txt
@@ -7,6 +7,7 @@ The modules are:
xxxx vend:prod
----
spca501 0000:0000 MystFromOri Unknow Camera
+m5602 0402:5602 ALi Video Camera Controller
spca501 040a:0002 Kodak DVC-325
spca500 040a:0300 Kodak EZ200
zc3xx 041e:041e Creative WebCam Live!
@@ -42,6 +43,7 @@ zc3xx 0458:7007 Genius VideoCam V2
zc3xx 0458:700c Genius VideoCam V3
zc3xx 0458:700f Genius VideoCam Web V2
sonixj 0458:7025 Genius Eye 311Q
+sonixj 0458:702e Genius Slim 310 NB
sonixj 045e:00f5 MicroSoft VX3000
sonixj 045e:00f7 MicroSoft VX1000
ov519 045e:028c Micro$oft xbox cam
@@ -81,7 +83,7 @@ spca561 046d:092b Labtec Webcam Plus
spca561 046d:092c Logitech QC chat Elch2
spca561 046d:092d Logitech QC Elch2
spca561 046d:092e Logitech QC Elch2
-spca561 046d:092f Logitech QC Elch2
+spca561 046d:092f Logitech QuickCam Express Plus
sunplus 046d:0960 Logitech ClickSmart 420
sunplus 0471:0322 Philips DMVC1300K
zc3xx 0471:0325 Philips SPC 200 NC
@@ -96,6 +98,29 @@ sunplus 04a5:3003 Benq DC 1300
sunplus 04a5:3008 Benq DC 1500
sunplus 04a5:300a Benq DC 3410
spca500 04a5:300c Benq DC 1016
+finepix 04cb:0104 Fujifilm FinePix 4800
+finepix 04cb:0109 Fujifilm FinePix A202
+finepix 04cb:010b Fujifilm FinePix A203
+finepix 04cb:010f Fujifilm FinePix A204
+finepix 04cb:0111 Fujifilm FinePix A205
+finepix 04cb:0113 Fujifilm FinePix A210
+finepix 04cb:0115 Fujifilm FinePix A303
+finepix 04cb:0117 Fujifilm FinePix A310
+finepix 04cb:0119 Fujifilm FinePix F401
+finepix 04cb:011b Fujifilm FinePix F402
+finepix 04cb:011d Fujifilm FinePix F410
+finepix 04cb:0121 Fujifilm FinePix F601
+finepix 04cb:0123 Fujifilm FinePix F700
+finepix 04cb:0125 Fujifilm FinePix M603
+finepix 04cb:0127 Fujifilm FinePix S300
+finepix 04cb:0129 Fujifilm FinePix S304
+finepix 04cb:012b Fujifilm FinePix S500
+finepix 04cb:012d Fujifilm FinePix S602
+finepix 04cb:012f Fujifilm FinePix S700
+finepix 04cb:0131 Fujifilm FinePix unknown model
+finepix 04cb:013b Fujifilm FinePix unknown model
+finepix 04cb:013d Fujifilm FinePix unknown model
+finepix 04cb:013f Fujifilm FinePix F420
sunplus 04f1:1001 JVC GC A50
spca561 04fc:0561 Flexcam 100
sunplus 04fc:500c Sunplus CA500C
@@ -181,6 +206,7 @@ pac207 093a:2468 PAC207
pac207 093a:2470 Genius GF112
pac207 093a:2471 Genius VideoCam ge111
pac207 093a:2472 Genius VideoCam ge110
+pac207 093a:2476 Genius e-Messenger 112
pac7311 093a:2600 PAC7311 Typhoon
pac7311 093a:2601 Philips SPC 610 NC
pac7311 093a:2603 PAC7312
diff --git a/Documentation/video4linux/m5602.txt b/Documentation/video4linux/m5602.txt
new file mode 100644
index 00000000000..4450ab13f37
--- /dev/null
+++ b/Documentation/video4linux/m5602.txt
@@ -0,0 +1,12 @@
+This document describes the ALi m5602 bridge connected
+to the following supported sensors:
+OmniVision OV9650,
+Samsung s5k83a,
+Samsung s5k4aa,
+Micron mt9m111,
+Pixel plus PO1030
+
+This driver mimics the Windows drivers, which have a braindead implementation
+that sends Bayer-encoded frames at VGA resolution.
+In a perfect world we should be able to reprogram the m5602 and the connected
+sensor in hardware instead, supporting a range of resolutions and pixel formats.
+
+Anyway, have fun and please report any bugs to m560x-driver-devel@lists.sourceforge.net
diff --git a/Documentation/video4linux/soc-camera.txt b/Documentation/video4linux/soc-camera.txt
new file mode 100644
index 00000000000..178ef3c5e57
--- /dev/null
+++ b/Documentation/video4linux/soc-camera.txt
@@ -0,0 +1,120 @@
+ Soc-Camera Subsystem
+ ====================
+
+Terminology
+-----------
+
+The following terms are used in this document:
+ - camera / camera device / camera sensor - a video-camera sensor chip, capable
+ of connecting to a variety of systems and interfaces, typically uses i2c for
+ control and configuration, and a parallel or a serial bus for data.
+ - camera host - an interface, to which a camera is connected. Typically a
+ specialised interface, present on many SoCs, e.g., PXA27x and PXA3xx, SuperH,
+ AVR32, i.MX27, i.MX31.
+ - camera host bus - a connection between a camera host and a camera. Can be
+ parallel or serial, consists of data and control lines, e.g., clock, vertical
+ and horizontal synchronization signals.
+
+Purpose of the soc-camera subsystem
+-----------------------------------
+
+The soc-camera subsystem provides a unified API between camera host drivers and
+camera sensor drivers. It implements a V4L2 interface to the user, currently
+only the mmap method is supported.
+
+This subsystem has been written to connect drivers for System-on-Chip (SoC)
+video capture interfaces with drivers for CMOS camera sensor chips to enable
+the reuse of sensor drivers with various hosts. The subsystem has been designed
+to support multiple camera host interfaces and multiple cameras per interface,
+although most applications have only one camera sensor.
+
+Existing drivers
+----------------
+
+As of 2.6.27-rc4 there are two host drivers in the mainline: pxa_camera.c for
+PXA27x SoCs and sh_mobile_ceu_camera.c for SuperH SoCs, and four sensor drivers:
+mt9m001.c, mt9m111.c, mt9v022.c and a generic soc_camera_platform.c driver. This
+list is not supposed to be updated; look for more examples in your tree.
+
+Camera host API
+---------------
+
+A host camera driver is registered using the
+
+soc_camera_host_register(struct soc_camera_host *);
+
+function. The host object can be initialized as follows:
+
+static struct soc_camera_host pxa_soc_camera_host = {
+ .drv_name = PXA_CAM_DRV_NAME,
+ .ops = &pxa_soc_camera_host_ops,
+};
+
+All camera host methods are passed in a struct soc_camera_host_ops:
+
+static struct soc_camera_host_ops pxa_soc_camera_host_ops = {
+ .owner = THIS_MODULE,
+ .add = pxa_camera_add_device,
+ .remove = pxa_camera_remove_device,
+ .suspend = pxa_camera_suspend,
+ .resume = pxa_camera_resume,
+ .set_fmt_cap = pxa_camera_set_fmt_cap,
+ .try_fmt_cap = pxa_camera_try_fmt_cap,
+ .init_videobuf = pxa_camera_init_videobuf,
+ .reqbufs = pxa_camera_reqbufs,
+ .poll = pxa_camera_poll,
+ .querycap = pxa_camera_querycap,
+ .try_bus_param = pxa_camera_try_bus_param,
+ .set_bus_param = pxa_camera_set_bus_param,
+};
+
+.add and .remove methods are called when a sensor is attached to or detached
+from the host; apart from performing host-internal tasks they shall also call
+the sensor driver's .init and .release methods respectively. .suspend and
+.resume methods implement the host's power-management functionality, and it is
+their responsibility to call the respective sensor methods. .try_bus_param and
+.set_bus_param are used to negotiate physical connection parameters between the
+host and the sensor. .init_videobuf is called by the soc-camera core when a
+video device is opened; further video-buffer management is implemented entirely
+by the specific camera host driver. The rest of the methods are called from
+respective V4L2 operations.
+
+Camera API
+----------
+
+Sensor drivers can use struct soc_camera_link, typically provided by the
+platform, and used to specify to which camera host bus the sensor is connected,
+and optionally provide platform .power and .reset methods for the camera.
+soc_camera_device_register() and soc_camera_device_unregister() functions are
+used to add a sensor driver to or remove one from the system. The registration
+function takes a pointer to struct soc_camera_device as the only parameter.
+This struct can be initialized as follows:
+
+ /* link to driver operations */
+ icd->ops = &mt9m001_ops;
+ /* link to the underlying physical (e.g., i2c) device */
+ icd->control = &client->dev;
+ /* window geometry */
+ icd->x_min = 20;
+ icd->y_min = 12;
+ icd->x_current = 20;
+ icd->y_current = 12;
+ icd->width_min = 48;
+ icd->width_max = 1280;
+ icd->height_min = 32;
+ icd->height_max = 1024;
+ icd->y_skip_top = 1;
+ /* camera bus ID, typically obtained from platform data */
+ icd->iface = icl->bus_id;
+
+struct soc_camera_ops provides .probe and .remove methods, which are called by
+the soc-camera core, when a camera is matched against or removed from a camera
+host bus, .init, .release, .suspend, and .resume are called from the camera host
+driver as discussed above. Other members of this struct provide respective V4L2
+functionality.
+
+struct soc_camera_device also links to an array of struct soc_camera_data_format,
+listing pixel formats, supported by the camera.
+
+--
+Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
diff --git a/Documentation/vm/unevictable-lru.txt b/Documentation/vm/unevictable-lru.txt
new file mode 100644
index 00000000000..125eed560e5
--- /dev/null
+++ b/Documentation/vm/unevictable-lru.txt
@@ -0,0 +1,615 @@
+
+This document describes the Linux memory management "Unevictable LRU"
+infrastructure and the use of this infrastructure to manage several types
+of "unevictable" pages. The document attempts to provide the overall
+rationale behind this mechanism and the rationale for some of the design
+decisions that drove the implementation. The latter design rationale is
+discussed in the context of an implementation description. Admittedly, one
+can obtain the implementation details--the "what does it do?"--by reading the
+code. One hopes that the descriptions below add value by providing the answer
+to "why does it do that?".
+
+Unevictable LRU Infrastructure:
+
+The Unevictable LRU adds an additional LRU list to track unevictable pages
+and to hide these pages from vmscan. This mechanism is based on a patch by
+Larry Woodman of Red Hat to address several scalability problems with page
+reclaim in Linux. The problems have been observed at customer sites on large
+memory x86_64 systems. For example, a non-NUMA x86_64 platform with 128GB
+of main memory will have over 32 million 4k pages in a single zone. When a
+large fraction of these pages are not evictable for any reason [see below],
+vmscan will spend a lot of time scanning the LRU lists looking for the small
+fraction of pages that are evictable. This can result in a situation where
+all cpus are spending 100% of their time in vmscan for hours or days on end,
+with the system completely unresponsive.
+
+The Unevictable LRU infrastructure addresses the following classes of
+unevictable pages:
+
++ page owned by ramfs
++ page mapped into SHM_LOCKed shared memory regions
++ page mapped into VM_LOCKED [mlock()ed] vmas
+
+The infrastructure might be able to handle other conditions that make pages
+unevictable, either by definition or by circumstance, in the future.
+
+
+The Unevictable LRU List
+
+The Unevictable LRU infrastructure consists of an additional, per-zone, LRU list
+called the "unevictable" list and an associated page flag, PG_unevictable, to
+indicate that the page is being managed on the unevictable list. The
+PG_unevictable flag is analogous to, and mutually exclusive with, the PG_active
+flag in that it indicates on which LRU list a page resides when PG_lru is set.
+The unevictable LRU list is source configurable based on the UNEVICTABLE_LRU
+Kconfig option.
+
+The Unevictable LRU infrastructure maintains unevictable pages on an additional
+LRU list for a few reasons:
+
+1) We get to "treat unevictable pages just like we treat other pages in the
+ system, which means we get to use the same code to manipulate them, the
+ same code to isolate them (for migrate, etc.), the same code to keep track
+ of the statistics, etc..." [Rik van Riel]
+
+2) We want to be able to migrate unevictable pages between nodes--for memory
+ defragmentation, workload management and memory hotplug. The linux kernel
+ can only migrate pages that it can successfully isolate from the lru lists.
+ If we were to maintain pages elsewhere than on an lru-like list, where they
+ can be found by isolate_lru_page(), we would prevent their migration, unless
+ we reworked migration code to find the unevictable pages.
+
+
+The unevictable LRU list does not differentiate between file backed and swap
+backed [anon] pages. This differentiation is only important while the pages
+are, in fact, evictable.
+
+The unevictable LRU list benefits from the "arrayification" of the per-zone
+LRU lists and statistics originally proposed and posted by Christoph Lameter.
+
+The unevictable list does not use the lru pagevec mechanism. Rather,
+unevictable pages are placed directly on the page's zone's unevictable
+list under the zone lru_lock. The reason for this is to prevent stranding
+of pages on the unevictable list when one task has the page isolated from the
+lru and other tasks are changing the "evictability" state of the page.
+
+
+Unevictable LRU and Memory Controller Interaction
+
+The memory controller data structure automatically gets a per zone unevictable
+lru list as a result of the "arrayification" of the per-zone LRU lists. The
+memory controller tracks the movement of pages to and from the unevictable list.
+When a memory control group comes under memory pressure, the controller will
+not attempt to reclaim pages on the unevictable list. This has a couple of
+effects. Because the pages are "hidden" from reclaim on the unevictable list,
+the reclaim process can be more efficient, dealing only with pages that have
+a chance of being reclaimed. On the other hand, if too many of the pages
+charged to the control group are unevictable, the evictable portion of the
+working set of the tasks in the control group may not fit into the available
+memory. This can cause the control group to thrash or to oom-kill tasks.
+
+
+Unevictable LRU: Detecting Unevictable Pages
+
+The function page_evictable(page, vma) in vmscan.c determines whether a
+page is evictable or not. For ramfs pages and pages in SHM_LOCKed regions,
+page_evictable() tests a new address space flag, AS_UNEVICTABLE, in the page's
+address space using a wrapper function. Wrapper functions are used to set,
+clear and test the flag to reduce the requirement for #ifdef's throughout the
+source code. AS_UNEVICTABLE is set on ramfs inode/mapping when it is created.
+This flag remains for the life of the inode.
+
+For shared memory regions, AS_UNEVICTABLE is set when an application
+successfully SHM_LOCKs the region and is removed when the region is
+SHM_UNLOCKed. Note that shmctl(SHM_LOCK, ...) does not populate the page
+tables for the region as does, for example, mlock(). So, we make no special
+effort to push any pages in the SHM_LOCKed region to the unevictable list.
+Vmscan will do this when/if it encounters the pages during reclaim. On
+SHM_UNLOCK, shmctl() scans the pages in the region and "rescues" them from the
+unevictable list if no other condition keeps them unevictable. If a SHM_LOCKed
+region is destroyed, the pages are also "rescued" from the unevictable list in
+the process of freeing them.
+
+page_evictable() detects mlock()ed pages by testing an additional page flag,
+PG_mlocked via the PageMlocked() wrapper. If the page is NOT mlocked, and a
+non-NULL vma is supplied, page_evictable() will check whether the vma is
+VM_LOCKED via is_mlocked_vma(). is_mlocked_vma() will SetPageMlocked() and
+update the appropriate statistics if the vma is VM_LOCKED. This method allows
+efficient "culling" of pages in the fault path that are being faulted in to
+VM_LOCKED vmas.
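+
+As a rough, illustrative sketch only (simplified from the description above,
+not a verbatim copy of mm/vmscan.c), the test boils down to:
+
+	int page_evictable(struct page *page, struct vm_area_struct *vma)
+	{
+		/* ramfs and SHM_LOCKed pages: AS_UNEVICTABLE on the mapping */
+		if (mapping_unevictable(page_mapping(page)))
+			return 0;
+		/* mlock()ed pages, or pages faulted into a VM_LOCKED vma */
+		if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
+			return 0;
+		return 1;
+	}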
+
+
+Unevictable Pages and Vmscan [shrink_*_list()]
+
+If unevictable pages are culled in the fault path, or moved to the unevictable
+list at mlock() or mmap() time, vmscan will never encounter the pages until
+they have become evictable again, for example, via munlock() and have been
+"rescued" from the unevictable list. However, there may be situations where we
+decide, for the sake of expediency, to leave an unevictable page on one of the
+regular active/inactive LRU lists for vmscan to deal with. Vmscan checks for
+such pages in all of the shrink_{active|inactive|page}_list() functions and
+will "cull" such pages that it encounters--that is, it diverts those pages to
+the unevictable list for the zone being scanned.
+
+There may be situations where a page is mapped into a VM_LOCKED vma, but the
+page is not marked as PageMlocked. Such pages will make it all the way to
+shrink_page_list() where they will be detected when vmscan walks the reverse
+map in try_to_unmap(). If try_to_unmap() returns SWAP_MLOCK, shrink_page_list()
+will cull the page at that point.
+
+Note that for anonymous pages, shrink_page_list() attempts to add the page to
+the swap cache before it tries to unmap the page. To avoid this unnecessary
+consumption of swap space, shrink_page_list() calls try_to_munlock() to check
+whether any VM_LOCKED vmas map the page without attempting to unmap the page.
+If try_to_munlock() returns SWAP_MLOCK, shrink_page_list() will cull the page
+without consuming swap space. try_to_munlock() will be described below.
+
+To "cull" an unevictable page, vmscan simply puts the page back on the lru
+list using putback_lru_page()--the inverse operation to isolate_lru_page()--
+after dropping the page lock. Because the condition which makes the page
+unevictable may change once the page is unlocked, putback_lru_page() will
+recheck the unevictable state of a page that it places on the unevictable lru
+list. If the page has become evictable again, putback_lru_page() removes it
+from the list and retries, including the page_evictable() test. Because such
+a race is a rare event and movement of pages onto the unevictable list should
+be rare, these extra evictability checks should not occur in the majority of
+calls to putback_lru_page().
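+
+A simplified sketch of that recheck--reference counting and statistics are
+omitted, and the list helper names here are assumptions, not necessarily the
+exact kernel identifiers:
+
+    void putback_lru_page(struct page *page)
+    {
+    redo:
+        if (page_evictable(page, NULL)) {
+            lru_cache_add_lru(page, page_lru(page));    /* normal LRU */
+        } else {
+            add_page_to_unevictable_list(page);
+            /*
+             * The page may have become evictable while it was being
+             * added; if so, pull it back off and retry.
+             */
+            if (page_evictable(page, NULL) && !isolate_lru_page(page))
+                goto redo;
+        }
+    }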
+
+
+Mlocked Pages: Prior Work
+
+The "Unevictable Mlocked Pages" infrastructure is based on work originally
+posted by Nick Piggin in an RFC patch entitled "mm: mlocked pages off LRU".
+Nick posted his patch as an alternative to a patch posted by Christoph
+Lameter to achieve the same objective--hiding mlocked pages from vmscan.
+In Nick's patch, he used one of the struct page lru list link fields as a count
+of VM_LOCKED vmas that map the page. This use of the link field for a count
+prevented the management of the pages on an LRU list. Thus, mlocked pages were
+not migratable as isolate_lru_page() could not find them and the lru list link
+field was not available to the migration subsystem. Nick resolved this by
+putting mlocked pages back on the lru list before attempting to isolate them,
+thus abandoning the count of VM_LOCKED vmas. When Nick's patch was integrated
+with the Unevictable LRU work, the count was replaced by walking the reverse
+map to determine whether any VM_LOCKED vmas mapped the page. More on this
+below.
+
+
+Mlocked Pages: Basic Management
+
+Mlocked pages--pages mapped into a VM_LOCKED vma--represent one class of
+unevictable pages. When such a page has been "noticed" by the memory
+management subsystem, the page is marked with the PG_mlocked [PageMlocked()]
+flag. A PageMlocked() page will be placed on the unevictable LRU list when
+it is added to the LRU. Pages can be "noticed" by memory management in
+several places:
+
+1) in the mlock()/mlockall() system call handlers.
+2) in the mmap() system call handler when mmap()ing a region with the
+ MAP_LOCKED flag, or mmap()ing a region in a task that has called
+ mlockall() with the MCL_FUTURE flag. Both of these conditions result
+ in the VM_LOCKED flag being set for the vma.
+3) in the fault path, if mlocked pages are "culled" there, and when a
+   VM_LOCKED stack segment is expanded.
+4) as mentioned above, in vmscan:shrink_page_list() when attempting to
+   reclaim a page in a VM_LOCKED vma--via try_to_unmap() or try_to_munlock().
+
+Mlocked pages become unlocked and rescued from the unevictable list when:
+
+1) mapped in a range unlocked via the munlock()/munlockall() system calls.
+2) munmap()ed out of the last VM_LOCKED vma that maps the page, including
+ unmapping at task exit.
+3) when the page is truncated from the last VM_LOCKED vma of an mmap()ed file.
+4) before a page is COWed in a VM_LOCKED vma.
+
+
+Mlocked Pages: mlock()/mlockall() System Call Handling
+
+Both [do_]mlock() and [do_]mlockall() system call handlers call mlock_fixup()
+for each vma in the range specified by the call. In the case of mlockall(),
+this is the entire active address space of the task. Note that mlock_fixup()
+is used for both mlock()ing and munlock()ing a range of memory. A call to
+mlock() an already VM_LOCKED vma, or to munlock() a vma that is not VM_LOCKED
+is treated as a no-op--mlock_fixup() simply returns.
+
+If the vma passes some filtering described in "Mlocked Pages: Filtering
+Special Vmas" below, mlock_fixup() will attempt to merge the vma with its
+neighbors or split off a subset of the vma if the range does not cover the
+entire vma. Whether the vma was merged, split, or left as-is, mlock_fixup()
+will then call __mlock_vma_pages_range() to fault in the pages via
+get_user_pages() and to mark the pages as mlocked via mlock_vma_page().
+
+Note that the vma being mlocked might be mapped with PROT_NONE. In this case,
+get_user_pages() will be unable to fault in the pages. That's OK. If pages
+do end up getting faulted into this VM_LOCKED vma, we'll handle them in the
+fault path or in vmscan.
+
+Also note that a page returned by get_user_pages() could be truncated or
+migrated out from under us, while we're trying to mlock it. To detect
+this, __mlock_vma_pages_range() tests the page_mapping after acquiring
+the page lock. If the page is still associated with its mapping, we'll
+go ahead and call mlock_vma_page(). If the mapping is gone, we just
+unlock the page and move on. Worst case, this results in a page mapped
+in a VM_LOCKED vma remaining on a normal LRU list without being
+PageMlocked(). Again, vmscan will detect and cull such pages.
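+
+In code, the per-page step just described looks roughly like the following
+sketch; the surrounding get_user_pages() loop and error handling are omitted:
+
+    lock_page(page);
+    /*
+     * The page may have been truncated or migrated while we waited for
+     * the page lock; only mark it mlocked if the mapping is still there.
+     */
+    if (page->mapping)
+        mlock_vma_page(page);
+    unlock_page(page);
+    put_page(page);    /* release the get_user_pages() reference */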
+
+mlock_vma_page(), called with the page locked [N.B., not "mlocked"], will
+TestSetPageMlocked() for each page returned by get_user_pages(). We use
+TestSetPageMlocked() because the page might already be mlocked by another
+task/vma and we don't want to do extra work. We especially do not want to
+count an mlocked page more than once in the statistics. If the page was
+already mlocked, mlock_vma_page() is done.
+
+If the page was NOT already mlocked, mlock_vma_page() attempts to isolate the
+page from the LRU, as it is likely on the appropriate active or inactive list
+at that time. If isolate_lru_page() succeeds, mlock_vma_page() will put the
+page back--via putback_lru_page()--which will notice that the page is now
+mlocked and divert the page to the zone's unevictable LRU list. If
+mlock_vma_page() is unable to isolate the page from the LRU, vmscan will handle
+it later if/when it attempts to reclaim the page.
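+
+Putting the pieces together, mlock_vma_page() amounts to roughly the following
+sketch; the statistics counter name is an assumption and the real function also
+bumps a vm event counter:
+
+    void mlock_vma_page(struct page *page)
+    {
+        BUG_ON(!PageLocked(page));
+
+        if (!TestSetPageMlocked(page)) {
+            inc_zone_page_state(page, NR_MLOCK);
+            if (!isolate_lru_page(page))
+                putback_lru_page(page);    /* lands on the unevictable LRU */
+            /* else: leave it; vmscan will cull it later */
+        }
+    }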
+
+
+Mlocked Pages: Filtering Special Vmas
+
+mlock_fixup() filters several classes of "special" vmas:
+
+1) vmas with VM_IO|VM_PFNMAP set are skipped entirely. The pages behind
+ these mappings are inherently pinned, so we don't need to mark them as
+   mlocked. In any case, most of these pages have no struct page in which
+   to mark them as mlocked. Because of this, get_user_pages() will fail for
+   these vmas, so there is no sense in attempting to visit them.
+
+2) vmas mapping hugetlbfs pages are already effectively pinned into memory.
+ We don't need nor want to mlock() these pages. However, to preserve the
+ prior behavior of mlock()--before the unevictable/mlock changes--mlock_fixup()
+ will call make_pages_present() in the hugetlbfs vma range to allocate the
+ huge pages and populate the ptes.
+
+3) vmas with VM_DONTEXPAND|VM_RESERVED are generally user space mappings of
+ kernel pages, such as the vdso page, relay channel pages, etc. These pages
+ are inherently unevictable and are not managed on the LRU lists.
+ mlock_fixup() treats these vmas the same as hugetlbfs vmas. It calls
+ make_pages_present() to populate the ptes.
+
+Note that for all of these special vmas, mlock_fixup() does not set the
+VM_LOCKED flag. Therefore, we won't have to deal with them later during
+munlock() or munmap()--for example, at task exit. Neither does mlock_fixup()
+account these vmas against the task's "locked_vm".
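+
+A condensed sketch of that filtering--the merge/split path is left out and the
+exact tests in mlock_fixup() may be arranged differently:
+
+    if (vma->vm_flags & (VM_IO | VM_PFNMAP))
+        goto out;                       /* skip; VM_LOCKED is not set */
+
+    if (is_vm_hugetlb_page(vma) ||
+        (vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED))) {
+        make_pages_present(start, end); /* just populate the ptes */
+        goto out;                       /* again, VM_LOCKED is not set */
+    }
+
+    /* "normal" vma: merge/split, set VM_LOCKED, then fault and mlock */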
+
+
+Mlocked Pages: Downgrading the Mmap Semaphore
+
+mlock_fixup() must be called with the mmap semaphore held for write, because
+it may have to merge or split vmas. However, mlocking a large region of
+memory can take a long time--especially if vmscan must reclaim pages to
+satisfy the region's requirements. Faulting in a large region with the mmap
+semaphore held for write can hold off other faults on the address space, in
+the case of a multi-threaded task. It can also hold off scans of the task's
+address space via /proc. While testing under heavy load, it was observed that
+the ps(1) command could be held off for many minutes while a large segment was
+mlock()ed down.
+
+To address this issue, and to make the system more responsive during mlock()ing
+of large segments, mlock_fixup() downgrades the mmap semaphore to read mode
+during the call to __mlock_vma_pages_range(). This works fine. However, the
+callers of mlock_fixup() expect the semaphore to be returned in write mode.
+So, mlock_fixup() "upgrades" the semaphore to write mode. Linux does not
+support an atomic upgrade_sem() call, so mlock_fixup() must drop the semaphore
+and reacquire it in write mode. In a multi-threaded task, it is possible for
+the task memory map to change while the semaphore is dropped. Therefore,
+mlock_fixup() looks up the vma at the range start address after reacquiring
+the semaphore in write mode and verifies that it still covers the original
+range. If not, mlock_fixup() returns an error [-EAGAIN]. All callers of
+mlock_fixup() have been changed to deal with this new error condition.
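+
+The shape of that code is roughly the following sketch; error handling and the
+interaction with the merge/split step are omitted:
+
+    downgrade_write(&mm->mmap_sem);
+    ret = __mlock_vma_pages_range(vma, start, end);
+
+    /* no atomic upgrade: drop the semaphore and retake it for write */
+    up_read(&mm->mmap_sem);
+    down_write(&mm->mmap_sem);
+
+    /* the memory map may have changed while the semaphore was dropped */
+    vma = find_vma(mm, start);
+    if (!vma || vma->vm_start > start || vma->vm_end < end)
+        ret = -EAGAIN;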
+
+Note: when munlocking a region, all of the pages should already be resident--
+unless we have threads racing to mlock() and munlock() regions. So,
+unlocking should not have to wait for page allocations or faults of any kind.
+Therefore mlock_fixup() does not downgrade the semaphore for munlock().
+
+
+Mlocked Pages: munlock()/munlockall() System Call Handling
+
+The munlock() and munlockall() system calls are handled by the same functions--
+do_mlock[all]()--as the mlock() and mlockall() system calls with the unlock
+vs lock operation indicated by an argument. So, these system calls are also
+handled by mlock_fixup(). Again, if called for an already munlock()ed vma,
+mlock_fixup() simply returns. Because of the vma filtering discussed above,
+VM_LOCKED will not be set in any "special" vmas. So, these vmas will be
+ignored for munlock.
+
+If the vma is VM_LOCKED, mlock_fixup() again attempts to merge or split off
+the specified range. The range is then munlocked via the function
+__mlock_vma_pages_range()--the same function used to mlock a vma range--
+passing a flag to indicate that munlock() is being performed.
+
+Because the vma access protections could have been changed to PROT_NONE after
+faulting in and mlocking some pages, get_user_pages() was unreliable for
+visiting these pages for munlocking. Because we don't want to leave pages mlocked,
+get_user_pages() was enhanced to accept a flag to ignore the permissions when
+fetching the pages--all of which should be resident as a result of previous
+mlock()ing.
+
+For munlock(), __mlock_vma_pages_range() unlocks individual pages by calling
+munlock_vma_page(). munlock_vma_page() unconditionally clears the PG_mlocked
+flag using TestClearPageMlocked(). As with mlock_vma_page(), munlock_vma_page()
+uses the Test*PageMlocked() function to handle the case where the page might
+have already been unlocked by another task. If the page was mlocked,
+munlock_vma_page() updates the zone statistics for the number of mlocked
+pages. Note, however, that at this point we haven't checked whether the page
+is mapped by other VM_LOCKED vmas.
+
+We can't call try_to_munlock(), the function that walks the reverse map to check
+for other VM_LOCKED vmas, without first isolating the page from the LRU.
+try_to_munlock() is a variant of try_to_unmap() and thus requires that the page
+not be on an lru list. [More on these below.] However, the call to
+isolate_lru_page() could fail, in which case we couldn't try_to_munlock().
+So, we go ahead and clear PG_mlocked up front, as this might be the only chance
+we have. If we can successfully isolate the page, we go ahead and
+try_to_munlock(), which will restore the PG_mlocked flag and update the zone
+page statistics if it finds another vma holding the page mlocked. If we fail
+to isolate the page, we'll have left a potentially mlocked page on the LRU.
+This is fine, because we'll catch it later when/if vmscan tries to reclaim the
+page. This should be relatively rare.
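+
+In outline, munlock_vma_page() therefore looks something like this sketch; as
+before, the statistics counter name is an assumption and event counting is
+omitted:
+
+    void munlock_vma_page(struct page *page)
+    {
+        BUG_ON(!PageLocked(page));
+
+        if (TestClearPageMlocked(page)) {
+            dec_zone_page_state(page, NR_MLOCK);
+            if (!isolate_lru_page(page)) {
+                /* may re-set PG_mlocked if another vma holds the page */
+                try_to_munlock(page);
+                putback_lru_page(page);
+            }
+            /* else: a possibly-mlocked page stays on the LRU for vmscan */
+        }
+    }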
+
+Mlocked Pages: Migrating Them...
+
+A page that is being migrated has been isolated from the lru lists and is
+held locked across unmapping of the page, updating the page's mapping
+[address_space] entry and copying the contents and state, until the
+page table entry has been replaced with an entry that refers to the new
+page. Linux supports migration of mlocked pages and other unevictable
+pages. This involves simply moving the PageMlocked and PageUnevictable states
+from the old page to the new page.
+
+Note that page migration can race with mlocking or munlocking of the same
+page. This has been discussed from the mlock/munlock perspective in the
+respective sections above. Both processes [migration, m[un]locking], hold
+the page locked. This provides the first level of synchronization. Page
+migration zeros out the page_mapping of the old page before unlocking it,
+so m[un]lock can skip these pages by testing the page mapping under page
+lock.
+
+When completing page migration, we place the new and old pages back onto the
+lru after dropping the page lock. The "unneeded" page--old page on success,
+new page on failure--will be freed when the reference count held by the
+migration process is released. To ensure that we don't strand pages on the
+unevictable list because of a race between munlock and migration, page
+migration uses the putback_lru_page() function to add migrated pages back to
+the lru.
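+
+Conceptually, the state transfer is just the following; the real code wraps it
+in small helpers that also keep the zone statistics straight:
+
+    if (TestClearPageMlocked(page))
+        SetPageMlocked(newpage);
+    if (TestClearPageUnevictable(page))
+        SetPageUnevictable(newpage);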
+
+
+Mlocked Pages: mmap(MAP_LOCKED) System Call Handling
+
+In addition to the mlock()/mlockall() system calls, an application can request
+that a region of memory be mlocked using the MAP_LOCKED flag with the mmap()
+call. Furthermore, any mmap() call or brk() call that expands the heap by a
+task that has previously called mlockall() with the MCL_FUTURE flag will result
+in the newly mapped memory being mlocked. Before the unevictable/mlock changes,
+the kernel simply called make_pages_present() to allocate pages and populate
+the page table.
+
+To mlock a range of memory under the unevictable/mlock infrastructure, the
+mmap() handler and task address space expansion functions call
+mlock_vma_pages_range() specifying the vma and the address range to mlock.
+mlock_vma_pages_range() filters vmas like mlock_fixup(), as described above in
+"Mlocked Pages: Filtering Special Vmas". It will clear the VM_LOCKED flag,
+which will have already been set by the caller, in filtered vmas. Thus these
+vmas need not be visited for munlock when the region is unmapped.
+
+For "normal" vmas, mlock_vma_pages_range() calls __mlock_vma_pages_range() to
+fault/allocate the pages and mlock them. Again, like mlock_fixup(),
+mlock_vma_pages_range() downgrades the mmap semaphore to read mode before
+attempting to fault/allocate and mlock the pages; and "upgrades" the semaphore
+back to write mode before returning.
+
+The callers of mlock_vma_pages_range() will have already added the memory
+range to be mlocked to the task's "locked_vm". To account for filtered vmas,
+mlock_vma_pages_range() returns the number of pages NOT mlocked. All of the
+callers then subtract a non-negative return value from the task's locked_vm.
+A negative return value represents an error--for example, from get_user_pages()
+attempting to fault in a vma with PROT_NONE access. In this case, we leave
+the memory range accounted as locked_vm, as the protections could be changed
+later and pages allocated into that region.
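+
+A caller-side sketch of that accounting convention; the caller shown here is
+illustrative and the variable names are not actual kernel identifiers:
+
+    long nr_unmlocked;
+
+    mm->locked_vm += (end - start) >> PAGE_SHIFT;
+    nr_unmlocked = mlock_vma_pages_range(vma, start, end);
+    if (nr_unmlocked > 0)
+        mm->locked_vm -= nr_unmlocked;  /* filtered vmas, etc. */
+    /* a negative return is an error; leave locked_vm as accounted */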
+
+
+Mlocked Pages: munmap()/exit()/exec() System Call Handling
+
+When unmapping an mlocked region of memory, whether by an explicit call to
+munmap() or via an internal unmap from exit() or exec() processing, we must
+munlock the pages if we're removing the last VM_LOCKED vma that maps the pages.
+Before the unevictable/mlock changes, mlocking did not mark the pages in any way,
+so unmapping them required no processing.
+
+To munlock a range of memory under the unevictable/mlock infrastructure, the
+munmap() handler and task address space teardown function call
+munlock_vma_pages_all(). The name reflects the observation that one always
+specifies the entire vma range when munlock()ing during unmap of a region.
+Because of the vma filtering when mlock()ing regions, only "normal" vmas that
+actually contain mlocked pages will be passed to munlock_vma_pages_all().
+
+munlock_vma_pages_all() clears the VM_LOCKED vma flag and, like mlock_fixup()
+for the munlock case, calls __munlock_vma_pages_range() to walk the page table
+for the vma's memory range and munlock_vma_page() each resident page mapped by
+the vma. This effectively munlocks the page, but only if this is the last
+VM_LOCKED vma that maps the page.
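+
+As a sketch--the real code may fold the flag clearing into the range walk
+itself:
+
+    void munlock_vma_pages_all(struct vm_area_struct *vma)
+    {
+        vma->vm_flags &= ~VM_LOCKED;
+        __munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
+    }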
+
+
+Mlocked Pages: try_to_unmap()
+
+[Note: the code changes represented by this section are really quite small
+compared to the text needed to describe what is happening and why, and to
+discuss the implications.]
+
+Pages can, of course, be mapped into multiple vmas. Some of these vmas may
+have VM_LOCKED flag set. It is possible for a page mapped into one or more
+VM_LOCKED vmas not to have the PG_mlocked flag set and therefore reside on one
+of the active or inactive LRU lists. This could happen if, for example, a
+task in the process of munlock()ing the page could not isolate the page from
+the LRU. As a result, vmscan/shrink_page_list() might encounter such a page
+as described in "Unevictable Pages and Vmscan [shrink_*_list()]". To
+handle this situation, try_to_unmap() has been enhanced to check for VM_LOCKED
+vmas while it is walking a page's reverse map.
+
+try_to_unmap() is always called, by either vmscan for reclaim or for page
+migration, with the argument page locked and isolated from the LRU. BUG_ON()
+assertions enforce this requirement. Separate functions handle anonymous and
+mapped file pages, as these types of pages have different reverse map
+mechanisms.
+
+ try_to_unmap_anon()
+
+To unmap anonymous pages, each vma in the list anchored in the anon_vma must be
+visited--at least until a VM_LOCKED vma is encountered. If the page is being
+unmapped for migration, VM_LOCKED vmas do not stop the process because mlocked
+pages are migratable. However, for reclaim, if the page is mapped into a
+VM_LOCKED vma, the scan stops. try_to_unmap() attempts to acquire the mmap
+semaphore of the mm_struct to which the vma belongs in read mode. If this is
+successful, try_to_unmap() will mlock the page via mlock_vma_page()--we
+wouldn't have gotten to try_to_unmap() if the page were already mlocked--and
+will return SWAP_MLOCK, indicating that the page is unevictable. If the
+mmap semaphore cannot be acquired, we are not sure whether the page is really
+unevictable or not. In this case, try_to_unmap() will return SWAP_AGAIN.
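+
+The shape of the check applied to each vma during the walk is roughly the
+following sketch; the real rmap code is structured somewhat differently, and
+"migration" stands for however the unmap-for-migration case is flagged:
+
+    if (!migration && (vma->vm_flags & VM_LOCKED)) {
+        if (!down_read_trylock(&vma->vm_mm->mmap_sem))
+            return SWAP_AGAIN;      /* can't be sure; let vmscan retry */
+        mlock_vma_page(page);       /* the page is already locked */
+        up_read(&vma->vm_mm->mmap_sem);
+        return SWAP_MLOCK;          /* page is unevictable */
+    }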
+
+ try_to_unmap_file() -- linear mappings
+
+Unmapping of a mapped file page works the same, except that the scan visits
+all vmas that map the page's index/page offset in the page's mapping's
+reverse map priority search tree. It must also visit each vma in the page's
+mapping's non-linear list, if the list is non-empty. As for anonymous pages,
+on encountering a VM_LOCKED vma for a mapped file page, try_to_unmap() will
+attempt to acquire the associated mm_struct's mmap semaphore to mlock the page,
+returning SWAP_MLOCK if this is successful, and SWAP_AGAIN, if not.
+
+ try_to_unmap_file() -- non-linear mappings
+
+If a page's mapping contains a non-empty non-linear mapping vma list, then
+try_to_un{map|lock}() must also visit each vma in that list to determine
+whether the page is mapped in a VM_LOCKED vma. Again, the scan must visit
+all vmas in the non-linear list to ensure that the page is not/should not be
+mlocked. If a VM_LOCKED vma is found in the list, the scan could terminate.
+However, there is no easy way to determine whether the page is actually mapped
+in a given vma--either for unmapping or testing whether the VM_LOCKED vma
+actually pins the page.
+
+So, try_to_unmap_file() handles non-linear mappings by scanning a certain
+number of pages--a "cluster"--in each non-linear vma associated with the page's
+mapping, for each file mapped page that vmscan tries to unmap. If this happens
+to unmap the page we're trying to unmap, try_to_unmap() will notice this on
+return--(page_mapcount(page) == 0)--and return SWAP_SUCCESS. Otherwise, it
+will return SWAP_AGAIN, causing vmscan to recirculate this page. We take
+advantage of the cluster scan in try_to_unmap_cluster() as follows:
+
+For each non-linear vma, try_to_unmap_cluster() attempts to acquire the mmap
+semaphore of the associated mm_struct for read without blocking. If this
+attempt is successful and the vma is VM_LOCKED, try_to_unmap_cluster() will
+retain the mmap semaphore for the scan; otherwise it drops it here. Then,
+for each page in the cluster, if we're holding the mmap semaphore for a locked
+vma, try_to_unmap_cluster() calls mlock_vma_page() to mlock the page. This
+call is a no-op if the page is already locked, but will mlock any pages in
+the non-linear mapping that happen to be unlocked. If one of the pages so
+mlocked is the page passed in to try_to_unmap(), try_to_unmap_cluster() will
+return SWAP_MLOCK, rather than the default SWAP_AGAIN. This will allow vmscan
+to cull the page, rather than recirculating it on the inactive list. Again,
+if try_to_unmap_cluster() cannot acquire the vma's mmap sem, it returns
+SWAP_AGAIN, indicating that the page is mapped by a VM_LOCKED vma, but
+couldn't be mlocked.
+
+
+Mlocked Pages: try_to_munlock() Reverse Map Scan
+
+TODO/FIXME: a better name might be page_mlocked()--analogous to the
+page_referenced() reverse map walker--especially if we continue to call this
+from shrink_page_list(). See related TODO/FIXME below.
+
+When munlock_vma_page()--see "Mlocked Pages: munlock()/munlockall() System
+Call Handling" above--tries to munlock a page, or when shrink_page_list()
+encounters an anonymous page that is not yet in the swap cache, they need to
+determine whether or not the page is mapped by any VM_LOCKED vma, without
+actually attempting to unmap all ptes from the page. For this purpose, the
+unevictable/mlock infrastructure introduced a variant of try_to_unmap() called
+try_to_munlock().
+
+try_to_munlock() calls the same functions as try_to_unmap() for anonymous and
+mapped file pages with an additional argument specifying unlock versus unmap
+processing. Again, these functions walk the respective reverse maps looking
+for VM_LOCKED vmas. When such a vma is found for anonymous pages and file
+pages mapped in linear VMAs, as in the try_to_unmap() case, the functions
+attempt to acquire the associated mmap semaphore, mlock the page via
+mlock_vma_page() and return SWAP_MLOCK. This effectively undoes the
+pre-clearing of the page's PG_mlocked done by munlock_vma_page() and informs
+shrink_page_list() that the anonymous page should be culled rather than added
+to the swap cache in preparation for a try_to_unmap() that will almost
+certainly fail.
+
+If try_to_munlock() is unable to acquire a VM_LOCKED vma's associated mmap
+semaphore, it will return SWAP_AGAIN. This will allow shrink_page_list()
+to recycle the page on the inactive list and hope that it has better luck
+with the page next time.
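+
+In shrink_page_list(), the resulting check looks approximately like the sketch
+below; the goto labels are illustrative, not necessarily the ones used in
+vmscan.c:
+
+    if (PageAnon(page) && !PageSwapCache(page)) {
+        switch (try_to_munlock(page)) {
+        case SWAP_MLOCK:
+            goto cull_mlocked;      /* divert to the unevictable list */
+        case SWAP_AGAIN:
+            goto keep_locked;       /* recycle; try again next pass */
+        default:
+            break;                  /* no VM_LOCKED vma found */
+        }
+        if (!add_to_swap(page, GFP_ATOMIC))
+            goto activate_locked;
+    }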
+
+For file pages mapped into non-linear vmas, the try_to_munlock() logic works
+slightly differently. On encountering a VM_LOCKED non-linear vma that might
+map the page, try_to_munlock() returns SWAP_AGAIN without actually mlocking
+the page. munlock_vma_page() will just leave the page unlocked and let
+vmscan deal with it--the usual fallback position.
+
+Note that try_to_munlock()'s reverse map walk must visit every vma in a page's
+reverse map to determine that a page is NOT mapped into any VM_LOCKED vma.
+However, the scan can terminate when it encounters a VM_LOCKED vma and can
+successfully acquire the vma's mmap semaphore for read and mlock the page.
+Although try_to_munlock() can be called many [very many!] times when
+munlock()ing a large region or tearing down a large address space that has been
+mlocked via mlockall(), overall this is a fairly rare event. In addition,
+although shrink_page_list() calls try_to_munlock() for every anonymous page that
+it handles that is not yet in the swap cache, on average anonymous pages will
+have very short reverse map lists.
+
+
+Mlocked Pages: Page Reclaim in shrink_*_list()
+
+shrink_active_list() culls any obviously unevictable pages--i.e.,
+!page_evictable(page, NULL)--diverting these to the unevictable lru
+list. However, shrink_active_list() only sees unevictable pages that
+made it onto the active/inactive lru lists. Note that these pages do not
+have PageUnevictable set--otherwise, they would be on the unevictable list and
+shrink_active_list() would never see them.
+
+Some examples of these unevictable pages on the LRU lists are:
+
+1) ramfs pages that have been placed on the lru lists when first allocated.
+
+2) SHM_LOCKed shared memory pages. shmctl(SHM_LOCK) does not attempt to
+   allocate or fault in the pages in the shared memory region. The pages
+   are allocated and faulted in when an application accesses them for the
+   first time after SHM_LOCKing the segment.
+
+3) Mlocked pages that could not be isolated from the lru and moved to the
+ unevictable list in mlock_vma_page().
+
+4) Pages mapped into multiple VM_LOCKED vmas, but try_to_munlock() couldn't
+ acquire the vma's mmap semaphore to test the flags and set PageMlocked.
+ munlock_vma_page() was forced to let the page back on to the normal
+ LRU list for vmscan to handle.
+
+shrink_inactive_list() also culls any unevictable pages that it finds
+on the inactive lists, again diverting them to the appropriate zone's unevictable
+lru list. shrink_inactive_list() should only see SHM_LOCKed pages that became
+SHM_LOCKed after shrink_active_list() had moved them to the inactive list, or
+pages mapped into VM_LOCKED vmas that munlock_vma_page() couldn't isolate from
+the lru to recheck via try_to_munlock(). shrink_inactive_list() won't notice
+the latter, but will pass them on to shrink_page_list().
+
+shrink_page_list() again culls obviously unevictable pages that it could
+encounter for similar reasons to shrink_inactive_list(). As already discussed,
+shrink_page_list() proactively looks for anonymous pages that should have
+PG_mlocked set but don't--these would not be detected by page_evictable()--to
+avoid adding them to the swap cache unnecessarily. File pages mapped into
+VM_LOCKED vmas but without PG_mlocked set will make it all the way to
+try_to_unmap(). shrink_page_list() will divert them to the unevictable list when
+try_to_unmap() returns SWAP_MLOCK, as discussed above.
+
+TODO/FIXME: If we can enhance the swap cache to reliably remove entries
+with page_count(page) > 2, as long as all ptes are mapped to the page and
+not the swap entry, we can probably remove the call to try_to_munlock() in
+shrink_page_list() and just remove the page from the swap cache when
+try_to_unmap() returns SWAP_MLOCK. Currently, remove_exclusive_swap_page()
+doesn't seem to allow that.
+
+
diff --git a/Documentation/w1/00-INDEX b/Documentation/w1/00-INDEX
index 5270cf4cb10..cb49802745d 100644
--- a/Documentation/w1/00-INDEX
+++ b/Documentation/w1/00-INDEX
@@ -1,5 +1,7 @@
00-INDEX
- This file
+slaves/
+ - Drivers that provide support for specific family codes.
masters/
- Individual chips providing 1-wire busses.
w1.generic
diff --git a/Documentation/w1/masters/ds2490 b/Documentation/w1/masters/ds2490
index 239f9ae0184..28176def3d6 100644
--- a/Documentation/w1/masters/ds2490
+++ b/Documentation/w1/masters/ds2490
@@ -16,3 +16,55 @@ which allows to build USB <-> W1 bridges.
DS9490(R) is a USB <-> W1 bus master device
which has 0x81 family ID integrated chip and DS2490
low-level operational chip.
+
+Notes and limitations.
+- The weak pullup current is a minimum of 0.9mA and maximum of 6.0mA.
+- The 5V strong pullup is supported with a minimum of 5.9mA and a
+ maximum of 30.4 mA. (From DS2490.pdf)
+- While the ds2490 supports a hardware search the code doesn't take
+  advantage of it (in the tested case it only returned the first device).
+- The hardware will detect when devices are attached to the bus on the
+  next bus (reset?) operation; however, only a message is printed, as
+  the core w1 code doesn't make use of the information. Connecting
+  one device tends to give multiple new device notifications.
+- The number of USB bus transactions could be reduced if w1_reset_send
+ was added to the API. The name is just a suggestion. It would take
+ a write buffer and a read buffer (along with sizes) as arguments.
+  The ds2490 block I/O command supports reset, write buffer, read
+  buffer, and strong pullup all in one command, instead of the current
+  sequence: 1) reset the bus, 2) write the match rom command and slave
+  rom id, 3) block write and read data. The write buffer needs to have
+  the match rom command and slave rom id prepended to the front of the
+  requested write buffer, both of which are known to the driver.
+- The hardware supports normal, flexible, and overdrive bus
+  communication speeds, but only normal speed is supported.
+- The registered w1_bus_master functions don't define error
+ conditions. If a bus search is in progress and the ds2490 is
+ removed it can produce a good amount of error output before the bus
+ search finishes.
+- The hardware supports detecting some error conditions, such as
+ short, alarming presence on reset, and no presence on reset, but the
+ driver doesn't query those values.
+- The ds2490 specification doesn't cover short bulk in reads in
+  detail, but my observation is that if fewer bytes are requested than
+  are available, the bulk read will return an error and the hardware
+  will clear the entire bulk in buffer. It would be possible to read
+  the maximum buffer size to avoid this error condition, but extra
+  bytes in the buffer would indicate a logic error in the driver. The
+  code should match reads and writes as well as data sizes. Reads and
+  writes are serialized, and the status verifies that the chip is idle
+  (and data is available) before the read is executed, so this should
+  not happen.
+- Running x86_64 2.6.24 UHCI under qemu 0.9.0 under x86_64 2.6.22-rc6
+  with an OHCI controller, the ds2490 running in the guest would operate
+  normally the first time the module was loaded after qemu attached
+  the ds2490 hardware, but if the module was unloaded and then reloaded,
+  most of the time one of the bulk out or bulk in transfers (usually the
+  bulk in) would fail. qemu sets a 50ms timeout and the bulk in would
+  time out even when the status shows data available. A bulk out write would
+ show a successful completion, but the ds2490 status register would
+ show 0 bytes written. Detaching qemu from the ds2490 hardware and
+ reattaching would clear the problem. usbmon output in the guest and
+ host did not explain the problem. My guess is a bug in either qemu
+  or the host OS, more likely the host OS.
+-- 03-06-2008 David Fries <David@Fries.net>
diff --git a/Documentation/w1/slaves/00-INDEX b/Documentation/w1/slaves/00-INDEX
new file mode 100644
index 00000000000..f8101d6b07b
--- /dev/null
+++ b/Documentation/w1/slaves/00-INDEX
@@ -0,0 +1,4 @@
+00-INDEX
+ - This file
+w1_therm
+ - The Maxim/Dallas Semiconductor ds18*20 temperature sensor.
diff --git a/Documentation/w1/slaves/w1_therm b/Documentation/w1/slaves/w1_therm
new file mode 100644
index 00000000000..0403aaaba87
--- /dev/null
+++ b/Documentation/w1/slaves/w1_therm
@@ -0,0 +1,41 @@
+Kernel driver w1_therm
+======================
+
+Supported chips:
+ * Maxim ds18*20 based temperature sensors.
+
+Author: Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+
+
+Description
+-----------
+
+w1_therm provides basic temperature conversion for ds18*20 devices.
+Supported family codes:
+W1_THERM_DS18S20 0x10
+W1_THERM_DS1822 0x22
+W1_THERM_DS18B20 0x28
+
+Support is provided through the sysfs w1_slave file. Each open and
+read sequence will initiate a temperature conversion then provide two
+lines of ASCII output. The first line contains the nine hex bytes
+read along with a calculated crc value and YES or NO if it matched.
+If the crc matched the returned values are retained. The second line
+displays the retained values along with a temperature in millidegrees
+Centigrade after t=.
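+
+As an illustration only--the slave id below is made up, and the file lives
+under the usual /sys/bus/w1/devices/<family>-<serial>/ directory--a userspace
+program can read w1_slave and pick out the millidegree value after "t=":
+
+    #include <stdio.h>
+    #include <stdlib.h>
+    #include <string.h>
+
+    int main(void)
+    {
+        /* hypothetical DS18B20 (family 0x28) slave id */
+        FILE *f = fopen("/sys/bus/w1/devices/28-000000000000/w1_slave", "r");
+        char line[128];
+        char *t;
+
+        if (!f)
+            return 1;
+        while (fgets(line, sizeof(line), f))
+            if ((t = strstr(line, "t=")))
+                printf("%ld millidegrees C\n", strtol(t + 2, NULL, 10));
+        fclose(f);
+        return 0;
+    }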
+
+Parasite powered devices are limited to one slave performing a
+temperature conversion at a time. If none of the devices are parasite
+powered it would be possible to convert all the devices at the same
+time and then go back to read individual sensors. That isn't
+currently supported. The driver also doesn't support reduced
+precision (which would also reduce the conversion time).
+
+The module parameter strong_pullup can be set to 0 to disable the
+strong pullup or 1 to enable it. If enabled, the 5V strong pullup will
+be applied while the conversion is taking place, provided the master
+driver supports the strong pullup (otherwise the device falls back to a
+pullup resistor). The DS18B20 temperature sensor specification lists a
+maximum current draw of 1.5mA and notes that a 5k pullup resistor is not
+sufficient. The strong pullup is designed to provide the additional
+current required.
diff --git a/Documentation/w1/w1.generic b/Documentation/w1/w1.generic
index 4c6509dd478..e3333eec432 100644
--- a/Documentation/w1/w1.generic
+++ b/Documentation/w1/w1.generic
@@ -79,10 +79,13 @@ w1 master sysfs interface
<xx-xxxxxxxxxxxxx> - a directory for a found device. The format is family-serial
bus - (standard) symlink to the w1 bus
driver - (standard) symlink to the w1 driver
+w1_master_add - Manually register a slave device
w1_master_attempts - the number of times a search was attempted
w1_master_max_slave_count
- the maximum slaves that may be attached to a master
w1_master_name - the name of the device (w1_bus_masterX)
+w1_master_pullup - 5V strong pullup 0 enabled, 1 disabled
+w1_master_remove - Manually remove a slave device
w1_master_search - the number of searches left to do, -1=continual (default)
w1_master_slave_count
- the number of slaves found
@@ -90,7 +93,13 @@ w1_master_slaves - the names of the slaves, one per line
w1_master_timeout - the delay in seconds between searches
If you have a w1 bus that never changes (you don't add or remove devices),
-you can set w1_master_search to a positive value to disable searches.
+you can set the module parameter search_count to a small positive number
+for an initially small number of bus searches. Alternatively it could be
+set to zero, then manually add the slave device serial numbers via the
+w1_master_add device file. The w1_master_add and w1_master_remove files
+generally only make sense when searching is disabled, as a search will
+redetect manually removed devices that are present and time out manually
+added devices that aren't on the bus.
w1 slave sysfs interface
diff --git a/Documentation/x86/00-INDEX b/Documentation/x86/00-INDEX
new file mode 100644
index 00000000000..dbe3377754a
--- /dev/null
+++ b/Documentation/x86/00-INDEX
@@ -0,0 +1,4 @@
+00-INDEX
+ - this file
+mtrr.txt
+ - how to use x86 Memory Type Range Registers to increase performance
diff --git a/Documentation/x86/i386/boot.txt b/Documentation/x86/boot.txt
index 147bfe511cd..83c0033ee9e 100644
--- a/Documentation/x86/i386/boot.txt
+++ b/Documentation/x86/boot.txt
@@ -308,7 +308,7 @@ Protocol: 2.00+
Field name: start_sys
Type: read
-Offset/size: 0x20c/4
+Offset/size: 0x20c/2
Protocol: 2.00+
The load low segment (0x1000). Obsolete.
diff --git a/Documentation/mtrr.txt b/Documentation/x86/mtrr.txt
index c39ac395970..cc071dc333c 100644
--- a/Documentation/mtrr.txt
+++ b/Documentation/x86/mtrr.txt
@@ -18,7 +18,7 @@ Richard Gooch
The AMD K6-2 (stepping 8 and above) and K6-3 processors have two
MTRRs. These are supported. The AMD Athlon family provide 8 Intel
style MTRRs.
-
+
The Centaur C6 (WinChip) has 8 MCRs, allowing write-combining. These
are supported.
@@ -87,7 +87,7 @@ reg00: base=0x00000000 ( 0MB), size= 64MB: write-back, count=1
reg01: base=0xfb000000 (4016MB), size= 16MB: write-combining, count=1
reg02: base=0xfb000000 (4016MB), size= 4kB: uncachable, count=1
-Some cards (especially Voodoo Graphics boards) need this 4 kB area
+Some cards (especially Voodoo Graphics boards) need this 4 kB area
excluded from the beginning of the region because it is used for
registers.
diff --git a/Documentation/x86/pat.txt b/Documentation/x86/pat.txt
index 17965f927c1..c93ff5f4c0d 100644
--- a/Documentation/x86/pat.txt
+++ b/Documentation/x86/pat.txt
@@ -14,6 +14,10 @@ PAT allows for different types of memory attributes. The most commonly used
ones that will be supported at this time are Write-back, Uncached,
Write-combined and Uncached Minus.
+
+PAT APIs
+--------
+
There are many different APIs in the kernel that allows setting of memory
attributes at the page level. In order to avoid aliasing, these interfaces
should be used thoughtfully. Below is a table of interfaces available,
@@ -26,38 +30,38 @@ address range to avoid any aliasing.
API | RAM | ACPI,... | Reserved/Holes |
-----------------------|----------|------------|------------------|
| | | |
-ioremap | -- | UC | UC |
+ioremap | -- | UC- | UC- |
| | | |
ioremap_cache | -- | WB | WB |
| | | |
-ioremap_nocache | -- | UC | UC |
+ioremap_nocache | -- | UC- | UC- |
| | | |
ioremap_wc | -- | -- | WC |
| | | |
-set_memory_uc | UC | -- | -- |
+set_memory_uc | UC- | -- | -- |
set_memory_wb | | | |
| | | |
set_memory_wc | WC | -- | -- |
set_memory_wb | | | |
| | | |
-pci sysfs resource | -- | -- | UC |
+pci sysfs resource | -- | -- | UC- |
| | | |
pci sysfs resource_wc | -- | -- | WC |
is IORESOURCE_PREFETCH| | | |
| | | |
-pci proc | -- | -- | UC |
+pci proc | -- | -- | UC- |
!PCIIOC_WRITE_COMBINE | | | |
| | | |
pci proc | -- | -- | WC |
PCIIOC_WRITE_COMBINE | | | |
| | | |
-/dev/mem | -- | UC | UC |
+/dev/mem | -- | WB/WC/UC- | WB/WC/UC- |
read-write | | | |
| | | |
-/dev/mem | -- | UC | UC |
+/dev/mem | -- | UC- | UC- |
mmap SYNC flag | | | |
| | | |
-/dev/mem | -- | WB/WC/UC | WB/WC/UC |
+/dev/mem | -- | WB/WC/UC- | WB/WC/UC- |
mmap !SYNC flag | |(from exist-| (from exist- |
and | | ing alias)| ing alias) |
any alias to this area| | | |
@@ -68,7 +72,7 @@ pci proc | -- | -- | WC |
and | | | |
MTRR says WB | | | |
| | | |
-/dev/mem | -- | -- | UC_MINUS |
+/dev/mem | -- | -- | UC- |
mmap !SYNC flag | | | |
no alias to this area | | | |
and | | | |
@@ -98,3 +102,35 @@ types.
Drivers should use set_memory_[uc|wc] to set access type for RAM ranges.
+
+PAT debugging
+-------------
+
+With CONFIG_DEBUG_FS enabled, PAT memtype list can be examined by
+
+# mount -t debugfs debugfs /sys/kernel/debug
+# cat /sys/kernel/debug/x86/pat_memtype_list
+PAT memtype list:
+uncached-minus @ 0x7fadf000-0x7fae0000
+uncached-minus @ 0x7fb19000-0x7fb1a000
+uncached-minus @ 0x7fb1a000-0x7fb1b000
+uncached-minus @ 0x7fb1b000-0x7fb1c000
+uncached-minus @ 0x7fb1c000-0x7fb1d000
+uncached-minus @ 0x7fb1d000-0x7fb1e000
+uncached-minus @ 0x7fb1e000-0x7fb25000
+uncached-minus @ 0x7fb25000-0x7fb26000
+uncached-minus @ 0x7fb26000-0x7fb27000
+uncached-minus @ 0x7fb27000-0x7fb28000
+uncached-minus @ 0x7fb28000-0x7fb2e000
+uncached-minus @ 0x7fb2e000-0x7fb2f000
+uncached-minus @ 0x7fb2f000-0x7fb30000
+uncached-minus @ 0x7fb31000-0x7fb32000
+uncached-minus @ 0x80000000-0x90000000
+
+This list shows physical address ranges and various PAT settings used to
+access those physical address ranges.
+
+Another, more verbose way of getting PAT related debug messages is with
+"debugpat" boot parameter. With this parameter, various debug messages are
+printed to dmesg log.
+
diff --git a/Documentation/x86/i386/usb-legacy-support.txt b/Documentation/x86/usb-legacy-support.txt
index 1894cdfc69d..1894cdfc69d 100644
--- a/Documentation/x86/i386/usb-legacy-support.txt
+++ b/Documentation/x86/usb-legacy-support.txt
diff --git a/Documentation/x86/x86_64/boot-options.txt b/Documentation/x86/x86_64/boot-options.txt
index b0c7b6c4abd..72ffb5373ec 100644
--- a/Documentation/x86/x86_64/boot-options.txt
+++ b/Documentation/x86/x86_64/boot-options.txt
@@ -54,10 +54,6 @@ APICs
apicmaintimer. Useful when your PIT timer is totally
broken.
- disable_8254_timer / enable_8254_timer
- Enable interrupt 0 timer routing over the 8254 in addition to over
- the IO-APIC. The kernel tries to set a sensible default.
-
Early Console
syntax: earlyprintk=vga
diff --git a/Documentation/x86/i386/zero-page.txt b/Documentation/x86/zero-page.txt
index 169ad423a3d..169ad423a3d 100644
--- a/Documentation/x86/i386/zero-page.txt
+++ b/Documentation/x86/zero-page.txt