Diffstat (limited to 'Documentation/DocBook')
-rw-r--r--  Documentation/DocBook/device-drivers.tmpl |  1
-rw-r--r--  Documentation/DocBook/drm.tmpl            |  1
-rw-r--r--  Documentation/DocBook/genericirq.tmpl     | 84
-rw-r--r--  Documentation/DocBook/kernel-api.tmpl     |  4
-rw-r--r--  Documentation/DocBook/kernel-locking.tmpl | 20
-rw-r--r--  Documentation/DocBook/tracepoint.tmpl     |  5
6 files changed, 70 insertions, 45 deletions
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index ecd35e9d441..feca0758391 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -46,7 +46,6 @@
<sect1><title>Atomic and pointer manipulation</title>
!Iarch/x86/include/asm/atomic.h
-!Iarch/x86/include/asm/unaligned.h
</sect1>
<sect1><title>Delaying, scheduling, and timer routines</title>
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 910c923a9b8..2861055afd7 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -136,6 +136,7 @@
#ifdef CONFIG_COMPAT
.compat_ioctl = i915_compat_ioctl,
#endif
+ .llseek = noop_llseek,
},
.pci_driver = {
.name = DRIVER_NAME,
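The added .llseek line reflects the BKL removal work, which made every file_operations pick an explicit llseek method; noop_llseek() succeeds without moving the file position. A minimal sketch outside the DRM code, with hypothetical mydrv_* names that are not part of this patch:

#include <linux/fs.h>
#include <linux/module.h>

/* Hypothetical driver read that ignores the file position. */
static ssize_t mydrv_read(struct file *file, char __user *buf,
                          size_t len, loff_t *ppos)
{
        return 0;       /* nothing to report in this sketch */
}

static const struct file_operations mydrv_fops = {
        .owner  = THIS_MODULE,
        .read   = mydrv_read,
        .llseek = noop_llseek,  /* seeks succeed but do not move f_pos */
};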
diff --git a/Documentation/DocBook/genericirq.tmpl b/Documentation/DocBook/genericirq.tmpl
index 1448b33fd22..fb10fd08c05 100644
--- a/Documentation/DocBook/genericirq.tmpl
+++ b/Documentation/DocBook/genericirq.tmpl
@@ -28,7 +28,7 @@
</authorgroup>
<copyright>
- <year>2005-2006</year>
+ <year>2005-2010</year>
<holder>Thomas Gleixner</holder>
</copyright>
<copyright>
@@ -100,6 +100,10 @@
<listitem><para>Edge type</para></listitem>
<listitem><para>Simple type</para></listitem>
</itemizedlist>
+ During the implementation we identified another type:
+ <itemizedlist>
+ <listitem><para>Fast EOI type</para></listitem>
+ </itemizedlist>
In the SMP world of the __do_IRQ() super-handler another type
was identified:
<itemizedlist>
@@ -153,6 +157,7 @@
is still available. This leads to a kind of duality for the time
being. Over time the new model should be used in more and more
architectures, as it enables smaller and cleaner IRQ subsystems.
+ It has been deprecated for three years now and is about to be removed.
</para>
</chapter>
<chapter id="bugs">
@@ -217,6 +222,7 @@
<itemizedlist>
<listitem><para>handle_level_irq</para></listitem>
<listitem><para>handle_edge_irq</para></listitem>
+ <listitem><para>handle_fasteoi_irq</para></listitem>
<listitem><para>handle_simple_irq</para></listitem>
<listitem><para>handle_percpu_irq</para></listitem>
</itemizedlist>
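Each interrupt line gets one of these handlers installed by the architecture or platform setup code. A minimal sketch, assuming the set_irq_chip_and_handler() helper that was current when this patch was written (it was later renamed irq_set_chip_and_handler()); my_chip and MY_UART_IRQ are hypothetical names:

#include <linux/init.h>
#include <linux/irq.h>

/* Hypothetical controller and interrupt number, for illustration only. */
extern struct irq_chip my_chip;
#define MY_UART_IRQ     42

static void __init my_platform_init_irqs(void)
{
        /* A level-triggered line is paired with handle_level_irq. */
        set_irq_chip_and_handler(MY_UART_IRQ, &my_chip, handle_level_irq);

        /* A line that only needs an end-of-interrupt would use
         * handle_fasteoi_irq instead; per-CPU interrupts would use
         * handle_percpu_irq. */
}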
@@ -233,33 +239,33 @@
are used by the default flow implementations.
The following helper functions are implemented (simplified excerpt):
<programlisting>
-default_enable(irq)
+default_enable(struct irq_data *data)
{
- desc->chip->unmask(irq);
+ desc->chip->irq_unmask(data);
}
-default_disable(irq)
+default_disable(struct irq_data *data)
{
- if (!delay_disable(irq))
- desc->chip->mask(irq);
+ if (!delay_disable(data))
+ desc->chip->irq_mask(data);
}
-default_ack(irq)
+default_ack(struct irq_data *data)
{
- chip->ack(irq);
+ chip->irq_ack(data);
}
-default_mask_ack(irq)
+default_mask_ack(struct irq_data *data)
{
- if (chip->mask_ack) {
- chip->mask_ack(irq);
+ if (chip->irq_mask_ack) {
+ chip->irq_mask_ack(data);
} else {
- chip->mask(irq);
- chip->ack(irq);
+ chip->irq_mask(data);
+ chip->irq_ack(data);
}
}
-noop(irq)
+noop(struct irq_data *data)
{
}
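The switch from a bare IRQ number to struct irq_data means a chip callback can reach its own state directly instead of looking it up from the global descriptor. A minimal sketch of such a callback; struct mychip, its register layout and MYCHIP_MASK_REG are hypothetical, and the chip_data pointer is assumed to have been set up earlier with set_irq_chip_data():

#include <linux/irq.h>
#include <linux/io.h>
#include <linux/bitops.h>

/* Hypothetical per-controller state, stored with set_irq_chip_data(). */
struct mychip {
        void __iomem *regs;
};
#define MYCHIP_MASK_REG 0x04    /* made-up register offset */

static void mychip_irq_mask(struct irq_data *d)
{
        struct mychip *chip = d->chip_data;
        u32 mask = readl(chip->regs + MYCHIP_MASK_REG);

        /* d->irq carries the Linux interrupt number. */
        writel(mask | BIT(d->irq % 32), chip->regs + MYCHIP_MASK_REG);
}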
@@ -278,12 +284,27 @@ noop(irq)
<para>
The following control flow is implemented (simplified excerpt):
<programlisting>
-desc->chip->start();
+desc->chip->irq_mask();
handle_IRQ_event(desc->action);
-desc->chip->end();
+desc->chip->irq_unmask();
</programlisting>
</para>
- </sect3>
+ </sect3>
+ <sect3 id="Default_FASTEOI_IRQ_flow_handler">
+ <title>Default Fast EOI IRQ flow handler</title>
+ <para>
+ handle_fasteoi_irq provides a generic implementation
+ for interrupts which only need an EOI at the end of
+ the handler.
+ </para>
+ <para>
+ The following control flow is implemented (simplified excerpt):
+ <programlisting>
+handle_IRQ_event(desc->action);
+desc->chip->irq_eoi();
+ </programlisting>
+ </para>
+ </sect3>
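A chip driven by handle_fasteoi_irq only has to supply an irq_eoi() callback. A minimal sketch, with a hypothetical MMIO base and EOI register:

#include <linux/irq.h>
#include <linux/io.h>

/* Hypothetical MMIO base and end-of-interrupt register offset. */
static void __iomem *mychip_base;
#define MYCHIP_EOI_REG  0x10

static void mychip_irq_eoi(struct irq_data *d)
{
        /* Signal the controller that this interrupt has been serviced;
         * handle_fasteoi_irq calls this once, after the handlers ran. */
        writel(d->irq, mychip_base + MYCHIP_EOI_REG);
}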
<sect3 id="Default_Edge_IRQ_flow_handler">
<title>Default Edge IRQ flow handler</title>
<para>
@@ -294,20 +315,19 @@ desc->chip->end();
The following control flow is implemented (simplified excerpt):
<programlisting>
if (desc->status &amp; running) {
- desc->chip->hold();
+ desc->chip->irq_mask();
desc->status |= pending | masked;
return;
}
-desc->chip->start();
+desc->chip->irq_ack();
desc->status |= running;
do {
if (desc->status &amp; masked)
- desc->chip->enable();
+ desc->chip->irq_unmask();
desc->status &amp;= ~pending;
handle_IRQ_event(desc->action);
} while (desc->status &amp; pending);
desc->status &amp;= ~running;
-desc->chip->end();
</programlisting>
</para>
</sect3>
@@ -342,9 +362,9 @@ handle_IRQ_event(desc->action);
<para>
The following control flow is implemented (simplified excerpt):
<programlisting>
-desc->chip->start();
handle_IRQ_event(desc->action);
-desc->chip->end();
+if (desc->chip->irq_eoi)
+ desc->chip->irq_eoi();
</programlisting>
</para>
</sect3>
@@ -375,8 +395,7 @@ desc->chip->end();
mechanism. (It's necessary to enable CONFIG_HARDIRQS_SW_RESEND when
you want to use the delayed interrupt disable feature and your
hardware is not capable of retriggering an interrupt.)
- The delayed interrupt disable can be runtime enabled, per interrupt,
- by setting the IRQ_DELAYED_DISABLE flag in the irq_desc status field.
+ The delayed interrupt disable is not configurable.
</para>
</sect2>
</sect1>
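For hardware that can retrigger a lost interrupt, the chip supplies an irq_retrigger() callback and the software resend is only used as a fallback. A minimal sketch with a hypothetical set-pending register; returning nonzero tells the core the retrigger happened in hardware:

#include <linux/irq.h>
#include <linux/io.h>
#include <linux/bitops.h>

/* Hypothetical MMIO base and software set-pending register. */
static void __iomem *mychip_base;
#define MYCHIP_SETPEND_REG      0x08

static int mychip_irq_retrigger(struct irq_data *d)
{
        /* Force the line pending again in hardware ... */
        writel(BIT(d->irq % 32), mychip_base + MYCHIP_SETPEND_REG);
        return 1;       /* ... so the core skips the software resend */
}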
@@ -387,13 +406,13 @@ desc->chip->end();
contains all the direct chip relevant functions, which
can be utilized by the irq flow implementations.
<itemizedlist>
- <listitem><para>ack()</para></listitem>
- <listitem><para>mask_ack() - Optional, recommended for performance</para></listitem>
- <listitem><para>mask()</para></listitem>
- <listitem><para>unmask()</para></listitem>
- <listitem><para>retrigger() - Optional</para></listitem>
- <listitem><para>set_type() - Optional</para></listitem>
- <listitem><para>set_wake() - Optional</para></listitem>
+ <listitem><para>irq_ack()</para></listitem>
+ <listitem><para>irq_mask_ack() - Optional, recommended for performance</para></listitem>
+ <listitem><para>irq_mask()</para></listitem>
+ <listitem><para>irq_unmask()</para></listitem>
+ <listitem><para>irq_retrigger() - Optional</para></listitem>
+ <listitem><para>irq_set_type() - Optional</para></listitem>
+ <listitem><para>irq_set_wake() - Optional</para></listitem>
</itemizedlist>
These primitives are strictly intended to mean what they say: ack means
ACK, masking means masking of an IRQ line, etc. It is up to the flow
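Put together, a chip implementation wires these primitives into a struct irq_chip. A minimal sketch; the callbacks are hypothetical, and the optional ones may simply be left out:

#include <linux/irq.h>

/* Hypothetical callbacks, e.g. the mychip_irq_mask() sketch above. */
static void mychip_irq_ack(struct irq_data *d)    { /* clear the latch */ }
static void mychip_irq_mask(struct irq_data *d)   { /* mask the line */ }
static void mychip_irq_unmask(struct irq_data *d) { /* unmask the line */ }

static struct irq_chip mychip_irq_chip = {
        .name           = "mychip",
        .irq_ack        = mychip_irq_ack,
        .irq_mask       = mychip_irq_mask,
        .irq_unmask     = mychip_irq_unmask,
        /* .irq_mask_ack, .irq_retrigger, .irq_set_type and .irq_set_wake
         * are optional and omitted in this sketch. */
};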
@@ -458,6 +477,7 @@ desc->chip->end();
<para>
This chapter contains the autogenerated documentation of the internal functions.
</para>
+!Ikernel/irq/irqdesc.c
!Ikernel/irq/handle.c
!Ikernel/irq/chip.c
</chapter>
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index a20c6f6fffc..6b4e07f28b6 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -57,7 +57,6 @@
</para>
<sect1><title>String Conversions</title>
-!Ilib/vsprintf.c
!Elib/vsprintf.c
</sect1>
<sect1><title>String Manipulation</title>
@@ -258,7 +257,8 @@ X!Earch/x86/kernel/mca_32.c
!Iblock/blk-sysfs.c
!Eblock/blk-settings.c
!Eblock/blk-exec.c
-!Eblock/blk-barrier.c
+!Eblock/blk-flush.c
+!Eblock/blk-lib.c
!Eblock/blk-tag.c
!Iblock/blk-tag.c
!Eblock/blk-integrity.c
diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl
index 0b1a3f97f28..f66f4df1869 100644
--- a/Documentation/DocBook/kernel-locking.tmpl
+++ b/Documentation/DocBook/kernel-locking.tmpl
@@ -1645,7 +1645,9 @@ the amount of locking which needs to be done.
all the readers who were traversing the list when we deleted the
element are finished. We use <function>call_rcu()</function> to
register a callback which will actually destroy the object once
- the readers are finished.
+ all pre-existing readers are finished. Alternatively,
+ <function>synchronize_rcu()</function> may be used to block until
+ all pre-existing readers are finished.
</para>
<para>
But how does Read Copy Update know when the readers are
@@ -1714,7 +1716,7 @@ the amount of locking which needs to be done.
- object_put(obj);
+ list_del_rcu(&amp;obj-&gt;list);
cache_num--;
-+ call_rcu(&amp;obj-&gt;rcu, cache_delete_rcu, obj);
++ call_rcu(&amp;obj-&gt;rcu, cache_delete_rcu);
}
/* Must be holding cache_lock */
@@ -1725,14 +1727,6 @@ the amount of locking which needs to be done.
if (++cache_num > MAX_CACHE_SIZE) {
struct object *i, *outcast = NULL;
list_for_each_entry(i, &amp;cache, list) {
-@@ -85,6 +94,7 @@
- obj-&gt;popularity = 0;
- atomic_set(&amp;obj-&gt;refcnt, 1); /* The cache holds a reference */
- spin_lock_init(&amp;obj-&gt;lock);
-+ INIT_RCU_HEAD(&amp;obj-&gt;rcu);
-
- spin_lock_irqsave(&amp;cache_lock, flags);
- __cache_add(obj);
@@ -104,12 +114,11 @@
struct object *cache_find(int id)
{
@@ -1961,6 +1955,12 @@ machines due to caching.
</sect1>
</chapter>
+ <chapter id="apiref">
+ <title>Mutex API reference</title>
+!Iinclude/linux/mutex.h
+!Ekernel/mutex.c
+ </chapter>
+
<chapter id="references">
<title>Further reading</title>
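The new chapter pulls in the kerneldoc for the mutex API. A minimal usage sketch with a hypothetical counter:

#include <linux/mutex.h>

/* Hypothetical shared state protected by a statically initialized mutex. */
static DEFINE_MUTEX(counter_lock);
static unsigned long counter;

static unsigned long counter_inc(void)
{
        unsigned long val;

        mutex_lock(&counter_lock);      /* may sleep; not for IRQ context */
        val = ++counter;
        mutex_unlock(&counter_lock);

        return val;
}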
diff --git a/Documentation/DocBook/tracepoint.tmpl b/Documentation/DocBook/tracepoint.tmpl
index e8473eae2a2..b57a9ede322 100644
--- a/Documentation/DocBook/tracepoint.tmpl
+++ b/Documentation/DocBook/tracepoint.tmpl
@@ -104,4 +104,9 @@
<title>Block IO</title>
!Iinclude/trace/events/block.h
</chapter>
+
+ <chapter id="workqueue">
+ <title>Workqueue</title>
+!Iinclude/trace/events/workqueue.h
+ </chapter>
</book>