author    Alan Cox <alan@lxorguk.ukuu.org.uk>      2006-01-18 17:44:13 -0800
committer Linus Torvalds <torvalds@g5.osdl.org>   2006-01-18 19:20:31 -0800
commit    da9bb1d27b21cb24cbb6a2efb5d3c464d357a01e (patch)
tree      016b66985a651d071d3873e74b115108ddf0b3f5 /include
parent    2f768af73fea4c70f9046388a7ff648ad11f028e (diff)
[PATCH] EDAC: core EDAC support code
This is a subset of the bluesmoke project core code, stripped of the NMI work, which isn't ready to merge, and some of the "interesting" /proc functionality that needs reworking or simply has no place in the kernel. It requires no core kernel changes except the added scrub functions already posted.

The goal is to merge further functionality only after the core code is accepted and proven in the base kernel, and only at the point the upstream extras are really ready to merge.

From: doug thompson <norsk5@xmission.com>

This converts EDAC to sysfs and is the final chunk necessary before EDAC has a stable userspace API and can be considered for submission into the base kernel.

Signed-off-by: Alan Cox <alan@redhat.com>
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Jesper Juhl <jesper.juhl@gmail.com>
Signed-off-by: doug thompson <norsk5@xmission.com>
Signed-off-by: Pavel Machek <pavel@suse.cz>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include')
-rw-r--r--  include/asm-i386/atomic.h    12
-rw-r--r--  include/asm-i386/edac.h      18
-rw-r--r--  include/asm-x86_64/atomic.h  12
-rw-r--r--  include/asm-x86_64/edac.h    18
4 files changed, 36 insertions(+), 24 deletions(-)
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index e2c00c95a5e..de649d3aa2d 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -255,17 +255,5 @@ __asm__ __volatile__(LOCK "orl %0,%1" \
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()

-/* ECC atomic, DMA, SMP and interrupt safe scrub function */
-
-static __inline__ void atomic_scrub(unsigned long *virt_addr, u32 size)
-{
-	u32 i;
-	for (i = 0; i < size / 4; i++, virt_addr++)
-		/* Very carefully read and write to memory atomically
-		 * so we are interrupt, DMA and SMP safe.
-		 */
-		__asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
-}
-
#include <asm-generic/atomic.h>
#endif
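Why adding zero scrubs memory: the lock prefix makes the addl a single atomic read-modify-write transaction, so the CPU reads the word (at which point the ECC hardware corrects any single-bit error), adds zero, and writes the corrected value back to DRAM, with no window in which an interrupt, another CPU, or a DMA engine can touch the location. Conceptually, each loop iteration behaves like the sketch below; this is illustrative only, since the real sequence is one indivisible instruction:

	u32 tmp = *virt_addr;	/* read: ECC logic corrects any single-bit error */
	tmp += 0;		/* value is unchanged */
	*virt_addr = tmp;	/* locked RMW always stores, writing corrected data back */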
diff --git a/include/asm-i386/edac.h b/include/asm-i386/edac.h
new file mode 100644
index 00000000000..3e7dd0ab68c
--- /dev/null
+++ b/include/asm-i386/edac.h
@@ -0,0 +1,18 @@
+#ifndef ASM_EDAC_H
+#define ASM_EDAC_H
+
+/* ECC atomic, DMA, SMP and interrupt safe scrub function */
+
+static __inline__ void atomic_scrub(void *va, u32 size)
+{
+	unsigned long *virt_addr = va;
+	u32 i;
+
+	for (i = 0; i < size / 4; i++, virt_addr++)
+		/* Very carefully read and write to memory atomically
+		 * so we are interrupt, DMA and SMP safe.
+		 */
+		__asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
+}
+
+#endif
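For context, the EDAC core this header supports calls the helper roughly as sketched below, modeled on the core's edac_mc_scrub_block() (the diffstat above is limited to include/, so that caller is not shown here; treat the surrounding names as a sketch of the companion code rather than the exact implementation):

	/* Map the page that reported a corrected error and rewrite it in
	 * place so the corrected data lands back in DRAM. */
	static void scrub_block_sketch(unsigned long page, unsigned long offset,
				       u32 size)
	{
		struct page *pg;
		void *virt_addr;

		/* ECC error page was not in our memory; ignore it. */
		if (!pfn_valid(page))
			return;

		pg = pfn_to_page(page);

		/* kmap_atomic() is usable from interrupt context, which is
		 * where ECC error handlers typically run. */
		virt_addr = kmap_atomic(pg, KM_BOUNCE_READ);
		atomic_scrub(virt_addr + offset, size);
		kunmap_atomic(virt_addr, KM_BOUNCE_READ);
	}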
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 4048508c4f4..4b5cd553e77 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -426,17 +426,5 @@ __asm__ __volatile__(LOCK "orl %0,%1" \
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()

-/* ECC atomic, DMA, SMP and interrupt safe scrub function */
-
-static __inline__ void atomic_scrub(u32 *virt_addr, u32 size)
-{
-	u32 i;
-	for (i = 0; i < size / 4; i++, virt_addr++)
-		/* Very carefully read and write to memory atomically
-		 * so we are interrupt, DMA and SMP safe.
-		 */
-		__asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
-}
-
#include <asm-generic/atomic.h>
#endif
diff --git a/include/asm-x86_64/edac.h b/include/asm-x86_64/edac.h
new file mode 100644
index 00000000000..cad1cd42b4e
--- /dev/null
+++ b/include/asm-x86_64/edac.h
@@ -0,0 +1,18 @@
+#ifndef ASM_EDAC_H
+#define ASM_EDAC_H
+
+/* ECC atomic, DMA, SMP and interrupt safe scrub function */
+
+static __inline__ void atomic_scrub(void *va, u32 size)
+{
+	unsigned int *virt_addr = va;
+	u32 i;
+
+	for (i = 0; i < size / 4; i++, virt_addr++)
+		/* Very carefully read and write to memory atomically
+		 * so we are interrupt, DMA and SMP safe.
+		 */
+		__asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
+}
+
+#endif
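One design note on the x86_64 variant: addl always operates on 32 bits, so the walk pointer here is unsigned int * (4 bytes on x86_64) rather than unsigned long * (8 bytes on x86_64), keeping the loop's stride matched to the instruction's operand size. Taking void *va and assigning to a typed pointer internally also spares callers, which typically hold a void * from kmap_atomic(), from writing casts. Note that size / 4 truncates in both versions, so a trailing tail of fewer than 4 bytes is not scrubbed; callers are expected to pass sizes that are multiples of 4.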