author	Tejun Heo <tj@kernel.org>	2011-05-02 14:16:37 +0200
committer	Tejun Heo <tj@kernel.org>	2011-05-02 14:16:47 +0200
commit	ba67cf5cf2ce10ad86a212b70f8c7c75d93a5016 (patch)
tree	70242f5927c6d6454bd352ff78f956cfc5238f59 /arch/sh/include/asm
parent	aff364860aa105b2deacc6f21ec8ef524460e3fc (diff)
parent	2be19102b71c1a45d37fec50303791daa1a06869 (diff)
Merge branch 'x86/urgent' into x86-mm
Merge reason: Pick up the following two fix commits.
2be19102b7: x86, NUMA: Fix empty memblk detection in numa_cleanup_meminfo()
765af22da8: x86-32, NUMA: Fix ACPI NUMA init broken by recent x86-64 change
Scheduled NUMA init 32/64bit unification changes depend on these.
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'arch/sh/include/asm')
-rw-r--r--	arch/sh/include/asm/page.h	2
-rw-r--r--	arch/sh/include/asm/pgtable_32.h	2
-rw-r--r--	arch/sh/include/asm/unaligned-sh4a.h	2
3 files changed, 3 insertions, 3 deletions
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index c4e0b3d472b..822d6084195 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -186,7 +186,7 @@ typedef struct page *pgtable_t;
 /*
  * While BYTES_PER_WORD == 4 on the current sh64 ABI, GCC will still
  * happily generate {ld/st}.q pairs, requiring us to have 8-byte
- * alignment to avoid traps. The kmalloc alignment is gauranteed by
+ * alignment to avoid traps. The kmalloc alignment is guaranteed by
  * virtue of L1_CACHE_BYTES, requiring this to only be special cased
  * for slab caches.
  */
diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h
index b799fe71114..0bce3d81569 100644
--- a/arch/sh/include/asm/pgtable_32.h
+++ b/arch/sh/include/asm/pgtable_32.h
@@ -167,7 +167,7 @@ static inline unsigned long copy_ptea_attributes(unsigned long x)
 #endif
 
 /*
- * Mask of bits that are to be preserved accross pgprot changes.
+ * Mask of bits that are to be preserved across pgprot changes.
  */
 #define _PAGE_CHG_MASK \
 	(PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | \
diff --git a/arch/sh/include/asm/unaligned-sh4a.h b/arch/sh/include/asm/unaligned-sh4a.h
index c48a9c3420d..95adc500cab 100644
--- a/arch/sh/include/asm/unaligned-sh4a.h
+++ b/arch/sh/include/asm/unaligned-sh4a.h
@@ -9,7 +9,7 @@
  * struct.
  *
  * The same note as with the movli.l/movco.l pair applies here, as long
- * as the load is gauranteed to be inlined, nothing else will hook in to
+ * as the load is guaranteed to be inlined, nothing else will hook in to
  * r0 and we get the return value for free.
  *
  * NOTE: Due to the fact we require r0 encoding, care should be taken to