Diffstat (limited to 'arch/x86/include')
-rw-r--r--  arch/x86/include/asm/kvm_para.h        |  3
-rw-r--r--  arch/x86/include/asm/stat.h            | 21
-rw-r--r--  arch/x86/include/asm/word-at-a-time.h  | 33
3 files changed, 57 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 734c3767cfa..183922e13de 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -170,6 +170,9 @@ static inline int kvm_para_available(void)
 	unsigned int eax, ebx, ecx, edx;
 	char signature[13];
 
+	if (boot_cpu_data.cpuid_level < 0)
+		return 0;	/* So we don't blow up on old processors */
+
 	cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
 	memcpy(signature + 0, &ebx, 4);
 	memcpy(signature + 4, &ecx, 4);
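
For context on the guard: the kernel sets boot_cpu_data.cpuid_level to a negative value on processors that predate the CPUID instruction (i386 and early i486 parts), so the early return keeps cpuid() from raising an invalid-opcode fault there. A sketch of how the whole function reads after this patch; the tail with the signature comparison is not in the hunk and is reconstructed from kernels of this era, so treat it as an assumption:

static inline int kvm_para_available(void)
{
	unsigned int eax, ebx, ecx, edx;
	char signature[13];

	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
	memcpy(signature + 0, &ebx, 4);
	memcpy(signature + 4, &ecx, 4);
	memcpy(signature + 8, &edx, 4);	/* assumed: continues past the hunk */
	signature[12] = 0;

	if (strcmp(signature, "KVMKVMKVM") == 0)	/* assumed KVM signature */
		return 1;

	return 0;
}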
diff --git a/arch/x86/include/asm/stat.h b/arch/x86/include/asm/stat.h
index e0b1d9bbcbc..7b3ddc34858 100644
--- a/arch/x86/include/asm/stat.h
+++ b/arch/x86/include/asm/stat.h
@@ -25,6 +25,12 @@ struct stat {
 	unsigned long  __unused5;
 };
 
+/* We don't need to memset the whole thing just to initialize the padding */
+#define INIT_STRUCT_STAT_PADDING(st) do {	\
+	st.__unused4 = 0;			\
+	st.__unused5 = 0;			\
+} while (0)
+
 #define STAT64_HAS_BROKEN_ST_INO	1
 
 /* This matches struct stat64 in glibc2.1, hence the absolutely
@@ -63,6 +69,12 @@ struct stat64 {
 	unsigned long long	st_ino;
 };
 
+/* We don't need to memset the whole thing just to initialize the padding */
+#define INIT_STRUCT_STAT64_PADDING(st) do {		\
+	memset(&st.__pad0, 0, sizeof(st.__pad0));	\
+	memset(&st.__pad3, 0, sizeof(st.__pad3));	\
+} while (0)
+
 #else /* __i386__ */
 
 struct stat {
@@ -87,6 +99,15 @@ struct stat {
 	unsigned long	st_ctime_nsec;
 	long		__unused[3];
 };
+
+/* We don't need to memset the whole thing just to initialize the padding */
+#define INIT_STRUCT_STAT_PADDING(st) do {	\
+	st.__pad0 = 0;				\
+	st.__unused[0] = 0;			\
+	st.__unused[1] = 0;			\
+	st.__unused[2] = 0;			\
+} while (0)
+
 #endif
 
 /* for 32bit emulation and 32 bit kernels */
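
The three macro variants differ because the padding differs per ABI: the scalar pads (__unused4/__unused5 on i386, __pad0 and __unused[] on x86-64) are cheapest to clear by direct assignment, while the struct stat64 pads are byte arrays and take a memset of just their own size. A hypothetical caller, loosely modeled on fs/stat.c's cp_new_stat(), showing the intended use; the field copies are an illustrative subset, not the real function:

/* Clear only the user-visible padding instead of memset()ing the
 * entire structure before filling in every real field anyway. */
static int example_cp_stat(struct kstat *stat, struct stat __user *statbuf)
{
	struct stat tmp;

	INIT_STRUCT_STAT_PADDING(tmp);
	tmp.st_ino = stat->ino;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	/* ... remaining fields elided ... */

	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}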
diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
index 6fe6767b712..e58f03b206c 100644
--- a/arch/x86/include/asm/word-at-a-time.h
+++ b/arch/x86/include/asm/word-at-a-time.h
@@ -43,4 +43,37 @@ static inline unsigned long has_zero(unsigned long a)
 	return ((a - REPEAT_BYTE(0x01)) & ~a) & REPEAT_BYTE(0x80);
 }
 
+/*
+ * Load an unaligned word from kernel space.
+ *
+ * In the (very unlikely) case of the word being a page-crosser
+ * and the next page not being mapped, take the exception and
+ * return zeroes in the non-existing part.
+ */
+static inline unsigned long load_unaligned_zeropad(const void *addr)
+{
+	unsigned long ret, dummy;
+
+	asm(
+		"1:\tmov %2,%0\n"
+		"2:\n"
+		".section .fixup,\"ax\"\n"
+		"3:\t"
+		"lea %2,%1\n\t"
+		"and %3,%1\n\t"
+		"mov (%1),%0\n\t"
+		"leal %2,%%ecx\n\t"
+		"andl %4,%%ecx\n\t"
+		"shll $3,%%ecx\n\t"
+		"shr %%cl,%0\n\t"
+		"jmp 2b\n"
+		".previous\n"
+		_ASM_EXTABLE(1b, 3b)
+		:"=&r" (ret),"=&c" (dummy)
+		:"m" (*(unsigned long *)addr),
+		 "i" (-sizeof(unsigned long)),
+		 "i" (sizeof(unsigned long)-1));
+	return ret;
+}
+
 #endif /* _ASM_WORD_AT_A_TIME_H */
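
What the fixup computes, in plain C: the fast path at 1: is a single, possibly unaligned mov. If that load crosses into an unmapped page and faults, the exception table sends control to 3:, which rounds the address down to a word boundary ("and %3,%1", with %3 being -sizeof(unsigned long)), reloads from that always-mapped aligned word, and shifts right by 8 times the misalignment (computed into %ecx via %4, i.e. sizeof(unsigned long)-1) so the valid bytes land at the bottom and the missing ones read as zero. A user-space model of that recovery path, assuming little-endian x86 byte order; the names are illustrative, not kernel API:

/* Model of the exception-fixup arithmetic only: an aligned load plus a
 * right shift reproduces the unaligned load with zero-fill at the top. */
static unsigned long zeropad_fixup_model(const void *addr)
{
	unsigned long offset = (unsigned long)addr & (sizeof(unsigned long) - 1);
	const unsigned long *aligned = (const unsigned long *)
		((unsigned long)addr & ~(sizeof(unsigned long) - 1));

	/* Bytes below 'addr' are shifted out; bytes that would have come
	 * from the faulting page never enter the word and appear as 0. */
	return *aligned >> (8 * offset);
}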