author     Hiroshi Shimamoto <h-shimamoto@ct.jp.nec.com>    2007-10-19 20:35:02 +0200
committer  Thomas Gleixner <tglx@linutronix.de>             2007-10-19 20:35:02 +0200
commit     7778887880d278c23dc0975210df0381c878ae1e (patch)
tree       4c286d6a8d6467eaea9424b283b634934de02f87  /arch/x86/kernel/init_task.c
parent     af93ebc0b3ed8cdf93a6ed4bc1fab548f8059d0a (diff)
x86: merge init_task_32/64.c
Merge init_task_32/64.c. Move the 64-bit-only per-cpu data orig_ist to setup64.c.

[ mingo: fixed checkpatch trivialities. ]

Signed-off-by: Hiroshi Shimamoto <h-shimamoto@ct.jp.nec.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
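For context, the 64-bit-only per-cpu datum the message refers to is orig_ist, the array of original IST stack pointers. After this commit its definition presumably lives in setup64.c roughly as sketched below (not part of this diff; reconstructed from kernel sources of that era):

/* Sketch (not shown in this diff): the per-CPU orig_ist definition as
 * it would appear in arch/x86/kernel/setup64.c after the move.
 * orig_ist records the original IST stack pointers for each CPU.
 */
DEFINE_PER_CPU(struct orig_ist, orig_ist);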
Diffstat (limited to 'arch/x86/kernel/init_task.c')
-rw-r--r--   arch/x86/kernel/init_task.c   47
1 file changed, 47 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
new file mode 100644
index 00000000000..468c9c43784
--- /dev/null
+++ b/arch/x86/kernel/init_task.c
@@ -0,0 +1,47 @@
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/init_task.h>
+#include <linux/fs.h>
+#include <linux/mqueue.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/desc.h>
+
+static struct fs_struct init_fs = INIT_FS;
+static struct files_struct init_files = INIT_FILES;
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+struct mm_struct init_mm = INIT_MM(init_mm);
+EXPORT_SYMBOL(init_mm);
+
+/*
+ * Initial thread structure.
+ *
+ * We need to make sure that this is THREAD_SIZE aligned due to the
+ * way process stacks are handled. This is done by having a special
+ * "init_task" linker map entry..
+ */
+union thread_union init_thread_union
+ __attribute__((__section__(".data.init_task"))) =
+ { INIT_THREAD_INFO(init_task) };
+
+/*
+ * Initial task structure.
+ *
+ * All other task structs will be allocated on slabs in fork.c
+ */
+struct task_struct init_task = INIT_TASK(init_task);
+EXPORT_SYMBOL(init_task);
+
+/*
+ * per-CPU TSS segments. Threads are completely 'soft' on Linux,
+ * no more per-task TSS's. The TSS size is kept cacheline-aligned
+ * so they are allowed to end up in the .data.cacheline_aligned
+ * section. Since TSS's are completely CPU-local, we want them
+ * on exact cacheline boundaries, to eliminate cacheline ping-pong.
+ */
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
+
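A note on the THREAD_SIZE alignment comment above: in kernels of this era, struct thread_info sits at the bottom of each task's kernel stack and is recovered by masking the stack pointer, which is why init_thread_union must land THREAD_SIZE aligned via the .data.init_task section. A simplified sketch of that lookup, modeled on the 32-bit current_thread_info() of the time (the real version reads %esp through a register variable):

/* Simplified sketch: thread_info lives at the base of the kernel
 * stack, so it can be found by rounding the stack pointer down to a
 * THREAD_SIZE boundary. An unaligned init stack would break this for
 * the init task.
 */
static inline struct thread_info *current_thread_info(void)
{
	unsigned long sp;

	asm("mov %%esp, %0" : "=r" (sp));
	return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
}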
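Similarly, the per-CPU init_tss defined at the end of the file is reached elsewhere through the per-cpu accessors; a minimal usage sketch (not from this file, modeled on the context-switch code of that era):

/* Sketch: looking up this CPU's TSS from the per-CPU area, e.g. in
 * __switch_to(); smp_processor_id() returns the current processor id.
 */
int cpu = smp_processor_id();
struct tss_struct *tss = &per_cpu(init_tss, cpu);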