Diffstat (limited to 'include/asm-generic')
-rw-r--r--   include/asm-generic/bug.h            15
-rw-r--r--   include/asm-generic/memory_model.h    2
-rw-r--r--   include/asm-generic/pgtable.h        50
-rw-r--r--   include/asm-generic/vmlinux.lds.h    31
4 files changed, 92 insertions, 6 deletions
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 12c07c1866b..8af276361bf 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -8,9 +8,17 @@
#ifdef CONFIG_GENERIC_BUG
#ifndef __ASSEMBLY__
struct bug_entry {
+#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
unsigned long bug_addr;
+#else
+ signed int bug_addr_disp;
+#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
+#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
const char *file;
+#else
+ signed int file_disp;
+#endif
unsigned short line;
#endif
unsigned short flags;
@@ -33,15 +41,14 @@ struct bug_entry {
#ifndef __WARN
#ifndef __ASSEMBLY__
-extern void warn_on_slowpath(const char *file, const int line);
extern void warn_slowpath(const char *file, const int line,
const char *fmt, ...) __attribute__((format(printf, 3, 4)));
#define WANT_WARN_ON_SLOWPATH
#endif
-#define __WARN() warn_on_slowpath(__FILE__, __LINE__)
-#define __WARN_printf(arg...) warn_slowpath(__FILE__, __LINE__, arg)
+#define __WARN() warn_slowpath(__FILE__, __LINE__, NULL)
+#define __WARN_printf(arg...) warn_slowpath(__FILE__, __LINE__, arg)
#else
-#define __WARN_printf(arg...) do { printk(arg); __WARN(); } while (0)
+#define __WARN_printf(arg...) do { printk(arg); __WARN(); } while (0)
#endif
#ifndef WARN_ON
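
With CONFIG_GENERIC_BUG_RELATIVE_POINTERS, bug_addr and file are stored as signed 32-bit displacements relative to the field's own address, shrinking each bug_entry on 64-bit architectures. A minimal sketch of how a consumer such as lib/bug.c might recover the absolute address (the helper name here is illustrative):

    static inline unsigned long bug_addr(const struct bug_entry *bug)
    {
    #ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
            /* the displacement is relative to the address of the field itself */
            return (unsigned long)&bug->bug_addr_disp + bug->bug_addr_disp;
    #else
            return bug->bug_addr;
    #endif
    }
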
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index 18546d8eb78..36fa286adad 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -49,7 +49,7 @@
/* memmap is virtually contiguous. */
#define __pfn_to_page(pfn) (vmemmap + (pfn))
-#define __page_to_pfn(page) ((page) - vmemmap)
+#define __page_to_pfn(page) (unsigned long)((page) - vmemmap)
#elif defined(CONFIG_SPARSEMEM)
/*
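
The added cast matters because subtracting two struct page pointers yields a signed ptrdiff_t; converting the element difference to unsigned long keeps __page_to_pfn() usable anywhere a pfn is expected. A small sketch of the round-trip under CONFIG_SPARSEMEM_VMEMMAP, assuming vmemmap is the virtual memmap base:

    /* pfn -> page -> pfn round-trip */
    struct page *page = __pfn_to_page(pfn);       /* vmemmap + pfn */
    unsigned long again = __page_to_pfn(page);    /* (unsigned long)(page - vmemmap) */
    BUG_ON(again != pfn);
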
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index ef87f889ef6..72ebe91005a 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -129,6 +129,10 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
#define move_pte(pte, prot, old_addr, new_addr) (pte)
#endif
+#ifndef pgprot_writecombine
+#define pgprot_writecombine pgprot_noncached
+#endif
+
/*
* When walking page tables, get the address of the next boundary,
* or the end address of the range if that comes earlier. Although no
@@ -289,6 +293,52 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
#define arch_flush_lazy_cpu_mode() do {} while (0)
#endif
+#ifndef __HAVE_PFNMAP_TRACKING
+/*
+ * Interface that can be used by architecture code to keep track of
+ * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
+ *
+ * track_pfn_vma_new is called when a _new_ pfn mapping is being established
+ * for the physical range indicated by pfn and size.
+ */
+static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+ unsigned long pfn, unsigned long size)
+{
+ return 0;
+}
+
+/*
+ * Interface that can be used by architecture code to keep track of
+ * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
+ *
+ * track_pfn_vma_copy is called when a vma covering the pfnmap gets
+ * copied through copy_page_range().
+ */
+static inline int track_pfn_vma_copy(struct vm_area_struct *vma)
+{
+ return 0;
+}
+
+/*
+ * Interface that can be used by architecture code to keep track of
+ * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
+ *
+ * untrack_pfn_vma is called while unmapping a pfnmap for a region.
+ * untrack can be called for a specific region indicated by pfn and size,
+ * or for the entire vma (in which case size can be zero).
+ */
+static inline void untrack_pfn_vma(struct vm_area_struct *vma,
+ unsigned long pfn, unsigned long size)
+{
+}
+#else
+extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+ unsigned long pfn, unsigned long size);
+extern int track_pfn_vma_copy(struct vm_area_struct *vma);
+extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
+ unsigned long size);
+#endif
+
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_GENERIC_PGTABLE_H */
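
A hedged sketch of how a driver might rely on the new pgprot_writecombine() default; the handler and device-base names are hypothetical. remap_pfn_range() is where the track_pfn_vma_new() hook takes effect on architectures that define __HAVE_PFNMAP_TRACKING; elsewhere the stubs above make the tracking a no-op:

    /* Hypothetical mmap handler mapping device memory write-combined. */
    static int my_mmap(struct file *file, struct vm_area_struct *vma)
    {
            unsigned long pfn = my_dev_phys_base >> PAGE_SHIFT;  /* hypothetical base */

            vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
            /* remap_pfn_range() invokes the pfn-tracking hooks where implemented */
            return remap_pfn_range(vma, vma->vm_start, pfn,
                                   vma->vm_end - vma->vm_start,
                                   vma->vm_page_prot);
    }
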
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 80744606bad..c61fab1dd2f 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -45,6 +45,22 @@
#define MCOUNT_REC()
#endif
+#ifdef CONFIG_TRACE_BRANCH_PROFILING
+#define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
+ *(_ftrace_annotated_branch) \
+ VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
+#else
+#define LIKELY_PROFILE()
+#endif
+
+#ifdef CONFIG_PROFILE_ALL_BRANCHES
+#define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \
+ *(_ftrace_branch) \
+ VMLINUX_SYMBOL(__stop_branch_profile) = .;
+#else
+#define BRANCH_PROFILE()
+#endif
+
/* .data section */
#define DATA_DATA \
*(.data) \
@@ -60,9 +76,12 @@
VMLINUX_SYMBOL(__start___markers) = .; \
*(__markers) \
VMLINUX_SYMBOL(__stop___markers) = .; \
+ . = ALIGN(32); \
VMLINUX_SYMBOL(__start___tracepoints) = .; \
*(__tracepoints) \
- VMLINUX_SYMBOL(__stop___tracepoints) = .;
+ VMLINUX_SYMBOL(__stop___tracepoints) = .; \
+ LIKELY_PROFILE() \
+ BRANCH_PROFILE()
#define RO_DATA(align) \
. = ALIGN((align)); \
@@ -269,6 +288,16 @@
*(.kprobes.text) \
VMLINUX_SYMBOL(__kprobes_text_end) = .;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#define IRQENTRY_TEXT \
+ ALIGN_FUNCTION(); \
+ VMLINUX_SYMBOL(__irqentry_text_start) = .; \
+ *(.irqentry.text) \
+ VMLINUX_SYMBOL(__irqentry_text_end) = .;
+#else
+#define IRQENTRY_TEXT
+#endif
+
/* Section used for early init (in .S files) */
#define HEAD_TEXT *(.head.text)
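
The __start_*/__stop_* pairs emitted above bracket arrays that C code can walk as ordinary external symbols. A sketch of how the annotated-branch records collected by LIKELY_PROFILE() might be consumed, assuming the tracer's record type from kernel/trace (field names illustrative):

    extern struct ftrace_branch_data __start_annotated_branch_profile[];
    extern struct ftrace_branch_data __stop_annotated_branch_profile[];

    static void walk_annotated_branches(void)
    {
            struct ftrace_branch_data *p;

            /* every likely()/unlikely() annotation contributes one record */
            for (p = __start_annotated_branch_profile;
                 p < __stop_annotated_branch_profile; p++)
                    printk("%s:%u correct=%lu incorrect=%lu\n",
                           p->func, p->line, p->correct, p->incorrect);
    }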