author     Olof Johansson <olof@lixom.net>    2012-09-16 18:31:37 -0700
committer  Olof Johansson <olof@lixom.net>    2012-09-16 18:31:37 -0700
commit     2e6185f1fea6cf88e9ce25cde1d6291ddfb3d4f0 (patch)
tree       c45ae7bace055c258fba5c4c6c0340b1e3f17f05 /arch/x86/mm
parent     7405a749ae14f846cc2892c36d1a9343b0264b7c (diff)
parent     fd301cc4e5ba839050be135a178031bcd0d363a5 (diff)
Merge tag 'tegra-for-3.7-drivers-i2c' of git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-tegra into next/drivers
From Stephen Warren:

ARM: tegra: i2c driver enhancements mostly related to clocking

This branch contains a number of fixes and cleanups to the Tegra I2C
driver related to clocks. These are based on the common clock conversion
in order to avoid duplicating the clock driver changes before and after
the conversion. Finally, a bug-fix related to I2C_M_NOSTART is included.

This branch is based on previous pull request tegra-for-3.7-common-clk.

* tag 'tegra-for-3.7-drivers-i2c' of git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-tegra:
  i2c: tegra: dynamically control fast clk
  i2c: tegra: I2_M_NOSTART functionality not supported in Tegra20
  ARM: tegra: clock: remove unused clock entry for i2c
  ARM: tegra: clock: add connection name in i2c clock entry
  i2c: tegra: pass proper name for getting clock
  ARM: tegra: clock: add i2c fast clock entry in clock table
  ARM: Tegra: Add smp_twd clock for Tegra20
  ARM: tegra: cpu-tegra: explicitly manage re-parenting
  ARM: tegra: fix overflow in tegra20_pll_clk_round_rate()
  ARM: tegra: Fix data type for io address
  ARM: tegra: remove tegra_timer from tegra_list_clks
  ARM: tegra30: clocks: fix the wrong tegra_audio_sync_clk_ops name
  ARM: tegra: clocks: separate tegra_clk_32k_ops from Tegra20 and Tegra30
  ARM: tegra: Remove duplicate code
  ARM: tegra: Port tegra to generic clock framework
  ARM: tegra: Add clk_tegra structure and helper functions
  ARM: tegra: Rename tegra20 clock file
  ARM: tegra20: Separate out clk ops and clk data
  ARM: tegra30: Separate out clk ops and clk data
  ARM: tegra: fix U16 divider range check
  ...

+ sync to v3.6-rc4

Resolved remove/modify conflict in arch/arm/mach-sa1100/leds-hackkit.c
caused by the sync with v3.6-rc4.

Signed-off-by: Olof Johansson <olof@lixom.net>
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/hugetlbpage.c  21
-rw-r--r--  arch/x86/mm/pageattr.c     10
-rw-r--r--  arch/x86/mm/srat.c         15
3 files changed, 28 insertions(+), 18 deletions(-)
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index f6679a7fb8c..b91e4851242 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -56,9 +56,16 @@ static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
}
/*
- * search for a shareable pmd page for hugetlb.
+ * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
+ * and returns the corresponding pte. While this is not necessary for the
+ * !shared pmd case because we can allocate the pmd later as well, it makes the
+ * code much cleaner. pmd allocation is essential for the shared case because
+ * pud has to be populated inside the same i_mmap_mutex section - otherwise
+ * racing tasks could either miss the sharing (see huge_pte_offset) or select a
+ * bad pmd for sharing.
*/
-static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+static pte_t *
+huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
struct vm_area_struct *vma = find_vma(mm, addr);
struct address_space *mapping = vma->vm_file->f_mapping;
@@ -68,9 +75,10 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
struct vm_area_struct *svma;
unsigned long saddr;
pte_t *spte = NULL;
+ pte_t *pte;
if (!vma_shareable(vma, addr))
- return;
+ return (pte_t *)pmd_alloc(mm, pud, addr);
mutex_lock(&mapping->i_mmap_mutex);
vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) {
@@ -97,7 +105,9 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
put_page(virt_to_page(spte));
spin_unlock(&mm->page_table_lock);
out:
+ pte = (pte_t *)pmd_alloc(mm, pud, addr);
mutex_unlock(&mapping->i_mmap_mutex);
+ return pte;
}
/*
@@ -142,8 +152,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
} else {
BUG_ON(sz != PMD_SIZE);
if (pud_none(*pud))
- huge_pmd_share(mm, addr, pud);
- pte = (pte_t *) pmd_alloc(mm, pud, addr);
+ pte = huge_pmd_share(mm, addr, pud);
+ else
+ pte = (pte_t *)pmd_alloc(mm, pud, addr);
}
}
BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
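For readers following the locking argument in the new comment above, here is a minimal sketch of the ordering this hunk enforces. It is not the hugetlbpage.c code itself: find_shareable_spte() and share_pud_entry() are hypothetical stand-ins for the vma_prio_tree_foreach walk and the pud_populate/refcount step, and error handling is omitted.

#include <linux/mm.h>
#include <linux/hugetlb.h>

/* Sketch only: hypothetical helpers, not the actual implementation. */
static pte_t *huge_pmd_share_sketch(struct mm_struct *mm, unsigned long addr,
                                    pud_t *pud, struct address_space *mapping)
{
        pte_t *pte;

        if (!vma_shareable(find_vma(mm, addr), addr))
                return (pte_t *)pmd_alloc(mm, pud, addr);   /* unshared path */

        mutex_lock(&mapping->i_mmap_mutex);
        if (find_shareable_spte(mapping, addr))             /* hypothetical walk */
                share_pud_entry(mm, pud, addr);             /* hypothetical populate */
        /*
         * pmd_alloc() runs before the mutex is dropped, so a racing
         * huge_pte_offset() sees either the fully shared pmd or none,
         * never a half-populated pud.
         */
        pte = (pte_t *)pmd_alloc(mm, pud, addr);
        mutex_unlock(&mapping->i_mmap_mutex);
        return pte;
}

The point of the sketch is only the ordering: the pud must be populated and the pmd allocated inside the same i_mmap_mutex section, which is why huge_pmd_share() now returns the pte instead of leaving the allocation to huge_pte_alloc().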
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 931930a9616..a718e0d2350 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -919,13 +919,11 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
/*
* On success we use clflush, when the CPU supports it to
- * avoid the wbindv. If the CPU does not support it, in the
- * error case, and during early boot (for EFI) we fall back
- * to cpa_flush_all (which uses wbinvd):
+ * avoid the wbindv. If the CPU does not support it and in the
+ * error case we fall back to cpa_flush_all (which uses
+ * wbindv):
*/
- if (early_boot_irqs_disabled)
- __cpa_flush_all((void *)(long)cache);
- else if (!ret && cpu_has_clflush) {
+ if (!ret && cpu_has_clflush) {
if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
cpa_flush_array(addr, numpages, cache,
cpa.flags, pages);
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 4599c3e8bcb..4ddf497ca65 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -142,23 +142,23 @@ static inline int save_add_info(void) {return 0;}
#endif
/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
-void __init
+int __init
acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
{
u64 start, end;
int node, pxm;
if (srat_disabled())
- return;
+ return -1;
if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
bad_srat();
- return;
+ return -1;
}
if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
- return;
+ return -1;
if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
- return;
+ return -1;
start = ma->base_address;
end = start + ma->length;
pxm = ma->proximity_domain;
@@ -168,12 +168,12 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
if (node < 0) {
printk(KERN_ERR "SRAT: Too many proximity domains.\n");
bad_srat();
- return;
+ return -1;
}
if (numa_add_memblk(node, start, end) < 0) {
bad_srat();
- return;
+ return -1;
}
node_set(node, numa_nodes_parsed);
@@ -181,6 +181,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n",
node, pxm,
(unsigned long long) start, (unsigned long long) end - 1);
+ return 0;
}
void __init acpi_numa_arch_fixup(void) {}
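With acpi_numa_memory_affinity_init() now returning an int instead of silently bailing out, a caller can tell a rejected SRAT entry apart from a parsed one. The following is a hypothetical caller sketch, not part of this diff; it assumes the declaration is visible via <linux/acpi.h>, and the function name parse_mem_affinity_sketch() is invented for illustration.

#include <linux/acpi.h>
#include <linux/errno.h>
#include <linux/printk.h>

/* Hypothetical caller: shows one way to consume the new return value. */
static int __init parse_mem_affinity_sketch(struct acpi_srat_mem_affinity *ma)
{
        if (acpi_numa_memory_affinity_init(ma) < 0) {
                pr_warn("SRAT: memory affinity entry ignored\n");
                return -EINVAL;
        }
        return 0;
}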