From 2046b94e7c4fce92eb8165c2c36c6478f4927178 Mon Sep 17 00:00:00 2001
From: Fenghua Yu
Date: Fri, 4 Apr 2008 11:05:59 -0700
Subject: [IA64] Multiple outstanding ptc.g instruction support

According to SDM 2.2, Itanium supports multiple outstanding ptc.g
instructions.  But the current kernel function ia64_global_tlb_purge()
uses a spinlock to serialize ptc.g instructions issued by multiple
processors.  This serialization can become a scalability problem on a
big SMP machine, where many processors could otherwise purge TLBs in
parallel.

The patch fixes this problem by issuing multiple ptc.g instructions
in ia64_global_tlb_purge().  It also adds support for the "PALO" table,
to get a platform view of the max number of outstanding ptc.g
instructions (which may be different from the processor view found via
PAL_VM_SUMMARY).

The PALO specification can be found at:
http://www.dig64.org/home/DIG64_PALO_R1_0.pdf

Spinaphore implementation by Matthew Wilcox.

Signed-off-by: Fenghua Yu
Signed-off-by: Tony Luck
---
 arch/ia64/kernel/setup.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'arch/ia64/kernel/setup.c')

diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 4aa9eaea76c..1cbd26340d8 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -59,6 +59,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -946,9 +947,10 @@ cpu_init (void)
 #endif
 
        /* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
-       if (ia64_pal_vm_summary(NULL, &vmi) == 0)
+       if (ia64_pal_vm_summary(NULL, &vmi) == 0) {
                max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
-       else {
+               setup_ptcg_sem(vmi.pal_vm_info_2_s.max_purges, 0);
+       } else {
                printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
                max_ctx = (1U << 15) - 1;       /* use architected minimum */
        }
--
cgit v1.2.3-70-g09d2
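The "spinaphore" credited above is essentially a counting spinlock: up to
nptcg CPUs may hold it at once, so several ptc.g purges can be in flight
simultaneously while the platform limit is still honored.  A minimal sketch
of the idea, using standard Linux atomic helpers; this is an illustration,
not necessarily the exact code the patch adds to arch/ia64/mm/tlb.c:

    /* A spinlock that admits up to "max" concurrent holders. */
    struct spinaphore {
        atomic_t cur;        /* current number of holders  */
        atomic_t max;        /* platform limit, i.e. nptcg */
    };

    static inline void spinaphore_init(struct spinaphore *ss, int max)
    {
        atomic_set(&ss->cur, 0);
        atomic_set(&ss->max, max);
    }

    static inline void down_spin(struct spinaphore *ss)
    {
        /* grab a slot, spinning politely while all slots are taken */
        while (!atomic_add_unless(&ss->cur, 1, atomic_read(&ss->max)))
            while (atomic_read(&ss->cur) >= atomic_read(&ss->max))
                cpu_relax();
    }

    static inline void up_spin(struct spinaphore *ss)
    {
        atomic_dec(&ss->cur);
    }

ia64_global_tlb_purge() can then bracket its ptc.g sequence with
down_spin()/up_spin() instead of spin_lock()/spin_unlock(), and skip even
that when there are no more possible CPUs than allowed concurrent purges.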
From a6c75b86ce9f01db4ea9912877b526c2dc4d2f0a Mon Sep 17 00:00:00 2001
From: Fenghua Yu
Date: Fri, 14 Mar 2008 13:57:08 -0700
Subject: [IA64] Kernel parameter for max number of concurrent global TLB
 purges

The patch defines the kernel parameter "nptcg=".  The parameter overrides
the max number of concurrent global TLB purges, which is otherwise
reported by either PAL_VM_SUMMARY or the SAL PALO table.

Signed-off-by: Fenghua Yu
Signed-off-by: Tony Luck
---
 Documentation/kernel-parameters.txt |  4 ++++
 arch/ia64/kernel/efi.c              |  2 +-
 arch/ia64/kernel/setup.c            |  2 +-
 arch/ia64/mm/tlb.c                  | 46 +++++++++++++++++++++++++++++++++----
 include/asm-ia64/sal.h              |  4 ++++
 5 files changed, 51 insertions(+), 7 deletions(-)

(limited to 'arch/ia64/kernel/setup.c')

diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 4cd1a5da80a..be92e616572 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1335,6 +1335,10 @@ and is between 256 and 4096 characters. It is defined in the file
        nowb            [ARM]
 
+       nptcg=          [IA64] Override the max number of concurrent global
+                       TLB purges, which is otherwise reported by either
+                       PAL_VM_SUMMARY or SAL PALO.
+
        numa_zonelist_order= [KNL, BOOT] Select zonelist order for NUMA.
                        one of ['zone', 'node', 'default'] can be specified
                        This can be set from sysctl after boot.

diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 003cd09b073..d45f215bc8f 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -436,7 +436,7 @@ static void __init handle_palo(unsigned long palo_phys)
                return;
        }
 
-       setup_ptcg_sem(palo->max_tlb_purges, 1);
+       setup_ptcg_sem(palo->max_tlb_purges, NPTCG_FROM_PALO);
 }
 
 void

diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 1cbd26340d8..f798c0769d3 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -949,7 +949,7 @@ cpu_init (void)
        /* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
        if (ia64_pal_vm_summary(NULL, &vmi) == 0) {
                max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
-               setup_ptcg_sem(vmi.pal_vm_info_2_s.max_purges, 0);
+               setup_ptcg_sem(vmi.pal_vm_info_2_s.max_purges, NPTCG_FROM_PAL);
        } else {
                printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
                max_ctx = (1U << 15) - 1;       /* use architected minimum */

diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index d41d6076ed0..1a8948fd002 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -119,6 +119,27 @@ static u16 nptcg = 1;
 static int need_ptcg_sem = 1;
 static int toolatetochangeptcgsem = 0;
 
+/*
+ * Kernel parameter "nptcg=" overrides the max number of concurrent global
+ * TLB purges, which is otherwise reported by either PAL or the SAL PALO table.
+ *
+ * There is no sanity checking of the nptcg value; it is the user's
+ * responsibility to supply a value that is valid on the platform.
+ * Otherwise, the kernel may hang in some cases.
+ */
+static int __init
+set_nptcg(char *str)
+{
+       int value = 0;
+
+       get_option(&str, &value);
+       setup_ptcg_sem(value, NPTCG_FROM_KERNEL_PARAMETER);
+
+       return 1;
+}
+
+__setup("nptcg=", set_nptcg);
+
 /*
  * Maximum number of simultaneous ptc.g purges in the system can
  * be defined by PAL_VM_SUMMARY (in which case we should take
@@ -126,13 +147,18 @@ static int toolatetochangeptcgsem = 0;
  * override table (in which case we should ignore the value from
  * PAL_VM_SUMMARY).
  *
+ * The kernel parameter "nptcg=" overrides the maximum number of simultaneous
+ * ptc.g purges defined by either PAL_VM_SUMMARY or the PAL override table;
+ * in that case, both of those values are ignored.
+ *
  * Complicating the logic here is the fact that num_possible_cpus()
  * isn't fully setup until we start bringing cpus online.
  */
 void
-setup_ptcg_sem(int max_purges, int from_palo)
+setup_ptcg_sem(int max_purges, int nptcg_from)
 {
-       static int have_palo;
+       static int kp_override;
+       static int palo_override;
        static int firstcpu = 1;
 
        if (toolatetochangeptcgsem) {
@@ -140,8 +166,18 @@ setup_ptcg_sem(int max_purges, int from_palo)
                return;
        }
-       if (from_palo) {
-               have_palo = 1;
+
+       if (nptcg_from == NPTCG_FROM_KERNEL_PARAMETER) {
+               kp_override = 1;
+               nptcg = max_purges;
+               goto resetsema;
+       }
+       if (kp_override) {
+               need_ptcg_sem = num_possible_cpus() > nptcg;
+               return;
+       }
+
+       if (nptcg_from == NPTCG_FROM_PALO) {
+               palo_override = 1;
 
                /* In PALO max_purges == 0 really means it! */
                if (max_purges == 0)
                        max_purges = 1;
@@ -153,7 +189,7 @@ setup_ptcg_sem(int max_purges, int from_palo)
                }
                goto resetsema;
        }
-       if (have_palo) {
+       if (palo_override) {
                if (nptcg != PALO_MAX_TLB_PURGES)
                        need_ptcg_sem = (num_possible_cpus() > nptcg);
                return;

diff --git a/include/asm-ia64/sal.h b/include/asm-ia64/sal.h
index 3cd637a2c05..89594b442f8 100644
--- a/include/asm-ia64/sal.h
+++ b/include/asm-ia64/sal.h
@@ -896,6 +896,10 @@ struct palo_table {
        u8 reserved2[6];
 };
 
+#define NPTCG_FROM_PAL                 0
+#define NPTCG_FROM_PALO                        1
+#define NPTCG_FROM_KERNEL_PARAMETER    2
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_IA64_SAL_H */
--
cgit v1.2.3-70-g09d2
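With this patch there are three possible sources for the nptcg value,
resolved with a fixed precedence: "nptcg=" on the command line always wins,
a PALO-supplied value beats PAL_VM_SUMMARY, and the spinaphore is only armed
when there are more possible CPUs than allowed concurrent purges.  The
stand-alone program below models just that precedence; the names are
invented for the example, and the real setup_ptcg_sem() additionally takes
the minimum across CPUs for PAL-reported values and re-evaluates the CPU
count as CPUs come online:

    #include <stdio.h>

    enum source { SRC_PAL, SRC_PALO, SRC_BOOT_PARAM }; /* weakest..strongest */

    struct ptcg_policy {
        int nptcg;          /* max concurrent ptc.g purges  */
        enum source from;   /* strongest source seen so far */
    };

    static void propose(struct ptcg_policy *p, int value, enum source from)
    {
        if (from < p->from)
            return;         /* weaker source: ignore */
        p->from = from;
        p->nptcg = value;
    }

    static int need_sem(const struct ptcg_policy *p, int possible_cpus)
    {
        return possible_cpus > p->nptcg; /* throttle only if needed */
    }

    int main(void)
    {
        struct ptcg_policy p = { .nptcg = 1, .from = SRC_PAL };

        propose(&p, 2, SRC_PAL);        /* PAL_VM_SUMMARY reports 2  */
        propose(&p, 8, SRC_PALO);       /* PALO overrides it with 8  */
        propose(&p, 4, SRC_BOOT_PARAM); /* booting with nptcg=4 wins */

        printf("nptcg = %d, need_sem(64 cpus) = %d\n",
               p.nptcg, need_sem(&p, 64));
        return 0;
    }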
From 2c6e6db41f01b6b4eb98809350827c9678996698 Mon Sep 17 00:00:00 2001
From: "holt@sgi.com"
Date: Thu, 3 Apr 2008 15:17:13 -0500
Subject: [IA64] Minimize per_cpu reservations.

This patch significantly shrinks boot memory allocation on ia64.  It does
this by not allocating per_cpu areas for cpus that can never exist.

In the case where ACPI does not provide any NUMA node description of the
cpus, I defaulted to assigning the first 32 round-robin on the known
nodes.  For the !CONFIG_ACPI case I used for_each_possible_cpu().

Signed-off-by: Robin Holt
Signed-off-by: Tony Luck
---
 arch/ia64/kernel/acpi.c  |  4 +++-
 arch/ia64/kernel/numa.c  |  2 +-
 arch/ia64/kernel/setup.c |  2 ++
 arch/ia64/mm/discontig.c | 12 ++++--------
 arch/ia64/mm/numa.c      |  4 +++-
 include/asm-ia64/acpi.h  | 33 +++++++++++++++++++++++++++++++++
 include/asm-ia64/numa.h  |  2 ++
 7 files changed, 48 insertions(+), 11 deletions(-)

(limited to 'arch/ia64/kernel/setup.c')

diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 78f28d825f3..c7467f863c7 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -423,6 +423,7 @@ static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
 #define pxm_bit_set(bit)       (set_bit(bit,(void *)pxm_flag))
 #define pxm_bit_test(bit)      (test_bit(bit,(void *)pxm_flag))
 static struct acpi_table_slit __initdata *slit_table;
+cpumask_t early_cpu_possible_map = CPU_MASK_NONE;
 
 static int get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
 {
@@ -482,6 +483,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
            (pa->apic_id << 8) | (pa->local_sapic_eid);
        /* nid should be overridden as logical node id later */
        node_cpuid[srat_num_cpus].nid = pxm;
+       cpu_set(srat_num_cpus, early_cpu_possible_map);
        srat_num_cpus++;
 }
 
@@ -559,7 +561,7 @@ void __init acpi_numa_arch_fixup(void)
        }
 
        /* set logical node id in cpu structure */
-       for (i = 0; i < srat_num_cpus; i++)
+       for_each_possible_early_cpu(i)
                node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid);
 
        printk(KERN_INFO "Number of logical nodes in system = %d\n",

diff --git a/arch/ia64/kernel/numa.c b/arch/ia64/kernel/numa.c
index a78b45f5fe2..c93420c9740 100644
--- a/arch/ia64/kernel/numa.c
+++ b/arch/ia64/kernel/numa.c
@@ -73,7 +73,7 @@ void __init build_cpu_to_node_map(void)
        for(node=0; node < MAX_NUMNODES; node++)
                cpus_clear(node_to_cpu_mask[node]);
 
-       for(cpu = 0; cpu < NR_CPUS; ++cpu) {
+       for_each_possible_early_cpu(cpu) {
                node = -1;
                for (i = 0; i < NR_CPUS; ++i)
                        if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {

diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 4aa9eaea76c..6206541f9e8 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -493,6 +493,8 @@ setup_arch (char **cmdline_p)
        acpi_table_init();
 # ifdef CONFIG_ACPI_NUMA
        acpi_numa_init();
+       per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
+               32 : cpus_weight(early_cpu_possible_map)), additional_cpus);
 # endif
 #else
 # ifdef CONFIG_SMP

diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 06c540a2946..6136a4c6df1 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -104,7 +104,7 @@ static int __meminit early_nr_cpus_node(int node)
 {
        int cpu, n = 0;
 
-       for (cpu = 0; cpu < NR_CPUS; cpu++)
+       for_each_possible_early_cpu(cpu)
                if (node == node_cpuid[cpu].nid)
                        n++;
 
@@ -143,7 +143,7 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
 #ifdef CONFIG_SMP
        int cpu;
 
-       for (cpu = 0; cpu < NR_CPUS; cpu++) {
+       for_each_possible_early_cpu(cpu) {
                if (node == node_cpuid[cpu].nid) {
                        memcpy(__va(cpu_data), __phys_per_cpu_start,
                               __per_cpu_end - __per_cpu_start);
@@ -346,7 +346,7 @@ static void __init initialize_pernode_data(void)
 #ifdef CONFIG_SMP
        /* Set the node_data pointer for each per-cpu struct */
-       for (cpu = 0; cpu < NR_CPUS; cpu++) {
+       for_each_possible_early_cpu(cpu) {
                node = node_cpuid[cpu].nid;
                per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
        }
@@ -494,13 +494,9 @@ void __cpuinit *per_cpu_init(void)
        int cpu;
        static int first_time = 1;
 
-
-       if (smp_processor_id() != 0)
-               return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
-
        if (first_time) {
                first_time = 0;
-               for (cpu = 0; cpu < NR_CPUS; cpu++)
+               for_each_possible_early_cpu(cpu)
                        per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
        }

diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
index 7807fc5c042..b73bf1838e5 100644
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -27,7 +27,9 @@
  */
 int num_node_memblks;
 struct node_memblk_s node_memblk[NR_NODE_MEMBLKS];
-struct node_cpuid_s node_cpuid[NR_CPUS];
+struct node_cpuid_s node_cpuid[NR_CPUS] =
+       { [0 ... NR_CPUS-1] = { .phys_id = 0, .nid = NUMA_NO_NODE } };
+
 /*
  * This is a matrix with "distances" between nodes, they should be
  * proportional to the memory access latency ratios.
diff --git a/include/asm-ia64/acpi.h b/include/asm-ia64/acpi.h
index cd1cc39b559..fcfad326f4c 100644
--- a/include/asm-ia64/acpi.h
+++ b/include/asm-ia64/acpi.h
@@ -35,6 +35,7 @@
 #include
 #include
 #include
+#include
 
 #define COMPILER_DEPENDENT_INT64       long
 #define COMPILER_DEPENDENT_UINT64      unsigned long
@@ -115,7 +116,11 @@ extern unsigned int is_cpu_cpei_target(unsigned int cpu);
 extern void set_cpei_target_cpu(unsigned int cpu);
 extern unsigned int get_cpei_target_cpu(void);
 extern void prefill_possible_map(void);
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
 extern int additional_cpus;
+#else
+#define additional_cpus 0
+#endif
 
 #ifdef CONFIG_ACPI_NUMA
 #if MAX_NUMNODES > 256
@@ -129,6 +134,34 @@ extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
 
 #define acpi_unlazy_tlb(x)
 
+#ifdef CONFIG_ACPI_NUMA
+extern cpumask_t early_cpu_possible_map;
+#define for_each_possible_early_cpu(cpu)  \
+       for_each_cpu_mask((cpu), early_cpu_possible_map)
+
+static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
+{
+       int low_cpu, high_cpu;
+       int cpu;
+       int next_nid = 0;
+
+       low_cpu = cpus_weight(early_cpu_possible_map);
+
+       high_cpu = max(low_cpu, min_cpus);
+       high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);
+
+       for (cpu = low_cpu; cpu < high_cpu; cpu++) {
+               cpu_set(cpu, early_cpu_possible_map);
+               if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
+                       node_cpuid[cpu].nid = next_nid;
+                       next_nid++;
+                       if (next_nid >= num_online_nodes())
+                               next_nid = 0;
+               }
+       }
+}
+#endif /* CONFIG_ACPI_NUMA */
+
 #endif /*__KERNEL__*/
 
 #endif /*_ASM_ACPI_H*/

diff --git a/include/asm-ia64/numa.h b/include/asm-ia64/numa.h
index 6a8a27cfae3..3499ff57bf4 100644
--- a/include/asm-ia64/numa.h
+++ b/include/asm-ia64/numa.h
@@ -22,6 +22,8 @@
 
 #include
 
+#define NUMA_NO_NODE   -1
+
 extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
 extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
 extern pg_data_t *pgdat_list[MAX_NUMNODES];
--
cgit v1.2.3-70-g09d2
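per_cpu_scan_finalize() above carries the actual policy: the early possible
map is padded from the number of SRAT-described CPUs up to
max(min_cpus, current weight) plus reserve_cpus, and every padding CPU whose
nid is still NUMA_NO_NODE is dealt out round-robin across the known nodes.
A stand-alone illustration, with a plain array in place of
cpumask_t/node_cpuid[] and all sizes invented for the example:

    #include <stdio.h>

    #define NR_CPUS      16
    #define NUMA_NO_NODE (-1)

    int main(void)
    {
        int nid[NR_CPUS];
        int srat_cpus = 3;    /* CPUs actually described by the SRAT  */
        int min_cpus = 8;     /* cf. the hard-coded "32" fallback     */
        int reserve = 2;      /* cf. additional_cpus                  */
        int online_nodes = 2;
        int cpu, low, high, next_nid = 0;

        /* SRAT-described CPUs already have a node; the rest do not */
        for (cpu = 0; cpu < NR_CPUS; cpu++)
            nid[cpu] = (cpu < srat_cpus) ? cpu % online_nodes
                                         : NUMA_NO_NODE;

        low = srat_cpus;      /* cpus_weight(early_cpu_possible_map) */
        high = (low > min_cpus ? low : min_cpus) + reserve;
        if (high > NR_CPUS)
            high = NR_CPUS;

        /* pad the possible map, spreading padding over known nodes */
        for (cpu = low; cpu < high; cpu++) {
            if (nid[cpu] == NUMA_NO_NODE) {
                nid[cpu] = next_nid;
                if (++next_nid >= online_nodes)
                    next_nid = 0;
            }
        }

        for (cpu = 0; cpu < high; cpu++)
            printf("cpu %2d -> node %d\n", cpu, nid[cpu]);
        return 0;
    }

Only these "high" CPUs (10 here, instead of NR_CPUS) ever get per_cpu
areas, which is where the boot-memory saving comes from.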
From 98075d245a5bc4aeebc2e9f16fa8b089a5c200ac Mon Sep 17 00:00:00 2001
From: Zoltan Menyhart
Date: Fri, 11 Apr 2008 15:21:35 -0700
Subject: [IA64] Fix NUMA configuration issue

There is a NUMA memory configuration issue in 2.6.24:

A 2-node machine of ours has the following memory layout:

Node 0:  0 -  2 Gbytes
Node 0:  4 -  8 Gbytes
Node 1:  8 - 16 Gbytes
Node 0: 16 - 18 Gbytes

"efi_memmap_init()" merges the last three ranges into one.

"register_active_ranges()" is called as follows:

       efi_memmap_walk(register_active_ranges, NULL);

i.e. once for the 4 - 18 Gbytes range.  It picks up the node number from
the start address, and registers all of the memory for node #0.

"register_active_ranges()" should be called as follows to make sure there
is no merged address range at its entry:

       efi_memmap_walk(filter_memory, register_active_ranges);

"filter_memory()" is similar to "filter_rsvd_memory()", but the reserved
memory ranges are not filtered out.

Signed-off-by: Zoltan Menyhart
Signed-off-by: Tony Luck
---
 arch/ia64/kernel/setup.c   | 23 +++++++++++++++++++++++
 arch/ia64/mm/contig.c      |  2 +-
 arch/ia64/mm/discontig.c   |  2 +-
 arch/ia64/mm/init.c        |  6 ++----
 include/asm-ia64/meminit.h |  3 ++-
 5 files changed, 29 insertions(+), 7 deletions(-)

(limited to 'arch/ia64/kernel/setup.c')

diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 4aa9eaea76c..c85b7dd6ef3 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -176,6 +176,29 @@ filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
        return 0;
 }
 
+/*
+ * Similar to "filter_rsvd_memory()", but the reserved memory ranges
+ * are not filtered out.
+ */
+int __init
+filter_memory(unsigned long start, unsigned long end, void *arg)
+{
+       void (*func)(unsigned long, unsigned long, int);
+
+#if IGNORE_PFN0
+       if (start == PAGE_OFFSET) {
+               printk(KERN_WARNING "warning: skipping physical page 0\n");
+               start += PAGE_SIZE;
+               if (start >= end)
+                       return 0;
+       }
+#endif
+       func = arg;
+       if (start < end)
+               call_pernode_memory(__pa(start), end - start, func);
+       return 0;
+}
+
 static void __init
 sort_regions (struct rsvd_region *rsvd_region, int max)
 {

diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 0479661fa41..798bf9835a5 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -253,7 +253,7 @@ paging_init (void)
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-       efi_memmap_walk(register_active_ranges, NULL);
+       efi_memmap_walk(filter_memory, register_active_ranges);
        efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
        if (max_gap < LARGE_GAP) {
                vmem_map = (struct page *) 0;

diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index ffee1ea00bb..96d5fbfa44a 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -444,7 +444,7 @@ void __init find_memory(void)
                mem_data[node].min_pfn = ~0UL;
        }
 
-       efi_memmap_walk(register_active_ranges, NULL);
+       efi_memmap_walk(filter_memory, register_active_ranges);
 
        /*
         * Initialize the boot memory maps in reverse order since that's

diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index da05893294b..5c1de53c8c1 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -547,12 +547,10 @@ find_largest_hole (u64 start, u64 end, void *arg)
 #endif /* CONFIG_VIRTUAL_MEM_MAP */
 
 int __init
-register_active_ranges(u64 start, u64 end, void *arg)
+register_active_ranges(u64 start, u64 len, int nid)
 {
-       int nid = paddr_to_nid(__pa(start));
+       u64 end = start + len;
 
-       if (nid < 0)
-               nid = 0;
 #ifdef CONFIG_KEXEC
        if (start > crashk_res.start && start < crashk_res.end)
                start = crashk_res.end;

diff --git a/include/asm-ia64/meminit.h b/include/asm-ia64/meminit.h
index f93308f54b6..7245a578159 100644
--- a/include/asm-ia64/meminit.h
+++ b/include/asm-ia64/meminit.h
@@ -35,6 +35,7 @@ extern void find_memory (void);
 extern void reserve_memory (void);
 extern void find_initrd (void);
 extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg);
+extern int filter_memory (unsigned long start, unsigned long end, void *arg);
 extern unsigned long efi_memmap_init(unsigned long *s, unsigned long *e);
 extern int find_max_min_low_pfn (unsigned long , unsigned long, void *);
 
@@ -56,7 +57,7 @@ extern int reserve_elfcorehdr(unsigned long *start, unsigned long *end);
 
 #define IGNORE_PFN0    1       /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */
-extern int register_active_ranges(u64 start, u64 end, void *arg);
+extern int register_active_ranges(u64 start, u64 len, int nid);
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 # define LARGE_GAP     0x40000000      /* Use virtual mem map if hole is > than this */
--
cgit v1.2.3-70-g09d2
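The shape of the fix: efi_memmap_walk() now hands each (possibly merged)
range to filter_memory(), whose arg is the real consumer, and
call_pernode_memory() splits the range along node boundaries before
register_active_ranges() ever sees it.  A toy model of that callback
chaining follows; the types and the hard-coded memblk table are invented
stand-ins for the kernel's SRAT-derived node_memblk[] data, and the
kernel's own call_pernode_memory() is not part of this patch:

    #include <stdio.h>

    typedef unsigned long long u64;

    struct memblk { u64 start, end; int nid; };

    /* toy node map matching the bug report: 4-8G and 16-18G on node 0,
     * 8-16G on node 1 */
    static const struct memblk blks[] = {
        { 0x100000000ULL, 0x200000000ULL, 0 },
        { 0x200000000ULL, 0x400000000ULL, 1 },
        { 0x400000000ULL, 0x480000000ULL, 0 },
    };

    static void register_active(u64 start, u64 len, int nid)
    {
        printf("node %d: %#llx - %#llx\n", nid, start, start + len);
    }

    /* Split a merged range along node boundaries before invoking the
     * per-node callback -- the step the old direct call was missing. */
    static void pernode_walk(u64 start, u64 len, void (*func)(u64, u64, int))
    {
        u64 end = start + len;
        unsigned int i;

        for (i = 0; i < sizeof(blks) / sizeof(blks[0]); i++) {
            u64 s = blks[i].start > start ? blks[i].start : start;
            u64 e = blks[i].end < end ? blks[i].end : end;
            if (s < e)
                func(s, e - s, blks[i].nid);
        }
    }

    int main(void)
    {
        /* one merged EFI range covering 4G-18G, as in the bug report */
        pernode_walk(0x100000000ULL, 0x380000000ULL, register_active);
        return 0;
    }

With the old code the whole 4 - 18 Gbytes range would have been credited to
node #0 (the node of its start address); with the split, node 1's
8 - 16 Gbytes is registered correctly.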