Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/Kconfig | 5
-rw-r--r--  arch/sparc/boot/Makefile | 6
-rw-r--r--  arch/sparc/boot/piggyback_32.c | 4
-rw-r--r--  arch/sparc/boot/piggyback_64.c | 1
-rw-r--r--  arch/sparc/configs/sparc32_defconfig | 74
-rw-r--r--  arch/sparc/configs/sparc64_defconfig | 118
-rw-r--r--  arch/sparc/include/asm/atomic_32.h | 2
-rw-r--r--  arch/sparc/include/asm/atomic_64.h | 2
-rw-r--r--  arch/sparc/include/asm/bitsperlong.h | 13
-rw-r--r--  arch/sparc/include/asm/cpudata_64.h | 197
-rw-r--r--  arch/sparc/include/asm/dma-mapping.h | 168
-rw-r--r--  arch/sparc/include/asm/dma-mapping_32.h | 60
-rw-r--r--  arch/sparc/include/asm/dma-mapping_64.h | 154
-rw-r--r--  arch/sparc/include/asm/errno.h | 2
-rw-r--r--  arch/sparc/include/asm/ftrace.h | 11
-rw-r--r--  arch/sparc/include/asm/kmap_types.h | 17
-rw-r--r--  arch/sparc/include/asm/mdesc.h | 3
-rw-r--r--  arch/sparc/include/asm/mman.h | 2
-rw-r--r--  arch/sparc/include/asm/page_32.h | 2
-rw-r--r--  arch/sparc/include/asm/page_64.h | 2
-rw-r--r--  arch/sparc/include/asm/pci_64.h | 2
-rw-r--r--  arch/sparc/include/asm/percpu_64.h | 8
-rw-r--r--  arch/sparc/include/asm/pgalloc_32.h | 8
-rw-r--r--  arch/sparc/include/asm/pgtable_64.h | 12
-rw-r--r--  arch/sparc/include/asm/prom.h | 2
-rw-r--r--  arch/sparc/include/asm/signal.h | 2
-rw-r--r--  arch/sparc/include/asm/thread_info_32.h | 4
-rw-r--r--  arch/sparc/include/asm/thread_info_64.h | 8
-rw-r--r--  arch/sparc/include/asm/tlb_64.h | 6
-rw-r--r--  arch/sparc/include/asm/trap_block.h | 207
-rw-r--r--  arch/sparc/include/asm/types.h | 4
-rw-r--r--  arch/sparc/include/asm/uaccess_32.h | 3
-rw-r--r--  arch/sparc/include/asm/uaccess_64.h | 2
-rw-r--r--  arch/sparc/include/asm/unistd.h | 3
-rw-r--r--  arch/sparc/kernel/Makefile | 2
-rw-r--r--  arch/sparc/kernel/cpumap.c | 431
-rw-r--r--  arch/sparc/kernel/cpumap.h | 16
-rw-r--r--  arch/sparc/kernel/dma.c | 127
-rw-r--r--  arch/sparc/kernel/ds.c | 3
-rw-r--r--  arch/sparc/kernel/ftrace.c | 47
-rw-r--r--  arch/sparc/kernel/head_64.S | 22
-rw-r--r--  arch/sparc/kernel/init_task.c | 3
-rw-r--r--  arch/sparc/kernel/iommu.c | 15
-rw-r--r--  arch/sparc/kernel/irq_64.c | 86
-rw-r--r--  arch/sparc/kernel/ktlb.S | 42
-rw-r--r--  arch/sparc/kernel/mdesc.c | 149
-rw-r--r--  arch/sparc/kernel/module.c | 2
-rw-r--r--  arch/sparc/kernel/of_device_32.c | 195
-rw-r--r--  arch/sparc/kernel/of_device_64.c | 188
-rw-r--r--  arch/sparc/kernel/of_device_common.c | 174
-rw-r--r--  arch/sparc/kernel/of_device_common.h | 36
-rw-r--r--  arch/sparc/kernel/pci.c | 13
-rw-r--r--  arch/sparc/kernel/pci_sun4v.c | 15
-rw-r--r--  arch/sparc/kernel/prom.h | 1
-rw-r--r--  arch/sparc/kernel/prom_64.c | 232
-rw-r--r--  arch/sparc/kernel/prom_common.c | 2
-rw-r--r--  arch/sparc/kernel/ptrace_32.c | 1
-rw-r--r--  arch/sparc/kernel/ptrace_64.c | 1
-rw-r--r--  arch/sparc/kernel/smp_64.c | 196
-rw-r--r--  arch/sparc/kernel/sun4d_smp.c | 22
-rw-r--r--  arch/sparc/kernel/sun4m_smp.c | 26
-rw-r--r--  arch/sparc/kernel/sys32.S | 4
-rw-r--r--  arch/sparc/kernel/systbls_32.S | 4
-rw-r--r--  arch/sparc/kernel/systbls_64.S | 10
-rw-r--r--  arch/sparc/kernel/time_64.c | 1
-rw-r--r--  arch/sparc/kernel/traps_32.c | 1
-rw-r--r--  arch/sparc/kernel/traps_64.c | 170
-rw-r--r--  arch/sparc/kernel/vio.c | 7
-rw-r--r--  arch/sparc/mm/extable.c | 29
-rw-r--r--  arch/sparc/mm/fault_32.c | 11
-rw-r--r--  arch/sparc/mm/fault_64.c | 9
-rw-r--r--  arch/sparc/mm/init_32.c | 1
-rw-r--r--  arch/sparc/mm/init_64.c | 65
-rw-r--r--  arch/sparc/mm/init_64.h | 7
-rw-r--r--  arch/sparc/mm/srmmu.c | 3
75 files changed, 1961 insertions, 1522 deletions
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index cc12cd48bbc..3f8b6a92eab 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -37,6 +37,8 @@ config SPARC64
select HAVE_KPROBES
select HAVE_LMB
select HAVE_SYSCALL_WRAPPERS
+ select HAVE_DYNAMIC_FTRACE
+ select HAVE_FTRACE_MCOUNT_RECORD
select USE_GENERIC_SMP_HELPERS if SMP
select RTC_DRV_CMOS
select RTC_DRV_BQ4802
@@ -93,6 +95,9 @@ config AUDIT_ARCH
config HAVE_SETUP_PER_CPU_AREA
def_bool y if SPARC64
+config HAVE_DYNAMIC_PER_CPU_AREA
+ def_bool y if SPARC64
+
config GENERIC_HARDIRQS_NO__DO_IRQ
bool
def_bool y if SPARC64
diff --git a/arch/sparc/boot/Makefile b/arch/sparc/boot/Makefile
index 96041a8d39e..1ff0fd92475 100644
--- a/arch/sparc/boot/Makefile
+++ b/arch/sparc/boot/Makefile
@@ -15,7 +15,7 @@ quiet_cmd_elftoaout = ELFTOAOUT $@
ifeq ($(CONFIG_SPARC32),y)
quiet_cmd_piggy = PIGGY $@
- cmd_piggy = $(obj)/piggyback_32 $@ $(obj)/System.map $(ROOT_IMG)
+ cmd_piggy = $(obj)/piggyback_32 $@ System.map $(ROOT_IMG)
quiet_cmd_btfix = BTFIX $@
cmd_btfix = $(OBJDUMP) -x vmlinux | $(obj)/btfixupprep > $@
quiet_cmd_sysmap = SYSMAP $(obj)/System.map
@@ -58,7 +58,7 @@ $(obj)/image: $(obj)/btfix.o FORCE
$(obj)/zImage: $(obj)/image
$(call if_changed,strip)
-$(obj)/tftpboot.img: $(obj)/piggyback $(obj)/System.map $(obj)/image FORCE
+$(obj)/tftpboot.img: $(obj)/image $(obj)/piggyback_32 System.map $(ROOT_IMG) FORCE
$(call if_changed,elftoaout)
$(call if_changed,piggy)
@@ -79,7 +79,7 @@ $(obj)/image: vmlinux FORCE
$(call if_changed,strip)
@echo ' kernel: $@ is ready'
-$(obj)/tftpboot.img: vmlinux $(obj)/piggyback_64 System.map $(ROOT_IMG) FORCE
+$(obj)/tftpboot.img: $(obj)/image $(obj)/piggyback_64 System.map $(ROOT_IMG) FORCE
$(call if_changed,elftoaout)
$(call if_changed,piggy)
@echo ' kernel: $@ is ready'
diff --git a/arch/sparc/boot/piggyback_32.c b/arch/sparc/boot/piggyback_32.c
index c9f500c1a8b..e8dc9adfcd6 100644
--- a/arch/sparc/boot/piggyback_32.c
+++ b/arch/sparc/boot/piggyback_32.c
@@ -70,7 +70,7 @@ void die(char *str)
int main(int argc,char **argv)
{
static char aout_magic[] = { 0x01, 0x03, 0x01, 0x07 };
- unsigned char buffer[1024], *q, *r;
+ char buffer[1024], *q, *r;
unsigned int i, j, k, start, end, offset;
FILE *map;
struct stat s;
@@ -84,7 +84,7 @@ int main(int argc,char **argv)
while (fgets (buffer, 1024, map)) {
if (!strcmp (buffer + 8, " T start\n") || !strcmp (buffer + 16, " T start\n"))
start = strtoul (buffer, NULL, 16);
- else if (!strcmp (buffer + 8, " A end\n") || !strcmp (buffer + 16, " A end\n"))
+ else if (!strcmp (buffer + 8, " A _end\n") || !strcmp (buffer + 16, " A _end\n"))
end = strtoul (buffer, NULL, 16);
}
fclose (map);
diff --git a/arch/sparc/boot/piggyback_64.c b/arch/sparc/boot/piggyback_64.c
index de364bfed0b..c63fd1b6bdd 100644
--- a/arch/sparc/boot/piggyback_64.c
+++ b/arch/sparc/boot/piggyback_64.c
@@ -46,6 +46,7 @@ int main(int argc,char **argv)
struct stat s;
int image, tail;
+ start = end = 0;
if (stat (argv[3], &s) < 0) die (argv[3]);
map = fopen (argv[2], "r");
if (!map) die(argv[2]);
diff --git a/arch/sparc/configs/sparc32_defconfig b/arch/sparc/configs/sparc32_defconfig
index 8bcd27af724..a0f62a808ed 100644
--- a/arch/sparc/configs/sparc32_defconfig
+++ b/arch/sparc/configs/sparc32_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30-rc2
-# Fri Apr 17 04:04:46 2009
+# Linux kernel version: 2.6.31-rc1
+# Tue Aug 18 23:45:52 2009
#
# CONFIG_64BIT is not set
CONFIG_SPARC=y
@@ -17,6 +17,7 @@ CONFIG_GENERIC_ISA_DMA=y
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_OF=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -74,7 +75,6 @@ CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_ALL is not set
# CONFIG_KALLSYMS_EXTRA_PASS is not set
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
@@ -87,8 +87,13 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+
+#
+# Performance Counters
+#
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_PCI_QUIRKS=y
+# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
@@ -97,6 +102,10 @@ CONFIG_SLAB=y
# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
+
+#
+# GCOV-based kernel profiling
+#
# CONFIG_SLOW_WORK is not set
# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
@@ -109,7 +118,7 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -154,9 +163,9 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=1
CONFIG_BOUNCE=y
-CONFIG_UNEVICTABLE_LRU=y
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
CONFIG_SUN_PM=y
# CONFIG_SPARC_LED is not set
CONFIG_SERIAL_CONSOLE=y
@@ -264,6 +273,7 @@ CONFIG_IPV6_TUNNEL=m
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
# CONFIG_NET_SCHED is not set
# CONFIG_DCB is not set
@@ -281,7 +291,11 @@ CONFIG_WIRELESS=y
CONFIG_WIRELESS_OLD_REGULATORY=y
# CONFIG_WIRELESS_EXT is not set
# CONFIG_LIB80211 is not set
-# CONFIG_MAC80211 is not set
+
+#
+# CFG80211 needs to be enabled for MAC80211
+#
+CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -335,6 +349,7 @@ CONFIG_MISC_DEVICES=y
# EEPROM support
#
# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_CB710_CORE is not set
CONFIG_HAVE_IDE=y
# CONFIG_IDE is not set
@@ -358,10 +373,6 @@ CONFIG_BLK_DEV_SR=m
# CONFIG_BLK_DEV_SR_VENDOR is not set
CONFIG_CHR_DEV_SG=m
# CONFIG_CHR_DEV_SCH is not set
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
# CONFIG_SCSI_MULTI_LUN is not set
# CONFIG_SCSI_CONSTANTS is not set
# CONFIG_SCSI_LOGGING is not set
@@ -379,6 +390,7 @@ CONFIG_SCSI_SPI_ATTRS=y
CONFIG_SCSI_LOWLEVEL=y
# CONFIG_ISCSI_TCP is not set
# CONFIG_SCSI_CXGB3_ISCSI is not set
+# CONFIG_SCSI_BNX2_ISCSI is not set
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
# CONFIG_SCSI_3W_9XXX is not set
# CONFIG_SCSI_ACARD is not set
@@ -387,6 +399,7 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_AIC7XXX_OLD is not set
# CONFIG_SCSI_AIC79XX is not set
# CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_MVSAS is not set
# CONFIG_SCSI_ARCMSR is not set
# CONFIG_MEGARAID_NEWGEN is not set
# CONFIG_MEGARAID_LEGACY is not set
@@ -401,7 +414,6 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_IPS is not set
# CONFIG_SCSI_INITIO is not set
# CONFIG_SCSI_INIA100 is not set
-# CONFIG_SCSI_MVSAS is not set
# CONFIG_SCSI_STEX is not set
# CONFIG_SCSI_SYM53C8XX_2 is not set
# CONFIG_SCSI_QLOGIC_1280 is not set
@@ -426,13 +438,16 @@ CONFIG_SCSI_SUNESP=y
#
#
-# Enable only one of the two stacks, unless you know what you are doing
+# You can enable one or both FireWire driver stacks.
+#
+
+#
+# See the help texts for more information.
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
# CONFIG_I2O is not set
CONFIG_NETDEVICES=y
-CONFIG_COMPAT_NET_DEV_OPS=y
CONFIG_DUMMY=m
# CONFIG_BONDING is not set
# CONFIG_MACVLAN is not set
@@ -463,6 +478,7 @@ CONFIG_SUNQE=m
# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
# CONFIG_NET_PCI is not set
# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
# CONFIG_ATL2 is not set
CONFIG_NETDEV_1000=y
# CONFIG_ACENIC is not set
@@ -482,6 +498,7 @@ CONFIG_NETDEV_1000=y
# CONFIG_VIA_VELOCITY is not set
# CONFIG_TIGON3 is not set
# CONFIG_BNX2 is not set
+# CONFIG_CNIC is not set
# CONFIG_QLA3XXX is not set
# CONFIG_ATL1 is not set
# CONFIG_ATL1E is not set
@@ -629,6 +646,11 @@ CONFIG_HW_RANDOM=m
CONFIG_DEVPORT=y
# CONFIG_I2C is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
# CONFIG_GPIOLIB is not set
# CONFIG_W1 is not set
@@ -668,22 +690,7 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_HTC_PASIC3 is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_REGULATOR is not set
-
-#
-# Multimedia devices
-#
-
-#
-# Multimedia core support
-#
-# CONFIG_VIDEO_DEV is not set
-# CONFIG_DVB_CORE is not set
-# CONFIG_VIDEO_MEDIA is not set
-
-#
-# Multimedia drivers
-#
-# CONFIG_DAB is not set
+# CONFIG_MEDIA_SUPPORT is not set
#
# Graphics support
@@ -776,6 +783,10 @@ CONFIG_RTC_DRV_M48T59=y
# CONFIG_DMADEVICES is not set
# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
# CONFIG_STAGING is not set
#
@@ -799,10 +810,12 @@ CONFIG_FS_MBCACHE=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y
-CONFIG_FILE_LOCKING=y
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
@@ -985,6 +998,7 @@ CONFIG_KGDB=y
CONFIG_KGDB_SERIAL_CONSOLE=y
CONFIG_KGDB_TESTS=y
# CONFIG_KGDB_TESTS_ON_BOOT is not set
+# CONFIG_KMEMCHECK is not set
# CONFIG_DEBUG_STACK_USAGE is not set
# CONFIG_STACK_DEBUG is not set
diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig
index b5d63bd8716..fdddf7a6f72 100644
--- a/arch/sparc/configs/sparc64_defconfig
+++ b/arch/sparc/configs/sparc64_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30-rc2
-# Fri Apr 17 02:03:07 2009
+# Linux kernel version: 2.6.31-rc1
+# Tue Aug 18 23:56:02 2009
#
CONFIG_64BIT=y
CONFIG_SPARC=y
@@ -19,12 +19,14 @@ CONFIG_LOCKDEP_SUPPORT=y
CONFIG_HAVE_LATENCYTOP_SUPPORT=y
CONFIG_AUDIT_ARCH=y
CONFIG_HAVE_SETUP_PER_CPU_AREA=y
+CONFIG_HAVE_DYNAMIC_PER_CPU_AREA=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_MMU=y
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_OF=y
CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -82,7 +84,6 @@ CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_ALL is not set
# CONFIG_KALLSYMS_EXTRA_PASS is not set
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
@@ -95,16 +96,21 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+
+#
+# Performance Counters
+#
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_PCI_QUIRKS=y
CONFIG_SLUB_DEBUG=y
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_COMPAT_BRK is not set
# CONFIG_SLAB is not set
CONFIG_SLUB=y
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
CONFIG_TRACEPOINTS=y
-# CONFIG_MARKERS is not set
+CONFIG_MARKERS=y
CONFIG_OPROFILE=m
CONFIG_HAVE_OPROFILE=y
CONFIG_KPROBES=y
@@ -114,6 +120,11 @@ CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_USE_GENERIC_SMP_HELPERS=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
# CONFIG_SLOW_WORK is not set
# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
@@ -199,9 +210,9 @@ CONFIG_MIGRATION=y
CONFIG_PHYS_ADDR_T_64BIT=y
CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=1
-CONFIG_UNEVICTABLE_LRU=y
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=8192
CONFIG_SCHED_SMT=y
CONFIG_SCHED_MC=y
# CONFIG_PREEMPT_NONE is not set
@@ -321,6 +332,7 @@ CONFIG_VLAN_8021Q=m
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
# CONFIG_NET_SCHED is not set
# CONFIG_DCB is not set
@@ -340,7 +352,11 @@ CONFIG_WIRELESS=y
CONFIG_WIRELESS_OLD_REGULATORY=y
# CONFIG_WIRELESS_EXT is not set
# CONFIG_LIB80211 is not set
-# CONFIG_MAC80211 is not set
+
+#
+# CFG80211 needs to be enabled for MAC80211
+#
+CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -364,6 +380,7 @@ CONFIG_EXTRA_FIRMWARE=""
CONFIG_CONNECTOR=m
# CONFIG_MTD is not set
CONFIG_OF_DEVICE=y
+CONFIG_OF_MDIO=m
# CONFIG_PARPORT is not set
CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_FD is not set
@@ -398,7 +415,9 @@ CONFIG_MISC_DEVICES=y
#
# CONFIG_EEPROM_AT24 is not set
# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_CB710_CORE is not set
CONFIG_HAVE_IDE=y
CONFIG_IDE=y
@@ -477,10 +496,6 @@ CONFIG_BLK_DEV_SR=m
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
# CONFIG_CHR_DEV_SCH is not set
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
# CONFIG_SCSI_LOGGING is not set
@@ -499,6 +514,7 @@ CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_LOWLEVEL=y
# CONFIG_ISCSI_TCP is not set
# CONFIG_SCSI_CXGB3_ISCSI is not set
+# CONFIG_SCSI_BNX2_ISCSI is not set
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
# CONFIG_SCSI_3W_9XXX is not set
# CONFIG_SCSI_ACARD is not set
@@ -507,6 +523,7 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_AIC7XXX_OLD is not set
# CONFIG_SCSI_AIC79XX is not set
# CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_MVSAS is not set
# CONFIG_SCSI_ARCMSR is not set
# CONFIG_MEGARAID_NEWGEN is not set
# CONFIG_MEGARAID_LEGACY is not set
@@ -521,7 +538,6 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_IPS is not set
# CONFIG_SCSI_INITIO is not set
# CONFIG_SCSI_INIA100 is not set
-# CONFIG_SCSI_MVSAS is not set
# CONFIG_SCSI_STEX is not set
# CONFIG_SCSI_SYM53C8XX_2 is not set
# CONFIG_SCSI_QLOGIC_1280 is not set
@@ -552,6 +568,7 @@ CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_MIRROR=m
+# CONFIG_DM_LOG_USERSPACE is not set
CONFIG_DM_ZERO=m
# CONFIG_DM_MULTIPATH is not set
# CONFIG_DM_DELAY is not set
@@ -563,13 +580,16 @@ CONFIG_DM_ZERO=m
#
#
-# Enable only one of the two stacks, unless you know what you are doing
+# You can enable one or both FireWire driver stacks.
+#
+
+#
+# See the help texts for more information.
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
# CONFIG_I2O is not set
CONFIG_NETDEVICES=y
-CONFIG_COMPAT_NET_DEV_OPS=y
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
# CONFIG_MACVLAN is not set
@@ -635,6 +655,7 @@ CONFIG_NET_PCI=y
# CONFIG_SMSC9420 is not set
# CONFIG_SUNDANCE is not set
# CONFIG_TLAN is not set
+# CONFIG_KS8842 is not set
# CONFIG_VIA_RHINE is not set
# CONFIG_SC92031 is not set
# CONFIG_ATL2 is not set
@@ -657,6 +678,7 @@ CONFIG_E1000E=m
# CONFIG_VIA_VELOCITY is not set
CONFIG_TIGON3=m
CONFIG_BNX2=m
+# CONFIG_CNIC is not set
# CONFIG_QLA3XXX is not set
# CONFIG_ATL1 is not set
# CONFIG_ATL1E is not set
@@ -763,6 +785,7 @@ CONFIG_MOUSE_SERIAL=y
# CONFIG_MOUSE_APPLETOUCH is not set
# CONFIG_MOUSE_BCM5974 is not set
# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_MOUSE_SYNAPTICS_I2C is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
# CONFIG_INPUT_TOUCHSCREEN is not set
@@ -860,6 +883,7 @@ CONFIG_I2C_ALGOBIT=y
#
# I2C system bus drivers (mostly embedded / system-on-chip)
#
+# CONFIG_I2C_DESIGNWARE is not set
# CONFIG_I2C_OCORES is not set
# CONFIG_I2C_SIMTEC is not set
@@ -888,13 +912,17 @@ CONFIG_I2C_ALGOBIT=y
# CONFIG_SENSORS_PCF8574 is not set
# CONFIG_PCF8575 is not set
# CONFIG_SENSORS_PCA9539 is not set
-# CONFIG_SENSORS_MAX6875 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
# CONFIG_I2C_DEBUG_CHIP is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
# CONFIG_GPIOLIB is not set
# CONFIG_W1 is not set
@@ -949,6 +977,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_SMSC47B397 is not set
# CONFIG_SENSORS_ADS7828 is not set
# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP401 is not set
# CONFIG_SENSORS_VIA686A is not set
# CONFIG_SENSORS_VT1211 is not set
# CONFIG_SENSORS_VT8231 is not set
@@ -984,23 +1013,9 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM8350_I2C is not set
# CONFIG_MFD_PCF50633 is not set
+# CONFIG_AB3100_CORE is not set
# CONFIG_REGULATOR is not set
-
-#
-# Multimedia devices
-#
-
-#
-# Multimedia core support
-#
-# CONFIG_VIDEO_DEV is not set
-# CONFIG_DVB_CORE is not set
-# CONFIG_VIDEO_MEDIA is not set
-
-#
-# Multimedia drivers
-#
-# CONFIG_DAB is not set
+# CONFIG_MEDIA_SUPPORT is not set
#
# Graphics support
@@ -1127,6 +1142,11 @@ CONFIG_SND_VERBOSE_PROCFS=y
# CONFIG_SND_VERBOSE_PRINTK is not set
# CONFIG_SND_DEBUG is not set
CONFIG_SND_VMASTER=y
+CONFIG_SND_RAWMIDI_SEQ=m
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
CONFIG_SND_MPU401_UART=m
CONFIG_SND_AC97_CODEC=m
CONFIG_SND_DRIVERS=y
@@ -1153,6 +1173,7 @@ CONFIG_SND_ALI5451=m
# CONFIG_SND_OXYGEN is not set
# CONFIG_SND_CS4281 is not set
# CONFIG_SND_CS46XX is not set
+# CONFIG_SND_CTXFI is not set
# CONFIG_SND_DARLA20 is not set
# CONFIG_SND_GINA20 is not set
# CONFIG_SND_LAYLA20 is not set
@@ -1183,6 +1204,7 @@ CONFIG_SND_ALI5451=m
# CONFIG_SND_INTEL8X0 is not set
# CONFIG_SND_INTEL8X0M is not set
# CONFIG_SND_KORG1212 is not set
+# CONFIG_SND_LX6464ES is not set
# CONFIG_SND_MAESTRO3 is not set
# CONFIG_SND_MIXART is not set
# CONFIG_SND_NM256 is not set
@@ -1229,6 +1251,7 @@ CONFIG_HID_BELKIN=y
CONFIG_HID_CHERRY=y
CONFIG_HID_CHICONY=y
CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
# CONFIG_DRAGONRISE_FF is not set
CONFIG_HID_EZKEY=y
CONFIG_HID_KYE=y
@@ -1246,9 +1269,14 @@ CONFIG_HID_PETALYNX=y
CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
+CONFIG_HID_GREENASIA=y
# CONFIG_GREENASIA_FF is not set
+CONFIG_HID_SMARTJOYPLUS=y
+# CONFIG_SMARTJOYPLUS_FF is not set
CONFIG_HID_TOPSEED=y
+CONFIG_HID_THRUSTMASTER=y
# CONFIG_THRUSTMASTER_FF is not set
+CONFIG_HID_ZEROPLUS=y
# CONFIG_ZEROPLUS_FF is not set
CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
@@ -1261,7 +1289,6 @@ CONFIG_USB=y
#
# Miscellaneous USB options
#
-CONFIG_USB_DEVICEFS=y
# CONFIG_USB_DEVICE_CLASS is not set
# CONFIG_USB_DYNAMIC_MINORS is not set
# CONFIG_USB_OTG is not set
@@ -1273,6 +1300,7 @@ CONFIG_USB_DEVICEFS=y
# USB Host Controller Drivers
#
# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_XHCI_HCD is not set
CONFIG_USB_EHCI_HCD=m
# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
# CONFIG_USB_EHCI_TT_NEWSCHED is not set
@@ -1351,7 +1379,6 @@ CONFIG_USB_STORAGE=m
# CONFIG_USB_LD is not set
# CONFIG_USB_TRANCEVIBRATOR is not set
# CONFIG_USB_IOWARRIOR is not set
-# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
# CONFIG_USB_VST is not set
# CONFIG_USB_GADGET is not set
@@ -1397,6 +1424,7 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
#
# SPI RTC drivers
@@ -1425,6 +1453,10 @@ CONFIG_RTC_DRV_STARFIRE=y
# CONFIG_DMADEVICES is not set
# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
# CONFIG_STAGING is not set
#
@@ -1457,11 +1489,12 @@ CONFIG_FS_MBCACHE=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y
-CONFIG_FILE_LOCKING=y
# CONFIG_XFS_FS is not set
# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
@@ -1536,7 +1569,7 @@ CONFIG_NETWORK_FILESYSTEMS=y
# CONFIG_PARTITION_ADVANCED is not set
CONFIG_MSDOS_PARTITION=y
CONFIG_SUN_PARTITION=y
-CONFIG_NLS=m
+CONFIG_NLS=y
CONFIG_NLS_DEFAULT="iso8859-1"
# CONFIG_NLS_CODEPAGE_437 is not set
# CONFIG_NLS_CODEPAGE_737 is not set
@@ -1636,25 +1669,28 @@ CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_DEBUG_PAGEALLOC is not set
CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
CONFIG_TRACING=y
+CONFIG_GENERIC_TRACER=y
CONFIG_TRACING_SUPPORT=y
-
-#
-# Tracers
-#
+CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER is not set
# CONFIG_IRQSOFF_TRACER is not set
# CONFIG_SCHED_TRACER is not set
-# CONFIG_CONTEXT_SWITCH_TRACER is not set
-# CONFIG_EVENT_TRACER is not set
# CONFIG_BOOT_TRACER is not set
-# CONFIG_TRACE_BRANCH_PROFILING is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
# CONFIG_STACK_TRACER is not set
# CONFIG_KMEMTRACE is not set
# CONFIG_WORKQUEUE_TRACER is not set
CONFIG_BLK_DEV_IO_TRACE=y
# CONFIG_FTRACE_STARTUP_TEST is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index bb91b1248cd..f0d343c3b95 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -161,5 +161,5 @@ static inline int __atomic24_sub(int i, atomic24_t *v)
#endif /* !(__KERNEL__) */
-#include <asm-generic/atomic.h>
+#include <asm-generic/atomic-long.h>
#endif /* !(__ARCH_SPARC_ATOMIC__) */
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index a0a70649269..f2e48009989 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -114,5 +114,5 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
-#include <asm-generic/atomic.h>
+#include <asm-generic/atomic-long.h>
#endif /* !(__ARCH_SPARC64_ATOMIC__) */
diff --git a/arch/sparc/include/asm/bitsperlong.h b/arch/sparc/include/asm/bitsperlong.h
new file mode 100644
index 00000000000..40dcaa3aaa5
--- /dev/null
+++ b/arch/sparc/include/asm/bitsperlong.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_ALPHA_BITSPERLONG_H
+#define __ASM_ALPHA_BITSPERLONG_H
+
+#if defined(__sparc__) && defined(__arch64__)
+#define __BITS_PER_LONG 64
+#else
+#define __BITS_PER_LONG 32
+#endif
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* __ASM_ALPHA_BITSPERLONG_H */
+
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
index a11b89ee9ef..926397d345f 100644
--- a/arch/sparc/include/asm/cpudata_64.h
+++ b/arch/sparc/include/asm/cpudata_64.h
@@ -6,9 +6,6 @@
#ifndef _SPARC64_CPUDATA_H
#define _SPARC64_CPUDATA_H
-#include <asm/hypervisor.h>
-#include <asm/asi.h>
-
#ifndef __ASSEMBLY__
#include <linux/percpu.h>
@@ -38,202 +35,10 @@ DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
#define local_cpu_data() __get_cpu_var(__cpu_data)
-/* Trap handling code needs to get at a few critical values upon
- * trap entry and to process TSB misses. These cannot be in the
- * per_cpu() area as we really need to lock them into the TLB and
- * thus make them part of the main kernel image. As a result we
- * try to make this as small as possible.
- *
- * This is padded out and aligned to 64-bytes to avoid false sharing
- * on SMP.
- */
-
-/* If you modify the size of this structure, please update
- * TRAP_BLOCK_SZ_SHIFT below.
- */
-struct thread_info;
-struct trap_per_cpu {
-/* D-cache line 1: Basic thread information, cpu and device mondo queues */
- struct thread_info *thread;
- unsigned long pgd_paddr;
- unsigned long cpu_mondo_pa;
- unsigned long dev_mondo_pa;
-
-/* D-cache line 2: Error Mondo Queue and kernel buffer pointers */
- unsigned long resum_mondo_pa;
- unsigned long resum_kernel_buf_pa;
- unsigned long nonresum_mondo_pa;
- unsigned long nonresum_kernel_buf_pa;
-
-/* Dcache lines 3, 4, 5, and 6: Hypervisor Fault Status */
- struct hv_fault_status fault_info;
-
-/* Dcache line 7: Physical addresses of CPU send mondo block and CPU list. */
- unsigned long cpu_mondo_block_pa;
- unsigned long cpu_list_pa;
- unsigned long tsb_huge;
- unsigned long tsb_huge_temp;
-
-/* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size. */
- unsigned long irq_worklist_pa;
- unsigned int cpu_mondo_qmask;
- unsigned int dev_mondo_qmask;
- unsigned int resum_qmask;
- unsigned int nonresum_qmask;
- void *hdesc;
-} __attribute__((aligned(64)));
-extern struct trap_per_cpu trap_block[NR_CPUS];
-extern void init_cur_cpu_trap(struct thread_info *);
-extern void setup_tba(void);
-extern int ncpus_probed;
extern const struct seq_operations cpuinfo_op;
-extern unsigned long real_hard_smp_processor_id(void);
-
-struct cpuid_patch_entry {
- unsigned int addr;
- unsigned int cheetah_safari[4];
- unsigned int cheetah_jbus[4];
- unsigned int starfire[4];
- unsigned int sun4v[4];
-};
-extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
-
-struct sun4v_1insn_patch_entry {
- unsigned int addr;
- unsigned int insn;
-};
-extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch,
- __sun4v_1insn_patch_end;
-
-struct sun4v_2insn_patch_entry {
- unsigned int addr;
- unsigned int insns[2];
-};
-extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
- __sun4v_2insn_patch_end;
-
#endif /* !(__ASSEMBLY__) */
-#define TRAP_PER_CPU_THREAD 0x00
-#define TRAP_PER_CPU_PGD_PADDR 0x08
-#define TRAP_PER_CPU_CPU_MONDO_PA 0x10
-#define TRAP_PER_CPU_DEV_MONDO_PA 0x18
-#define TRAP_PER_CPU_RESUM_MONDO_PA 0x20
-#define TRAP_PER_CPU_RESUM_KBUF_PA 0x28
-#define TRAP_PER_CPU_NONRESUM_MONDO_PA 0x30
-#define TRAP_PER_CPU_NONRESUM_KBUF_PA 0x38
-#define TRAP_PER_CPU_FAULT_INFO 0x40
-#define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA 0xc0
-#define TRAP_PER_CPU_CPU_LIST_PA 0xc8
-#define TRAP_PER_CPU_TSB_HUGE 0xd0
-#define TRAP_PER_CPU_TSB_HUGE_TEMP 0xd8
-#define TRAP_PER_CPU_IRQ_WORKLIST_PA 0xe0
-#define TRAP_PER_CPU_CPU_MONDO_QMASK 0xe8
-#define TRAP_PER_CPU_DEV_MONDO_QMASK 0xec
-#define TRAP_PER_CPU_RESUM_QMASK 0xf0
-#define TRAP_PER_CPU_NONRESUM_QMASK 0xf4
-
-#define TRAP_BLOCK_SZ_SHIFT 8
-
-#include <asm/scratchpad.h>
-
-#define __GET_CPUID(REG) \
- /* Spitfire implementation (default). */ \
-661: ldxa [%g0] ASI_UPA_CONFIG, REG; \
- srlx REG, 17, REG; \
- and REG, 0x1f, REG; \
- nop; \
- .section .cpuid_patch, "ax"; \
- /* Instruction location. */ \
- .word 661b; \
- /* Cheetah Safari implementation. */ \
- ldxa [%g0] ASI_SAFARI_CONFIG, REG; \
- srlx REG, 17, REG; \
- and REG, 0x3ff, REG; \
- nop; \
- /* Cheetah JBUS implementation. */ \
- ldxa [%g0] ASI_JBUS_CONFIG, REG; \
- srlx REG, 17, REG; \
- and REG, 0x1f, REG; \
- nop; \
- /* Starfire implementation. */ \
- sethi %hi(0x1fff40000d0 >> 9), REG; \
- sllx REG, 9, REG; \
- or REG, 0xd0, REG; \
- lduwa [REG] ASI_PHYS_BYPASS_EC_E, REG;\
- /* sun4v implementation. */ \
- mov SCRATCHPAD_CPUID, REG; \
- ldxa [REG] ASI_SCRATCHPAD, REG; \
- nop; \
- nop; \
- .previous;
-
-#ifdef CONFIG_SMP
-
-#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
- __GET_CPUID(TMP) \
- sethi %hi(trap_block), DEST; \
- sllx TMP, TRAP_BLOCK_SZ_SHIFT, TMP; \
- or DEST, %lo(trap_block), DEST; \
- add DEST, TMP, DEST; \
-
-/* Clobbers TMP, current address space PGD phys address into DEST. */
-#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \
- TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
- ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
-
-/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
-#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \
- TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
- add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
-
-/* Clobbers TMP, loads DEST with current thread info pointer. */
-#define TRAP_LOAD_THREAD_REG(DEST, TMP) \
- TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
- ldx [DEST + TRAP_PER_CPU_THREAD], DEST;
-
-/* Given the current thread info pointer in THR, load the per-cpu
- * area base of the current processor into DEST. REG1, REG2, and REG3 are
- * clobbered.
- *
- * You absolutely cannot use DEST as a temporary in this code. The
- * reason is that traps can happen during execution, and return from
- * trap will load the fully resolved DEST per-cpu base. This can corrupt
- * the calculations done by the macro mid-stream.
- */
-#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3) \
- lduh [THR + TI_CPU], REG1; \
- sethi %hi(__per_cpu_shift), REG3; \
- sethi %hi(__per_cpu_base), REG2; \
- ldx [REG3 + %lo(__per_cpu_shift)], REG3; \
- ldx [REG2 + %lo(__per_cpu_base)], REG2; \
- sllx REG1, REG3, REG3; \
- add REG3, REG2, DEST;
-
-#else
-
-#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
- sethi %hi(trap_block), DEST; \
- or DEST, %lo(trap_block), DEST; \
-
-/* Uniprocessor versions, we know the cpuid is zero. */
-#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \
- TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
- ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
-
-/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
-#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \
- TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
- add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
-
-#define TRAP_LOAD_THREAD_REG(DEST, TMP) \
- TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
- ldx [DEST + TRAP_PER_CPU_THREAD], DEST;
-
-/* No per-cpu areas on uniprocessor, so no need to load DEST. */
-#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)
-
-#endif /* !(CONFIG_SMP) */
+#include <asm/trap_block.h>
#endif /* _SPARC64_CPUDATA_H */
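The hunk above strips cpudata_64.h down to the cpuinfo_sparc per-cpu accessors and moves the trap-block machinery into the new asm/trap_block.h added later in this series. As a reminder of what callers still get from this header, here is a minimal, hedged sketch; the clock_tick field name is assumed from the usual cpuinfo_sparc layout, which this hunk does not show.

#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <asm/cpudata.h>

/* Walk the online CPUs and report each one's clock rate through the
 * cpu_data() accessor that cpudata_64.h continues to provide. */
static void report_cpu_clocks(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		pr_info("CPU%d: %lu Hz\n", cpu, cpu_data(cpu).clock_tick);
}
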
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 0f4150e2661..204e4bf6443 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -1,8 +1,166 @@
#ifndef ___ASM_SPARC_DMA_MAPPING_H
#define ___ASM_SPARC_DMA_MAPPING_H
-#if defined(__sparc__) && defined(__arch64__)
-#include <asm/dma-mapping_64.h>
-#else
-#include <asm/dma-mapping_32.h>
-#endif
+
+#include <linux/scatterlist.h>
+#include <linux/mm.h>
+
+#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
+
+extern int dma_supported(struct device *dev, u64 mask);
+extern int dma_set_mask(struct device *dev, u64 dma_mask);
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+#define dma_is_consistent(d, h) (1)
+
+struct dma_ops {
+ void *(*alloc_coherent)(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag);
+ void (*free_coherent)(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle);
+ dma_addr_t (*map_page)(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction);
+ void (*unmap_page)(struct device *dev, dma_addr_t dma_addr,
+ size_t size,
+ enum dma_data_direction direction);
+ int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction direction);
+ void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
+ int nhwentries,
+ enum dma_data_direction direction);
+ void (*sync_single_for_cpu)(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction);
+ void (*sync_single_for_device)(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction);
+ void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
+ int nelems,
+ enum dma_data_direction direction);
+ void (*sync_sg_for_device)(struct device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction dir);
+};
+extern const struct dma_ops *dma_ops;
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+}
+
+static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+ size_t size,
+ enum dma_data_direction direction)
+{
+ return dma_ops->map_page(dev, virt_to_page(cpu_addr),
+ (unsigned long)cpu_addr & ~PAGE_MASK, size,
+ direction);
+}
+
+static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+ size_t size,
+ enum dma_data_direction direction)
+{
+ dma_ops->unmap_page(dev, dma_addr, size, direction);
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ return dma_ops->map_page(dev, page, offset, size, direction);
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+ size_t size,
+ enum dma_data_direction direction)
+{
+ dma_ops->unmap_page(dev, dma_address, size, direction);
+}
+
+static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction)
+{
+ return dma_ops->map_sg(dev, sg, nents, direction);
+}
+
+static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction)
+{
+ dma_ops->unmap_sg(dev, sg, nents, direction);
+}
+
+static inline void dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+ dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction);
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle,
+ size_t size,
+ enum dma_data_direction direction)
+{
+ if (dma_ops->sync_single_for_device)
+ dma_ops->sync_single_for_device(dev, dma_handle, size,
+ direction);
+}
+
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
+{
+ dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction);
+}
+
+static inline void dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
+{
+ if (dma_ops->sync_sg_for_device)
+ dma_ops->sync_sg_for_device(dev, sg, nelems, direction);
+}
+
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+ dma_addr_t dma_handle,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ dma_sync_single_for_cpu(dev, dma_handle+offset, size, dir);
+}
+
+static inline void dma_sync_single_range_for_device(struct device *dev,
+ dma_addr_t dma_handle,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ dma_sync_single_for_device(dev, dma_handle+offset, size, dir);
+}
+
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return (dma_addr == DMA_ERROR_CODE);
+}
+
+static inline int dma_get_cache_alignment(void)
+{
+ /*
+ * no easy way to get cache size on all processors, so return
+ * the maximum possible, to be safe
+ */
+ return (1 << INTERNODE_CACHE_SHIFT);
+}
+
#endif
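With the _32 and _64 variants folded into this one header, every sparc DMA user goes through the same dma_ops indirection shown above. Below is a minimal, hedged sketch of the call pattern a driver would follow against this API; the function and buffer names are invented for illustration.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Map a CPU buffer for a device-bound transfer, bail out if the mapping
 * failed, and unmap once the hardware is done with it. */
static int demo_dma_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... kick off the transfer and wait for completion ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}

Note that dma_sync_single_for_device() and dma_sync_sg_for_device() only call into dma_ops when the backend provides those hooks, preserving the old sparc64 behaviour of doing nothing in the CPU-to-device direction.
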
diff --git a/arch/sparc/include/asm/dma-mapping_32.h b/arch/sparc/include/asm/dma-mapping_32.h
deleted file mode 100644
index 8a57ea0573e..00000000000
--- a/arch/sparc/include/asm/dma-mapping_32.h
+++ /dev/null
@@ -1,60 +0,0 @@
-#ifndef _ASM_SPARC_DMA_MAPPING_H
-#define _ASM_SPARC_DMA_MAPPING_H
-
-#include <linux/types.h>
-
-struct device;
-struct scatterlist;
-struct page;
-
-#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-
-extern int dma_supported(struct device *dev, u64 mask);
-extern int dma_set_mask(struct device *dev, u64 dma_mask);
-extern void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
-extern void dma_free_coherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle);
-extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
- size_t size,
- enum dma_data_direction direction);
-extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
- size_t size,
- enum dma_data_direction direction);
-extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction);
-extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
- size_t size, enum dma_data_direction direction);
-extern int dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction);
-extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction);
-extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
- size_t size,
- enum dma_data_direction direction);
-extern void dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle,
- size_t size,
- enum dma_data_direction direction);
-extern void dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size,
- enum dma_data_direction direction);
-extern void dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction);
-extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction direction);
-extern void dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg, int nelems,
- enum dma_data_direction direction);
-extern int dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
-extern int dma_get_cache_alignment(void);
-
-#define dma_alloc_noncoherent dma_alloc_coherent
-#define dma_free_noncoherent dma_free_coherent
-
-#endif /* _ASM_SPARC_DMA_MAPPING_H */
diff --git a/arch/sparc/include/asm/dma-mapping_64.h b/arch/sparc/include/asm/dma-mapping_64.h
deleted file mode 100644
index bfa64f9702d..00000000000
--- a/arch/sparc/include/asm/dma-mapping_64.h
+++ /dev/null
@@ -1,154 +0,0 @@
-#ifndef _ASM_SPARC64_DMA_MAPPING_H
-#define _ASM_SPARC64_DMA_MAPPING_H
-
-#include <linux/scatterlist.h>
-#include <linux/mm.h>
-
-#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-
-struct dma_ops {
- void *(*alloc_coherent)(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
- void (*free_coherent)(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle);
- dma_addr_t (*map_single)(struct device *dev, void *cpu_addr,
- size_t size,
- enum dma_data_direction direction);
- void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
- size_t size,
- enum dma_data_direction direction);
- int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction);
- void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
- int nhwentries,
- enum dma_data_direction direction);
- void (*sync_single_for_cpu)(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction);
- void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
- int nelems,
- enum dma_data_direction direction);
-};
-extern const struct dma_ops *dma_ops;
-
-extern int dma_supported(struct device *dev, u64 mask);
-extern int dma_set_mask(struct device *dev, u64 dma_mask);
-
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
-{
- return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
-}
-
-static inline void dma_free_coherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
-{
- dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
-}
-
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
- size_t size,
- enum dma_data_direction direction)
-{
- return dma_ops->map_single(dev, cpu_addr, size, direction);
-}
-
-static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
- size_t size,
- enum dma_data_direction direction)
-{
- dma_ops->unmap_single(dev, dma_addr, size, direction);
-}
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- return dma_ops->map_single(dev, page_address(page) + offset,
- size, direction);
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
- size_t size,
- enum dma_data_direction direction)
-{
- dma_ops->unmap_single(dev, dma_address, size, direction);
-}
-
-static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction)
-{
- return dma_ops->map_sg(dev, sg, nents, direction);
-}
-
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction)
-{
- dma_ops->unmap_sg(dev, sg, nents, direction);
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle,
- size_t size,
- enum dma_data_direction direction)
-{
- /* No flushing needed to sync cpu writes to the device. */
-}
-
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size,
- enum dma_data_direction direction)
-{
- dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
-}
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size,
- enum dma_data_direction direction)
-{
- /* No flushing needed to sync cpu writes to the device. */
-}
-
-
-static inline void dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
- dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction);
-}
-
-static inline void dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
- /* No flushing needed to sync cpu writes to the device. */
-}
-
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return (dma_addr == DMA_ERROR_CODE);
-}
-
-static inline int dma_get_cache_alignment(void)
-{
- /* no easy way to get cache size on all processors, so return
- * the maximum possible, to be safe */
- return (1 << INTERNODE_CACHE_SHIFT);
-}
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d, h) (1)
-
-#endif /* _ASM_SPARC64_DMA_MAPPING_H */
diff --git a/arch/sparc/include/asm/errno.h b/arch/sparc/include/asm/errno.h
index a9ef172977d..4e2bc490d71 100644
--- a/arch/sparc/include/asm/errno.h
+++ b/arch/sparc/include/asm/errno.h
@@ -110,4 +110,6 @@
#define EOWNERDEAD 132 /* Owner died */
#define ENOTRECOVERABLE 133 /* State not recoverable */
+#define ERFKILL 134 /* Operation not possible due to RF-kill */
+
#endif
diff --git a/arch/sparc/include/asm/ftrace.h b/arch/sparc/include/asm/ftrace.h
index d27716cd38c..b0f18e9893d 100644
--- a/arch/sparc/include/asm/ftrace.h
+++ b/arch/sparc/include/asm/ftrace.h
@@ -11,4 +11,15 @@ extern void _mcount(void);
#endif
+#ifdef CONFIG_DYNAMIC_FTRACE
+/* reloction of mcount call site is the same as the address */
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+ return addr;
+}
+
+struct dyn_arch_ftrace {
+};
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
#endif /* _ASM_SPARC64_FTRACE */
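ftrace_call_adjust() is the hook the generic ftrace code uses to turn a recorded mcount call-site address into the address it should patch; on sparc64 the two are the same, so it is the identity function. A hedged, simplified sketch of how the generic record-scanning loop consumes it follows (this is not sparc code and the helper name is invented):

/* Walk the __mcount_loc table gathered at link time and record each
 * call site, letting the architecture adjust the address first. */
static void record_mcount_sites(unsigned long *start, unsigned long *end)
{
	unsigned long *p = start;

	while (p < end) {
		unsigned long addr = ftrace_call_adjust(*p++);

		/* hand addr over to the dyn_ftrace bookkeeping ... */
	}
}
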
diff --git a/arch/sparc/include/asm/kmap_types.h b/arch/sparc/include/asm/kmap_types.h
index 602f5e034f7..aad21745fbb 100644
--- a/arch/sparc/include/asm/kmap_types.h
+++ b/arch/sparc/include/asm/kmap_types.h
@@ -5,21 +5,6 @@
* is actually used on sparc. -DaveM
*/
-enum km_type {
- KM_BOUNCE_READ,
- KM_SKB_SUNRPC_DATA,
- KM_SKB_DATA_SOFTIRQ,
- KM_USER0,
- KM_USER1,
- KM_BIO_SRC_IRQ,
- KM_BIO_DST_IRQ,
- KM_PTE0,
- KM_PTE1,
- KM_IRQ0,
- KM_IRQ1,
- KM_SOFTIRQ0,
- KM_SOFTIRQ1,
- KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
#endif
diff --git a/arch/sparc/include/asm/mdesc.h b/arch/sparc/include/asm/mdesc.h
index 1acc7272e53..9faa046713f 100644
--- a/arch/sparc/include/asm/mdesc.h
+++ b/arch/sparc/include/asm/mdesc.h
@@ -71,7 +71,8 @@ struct mdesc_notifier_client {
extern void mdesc_register_notifier(struct mdesc_notifier_client *client);
-extern void mdesc_fill_in_cpu_data(cpumask_t mask);
+extern void mdesc_fill_in_cpu_data(cpumask_t *mask);
+extern void mdesc_populate_present_mask(cpumask_t *mask);
extern void sun4v_mdesc_init(void);
diff --git a/arch/sparc/include/asm/mman.h b/arch/sparc/include/asm/mman.h
index fdfbbf0a473..988192e8e95 100644
--- a/arch/sparc/include/asm/mman.h
+++ b/arch/sparc/include/asm/mman.h
@@ -1,7 +1,7 @@
#ifndef __SPARC_MMAN_H__
#define __SPARC_MMAN_H__
-#include <asm-generic/mman.h>
+#include <asm-generic/mman-common.h>
/* SunOS'ified... */
diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
index d1806edc095..f72080bdda9 100644
--- a/arch/sparc/include/asm/page_32.h
+++ b/arch/sparc/include/asm/page_32.h
@@ -152,6 +152,6 @@ extern unsigned long pfn_base;
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#include <asm-generic/memory_model.h>
-#include <asm-generic/page.h>
+#include <asm-generic/getorder.h>
#endif /* _SPARC_PAGE_H */
diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h
index 4274ed13ddb..f0d09b40103 100644
--- a/arch/sparc/include/asm/page_64.h
+++ b/arch/sparc/include/asm/page_64.h
@@ -132,6 +132,6 @@ typedef struct page *pgtable_t;
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#include <asm-generic/page.h>
+#include <asm-generic/getorder.h>
#endif /* _SPARC64_PAGE_H */
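Both page_32.h and page_64.h only needed get_order() from the old asm-generic/page.h, and that helper now lives in asm-generic/getorder.h, so the include swap keeps existing callers working. A small hedged usage sketch for reference; the wrapper is invented for illustration:

#include <linux/gfp.h>
#include <asm/page.h>

/* Round a byte count up to a power-of-two number of pages and allocate
 * a physically contiguous buffer of that order. */
static void *alloc_contig_buf(size_t size)
{
	return (void *)__get_free_pages(GFP_KERNEL, get_order(size));
}
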
diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h
index 4f79a54948f..7a1e3566e59 100644
--- a/arch/sparc/include/asm/pci_64.h
+++ b/arch/sparc/include/asm/pci_64.h
@@ -191,8 +191,6 @@ extern void
pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
struct pci_bus_region *region);
-extern struct resource *pcibios_select_root(struct pci_dev *, struct resource *);
-
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
return PCI_IRQ_NONE;
diff --git a/arch/sparc/include/asm/percpu_64.h b/arch/sparc/include/asm/percpu_64.h
index bee64593023..007aafb4ae9 100644
--- a/arch/sparc/include/asm/percpu_64.h
+++ b/arch/sparc/include/asm/percpu_64.h
@@ -7,20 +7,16 @@ register unsigned long __local_per_cpu_offset asm("g5");
#ifdef CONFIG_SMP
-extern void real_setup_per_cpu_areas(void);
+#include <asm/trap_block.h>
-extern unsigned long __per_cpu_base;
-extern unsigned long __per_cpu_shift;
#define __per_cpu_offset(__cpu) \
- (__per_cpu_base + ((unsigned long)(__cpu) << __per_cpu_shift))
+ (trap_block[(__cpu)].__per_cpu_base)
#define per_cpu_offset(x) (__per_cpu_offset(x))
#define __my_cpu_offset __local_per_cpu_offset
#else /* ! SMP */
-#define real_setup_per_cpu_areas() do { } while (0)
-
#endif /* SMP */
#include <asm-generic/percpu.h>
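With dynamic per-cpu areas, each CPU's per-cpu base is stored directly in its trap_block[] entry, so __per_cpu_offset() becomes a plain array load instead of the old base-plus-shift computation. A hedged sketch of what a remote per-cpu read looks like on top of this definition; the variable is invented for illustration:

#include <linux/percpu.h>

DEFINE_PER_CPU(unsigned long, demo_counter);

/* per_cpu() ultimately adds __per_cpu_offset(cpu), which after this
 * change resolves to trap_block[cpu].__per_cpu_base. */
static unsigned long read_remote_counter(int cpu)
{
	return per_cpu(demo_counter, cpu);
}
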
diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
index 681582d2696..ca2b34456c4 100644
--- a/arch/sparc/include/asm/pgalloc_32.h
+++ b/arch/sparc/include/asm/pgalloc_32.h
@@ -44,8 +44,8 @@ BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long)
BTFIXUPDEF_CALL(void, free_pmd_fast, pmd_t *)
#define free_pmd_fast(pmd) BTFIXUP_CALL(free_pmd_fast)(pmd)
-#define pmd_free(mm, pmd) free_pmd_fast(pmd)
-#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd)
+#define pmd_free(mm, pmd) free_pmd_fast(pmd)
+#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
BTFIXUPDEF_CALL(void, pmd_populate, pmd_t *, struct page *)
#define pmd_populate(MM, PMD, PTE) BTFIXUP_CALL(pmd_populate)(PMD, PTE)
@@ -62,7 +62,7 @@ BTFIXUPDEF_CALL(void, free_pte_fast, pte_t *)
#define pte_free_kernel(mm, pte) BTFIXUP_CALL(free_pte_fast)(pte)
BTFIXUPDEF_CALL(void, pte_free, pgtable_t )
-#define pte_free(mm, pte) BTFIXUP_CALL(pte_free)(pte)
-#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte)
+#define pte_free(mm, pte) BTFIXUP_CALL(pte_free)(pte)
+#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
#endif /* _SPARC_PGALLOC_H */
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index b049abf9902..0ff92fa2206 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -726,11 +726,17 @@ extern unsigned long pte_file(pte_t);
extern pte_t pgoff_to_pte(unsigned long);
#define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL)
-extern unsigned long *sparc64_valid_addr_bitmap;
+extern unsigned long sparc64_valid_addr_bitmap[];
/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
-#define kern_addr_valid(addr) \
- (test_bit(__pa((unsigned long)(addr))>>22, sparc64_valid_addr_bitmap))
+static inline bool kern_addr_valid(unsigned long addr)
+{
+ unsigned long paddr = __pa(addr);
+
+ if ((paddr >> 41UL) != 0UL)
+ return false;
+ return test_bit(paddr >> 22, sparc64_valid_addr_bitmap);
+}
extern int page_in_phys_avail(unsigned long paddr);
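Besides becoming a proper inline, kern_addr_valid() now rejects physical addresses whose bit-22 index would run past sparc64_valid_addr_bitmap, rather than reading beyond the bitmap. A hedged usage sketch; the caller is invented, but in-tree users follow the same pattern before touching arbitrary kernel addresses:

#include <linux/errno.h>
#include <asm/pgtable.h>

/* Only dereference a kernel virtual address once the backing physical
 * page is known to be marked valid in sparc64_valid_addr_bitmap. */
static int read_kernel_byte(unsigned long vaddr, unsigned char *out)
{
	if (!kern_addr_valid(vaddr))
		return -EFAULT;

	*out = *(unsigned char *)vaddr;
	return 0;
}
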
diff --git a/arch/sparc/include/asm/prom.h b/arch/sparc/include/asm/prom.h
index 900d44714f8..be8d7aaeb60 100644
--- a/arch/sparc/include/asm/prom.h
+++ b/arch/sparc/include/asm/prom.h
@@ -86,6 +86,8 @@ extern int of_node_to_nid(struct device_node *dp);
#endif
extern void prom_build_devicetree(void);
+extern void of_populate_present_mask(void);
+extern void of_fill_in_cpu_data(void);
/* Dummy ref counting routines - to be implemented later */
static inline struct device_node *of_node_get(struct device_node *node)
diff --git a/arch/sparc/include/asm/signal.h b/arch/sparc/include/asm/signal.h
index cba45206b7f..e49b828a247 100644
--- a/arch/sparc/include/asm/signal.h
+++ b/arch/sparc/include/asm/signal.h
@@ -176,7 +176,7 @@ struct sigstack {
#define SA_STATIC_ALLOC 0x8000
#endif
-#include <asm-generic/signal.h>
+#include <asm-generic/signal-defs.h>
struct __new_sigaction {
__sighandler_t sa_handler;
diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
index 0f7b0e5fb1c..844d73a0340 100644
--- a/arch/sparc/include/asm/thread_info_32.h
+++ b/arch/sparc/include/asm/thread_info_32.h
@@ -54,8 +54,6 @@ struct thread_info {
/*
* macros/functions for gaining access to the thread information structure
- *
- * preempt_count needs to be 1 initially, until the scheduler is functional.
*/
#define INIT_THREAD_INFO(tsk) \
{ \
@@ -64,7 +62,7 @@ struct thread_info {
.exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
- .preempt_count = 1, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
.restart_block = { \
.fn = do_no_restart_syscall, \
}, \
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 639ac805448..1b45a7bbe40 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -102,8 +102,8 @@ struct thread_info {
#define TI_KERN_CNTD1 0x00000488
#define TI_PCR 0x00000490
#define TI_RESTART_BLOCK 0x00000498
-#define TI_KUNA_REGS 0x000004c0
-#define TI_KUNA_INSN 0x000004c8
+#define TI_KUNA_REGS 0x000004c8
+#define TI_KUNA_INSN 0x000004d0
#define TI_FPREGS 0x00000500
/* We embed this in the uppermost byte of thread_info->flags */
@@ -125,8 +125,6 @@ struct thread_info {
/*
* macros/functions for gaining access to the thread information structure
- *
- * preempt_count needs to be 1 initially, until the scheduler is functional.
*/
#ifndef __ASSEMBLY__
@@ -135,7 +133,7 @@ struct thread_info {
.task = &tsk, \
.flags = ((unsigned long)ASI_P) << TI_FLAG_CURRENT_DS_SHIFT, \
.exec_domain = &default_exec_domain, \
- .preempt_count = 1, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
.restart_block = { \
.fn = do_no_restart_syscall, \
}, \
diff --git a/arch/sparc/include/asm/tlb_64.h b/arch/sparc/include/asm/tlb_64.h
index ee38e731bfa..dca406b9b6f 100644
--- a/arch/sparc/include/asm/tlb_64.h
+++ b/arch/sparc/include/asm/tlb_64.h
@@ -100,9 +100,9 @@ static inline void tlb_remove_page(struct mmu_gather *mp, struct page *page)
}
#define tlb_remove_tlb_entry(mp,ptep,addr) do { } while (0)
-#define pte_free_tlb(mp, ptepage) pte_free((mp)->mm, ptepage)
-#define pmd_free_tlb(mp, pmdp) pmd_free((mp)->mm, pmdp)
-#define pud_free_tlb(tlb,pudp) __pud_free_tlb(tlb,pudp)
+#define pte_free_tlb(mp, ptepage, addr) pte_free((mp)->mm, ptepage)
+#define pmd_free_tlb(mp, pmdp, addr) pmd_free((mp)->mm, pmdp)
+#define pud_free_tlb(tlb,pudp, addr) __pud_free_tlb(tlb,pudp,addr)
#define tlb_migrate_finish(mm) do { } while (0)
#define tlb_start_vma(tlb, vma) do { } while (0)
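The pgalloc_32.h hunk earlier and this tlb_64.h hunk both grow a third addr argument on the page-table freeing macros; the generic mm code now passes down the virtual address being torn down, and sparc simply ignores it. A hedged sketch of the caller side, loosely following mm/memory.c's free_pte_range(); simplified and not part of this patch:

#include <asm/pgalloc.h>
#include <asm/tlb.h>

/* When the generic unmap path releases a PTE page into the mmu_gather
 * batch, it now supplies the address alongside the page-table token. */
static void free_pte_level(struct mmu_gather *tlb, pmd_t *pmd,
			   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);

	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
}
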
diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h
new file mode 100644
index 00000000000..7e26b2db621
--- /dev/null
+++ b/arch/sparc/include/asm/trap_block.h
@@ -0,0 +1,207 @@
+#ifndef _SPARC_TRAP_BLOCK_H
+#define _SPARC_TRAP_BLOCK_H
+
+#include <asm/hypervisor.h>
+#include <asm/asi.h>
+
+#ifndef __ASSEMBLY__
+
+/* Trap handling code needs to get at a few critical values upon
+ * trap entry and to process TSB misses. These cannot be in the
+ * per_cpu() area as we really need to lock them into the TLB and
+ * thus make them part of the main kernel image. As a result we
+ * try to make this as small as possible.
+ *
+ * This is padded out and aligned to 64-bytes to avoid false sharing
+ * on SMP.
+ */
+
+/* If you modify the size of this structure, please update
+ * TRAP_BLOCK_SZ_SHIFT below.
+ */
+struct thread_info;
+struct trap_per_cpu {
+/* D-cache line 1: Basic thread information, cpu and device mondo queues */
+ struct thread_info *thread;
+ unsigned long pgd_paddr;
+ unsigned long cpu_mondo_pa;
+ unsigned long dev_mondo_pa;
+
+/* D-cache line 2: Error Mondo Queue and kernel buffer pointers */
+ unsigned long resum_mondo_pa;
+ unsigned long resum_kernel_buf_pa;
+ unsigned long nonresum_mondo_pa;
+ unsigned long nonresum_kernel_buf_pa;
+
+/* Dcache lines 3, 4, 5, and 6: Hypervisor Fault Status */
+ struct hv_fault_status fault_info;
+
+/* Dcache line 7: Physical addresses of CPU send mondo block and CPU list. */
+ unsigned long cpu_mondo_block_pa;
+ unsigned long cpu_list_pa;
+ unsigned long tsb_huge;
+ unsigned long tsb_huge_temp;
+
+/* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size. */
+ unsigned long irq_worklist_pa;
+ unsigned int cpu_mondo_qmask;
+ unsigned int dev_mondo_qmask;
+ unsigned int resum_qmask;
+ unsigned int nonresum_qmask;
+ unsigned long __per_cpu_base;
+} __attribute__((aligned(64)));
+extern struct trap_per_cpu trap_block[NR_CPUS];
+extern void init_cur_cpu_trap(struct thread_info *);
+extern void setup_tba(void);
+extern int ncpus_probed;
+
+extern unsigned long real_hard_smp_processor_id(void);
+
+struct cpuid_patch_entry {
+ unsigned int addr;
+ unsigned int cheetah_safari[4];
+ unsigned int cheetah_jbus[4];
+ unsigned int starfire[4];
+ unsigned int sun4v[4];
+};
+extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
+
+struct sun4v_1insn_patch_entry {
+ unsigned int addr;
+ unsigned int insn;
+};
+extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch,
+ __sun4v_1insn_patch_end;
+
+struct sun4v_2insn_patch_entry {
+ unsigned int addr;
+ unsigned int insns[2];
+};
+extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
+ __sun4v_2insn_patch_end;
+
+
+#endif /* !(__ASSEMBLY__) */
+
+#define TRAP_PER_CPU_THREAD 0x00
+#define TRAP_PER_CPU_PGD_PADDR 0x08
+#define TRAP_PER_CPU_CPU_MONDO_PA 0x10
+#define TRAP_PER_CPU_DEV_MONDO_PA 0x18
+#define TRAP_PER_CPU_RESUM_MONDO_PA 0x20
+#define TRAP_PER_CPU_RESUM_KBUF_PA 0x28
+#define TRAP_PER_CPU_NONRESUM_MONDO_PA 0x30
+#define TRAP_PER_CPU_NONRESUM_KBUF_PA 0x38
+#define TRAP_PER_CPU_FAULT_INFO 0x40
+#define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA 0xc0
+#define TRAP_PER_CPU_CPU_LIST_PA 0xc8
+#define TRAP_PER_CPU_TSB_HUGE 0xd0
+#define TRAP_PER_CPU_TSB_HUGE_TEMP 0xd8
+#define TRAP_PER_CPU_IRQ_WORKLIST_PA 0xe0
+#define TRAP_PER_CPU_CPU_MONDO_QMASK 0xe8
+#define TRAP_PER_CPU_DEV_MONDO_QMASK 0xec
+#define TRAP_PER_CPU_RESUM_QMASK 0xf0
+#define TRAP_PER_CPU_NONRESUM_QMASK 0xf4
+#define TRAP_PER_CPU_PER_CPU_BASE 0xf8
+
+#define TRAP_BLOCK_SZ_SHIFT 8
+
+#include <asm/scratchpad.h>
+
+#define __GET_CPUID(REG) \
+ /* Spitfire implementation (default). */ \
+661: ldxa [%g0] ASI_UPA_CONFIG, REG; \
+ srlx REG, 17, REG; \
+ and REG, 0x1f, REG; \
+ nop; \
+ .section .cpuid_patch, "ax"; \
+ /* Instruction location. */ \
+ .word 661b; \
+ /* Cheetah Safari implementation. */ \
+ ldxa [%g0] ASI_SAFARI_CONFIG, REG; \
+ srlx REG, 17, REG; \
+ and REG, 0x3ff, REG; \
+ nop; \
+ /* Cheetah JBUS implementation. */ \
+ ldxa [%g0] ASI_JBUS_CONFIG, REG; \
+ srlx REG, 17, REG; \
+ and REG, 0x1f, REG; \
+ nop; \
+ /* Starfire implementation. */ \
+ sethi %hi(0x1fff40000d0 >> 9), REG; \
+ sllx REG, 9, REG; \
+ or REG, 0xd0, REG; \
+ lduwa [REG] ASI_PHYS_BYPASS_EC_E, REG;\
+ /* sun4v implementation. */ \
+ mov SCRATCHPAD_CPUID, REG; \
+ ldxa [REG] ASI_SCRATCHPAD, REG; \
+ nop; \
+ nop; \
+ .previous;
+
+#ifdef CONFIG_SMP
+
+#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
+ __GET_CPUID(TMP) \
+ sethi %hi(trap_block), DEST; \
+ sllx TMP, TRAP_BLOCK_SZ_SHIFT, TMP; \
+ or DEST, %lo(trap_block), DEST; \
+ add DEST, TMP, DEST; \
+
+/* Clobbers TMP, loads current address space PGD phys address into DEST. */
+#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \
+ TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
+ ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
+
+/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
+#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \
+ TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
+ add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
+
+/* Clobbers TMP, loads DEST with current thread info pointer. */
+#define TRAP_LOAD_THREAD_REG(DEST, TMP) \
+ TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
+ ldx [DEST + TRAP_PER_CPU_THREAD], DEST;
+
+/* Given the current thread info pointer in THR, load the per-cpu
+ * area base of the current processor into DEST. REG1, REG2, and REG3 are
+ * clobbered.
+ *
+ * You absolutely cannot use DEST as a temporary in this code. The
+ * reason is that traps can happen during execution, and return from
+ * trap will load the fully resolved DEST per-cpu base. This can corrupt
+ * the calculations done by the macro mid-stream.
+ */
+#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3) \
+ lduh [THR + TI_CPU], REG1; \
+ sethi %hi(trap_block), REG2; \
+ sllx REG1, TRAP_BLOCK_SZ_SHIFT, REG1; \
+ or REG2, %lo(trap_block), REG2; \
+ add REG2, REG1, REG2; \
+ ldx [REG2 + TRAP_PER_CPU_PER_CPU_BASE], DEST;
+
+#else
+
+#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
+ sethi %hi(trap_block), DEST; \
+ or DEST, %lo(trap_block), DEST; \
+
+/* Uniprocessor versions, we know the cpuid is zero. */
+#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \
+ TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
+ ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
+
+/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
+#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \
+ TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
+ add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
+
+#define TRAP_LOAD_THREAD_REG(DEST, TMP) \
+ TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
+ ldx [DEST + TRAP_PER_CPU_THREAD], DEST;
+
+/* No per-cpu areas on uniprocessor, so no need to load DEST. */
+#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)
+
+#endif /* !(CONFIG_SMP) */
+
+#endif /* _SPARC_TRAP_BLOCK_H */
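
[Editor's note] The TRAP_PER_CPU_* offsets above have to track the C structure exactly, since the trap handlers index trap_block[] with cpuid << TRAP_BLOCK_SZ_SHIFT. A minimal userspace sketch (not kernel code; the 128-byte fault_info array is an assumed stand-in for struct hv_fault_status, and LP64 type sizes are assumed) that checks this correspondence:

    /* Userspace-only layout check mirroring struct trap_per_cpu. */
    #include <assert.h>
    #include <stddef.h>

    struct trap_per_cpu_sketch {
        void *thread;                         /* 0x00 TRAP_PER_CPU_THREAD */
        unsigned long pgd_paddr;              /* 0x08 */
        unsigned long cpu_mondo_pa;           /* 0x10 */
        unsigned long dev_mondo_pa;           /* 0x18 */
        unsigned long resum_mondo_pa;         /* 0x20 */
        unsigned long resum_kernel_buf_pa;    /* 0x28 */
        unsigned long nonresum_mondo_pa;      /* 0x30 */
        unsigned long nonresum_kernel_buf_pa; /* 0x38 */
        unsigned char fault_info[128];        /* 0x40 hv_fault_status stand-in */
        unsigned long cpu_mondo_block_pa;     /* 0xc0 */
        unsigned long cpu_list_pa;            /* 0xc8 */
        unsigned long tsb_huge;               /* 0xd0 */
        unsigned long tsb_huge_temp;          /* 0xd8 */
        unsigned long irq_worklist_pa;        /* 0xe0 */
        unsigned int cpu_mondo_qmask;         /* 0xe8 */
        unsigned int dev_mondo_qmask;         /* 0xec */
        unsigned int resum_qmask;             /* 0xf0 */
        unsigned int nonresum_qmask;          /* 0xf4 */
        unsigned long __per_cpu_base;         /* 0xf8 */
    } __attribute__((aligned(64)));

    int main(void)
    {
        /* Trap code scales cpuid by 1 << TRAP_BLOCK_SZ_SHIFT, so the padded
         * structure must stay exactly 256 bytes.
         */
        assert(sizeof(struct trap_per_cpu_sketch) == 256);
        assert(offsetof(struct trap_per_cpu_sketch, fault_info) == 0x40);
        assert(offsetof(struct trap_per_cpu_sketch, cpu_mondo_block_pa) == 0xc0);
        assert(offsetof(struct trap_per_cpu_sketch, __per_cpu_base) == 0xf8);
        return 0;
    }
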
diff --git a/arch/sparc/include/asm/types.h b/arch/sparc/include/asm/types.h
index 2237118825d..de671d73bae 100644
--- a/arch/sparc/include/asm/types.h
+++ b/arch/sparc/include/asm/types.h
@@ -21,8 +21,6 @@ typedef unsigned short umode_t;
#ifdef __KERNEL__
-#define BITS_PER_LONG 64
-
#ifndef __ASSEMBLY__
/* Dma addresses come in generic and 64-bit flavours. */
@@ -46,8 +44,6 @@ typedef unsigned short umode_t;
#ifdef __KERNEL__
-#define BITS_PER_LONG 32
-
#ifndef __ASSEMBLY__
typedef u32 dma_addr_t;
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
index 47d5619d43f..8303ac48103 100644
--- a/arch/sparc/include/asm/uaccess_32.h
+++ b/arch/sparc/include/asm/uaccess_32.h
@@ -17,6 +17,9 @@
#ifndef __ASSEMBLY__
+#define ARCH_HAS_SORT_EXTABLE
+#define ARCH_HAS_SEARCH_EXTABLE
+
/* Sparc is not segmented, however we need to be able to fool access_ok()
* when doing system calls from kernel mode legitimately.
*
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index c64e767a3e4..a38c0323891 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -12,7 +12,7 @@
#include <asm/asi.h>
#include <asm/system.h>
#include <asm/spitfire.h>
-#include <asm-generic/uaccess.h>
+#include <asm-generic/uaccess-unaligned.h>
#endif
#ifndef __ASSEMBLY__
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index b8eb71ef316..b2c406de7d4 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -394,8 +394,9 @@
#define __NR_accept4 323
#define __NR_preadv 324
#define __NR_pwritev 325
+#define __NR_rt_tgsigqueueinfo 326
-#define NR_SYSCALLS 326
+#define NR_SYSCALLS 327
#ifdef __32bit_syscall_numbers__
/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 54742e58831..475ce4696ac 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -37,6 +37,7 @@ obj-y += una_asm_$(BITS).o
obj-$(CONFIG_SPARC32) += muldiv.o
obj-y += prom_common.o
obj-y += prom_$(BITS).o
+obj-y += of_device_common.o
obj-y += of_device_$(BITS).o
obj-$(CONFIG_SPARC64) += prom_irqtrans.o
@@ -54,6 +55,7 @@ obj-$(CONFIG_SPARC64) += sstate.o
obj-$(CONFIG_SPARC64) += mdesc.o
obj-$(CONFIG_SPARC64) += pcr.o
obj-$(CONFIG_SPARC64) += nmi.o
+obj-$(CONFIG_SPARC64_SMP) += cpumap.o
# sparc32 do not use GENERIC_HARDIRQS but uses the generic devres implementation
obj-$(CONFIG_SPARC32) += devres.o
diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c
new file mode 100644
index 00000000000..7430ed080b2
--- /dev/null
+++ b/arch/sparc/kernel/cpumap.c
@@ -0,0 +1,431 @@
+/* cpumap.c: used for optimizing CPU assignment
+ *
+ * Copyright (C) 2009 Hong H. Pham <hong.pham@windriver.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/cpumask.h>
+#include <linux/spinlock.h>
+#include <asm/cpudata.h>
+#include "cpumap.h"
+
+
+enum {
+ CPUINFO_LVL_ROOT = 0,
+ CPUINFO_LVL_NODE,
+ CPUINFO_LVL_CORE,
+ CPUINFO_LVL_PROC,
+ CPUINFO_LVL_MAX,
+};
+
+enum {
+ ROVER_NO_OP = 0,
+ /* Increment rover every time level is visited */
+ ROVER_INC_ON_VISIT = 1 << 0,
+ /* Increment parent's rover every time rover wraps around */
+ ROVER_INC_PARENT_ON_LOOP = 1 << 1,
+};
+
+struct cpuinfo_node {
+ int id;
+ int level;
+ int num_cpus; /* Number of CPUs in this hierarchy */
+ int parent_index;
+ int child_start; /* Array index of the first child node */
+ int child_end; /* Array index of the last child node */
+ int rover; /* Child node iterator */
+};
+
+struct cpuinfo_level {
+ int start_index; /* Index of first node of a level in a cpuinfo tree */
+ int end_index; /* Index of last node of a level in a cpuinfo tree */
+ int num_nodes; /* Number of nodes in a level in a cpuinfo tree */
+};
+
+struct cpuinfo_tree {
+ int total_nodes;
+
+ /* Offsets into nodes[] for each level of the tree */
+ struct cpuinfo_level level[CPUINFO_LVL_MAX];
+ struct cpuinfo_node nodes[0];
+};
+
+
+static struct cpuinfo_tree *cpuinfo_tree;
+
+static u16 cpu_distribution_map[NR_CPUS];
+static DEFINE_SPINLOCK(cpu_map_lock);
+
+
+/* Niagara optimized cpuinfo tree traversal. */
+static const int niagara_iterate_method[] = {
+ [CPUINFO_LVL_ROOT] = ROVER_NO_OP,
+
+ /* Strands (or virtual CPUs) within a core may not run concurrently
+ * on the Niagara, as instruction pipeline(s) are shared. Distribute
+ * work to strands in different cores first for better concurrency.
+ * Go to next NUMA node when all cores are used.
+ */
+ [CPUINFO_LVL_NODE] = ROVER_INC_ON_VISIT|ROVER_INC_PARENT_ON_LOOP,
+
+ /* Strands are grouped together by proc_id in cpuinfo_sparc, i.e.
+ * a proc_id represents an instruction pipeline. Distribute work to
+ * strands in different proc_id groups if the core has multiple
+ * instruction pipelines (e.g. the Niagara 2/2+ has two).
+ */
+ [CPUINFO_LVL_CORE] = ROVER_INC_ON_VISIT,
+
+ /* Pick the next strand in the proc_id group. */
+ [CPUINFO_LVL_PROC] = ROVER_INC_ON_VISIT,
+};
+
+/* Generic cpuinfo tree traversal. Distribute work round robin across NUMA
+ * nodes.
+ */
+static const int generic_iterate_method[] = {
+ [CPUINFO_LVL_ROOT] = ROVER_INC_ON_VISIT,
+ [CPUINFO_LVL_NODE] = ROVER_NO_OP,
+ [CPUINFO_LVL_CORE] = ROVER_INC_PARENT_ON_LOOP,
+ [CPUINFO_LVL_PROC] = ROVER_INC_ON_VISIT|ROVER_INC_PARENT_ON_LOOP,
+};
+
+
+static int cpuinfo_id(int cpu, int level)
+{
+ int id;
+
+ switch (level) {
+ case CPUINFO_LVL_ROOT:
+ id = 0;
+ break;
+ case CPUINFO_LVL_NODE:
+ id = cpu_to_node(cpu);
+ break;
+ case CPUINFO_LVL_CORE:
+ id = cpu_data(cpu).core_id;
+ break;
+ case CPUINFO_LVL_PROC:
+ id = cpu_data(cpu).proc_id;
+ break;
+ default:
+ id = -EINVAL;
+ }
+ return id;
+}
+
+/*
+ * Enumerate the CPU information in __cpu_data to determine the start index,
+ * end index, and number of nodes for each level in the cpuinfo tree. The
+ * total number of cpuinfo nodes required to build the tree is returned.
+ */
+static int enumerate_cpuinfo_nodes(struct cpuinfo_level *tree_level)
+{
+ int prev_id[CPUINFO_LVL_MAX];
+ int i, n, num_nodes;
+
+ for (i = CPUINFO_LVL_ROOT; i < CPUINFO_LVL_MAX; i++) {
+ struct cpuinfo_level *lv = &tree_level[i];
+
+ prev_id[i] = -1;
+ lv->start_index = lv->end_index = lv->num_nodes = 0;
+ }
+
+ num_nodes = 1; /* Include the root node */
+
+ for (i = 0; i < num_possible_cpus(); i++) {
+ if (!cpu_online(i))
+ continue;
+
+ n = cpuinfo_id(i, CPUINFO_LVL_NODE);
+ if (n > prev_id[CPUINFO_LVL_NODE]) {
+ tree_level[CPUINFO_LVL_NODE].num_nodes++;
+ prev_id[CPUINFO_LVL_NODE] = n;
+ num_nodes++;
+ }
+ n = cpuinfo_id(i, CPUINFO_LVL_CORE);
+ if (n > prev_id[CPUINFO_LVL_CORE]) {
+ tree_level[CPUINFO_LVL_CORE].num_nodes++;
+ prev_id[CPUINFO_LVL_CORE] = n;
+ num_nodes++;
+ }
+ n = cpuinfo_id(i, CPUINFO_LVL_PROC);
+ if (n > prev_id[CPUINFO_LVL_PROC]) {
+ tree_level[CPUINFO_LVL_PROC].num_nodes++;
+ prev_id[CPUINFO_LVL_PROC] = n;
+ num_nodes++;
+ }
+ }
+
+ tree_level[CPUINFO_LVL_ROOT].num_nodes = 1;
+
+ n = tree_level[CPUINFO_LVL_NODE].num_nodes;
+ tree_level[CPUINFO_LVL_NODE].start_index = 1;
+ tree_level[CPUINFO_LVL_NODE].end_index = n;
+
+ n++;
+ tree_level[CPUINFO_LVL_CORE].start_index = n;
+ n += tree_level[CPUINFO_LVL_CORE].num_nodes;
+ tree_level[CPUINFO_LVL_CORE].end_index = n - 1;
+
+ tree_level[CPUINFO_LVL_PROC].start_index = n;
+ n += tree_level[CPUINFO_LVL_PROC].num_nodes;
+ tree_level[CPUINFO_LVL_PROC].end_index = n - 1;
+
+ return num_nodes;
+}
+
+/* Build a tree representation of the CPU hierarchy using the per CPU
+ * information in __cpu_data. Entries in __cpu_data[0..NR_CPUS] are
+ * assumed to be sorted in ascending order based on node, core_id, and
+ * proc_id (in order of significance).
+ */
+static struct cpuinfo_tree *build_cpuinfo_tree(void)
+{
+ struct cpuinfo_tree *new_tree;
+ struct cpuinfo_node *node;
+ struct cpuinfo_level tmp_level[CPUINFO_LVL_MAX];
+ int num_cpus[CPUINFO_LVL_MAX];
+ int level_rover[CPUINFO_LVL_MAX];
+ int prev_id[CPUINFO_LVL_MAX];
+ int n, id, cpu, prev_cpu, last_cpu, level;
+
+ n = enumerate_cpuinfo_nodes(tmp_level);
+
+ new_tree = kzalloc(sizeof(struct cpuinfo_tree) +
+ (sizeof(struct cpuinfo_node) * n), GFP_ATOMIC);
+ if (!new_tree)
+ return NULL;
+
+ new_tree->total_nodes = n;
+ memcpy(&new_tree->level, tmp_level, sizeof(tmp_level));
+
+ prev_cpu = cpu = first_cpu(cpu_online_map);
+
+ /* Initialize all levels in the tree with the first CPU */
+ for (level = CPUINFO_LVL_PROC; level >= CPUINFO_LVL_ROOT; level--) {
+ n = new_tree->level[level].start_index;
+
+ level_rover[level] = n;
+ node = &new_tree->nodes[n];
+
+ id = cpuinfo_id(cpu, level);
+ if (unlikely(id < 0)) {
+ kfree(new_tree);
+ return NULL;
+ }
+ node->id = id;
+ node->level = level;
+ node->num_cpus = 1;
+
+ node->parent_index = (level > CPUINFO_LVL_ROOT)
+ ? new_tree->level[level - 1].start_index : -1;
+
+ node->child_start = node->child_end = node->rover =
+ (level == CPUINFO_LVL_PROC)
+ ? cpu : new_tree->level[level + 1].start_index;
+
+ prev_id[level] = node->id;
+ num_cpus[level] = 1;
+ }
+
+ for (last_cpu = (num_possible_cpus() - 1); last_cpu >= 0; last_cpu--) {
+ if (cpu_online(last_cpu))
+ break;
+ }
+
+ while (++cpu <= last_cpu) {
+ if (!cpu_online(cpu))
+ continue;
+
+ for (level = CPUINFO_LVL_PROC; level >= CPUINFO_LVL_ROOT;
+ level--) {
+ id = cpuinfo_id(cpu, level);
+ if (unlikely(id < 0)) {
+ kfree(new_tree);
+ return NULL;
+ }
+
+ if ((id != prev_id[level]) || (cpu == last_cpu)) {
+ prev_id[level] = id;
+ node = &new_tree->nodes[level_rover[level]];
+ node->num_cpus = num_cpus[level];
+ num_cpus[level] = 1;
+
+ if (cpu == last_cpu)
+ node->num_cpus++;
+
+ /* Connect tree node to parent */
+ if (level == CPUINFO_LVL_ROOT)
+ node->parent_index = -1;
+ else
+ node->parent_index =
+ level_rover[level - 1];
+
+ if (level == CPUINFO_LVL_PROC) {
+ node->child_end =
+ (cpu == last_cpu) ? cpu : prev_cpu;
+ } else {
+ node->child_end =
+ level_rover[level + 1] - 1;
+ }
+
+ /* Initialize the next node in the same level */
+ n = ++level_rover[level];
+ if (n <= new_tree->level[level].end_index) {
+ node = &new_tree->nodes[n];
+ node->id = id;
+ node->level = level;
+
+ /* Connect node to child */
+ node->child_start = node->child_end =
+ node->rover =
+ (level == CPUINFO_LVL_PROC)
+ ? cpu : level_rover[level + 1];
+ }
+ } else
+ num_cpus[level]++;
+ }
+ prev_cpu = cpu;
+ }
+
+ return new_tree;
+}
+
+static void increment_rover(struct cpuinfo_tree *t, int node_index,
+ int root_index, const int *rover_inc_table)
+{
+ struct cpuinfo_node *node = &t->nodes[node_index];
+ int top_level, level;
+
+ top_level = t->nodes[root_index].level;
+ for (level = node->level; level >= top_level; level--) {
+ node->rover++;
+ if (node->rover <= node->child_end)
+ return;
+
+ node->rover = node->child_start;
+ /* If parent's rover does not need to be adjusted, stop here. */
+ if ((level == top_level) ||
+ !(rover_inc_table[level] & ROVER_INC_PARENT_ON_LOOP))
+ return;
+
+ node = &t->nodes[node->parent_index];
+ }
+}
+
+static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
+{
+ const int *rover_inc_table;
+ int level, new_index, index = root_index;
+
+ switch (sun4v_chip_type) {
+ case SUN4V_CHIP_NIAGARA1:
+ case SUN4V_CHIP_NIAGARA2:
+ rover_inc_table = niagara_iterate_method;
+ break;
+ default:
+ rover_inc_table = generic_iterate_method;
+ }
+
+ for (level = t->nodes[root_index].level; level < CPUINFO_LVL_MAX;
+ level++) {
+ new_index = t->nodes[index].rover;
+ if (rover_inc_table[level] & ROVER_INC_ON_VISIT)
+ increment_rover(t, index, root_index, rover_inc_table);
+
+ index = new_index;
+ }
+ return index;
+}
+
+static void _cpu_map_rebuild(void)
+{
+ int i;
+
+ if (cpuinfo_tree) {
+ kfree(cpuinfo_tree);
+ cpuinfo_tree = NULL;
+ }
+
+ cpuinfo_tree = build_cpuinfo_tree();
+ if (!cpuinfo_tree)
+ return;
+
+ /* Build CPU distribution map that spans all online CPUs. No need
+ * to check if the CPU is online, as that is done when the cpuinfo
+ * tree is being built.
+ */
+ for (i = 0; i < cpuinfo_tree->nodes[0].num_cpus; i++)
+ cpu_distribution_map[i] = iterate_cpu(cpuinfo_tree, 0);
+}
+
+/* Fallback if the cpuinfo tree could not be built. CPU mapping is linear
+ * round robin.
+ */
+static int simple_map_to_cpu(unsigned int index)
+{
+ int i, end, cpu_rover;
+
+ cpu_rover = 0;
+ end = index % num_online_cpus();
+ for (i = 0; i < num_possible_cpus(); i++) {
+ if (cpu_online(cpu_rover)) {
+ if (cpu_rover >= end)
+ return cpu_rover;
+
+ cpu_rover++;
+ }
+ }
+
+ /* Impossible, since num_online_cpus() <= num_possible_cpus() */
+ return first_cpu(cpu_online_map);
+}
+
+static int _map_to_cpu(unsigned int index)
+{
+ struct cpuinfo_node *root_node;
+
+ if (unlikely(!cpuinfo_tree)) {
+ _cpu_map_rebuild();
+ if (!cpuinfo_tree)
+ return simple_map_to_cpu(index);
+ }
+
+ root_node = &cpuinfo_tree->nodes[0];
+#ifdef CONFIG_HOTPLUG_CPU
+ if (unlikely(root_node->num_cpus != num_online_cpus())) {
+ _cpu_map_rebuild();
+ if (!cpuinfo_tree)
+ return simple_map_to_cpu(index);
+ }
+#endif
+ return cpu_distribution_map[index % root_node->num_cpus];
+}
+
+int map_to_cpu(unsigned int index)
+{
+ int mapped_cpu;
+ unsigned long flag;
+
+ spin_lock_irqsave(&cpu_map_lock, flag);
+ mapped_cpu = _map_to_cpu(index);
+
+#ifdef CONFIG_HOTPLUG_CPU
+ while (unlikely(!cpu_online(mapped_cpu)))
+ mapped_cpu = _map_to_cpu(index);
+#endif
+ spin_unlock_irqrestore(&cpu_map_lock, flag);
+ return mapped_cpu;
+}
+EXPORT_SYMBOL(map_to_cpu);
+
+void cpu_map_rebuild(void)
+{
+ unsigned long flag;
+
+ spin_lock_irqsave(&cpu_map_lock, flag);
+ _cpu_map_rebuild();
+ spin_unlock_irqrestore(&cpu_map_lock, flag);
+}
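
[Editor's note] The rover machinery above is easiest to see on a toy topology. A loose, userspace-only model of the Niagara policy of spreading consecutive picks across cores before reusing strands in the same core; the 2-core, 2-strand layout and every name here are invented for the illustration:

    #include <stdio.h>

    #define NCORES   2
    #define NSTRANDS 2                    /* strands per core */

    static int core_rover;                /* root-level rover over cores */
    static int strand_rover[NCORES];      /* per-core rover over strands */

    static int pick_cpu(void)
    {
        int core = core_rover;
        int strand = strand_rover[core];

        /* Bump the core rover on every visit so consecutive work items
         * land on different cores; each core remembers which of its
         * strands to hand out next.
         */
        strand_rover[core] = (strand_rover[core] + 1) % NSTRANDS;
        core_rover = (core_rover + 1) % NCORES;
        return core * NSTRANDS + strand;
    }

    int main(void)
    {
        int i;

        /* Prints cpu 0, 2, 1, 3, 0, ... : cores first, then strands. */
        for (i = 0; i < 8; i++)
            printf("work %d -> cpu %d\n", i, pick_cpu());
        return 0;
    }
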
diff --git a/arch/sparc/kernel/cpumap.h b/arch/sparc/kernel/cpumap.h
new file mode 100644
index 00000000000..e639880ab86
--- /dev/null
+++ b/arch/sparc/kernel/cpumap.h
@@ -0,0 +1,16 @@
+#ifndef _CPUMAP_H
+#define _CPUMAP_H
+
+#ifdef CONFIG_SMP
+extern void cpu_map_rebuild(void);
+extern int map_to_cpu(unsigned int index);
+#define cpu_map_init() cpu_map_rebuild()
+#else
+#define cpu_map_init() do {} while (0)
+static inline int map_to_cpu(unsigned int index)
+{
+ return raw_smp_processor_id();
+}
+#endif
+
+#endif
diff --git a/arch/sparc/kernel/dma.c b/arch/sparc/kernel/dma.c
index ebc8403b035..524c32f97c5 100644
--- a/arch/sparc/kernel/dma.c
+++ b/arch/sparc/kernel/dma.c
@@ -35,8 +35,8 @@ int dma_set_mask(struct device *dev, u64 dma_mask)
}
EXPORT_SYMBOL(dma_set_mask);
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+static void *dma32_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
{
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type)
@@ -44,10 +44,9 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
#endif
return sbus_alloc_consistent(dev, size, dma_handle);
}
-EXPORT_SYMBOL(dma_alloc_coherent);
-void dma_free_coherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
+static void dma32_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
{
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type) {
@@ -58,38 +57,10 @@ void dma_free_coherent(struct device *dev, size_t size,
#endif
sbus_free_consistent(dev, size, cpu_addr, dma_handle);
}
-EXPORT_SYMBOL(dma_free_coherent);
-dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
- size_t size, enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
- if (dev->bus == &pci_bus_type)
- return pci_map_single(to_pci_dev(dev), cpu_addr,
- size, (int)direction);
-#endif
- return sbus_map_single(dev, cpu_addr, size, (int)direction);
-}
-EXPORT_SYMBOL(dma_map_single);
-
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
- size_t size,
- enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
- if (dev->bus == &pci_bus_type) {
- pci_unmap_single(to_pci_dev(dev), dma_addr,
- size, (int)direction);
- return;
- }
-#endif
- sbus_unmap_single(dev, dma_addr, size, (int)direction);
-}
-EXPORT_SYMBOL(dma_unmap_single);
-
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
+static dma_addr_t dma32_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type)
@@ -99,10 +70,9 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page,
return sbus_map_single(dev, page_address(page) + offset,
size, (int)direction);
}
-EXPORT_SYMBOL(dma_map_page);
-void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
- size_t size, enum dma_data_direction direction)
+static void dma32_unmap_page(struct device *dev, dma_addr_t dma_address,
+ size_t size, enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type) {
@@ -113,10 +83,9 @@ void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
#endif
sbus_unmap_single(dev, dma_address, size, (int)direction);
}
-EXPORT_SYMBOL(dma_unmap_page);
-int dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction)
+static int dma32_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type)
@@ -124,10 +93,9 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg,
#endif
return sbus_map_sg(dev, sg, nents, direction);
}
-EXPORT_SYMBOL(dma_map_sg);
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction)
+static void dma32_unmap_sg(struct device *dev, struct scatterlist *sg,
+			   int nents, enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type) {
@@ -137,10 +105,10 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
#endif
sbus_unmap_sg(dev, sg, nents, (int)direction);
}
-EXPORT_SYMBOL(dma_unmap_sg);
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction)
+static void dma32_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ size_t size,
+ enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type) {
@@ -151,10 +119,10 @@ void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
#endif
sbus_dma_sync_single_for_cpu(dev, dma_handle, size, (int) direction);
}
-EXPORT_SYMBOL(dma_sync_single_for_cpu);
-void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction)
+static void dma32_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type) {
@@ -165,28 +133,9 @@ void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
#endif
sbus_dma_sync_single_for_device(dev, dma_handle, size, (int) direction);
}
-EXPORT_SYMBOL(dma_sync_single_for_device);
-
-void dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size,
- enum dma_data_direction direction)
-{
- dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
-}
-EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
-
-void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
-}
-EXPORT_SYMBOL(dma_sync_single_range_for_device);
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction direction)
+static void dma32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type) {
@@ -197,11 +146,10 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
#endif
BUG();
}
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
-void dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
+static void dma32_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type) {
@@ -212,16 +160,19 @@ void dma_sync_sg_for_device(struct device *dev,
#endif
BUG();
}
-EXPORT_SYMBOL(dma_sync_sg_for_device);
-int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return (dma_addr == DMA_ERROR_CODE);
-}
-EXPORT_SYMBOL(dma_mapping_error);
-
-int dma_get_cache_alignment(void)
-{
- return 32;
-}
-EXPORT_SYMBOL(dma_get_cache_alignment);
+static const struct dma_ops dma32_dma_ops = {
+ .alloc_coherent = dma32_alloc_coherent,
+ .free_coherent = dma32_free_coherent,
+ .map_page = dma32_map_page,
+ .unmap_page = dma32_unmap_page,
+ .map_sg = dma32_map_sg,
+ .unmap_sg = dma32_unmap_sg,
+ .sync_single_for_cpu = dma32_sync_single_for_cpu,
+ .sync_single_for_device = dma32_sync_single_for_device,
+ .sync_sg_for_cpu = dma32_sync_sg_for_cpu,
+ .sync_sg_for_device = dma32_sync_sg_for_device,
+};
+
+const struct dma_ops *dma_ops = &dma32_dma_ops;
+EXPORT_SYMBOL(dma_ops);
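
[Editor's note] The conversion above replaces a dozen exported dma_* entry points with a single published ops pointer. A minimal userspace sketch of that dispatch pattern; all names and the trivial backends are invented, and one pointer stands in for "const struct dma_ops *dma_ops":

    #include <stdio.h>
    #include <stddef.h>

    struct toy_dma_ops {
        void *(*alloc_coherent)(size_t size);
        void (*free_coherent)(void *cpu_addr, size_t size);
    };

    static void *toy_alloc(size_t size)
    {
        printf("alloc %zu bytes\n", size);
        return NULL;                      /* placeholder backend */
    }

    static void toy_free(void *cpu_addr, size_t size)
    {
        printf("free %zu bytes at %p\n", size, cpu_addr);
    }

    static const struct toy_dma_ops toy32_ops = {
        .alloc_coherent = toy_alloc,
        .free_coherent  = toy_free,
    };

    /* One published pointer selects the backend table. */
    static const struct toy_dma_ops *dma_ops = &toy32_ops;

    int main(void)
    {
        void *buf = dma_ops->alloc_coherent(4096);

        dma_ops->free_coherent(buf, 4096);
        return 0;
    }
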
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index 90350f838f0..4a700f4b79c 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -544,7 +544,8 @@ static int __cpuinit dr_cpu_configure(struct ds_info *dp,
resp_len, ncpus, mask,
DR_CPU_STAT_CONFIGURED);
- mdesc_fill_in_cpu_data(*mask);
+ mdesc_populate_present_mask(mask);
+ mdesc_fill_in_cpu_data(mask);
for_each_cpu_mask(cpu, *mask) {
int err;
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c
index d0218e73f98..d3b1a307656 100644
--- a/arch/sparc/kernel/ftrace.c
+++ b/arch/sparc/kernel/ftrace.c
@@ -7,14 +7,10 @@
#include <asm/ftrace.h>
+#ifdef CONFIG_DYNAMIC_FTRACE
static const u32 ftrace_nop = 0x01000000;
-unsigned char *ftrace_nop_replace(void)
-{
- return (char *)&ftrace_nop;
-}
-
-unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+static u32 ftrace_call_replace(unsigned long ip, unsigned long addr)
{
static u32 call;
s32 off;
@@ -22,15 +18,11 @@ unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
off = ((s32)addr - (s32)ip);
call = 0x40000000 | ((u32)off >> 2);
- return (unsigned char *) &call;
+ return call;
}
-int
-ftrace_modify_code(unsigned long ip, unsigned char *old_code,
- unsigned char *new_code)
+static int ftrace_modify_code(unsigned long ip, u32 old, u32 new)
{
- u32 old = *(u32 *)old_code;
- u32 new = *(u32 *)new_code;
u32 replaced;
int faulted;
@@ -59,18 +51,43 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
return faulted;
}
+int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned long ip = rec->ip;
+ u32 old, new;
+
+ old = ftrace_call_replace(ip, addr);
+ new = ftrace_nop;
+ return ftrace_modify_code(ip, old, new);
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned long ip = rec->ip;
+ u32 old, new;
+
+ old = ftrace_nop;
+ new = ftrace_call_replace(ip, addr);
+ return ftrace_modify_code(ip, old, new);
+}
+
int ftrace_update_ftrace_func(ftrace_func_t func)
{
unsigned long ip = (unsigned long)(&ftrace_call);
- unsigned char old[MCOUNT_INSN_SIZE], *new;
+ u32 old, new;
- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
+ old = *(u32 *) &ftrace_call;
new = ftrace_call_replace(ip, (unsigned long)func);
return ftrace_modify_code(ip, old, new);
}
int __init ftrace_dyn_arch_init(void *data)
{
- ftrace_mcount_set(data);
+ unsigned long *p = data;
+
+ *p = 0;
+
return 0;
}
+#endif
+
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 91bf4c7f79b..f8f21050448 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -641,28 +641,6 @@ tlb_fixup_done:
/* Not reached... */
1:
- /* If we boot on a non-zero cpu, all of the per-cpu
- * variable references we make before setting up the
- * per-cpu areas will use a bogus offset. Put a
- * compensating factor into __per_cpu_base to handle
- * this cleanly.
- *
- * What the per-cpu code calculates is:
- *
- * __per_cpu_base + (cpu << __per_cpu_shift)
- *
- * These two variables are zero initially, so to
- * make it all cancel out to zero we need to put
- * "0 - (cpu << 0)" into __per_cpu_base so that the
- * above formula evaluates to zero.
- *
- * We cannot even perform a printk() until this stuff
- * is setup as that calls cpu_clock() which uses
- * per-cpu variables.
- */
- sub %g0, %o0, %o1
- sethi %hi(__per_cpu_base), %o2
- stx %o1, [%o2 + %lo(__per_cpu_base)]
#else
mov 0, %o0
#endif
diff --git a/arch/sparc/kernel/init_task.c b/arch/sparc/kernel/init_task.c
index f28cb8278e9..28125c5b3d3 100644
--- a/arch/sparc/kernel/init_task.c
+++ b/arch/sparc/kernel/init_task.c
@@ -10,10 +10,7 @@
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
struct task_struct init_task = INIT_TASK(init_task);
-
-EXPORT_SYMBOL(init_mm);
EXPORT_SYMBOL(init_task);
/* .text section in head.S is aligned at 8k boundary and this gets linked
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index d8900e1d5aa..0aeaefe696b 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -351,8 +351,9 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
free_pages((unsigned long)cpu, order);
}
-static dma_addr_t dma_4u_map_single(struct device *dev, void *ptr, size_t sz,
- enum dma_data_direction direction)
+static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t sz,
+ enum dma_data_direction direction)
{
struct iommu *iommu;
struct strbuf *strbuf;
@@ -368,7 +369,7 @@ static dma_addr_t dma_4u_map_single(struct device *dev, void *ptr, size_t sz,
if (unlikely(direction == DMA_NONE))
goto bad_no_ctx;
- oaddr = (unsigned long)ptr;
+ oaddr = (unsigned long)(page_address(page) + offset);
npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
@@ -472,8 +473,8 @@ do_flush_sync:
vaddr, ctx, npages);
}
-static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
- size_t sz, enum dma_data_direction direction)
+static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
+ size_t sz, enum dma_data_direction direction)
{
struct iommu *iommu;
struct strbuf *strbuf;
@@ -824,8 +825,8 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
static const struct dma_ops sun4u_dma_ops = {
.alloc_coherent = dma_4u_alloc_coherent,
.free_coherent = dma_4u_free_coherent,
- .map_single = dma_4u_map_single,
- .unmap_single = dma_4u_unmap_single,
+ .map_page = dma_4u_map_page,
+ .unmap_page = dma_4u_unmap_page,
.map_sg = dma_4u_map_sg,
.unmap_sg = dma_4u_unmap_sg,
.sync_single_for_cpu = dma_4u_sync_single_for_cpu,
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 5deabe921a4..f0ee7905540 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -20,7 +20,6 @@
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#include <linux/bootmem.h>
#include <linux/irq.h>
#include <asm/ptrace.h>
@@ -45,6 +44,7 @@
#include <asm/cacheflush.h>
#include "entry.h"
+#include "cpumap.h"
#define NUM_IVECS (IMAP_INR + 1)
@@ -256,35 +256,13 @@ static int irq_choose_cpu(unsigned int virt_irq)
int cpuid;
cpumask_copy(&mask, irq_desc[virt_irq].affinity);
- if (cpus_equal(mask, CPU_MASK_ALL)) {
- static int irq_rover;
- static DEFINE_SPINLOCK(irq_rover_lock);
- unsigned long flags;
-
- /* Round-robin distribution... */
- do_round_robin:
- spin_lock_irqsave(&irq_rover_lock, flags);
-
- while (!cpu_online(irq_rover)) {
- if (++irq_rover >= nr_cpu_ids)
- irq_rover = 0;
- }
- cpuid = irq_rover;
- do {
- if (++irq_rover >= nr_cpu_ids)
- irq_rover = 0;
- } while (!cpu_online(irq_rover));
-
- spin_unlock_irqrestore(&irq_rover_lock, flags);
+ if (cpus_equal(mask, cpu_online_map)) {
+ cpuid = map_to_cpu(virt_irq);
} else {
cpumask_t tmp;
cpus_and(tmp, cpu_online_map, mask);
-
- if (cpus_empty(tmp))
- goto do_round_robin;
-
- cpuid = first_cpu(tmp);
+ cpuid = cpus_empty(tmp) ? map_to_cpu(virt_irq) : first_cpu(tmp);
}
return cpuid;
@@ -318,10 +296,12 @@ static void sun4u_irq_enable(unsigned int virt_irq)
}
}
-static void sun4u_set_affinity(unsigned int virt_irq,
+static int sun4u_set_affinity(unsigned int virt_irq,
const struct cpumask *mask)
{
sun4u_irq_enable(virt_irq);
+
+ return 0;
}
/* Don't do anything. The desc->status check for IRQ_DISABLED in
@@ -377,7 +357,7 @@ static void sun4v_irq_enable(unsigned int virt_irq)
ino, err);
}
-static void sun4v_set_affinity(unsigned int virt_irq,
+static int sun4v_set_affinity(unsigned int virt_irq,
const struct cpumask *mask)
{
unsigned int ino = virt_irq_table[virt_irq].dev_ino;
@@ -388,6 +368,8 @@ static void sun4v_set_affinity(unsigned int virt_irq,
if (err != HV_EOK)
printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
"err(%d)\n", ino, cpuid, err);
+
+ return 0;
}
static void sun4v_irq_disable(unsigned int virt_irq)
@@ -445,7 +427,7 @@ static void sun4v_virq_enable(unsigned int virt_irq)
dev_handle, dev_ino, err);
}
-static void sun4v_virt_set_affinity(unsigned int virt_irq,
+static int sun4v_virt_set_affinity(unsigned int virt_irq,
const struct cpumask *mask)
{
unsigned long cpuid, dev_handle, dev_ino;
@@ -461,6 +443,8 @@ static void sun4v_virt_set_affinity(unsigned int virt_irq,
printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
"err(%d)\n",
dev_handle, dev_ino, cpuid, err);
+
+ return 0;
}
static void sun4v_virq_disable(unsigned int virt_irq)
@@ -929,25 +913,19 @@ void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu)
tb->nonresum_qmask);
}
-static void __init alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask)
-{
- unsigned long size = PAGE_ALIGN(qmask + 1);
- void *p = __alloc_bootmem(size, size, 0);
- if (!p) {
- prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
- prom_halt();
- }
-
- *pa_ptr = __pa(p);
-}
-
-static void __init alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask)
+/* Each queue region must be a power of 2 multiple of 64 bytes in
+ * size. The base real address must be aligned to the size of the
+ * region. Thus, an 8KB queue must be 8KB aligned, for example.
+ */
+static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
{
unsigned long size = PAGE_ALIGN(qmask + 1);
- void *p = __alloc_bootmem(size, size, 0);
+ unsigned long order = get_order(size);
+ unsigned long p;
+ p = __get_free_pages(GFP_KERNEL, order);
if (!p) {
- prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
+ prom_printf("SUN4V: Error, cannot allocate queue.\n");
prom_halt();
}
@@ -957,11 +935,11 @@ static void __init alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask)
static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
- void *page;
+ unsigned long page;
BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
- page = alloc_bootmem_pages(PAGE_SIZE);
+ page = get_zeroed_page(GFP_KERNEL);
if (!page) {
prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
prom_halt();
@@ -980,13 +958,13 @@ static void __init sun4v_init_mondo_queues(void)
for_each_possible_cpu(cpu) {
struct trap_per_cpu *tb = &trap_block[cpu];
- alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
- alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
- alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask);
- alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask);
- alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
- alloc_one_kbuf(&tb->nonresum_kernel_buf_pa,
- tb->nonresum_qmask);
+ alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
+ alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
+ alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask);
+ alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask);
+ alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
+ alloc_one_queue(&tb->nonresum_kernel_buf_pa,
+ tb->nonresum_qmask);
}
}
@@ -1014,7 +992,7 @@ void __init init_IRQ(void)
kill_prom_timer();
size = sizeof(struct ino_bucket) * NUM_IVECS;
- ivector_table = alloc_bootmem(size);
+ ivector_table = kzalloc(size, GFP_KERNEL);
if (!ivector_table) {
prom_printf("Fatal error, cannot allocate ivector_table\n");
prom_halt();
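
[Editor's note] alloc_one_queue() relies on a mondo queue mask of (nentries * 64) - 1 describing a queue of nentries * 64 bytes, so a page-order allocation of that size is naturally aligned to its own size, as the hypervisor requires. A userspace sketch of the sizing math; the 8K page size and 128-entry queue are example assumptions:

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SIZE 8192UL
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    static unsigned long order_for(unsigned long size)
    {
        unsigned long order = 0, pages = PAGE_ALIGN(size) / PAGE_SIZE;

        /* Smallest power-of-two page count covering the queue. */
        while ((1UL << order) < pages)
            order++;
        return order;
    }

    int main(void)
    {
        unsigned long qmask = (128UL * 64UL) - 1UL;   /* 128-entry queue */
        unsigned long size = PAGE_ALIGN(qmask + 1);

        assert((qmask + 1) % 64 == 0);                /* multiple of 64 bytes */
        assert(((qmask + 1) & qmask) == 0);           /* power of two */
        printf("queue bytes %lu -> allocation order %lu\n", size, order_for(size));
        return 0;
    }
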
diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
index cef8defcd7a..3ea6e8cde8c 100644
--- a/arch/sparc/kernel/ktlb.S
+++ b/arch/sparc/kernel/ktlb.S
@@ -151,12 +151,46 @@ kvmap_dtlb_4v:
* Must preserve %g1 and %g6 (TAG).
*/
kvmap_dtlb_tsb4m_miss:
- sethi %hi(kpte_linear_bitmap), %g2
- or %g2, %lo(kpte_linear_bitmap), %g2
+ /* Clear the PAGE_OFFSET top virtual bits, shift
+ * down to get PFN, and make sure PFN is in range.
+ */
+ sllx %g4, 21, %g5
- /* Clear the PAGE_OFFSET top virtual bits, then shift
- * down to get a 256MB physical address index.
+ /* Check to see if we know about valid memory at the 4MB
+ * chunk this physical address will reside within.
*/
+ srlx %g5, 21 + 41, %g2
+ brnz,pn %g2, kvmap_dtlb_longpath
+ nop
+
+	/* This unconditional branch and delay-slot nop get patched
+	 * by the sethi sequence once the bitmap is properly set up.
+	 */
+ .globl valid_addr_bitmap_insn
+valid_addr_bitmap_insn:
+ ba,pt %xcc, 2f
+ nop
+ .subsection 2
+ .globl valid_addr_bitmap_patch
+valid_addr_bitmap_patch:
+ sethi %hi(sparc64_valid_addr_bitmap), %g7
+ or %g7, %lo(sparc64_valid_addr_bitmap), %g7
+ .previous
+
+ srlx %g5, 21 + 22, %g2
+ srlx %g2, 6, %g5
+ and %g2, 63, %g2
+ sllx %g5, 3, %g5
+ ldx [%g7 + %g5], %g5
+ mov 1, %g7
+ sllx %g7, %g2, %g7
+ andcc %g5, %g7, %g0
+ be,pn %xcc, kvmap_dtlb_longpath
+
+2: sethi %hi(kpte_linear_bitmap), %g2
+ or %g2, %lo(kpte_linear_bitmap), %g2
+
+ /* Get the 256MB physical address index. */
sllx %g4, 21, %g5
mov 1, %g7
srlx %g5, 21 + 28, %g5
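
[Editor's note] The new bitmap check tests one bit per 4MB chunk of physical memory. A C rendering of the same indexing performed by the patched ktlb.S path; the toy bitmap size and test address are made up:

    #include <stdint.h>
    #include <stdio.h>

    #define CHUNK_SHIFT 22                        /* 4MB chunks */

    static int pa_is_valid(const uint64_t *bitmap, uint64_t pa)
    {
        uint64_t chunk = pa >> CHUNK_SHIFT;
        uint64_t word = chunk >> 6;               /* 64 chunks per word */
        uint64_t bit = chunk & 63;

        return (bitmap[word] >> bit) & 1;
    }

    int main(void)
    {
        static uint64_t bitmap[16];               /* covers 4GB in this toy */
        uint64_t pa = 0x12400000;                 /* arbitrary test address */
        uint64_t chunk = pa >> CHUNK_SHIFT;

        bitmap[chunk >> 6] |= 1ULL << (chunk & 63);
        printf("pa %#llx valid: %d\n",
               (unsigned long long)pa, pa_is_valid(bitmap, pa));
        return 0;
    }
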
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index f0e6ed23a46..938da19dc06 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -574,7 +574,7 @@ static void __init report_platform_properties(void)
mdesc_release(hp);
}
-static void __devinit fill_in_one_cache(cpuinfo_sparc *c,
+static void __cpuinit fill_in_one_cache(cpuinfo_sparc *c,
struct mdesc_handle *hp,
u64 mp)
{
@@ -619,8 +619,7 @@ static void __devinit fill_in_one_cache(cpuinfo_sparc *c,
}
}
-static void __devinit mark_core_ids(struct mdesc_handle *hp, u64 mp,
- int core_id)
+static void __cpuinit mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id)
{
u64 a;
@@ -653,7 +652,7 @@ static void __devinit mark_core_ids(struct mdesc_handle *hp, u64 mp,
}
}
-static void __devinit set_core_ids(struct mdesc_handle *hp)
+static void __cpuinit set_core_ids(struct mdesc_handle *hp)
{
int idx;
u64 mp;
@@ -678,8 +677,7 @@ static void __devinit set_core_ids(struct mdesc_handle *hp)
}
}
-static void __devinit mark_proc_ids(struct mdesc_handle *hp, u64 mp,
- int proc_id)
+static void __cpuinit mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id)
{
u64 a;
@@ -698,8 +696,7 @@ static void __devinit mark_proc_ids(struct mdesc_handle *hp, u64 mp,
}
}
-static void __devinit __set_proc_ids(struct mdesc_handle *hp,
- const char *exec_unit_name)
+static void __cpuinit __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name)
{
int idx;
u64 mp;
@@ -720,13 +717,13 @@ static void __devinit __set_proc_ids(struct mdesc_handle *hp,
}
}
-static void __devinit set_proc_ids(struct mdesc_handle *hp)
+static void __cpuinit set_proc_ids(struct mdesc_handle *hp)
{
__set_proc_ids(hp, "exec_unit");
__set_proc_ids(hp, "exec-unit");
}
-static void __devinit get_one_mondo_bits(const u64 *p, unsigned int *mask,
+static void __cpuinit get_one_mondo_bits(const u64 *p, unsigned int *mask,
unsigned char def)
{
u64 val;
@@ -745,7 +742,7 @@ use_default:
*mask = ((1U << def) * 64U) - 1U;
}
-static void __devinit get_mondo_data(struct mdesc_handle *hp, u64 mp,
+static void __cpuinit get_mondo_data(struct mdesc_handle *hp, u64 mp,
struct trap_per_cpu *tb)
{
const u64 *val;
@@ -763,23 +760,15 @@ static void __devinit get_mondo_data(struct mdesc_handle *hp, u64 mp,
get_one_mondo_bits(val, &tb->nonresum_qmask, 2);
}
-void __cpuinit mdesc_fill_in_cpu_data(cpumask_t mask)
+static void * __cpuinit mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handle *, u64, int, void *), void *arg, cpumask_t *mask)
{
struct mdesc_handle *hp = mdesc_grab();
+ void *ret = NULL;
u64 mp;
- ncpus_probed = 0;
mdesc_for_each_node_by_name(hp, mp, "cpu") {
const u64 *id = mdesc_get_property(hp, mp, "id", NULL);
- const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL);
- struct trap_per_cpu *tb;
- cpuinfo_sparc *c;
- int cpuid;
- u64 a;
-
- ncpus_probed++;
-
- cpuid = *id;
+ int cpuid = *id;
#ifdef CONFIG_SMP
if (cpuid >= NR_CPUS) {
@@ -788,62 +777,104 @@ void __cpuinit mdesc_fill_in_cpu_data(cpumask_t mask)
cpuid, NR_CPUS);
continue;
}
- if (!cpu_isset(cpuid, mask))
+ if (!cpu_isset(cpuid, *mask))
continue;
-#else
- /* On uniprocessor we only want the values for the
- * real physical cpu the kernel booted onto, however
- * cpu_data() only has one entry at index 0.
- */
- if (cpuid != real_hard_smp_processor_id())
- continue;
- cpuid = 0;
#endif
- c = &cpu_data(cpuid);
- c->clock_tick = *cfreq;
+ ret = func(hp, mp, cpuid, arg);
+ if (ret)
+ goto out;
+ }
+out:
+ mdesc_release(hp);
+ return ret;
+}
- tb = &trap_block[cpuid];
- get_mondo_data(hp, mp, tb);
+static void * __cpuinit record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg)
+{
+ ncpus_probed++;
+#ifdef CONFIG_SMP
+ set_cpu_present(cpuid, true);
+#endif
+ return NULL;
+}
- mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
- u64 j, t = mdesc_arc_target(hp, a);
- const char *t_name;
+void __cpuinit mdesc_populate_present_mask(cpumask_t *mask)
+{
+ if (tlb_type != hypervisor)
+ return;
- t_name = mdesc_node_name(hp, t);
- if (!strcmp(t_name, "cache")) {
- fill_in_one_cache(c, hp, t);
- continue;
- }
+ ncpus_probed = 0;
+ mdesc_iterate_over_cpus(record_one_cpu, NULL, mask);
+}
- mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_FWD) {
- u64 n = mdesc_arc_target(hp, j);
- const char *n_name;
+static void * __cpuinit fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg)
+{
+ const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL);
+ struct trap_per_cpu *tb;
+ cpuinfo_sparc *c;
+ u64 a;
- n_name = mdesc_node_name(hp, n);
- if (!strcmp(n_name, "cache"))
- fill_in_one_cache(c, hp, n);
- }
+#ifndef CONFIG_SMP
+ /* On uniprocessor we only want the values for the
+ * real physical cpu the kernel booted onto, however
+ * cpu_data() only has one entry at index 0.
+ */
+ if (cpuid != real_hard_smp_processor_id())
+ return NULL;
+ cpuid = 0;
+#endif
+
+ c = &cpu_data(cpuid);
+ c->clock_tick = *cfreq;
+
+ tb = &trap_block[cpuid];
+ get_mondo_data(hp, mp, tb);
+
+ mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
+ u64 j, t = mdesc_arc_target(hp, a);
+ const char *t_name;
+
+ t_name = mdesc_node_name(hp, t);
+ if (!strcmp(t_name, "cache")) {
+ fill_in_one_cache(c, hp, t);
+ continue;
}
-#ifdef CONFIG_SMP
- cpu_set(cpuid, cpu_present_map);
-#endif
+ mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_FWD) {
+ u64 n = mdesc_arc_target(hp, j);
+ const char *n_name;
- c->core_id = 0;
- c->proc_id = -1;
+ n_name = mdesc_node_name(hp, n);
+ if (!strcmp(n_name, "cache"))
+ fill_in_one_cache(c, hp, n);
+ }
}
+ c->core_id = 0;
+ c->proc_id = -1;
+
+ return NULL;
+}
+
+void __cpuinit mdesc_fill_in_cpu_data(cpumask_t *mask)
+{
+ struct mdesc_handle *hp;
+
+ mdesc_iterate_over_cpus(fill_in_one_cpu, NULL, mask);
+
#ifdef CONFIG_SMP
sparc64_multi_core = 1;
#endif
+ hp = mdesc_grab();
+
set_core_ids(hp);
set_proc_ids(hp);
- smp_fill_in_sib_core_maps();
-
mdesc_release(hp);
+
+ smp_fill_in_sib_core_maps();
}
static ssize_t mdesc_read(struct file *file, char __user *buf,
@@ -887,7 +918,6 @@ void __init sun4v_mdesc_init(void)
{
struct mdesc_handle *hp;
unsigned long len, real_len, status;
- cpumask_t mask;
(void) sun4v_mach_desc(0UL, 0UL, &len);
@@ -911,7 +941,4 @@ void __init sun4v_mdesc_init(void)
cur_mdesc = hp;
report_platform_properties();
-
- cpus_setall(mask);
- mdesc_fill_in_cpu_data(mask);
}
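
[Editor's note] The mdesc rework funnels both CPU passes through one walker that applies a callback per cpu node, so counting present CPUs and filling in cpu_data become separate small callbacks instead of two copies of the walk. A minimal userspace sketch of that iterate-with-callback shape; the node list and names are invented:

    #include <stdio.h>
    #include <stddef.h>

    struct cpu_node {
        int cpuid;
    };

    static void *iterate_over_cpus(void *(*func)(struct cpu_node *, void *),
                                   void *arg, struct cpu_node *nodes, int n)
    {
        void *ret;
        int i;

        for (i = 0; i < n; i++) {
            ret = func(&nodes[i], arg);
            if (ret)                      /* a callback may abort the walk */
                return ret;
        }
        return NULL;
    }

    static void *record_one_cpu(struct cpu_node *node, void *arg)
    {
        int *ncpus = arg;

        printf("cpu node %d present\n", node->cpuid);
        (*ncpus)++;
        return NULL;
    }

    int main(void)
    {
        struct cpu_node nodes[] = { { 0 }, { 1 }, { 2 }, { 3 } };
        int ncpus = 0;

        iterate_over_cpus(record_one_cpu, &ncpus, nodes, 4);
        printf("probed %d cpus\n", ncpus);
        return 0;
    }
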
diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
index 90273765e81..0ee642f6323 100644
--- a/arch/sparc/kernel/module.c
+++ b/arch/sparc/kernel/module.c
@@ -75,8 +75,6 @@ void *module_alloc(unsigned long size)
void module_free(struct module *mod, void *module_region)
{
vfree(module_region);
- /* FIXME: If module_region == mod->init_region, trim exception
- table entries. */
}
/* Make generic code ignore STT_REGISTER dummy undefined symbols. */
diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c
index c8f14c1dc52..90396702ea2 100644
--- a/arch/sparc/kernel/of_device_32.c
+++ b/arch/sparc/kernel/of_device_32.c
@@ -6,159 +6,11 @@
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/errno.h>
+#include <linux/irq.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
-static int node_match(struct device *dev, void *data)
-{
- struct of_device *op = to_of_device(dev);
- struct device_node *dp = data;
-
- return (op->node == dp);
-}
-
-struct of_device *of_find_device_by_node(struct device_node *dp)
-{
- struct device *dev = bus_find_device(&of_platform_bus_type, NULL,
- dp, node_match);
-
- if (dev)
- return to_of_device(dev);
-
- return NULL;
-}
-EXPORT_SYMBOL(of_find_device_by_node);
-
-unsigned int irq_of_parse_and_map(struct device_node *node, int index)
-{
- struct of_device *op = of_find_device_by_node(node);
-
- if (!op || index >= op->num_irqs)
- return 0;
-
- return op->irqs[index];
-}
-EXPORT_SYMBOL(irq_of_parse_and_map);
-
-/* Take the archdata values for IOMMU, STC, and HOSTDATA found in
- * BUS and propagate to all child of_device objects.
- */
-void of_propagate_archdata(struct of_device *bus)
-{
- struct dev_archdata *bus_sd = &bus->dev.archdata;
- struct device_node *bus_dp = bus->node;
- struct device_node *dp;
-
- for (dp = bus_dp->child; dp; dp = dp->sibling) {
- struct of_device *op = of_find_device_by_node(dp);
-
- op->dev.archdata.iommu = bus_sd->iommu;
- op->dev.archdata.stc = bus_sd->stc;
- op->dev.archdata.host_controller = bus_sd->host_controller;
- op->dev.archdata.numa_node = bus_sd->numa_node;
-
- if (dp->child)
- of_propagate_archdata(op);
- }
-}
-
-struct bus_type of_platform_bus_type;
-EXPORT_SYMBOL(of_platform_bus_type);
-
-static inline u64 of_read_addr(const u32 *cell, int size)
-{
- u64 r = 0;
- while (size--)
- r = (r << 32) | *(cell++);
- return r;
-}
-
-static void __init get_cells(struct device_node *dp,
- int *addrc, int *sizec)
-{
- if (addrc)
- *addrc = of_n_addr_cells(dp);
- if (sizec)
- *sizec = of_n_size_cells(dp);
-}
-
-/* Max address size we deal with */
-#define OF_MAX_ADDR_CELLS 4
-
-struct of_bus {
- const char *name;
- const char *addr_prop_name;
- int (*match)(struct device_node *parent);
- void (*count_cells)(struct device_node *child,
- int *addrc, int *sizec);
- int (*map)(u32 *addr, const u32 *range,
- int na, int ns, int pna);
- unsigned long (*get_flags)(const u32 *addr, unsigned long);
-};
-
-/*
- * Default translator (generic bus)
- */
-
-static void of_bus_default_count_cells(struct device_node *dev,
- int *addrc, int *sizec)
-{
- get_cells(dev, addrc, sizec);
-}
-
-/* Make sure the least significant 64-bits are in-range. Even
- * for 3 or 4 cell values it is a good enough approximation.
- */
-static int of_out_of_range(const u32 *addr, const u32 *base,
- const u32 *size, int na, int ns)
-{
- u64 a = of_read_addr(addr, na);
- u64 b = of_read_addr(base, na);
-
- if (a < b)
- return 1;
-
- b += of_read_addr(size, ns);
- if (a >= b)
- return 1;
-
- return 0;
-}
-
-static int of_bus_default_map(u32 *addr, const u32 *range,
- int na, int ns, int pna)
-{
- u32 result[OF_MAX_ADDR_CELLS];
- int i;
-
- if (ns > 2) {
- printk("of_device: Cannot handle size cells (%d) > 2.", ns);
- return -EINVAL;
- }
-
- if (of_out_of_range(addr, range, range + na + pna, na, ns))
- return -EINVAL;
-
- /* Start with the parent range base. */
- memcpy(result, range + na, pna * 4);
-
- /* Add in the child address offset. */
- for (i = 0; i < na; i++)
- result[pna - 1 - i] +=
- (addr[na - 1 - i] -
- range[na - 1 - i]);
-
- memcpy(addr, result, pna * 4);
-
- return 0;
-}
-
-static unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags)
-{
- if (flags)
- return flags;
- return IORESOURCE_MEM;
-}
+#include "of_device_common.h"
/*
* PCI bus specific translator
@@ -240,47 +92,6 @@ static unsigned long of_bus_pci_get_flags(const u32 *addr, unsigned long flags)
return flags;
}
-/*
- * SBUS bus specific translator
- */
-
-static int of_bus_sbus_match(struct device_node *np)
-{
- struct device_node *dp = np;
-
- while (dp) {
- if (!strcmp(dp->name, "sbus") ||
- !strcmp(dp->name, "sbi"))
- return 1;
-
- /* Have a look at use_1to1_mapping(). We're trying
- * to match SBUS if that's the top-level bus and we
- * don't have some intervening real bus that provides
- * ranges based translations.
- */
- if (of_find_property(dp, "ranges", NULL) != NULL)
- break;
-
- dp = dp->parent;
- }
-
- return 0;
-}
-
-static void of_bus_sbus_count_cells(struct device_node *child,
- int *addrc, int *sizec)
-{
- if (addrc)
- *addrc = 2;
- if (sizec)
- *sizec = 1;
-}
-
-static int of_bus_sbus_map(u32 *addr, const u32 *range, int na, int ns, int pna)
-{
- return of_bus_default_map(addr, range, na, ns, pna);
-}
-
static unsigned long of_bus_sbus_get_flags(const u32 *addr, unsigned long flags)
{
return IORESOURCE_MEM;
@@ -307,7 +118,7 @@ static struct of_bus of_busses[] = {
.addr_prop_name = "reg",
.match = of_bus_sbus_match,
.count_cells = of_bus_sbus_count_cells,
- .map = of_bus_sbus_map,
+ .map = of_bus_default_map,
.get_flags = of_bus_sbus_get_flags,
},
/* Default */
diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c
index 5ac287ac03d..881947e59e9 100644
--- a/arch/sparc/kernel/of_device_64.c
+++ b/arch/sparc/kernel/of_device_64.c
@@ -10,6 +10,8 @@
#include <linux/of_device.h>
#include <linux/of_platform.h>
+#include "of_device_common.h"
+
void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name)
{
unsigned long ret = res->start + offset;
@@ -35,156 +37,6 @@ void of_iounmap(struct resource *res, void __iomem *base, unsigned long size)
}
EXPORT_SYMBOL(of_iounmap);
-static int node_match(struct device *dev, void *data)
-{
- struct of_device *op = to_of_device(dev);
- struct device_node *dp = data;
-
- return (op->node == dp);
-}
-
-struct of_device *of_find_device_by_node(struct device_node *dp)
-{
- struct device *dev = bus_find_device(&of_platform_bus_type, NULL,
- dp, node_match);
-
- if (dev)
- return to_of_device(dev);
-
- return NULL;
-}
-EXPORT_SYMBOL(of_find_device_by_node);
-
-unsigned int irq_of_parse_and_map(struct device_node *node, int index)
-{
- struct of_device *op = of_find_device_by_node(node);
-
- if (!op || index >= op->num_irqs)
- return 0;
-
- return op->irqs[index];
-}
-EXPORT_SYMBOL(irq_of_parse_and_map);
-
-/* Take the archdata values for IOMMU, STC, and HOSTDATA found in
- * BUS and propagate to all child of_device objects.
- */
-void of_propagate_archdata(struct of_device *bus)
-{
- struct dev_archdata *bus_sd = &bus->dev.archdata;
- struct device_node *bus_dp = bus->node;
- struct device_node *dp;
-
- for (dp = bus_dp->child; dp; dp = dp->sibling) {
- struct of_device *op = of_find_device_by_node(dp);
-
- op->dev.archdata.iommu = bus_sd->iommu;
- op->dev.archdata.stc = bus_sd->stc;
- op->dev.archdata.host_controller = bus_sd->host_controller;
- op->dev.archdata.numa_node = bus_sd->numa_node;
-
- if (dp->child)
- of_propagate_archdata(op);
- }
-}
-
-struct bus_type of_platform_bus_type;
-EXPORT_SYMBOL(of_platform_bus_type);
-
-static inline u64 of_read_addr(const u32 *cell, int size)
-{
- u64 r = 0;
- while (size--)
- r = (r << 32) | *(cell++);
- return r;
-}
-
-static void get_cells(struct device_node *dp, int *addrc, int *sizec)
-{
- if (addrc)
- *addrc = of_n_addr_cells(dp);
- if (sizec)
- *sizec = of_n_size_cells(dp);
-}
-
-/* Max address size we deal with */
-#define OF_MAX_ADDR_CELLS 4
-
-struct of_bus {
- const char *name;
- const char *addr_prop_name;
- int (*match)(struct device_node *parent);
- void (*count_cells)(struct device_node *child,
- int *addrc, int *sizec);
- int (*map)(u32 *addr, const u32 *range,
- int na, int ns, int pna);
- unsigned long (*get_flags)(const u32 *addr, unsigned long);
-};
-
-/*
- * Default translator (generic bus)
- */
-
-static void of_bus_default_count_cells(struct device_node *dev,
- int *addrc, int *sizec)
-{
- get_cells(dev, addrc, sizec);
-}
-
-/* Make sure the least significant 64-bits are in-range. Even
- * for 3 or 4 cell values it is a good enough approximation.
- */
-static int of_out_of_range(const u32 *addr, const u32 *base,
- const u32 *size, int na, int ns)
-{
- u64 a = of_read_addr(addr, na);
- u64 b = of_read_addr(base, na);
-
- if (a < b)
- return 1;
-
- b += of_read_addr(size, ns);
- if (a >= b)
- return 1;
-
- return 0;
-}
-
-static int of_bus_default_map(u32 *addr, const u32 *range,
- int na, int ns, int pna)
-{
- u32 result[OF_MAX_ADDR_CELLS];
- int i;
-
- if (ns > 2) {
- printk("of_device: Cannot handle size cells (%d) > 2.", ns);
- return -EINVAL;
- }
-
- if (of_out_of_range(addr, range, range + na + pna, na, ns))
- return -EINVAL;
-
- /* Start with the parent range base. */
- memcpy(result, range + na, pna * 4);
-
- /* Add in the child address offset. */
- for (i = 0; i < na; i++)
- result[pna - 1 - i] +=
- (addr[na - 1 - i] -
- range[na - 1 - i]);
-
- memcpy(addr, result, pna * 4);
-
- return 0;
-}
-
-static unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags)
-{
- if (flags)
- return flags;
- return IORESOURCE_MEM;
-}
-
/*
* PCI bus specific translator
*/
@@ -295,42 +147,6 @@ static unsigned long of_bus_pci_get_flags(const u32 *addr, unsigned long flags)
}
/*
- * SBUS bus specific translator
- */
-
-static int of_bus_sbus_match(struct device_node *np)
-{
- struct device_node *dp = np;
-
- while (dp) {
- if (!strcmp(dp->name, "sbus") ||
- !strcmp(dp->name, "sbi"))
- return 1;
-
- /* Have a look at use_1to1_mapping(). We're trying
- * to match SBUS if that's the top-level bus and we
- * don't have some intervening real bus that provides
- * ranges based translations.
- */
- if (of_find_property(dp, "ranges", NULL) != NULL)
- break;
-
- dp = dp->parent;
- }
-
- return 0;
-}
-
-static void of_bus_sbus_count_cells(struct device_node *child,
- int *addrc, int *sizec)
-{
- if (addrc)
- *addrc = 2;
- if (sizec)
- *sizec = 1;
-}
-
-/*
* FHC/Central bus specific translator.
*
* This is just needed to hard-code the address and size cell
diff --git a/arch/sparc/kernel/of_device_common.c b/arch/sparc/kernel/of_device_common.c
new file mode 100644
index 00000000000..cb8eb799bb6
--- /dev/null
+++ b/arch/sparc/kernel/of_device_common.c
@@ -0,0 +1,174 @@
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/irq.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+
+#include "of_device_common.h"
+
+static int node_match(struct device *dev, void *data)
+{
+ struct of_device *op = to_of_device(dev);
+ struct device_node *dp = data;
+
+ return (op->node == dp);
+}
+
+struct of_device *of_find_device_by_node(struct device_node *dp)
+{
+ struct device *dev = bus_find_device(&of_platform_bus_type, NULL,
+ dp, node_match);
+
+ if (dev)
+ return to_of_device(dev);
+
+ return NULL;
+}
+EXPORT_SYMBOL(of_find_device_by_node);
+
+unsigned int irq_of_parse_and_map(struct device_node *node, int index)
+{
+ struct of_device *op = of_find_device_by_node(node);
+
+ if (!op || index >= op->num_irqs)
+ return 0;
+
+ return op->irqs[index];
+}
+EXPORT_SYMBOL(irq_of_parse_and_map);
+
+/* Take the archdata values for IOMMU, STC, and HOSTDATA found in
+ * BUS and propagate to all child of_device objects.
+ */
+void of_propagate_archdata(struct of_device *bus)
+{
+ struct dev_archdata *bus_sd = &bus->dev.archdata;
+ struct device_node *bus_dp = bus->node;
+ struct device_node *dp;
+
+ for (dp = bus_dp->child; dp; dp = dp->sibling) {
+ struct of_device *op = of_find_device_by_node(dp);
+
+ op->dev.archdata.iommu = bus_sd->iommu;
+ op->dev.archdata.stc = bus_sd->stc;
+ op->dev.archdata.host_controller = bus_sd->host_controller;
+ op->dev.archdata.numa_node = bus_sd->numa_node;
+
+ if (dp->child)
+ of_propagate_archdata(op);
+ }
+}
+
+struct bus_type of_platform_bus_type;
+EXPORT_SYMBOL(of_platform_bus_type);
+
+static void get_cells(struct device_node *dp, int *addrc, int *sizec)
+{
+ if (addrc)
+ *addrc = of_n_addr_cells(dp);
+ if (sizec)
+ *sizec = of_n_size_cells(dp);
+}
+
+/*
+ * Default translator (generic bus)
+ */
+
+void of_bus_default_count_cells(struct device_node *dev, int *addrc, int *sizec)
+{
+ get_cells(dev, addrc, sizec);
+}
+
+/* Make sure the least significant 64-bits are in-range. Even
+ * for 3 or 4 cell values it is a good enough approximation.
+ */
+int of_out_of_range(const u32 *addr, const u32 *base,
+ const u32 *size, int na, int ns)
+{
+ u64 a = of_read_addr(addr, na);
+ u64 b = of_read_addr(base, na);
+
+ if (a < b)
+ return 1;
+
+ b += of_read_addr(size, ns);
+ if (a >= b)
+ return 1;
+
+ return 0;
+}
+
+int of_bus_default_map(u32 *addr, const u32 *range, int na, int ns, int pna)
+{
+ u32 result[OF_MAX_ADDR_CELLS];
+ int i;
+
+ if (ns > 2) {
+ printk("of_device: Cannot handle size cells (%d) > 2.", ns);
+ return -EINVAL;
+ }
+
+ if (of_out_of_range(addr, range, range + na + pna, na, ns))
+ return -EINVAL;
+
+ /* Start with the parent range base. */
+ memcpy(result, range + na, pna * 4);
+
+ /* Add in the child address offset. */
+ for (i = 0; i < na; i++)
+ result[pna - 1 - i] +=
+ (addr[na - 1 - i] -
+ range[na - 1 - i]);
+
+ memcpy(addr, result, pna * 4);
+
+ return 0;
+}
+
+unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags)
+{
+ if (flags)
+ return flags;
+ return IORESOURCE_MEM;
+}
+
+/*
+ * SBUS bus specific translator
+ */
+
+int of_bus_sbus_match(struct device_node *np)
+{
+ struct device_node *dp = np;
+
+ while (dp) {
+ if (!strcmp(dp->name, "sbus") ||
+ !strcmp(dp->name, "sbi"))
+ return 1;
+
+ /* Have a look at use_1to1_mapping(). We're trying
+ * to match SBUS if that's the top-level bus and we
+ * don't have some intervening real bus that provides
+ * ranges based translations.
+ */
+ if (of_find_property(dp, "ranges", NULL) != NULL)
+ break;
+
+ dp = dp->parent;
+ }
+
+ return 0;
+}
+
+void of_bus_sbus_count_cells(struct device_node *child, int *addrc, int *sizec)
+{
+ if (addrc)
+ *addrc = 2;
+ if (sizec)
+ *sizec = 1;
+}
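
The default translator above is the heart of "ranges"-based address translation: take the parent base cells from the range entry and add the child's offset from the child base. Below is a minimal userspace sketch of that arithmetic with made-up cell values and a hypothetical map_cells() helper; it mirrors of_bus_default_map() but is not kernel code.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define MAX_CELLS 4

/* Same arithmetic as of_bus_default_map(), minus the range checking. */
static void map_cells(uint32_t *addr, const uint32_t *range,
                      int na, int ns, int pna)
{
        uint32_t result[MAX_CELLS];
        int i;

        (void)ns;       /* size cells only matter for the range check, skipped here */

        /* Start with the parent base (the pna cells after the na child cells). */
        memcpy(result, range + na, pna * 4);

        /* Add in the child's offset from the child range base. */
        for (i = 0; i < na; i++)
                result[pna - 1 - i] += addr[na - 1 - i] - range[na - 1 - i];

        memcpy(addr, result, pna * 4);
}

int main(void)
{
        /* ranges = <child 0x0 0x0  parent 0x0 0xf0000000  size 0x10000> */
        uint32_t range[] = { 0x0, 0x0, 0x0, 0xf0000000, 0x10000 };
        uint32_t addr[MAX_CELLS] = { 0x0, 0x1000 };     /* child address */

        map_cells(addr, range, 2, 1, 2);
        printf("parent address: 0x%llx\n",
               ((unsigned long long)addr[0] << 32) | addr[1]);
        return 0;
}

Running it prints "parent address: 0xf0001000", i.e. the child offset 0x1000 applied to the parent base 0xf0000000.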
diff --git a/arch/sparc/kernel/of_device_common.h b/arch/sparc/kernel/of_device_common.h
new file mode 100644
index 00000000000..cdfd2399284
--- /dev/null
+++ b/arch/sparc/kernel/of_device_common.h
@@ -0,0 +1,36 @@
+#ifndef _OF_DEVICE_COMMON_H
+#define _OF_DEVICE_COMMON_H
+
+static inline u64 of_read_addr(const u32 *cell, int size)
+{
+ u64 r = 0;
+ while (size--)
+ r = (r << 32) | *(cell++);
+ return r;
+}
+
+void of_bus_default_count_cells(struct device_node *dev, int *addrc,
+ int *sizec);
+int of_out_of_range(const u32 *addr, const u32 *base,
+ const u32 *size, int na, int ns);
+int of_bus_default_map(u32 *addr, const u32 *range, int na, int ns, int pna);
+unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags);
+
+int of_bus_sbus_match(struct device_node *np);
+void of_bus_sbus_count_cells(struct device_node *child, int *addrc, int *sizec);
+
+/* Max address size we deal with */
+#define OF_MAX_ADDR_CELLS 4
+
+struct of_bus {
+ const char *name;
+ const char *addr_prop_name;
+ int (*match)(struct device_node *parent);
+ void (*count_cells)(struct device_node *child,
+ int *addrc, int *sizec);
+ int (*map)(u32 *addr, const u32 *range,
+ int na, int ns, int pna);
+ unsigned long (*get_flags)(const u32 *addr, unsigned long);
+};
+
+#endif /* _OF_DEVICE_COMMON_H */
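
For context, the helpers exported by this header are meant to be plugged into per-bus of_bus tables in of_device_32.c / of_device_64.c, which are not part of the hunks shown here. A sketch of what such an entry might look like; the field values are illustrative, not quoted from those files.

static struct of_bus sbus_bus_entry_sketch = {
        .name           = "sbus",
        .addr_prop_name = "reg",
        .match          = of_bus_sbus_match,
        .count_cells    = of_bus_sbus_count_cells,
        .map            = of_bus_default_map,          /* reuse the generic mapper */
        .get_flags      = of_bus_default_get_flags,
};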
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 4638fba799e..57859ad2354 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -711,19 +711,6 @@ void __devinit pcibios_fixup_bus(struct pci_bus *pbus)
pbus->resource[1] = &pbm->mem_space;
}
-struct resource *pcibios_select_root(struct pci_dev *pdev, struct resource *r)
-{
- struct pci_pbm_info *pbm = pdev->bus->sysdata;
- struct resource *root = NULL;
-
- if (r->flags & IORESOURCE_IO)
- root = &pbm->io_space;
- if (r->flags & IORESOURCE_MEM)
- root = &pbm->mem_space;
-
- return root;
-}
-
void pcibios_update_irq(struct pci_dev *pdev, int irq)
{
}
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 5db5ebed35d..2485eaa2310 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -230,8 +230,9 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
free_pages((unsigned long)cpu, order);
}
-static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
- enum dma_data_direction direction)
+static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t sz,
+ enum dma_data_direction direction)
{
struct iommu *iommu;
unsigned long flags, npages, oaddr;
@@ -245,7 +246,7 @@ static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
if (unlikely(direction == DMA_NONE))
goto bad;
- oaddr = (unsigned long)ptr;
+ oaddr = (unsigned long)(page_address(page) + offset);
npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
@@ -294,8 +295,8 @@ iommu_map_fail:
return DMA_ERROR_CODE;
}
-static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
- size_t sz, enum dma_data_direction direction)
+static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
+ size_t sz, enum dma_data_direction direction)
{
struct pci_pbm_info *pbm;
struct iommu *iommu;
@@ -537,8 +538,8 @@ static void dma_4v_sync_sg_for_cpu(struct device *dev,
static const struct dma_ops sun4v_dma_ops = {
.alloc_coherent = dma_4v_alloc_coherent,
.free_coherent = dma_4v_free_coherent,
- .map_single = dma_4v_map_single,
- .unmap_single = dma_4v_unmap_single,
+ .map_page = dma_4v_map_page,
+ .unmap_page = dma_4v_unmap_page,
.map_sg = dma_4v_map_sg,
.unmap_sg = dma_4v_unmap_sg,
.sync_single_for_cpu = dma_4v_sync_single_for_cpu,
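
The map_single/unmap_single pair becomes map_page/unmap_page here; the two are interchangeable for lowmem buffers because a single-buffer mapping is just a page mapping at the buffer's offset within its page. A sketch of that equivalence follows; sketch_map_single() is illustrative and not part of the patch.

static inline dma_addr_t sketch_map_single(struct device *dev, void *ptr,
                                           size_t sz,
                                           enum dma_data_direction dir)
{
        /* dma_4v_map_page() reconstructs the virtual address as
         * page_address(page) + offset, so this round-trips exactly. */
        return dma_4v_map_page(dev, virt_to_page(ptr),
                               offset_in_page(ptr), sz, dir);
}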
diff --git a/arch/sparc/kernel/prom.h b/arch/sparc/kernel/prom.h
index bb0f0fda6ca..453397fe5e1 100644
--- a/arch/sparc/kernel/prom.h
+++ b/arch/sparc/kernel/prom.h
@@ -22,7 +22,6 @@ static inline int is_root_node(const struct device_node *dp)
extern char *build_path_component(struct device_node *dp);
extern void of_console_init(void);
-extern void of_fill_in_cpu_data(void);
extern unsigned int prom_early_allocated;
diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c
index ca55c7012f7..fb06ac2bd38 100644
--- a/arch/sparc/kernel/prom_64.c
+++ b/arch/sparc/kernel/prom_64.c
@@ -374,75 +374,26 @@ static const char *get_mid_prop(void)
return (tlb_type == spitfire ? "upa-portid" : "portid");
}
-struct device_node *of_find_node_by_cpuid(int cpuid)
-{
- struct device_node *dp;
- const char *mid_prop = get_mid_prop();
-
- for_each_node_by_type(dp, "cpu") {
- int id = of_getintprop_default(dp, mid_prop, -1);
- const char *this_mid_prop = mid_prop;
-
- if (id < 0) {
- this_mid_prop = "cpuid";
- id = of_getintprop_default(dp, this_mid_prop, -1);
- }
-
- if (id < 0) {
- prom_printf("OF: Serious problem, cpu lacks "
- "%s property", this_mid_prop);
- prom_halt();
- }
- if (cpuid == id)
- return dp;
- }
- return NULL;
-}
-
-void __init of_fill_in_cpu_data(void)
+static void *of_iterate_over_cpus(void *(*func)(struct device_node *, int, int), int arg)
{
struct device_node *dp;
const char *mid_prop;
- if (tlb_type == hypervisor)
- return;
-
mid_prop = get_mid_prop();
- ncpus_probed = 0;
for_each_node_by_type(dp, "cpu") {
int cpuid = of_getintprop_default(dp, mid_prop, -1);
const char *this_mid_prop = mid_prop;
- struct device_node *portid_parent;
- int portid = -1;
+ void *ret;
- portid_parent = NULL;
if (cpuid < 0) {
this_mid_prop = "cpuid";
cpuid = of_getintprop_default(dp, this_mid_prop, -1);
- if (cpuid >= 0) {
- int limit = 2;
-
- portid_parent = dp;
- while (limit--) {
- portid_parent = portid_parent->parent;
- if (!portid_parent)
- break;
- portid = of_getintprop_default(portid_parent,
- "portid", -1);
- if (portid >= 0)
- break;
- }
- }
}
-
if (cpuid < 0) {
prom_printf("OF: Serious problem, cpu lacks "
"%s property", this_mid_prop);
prom_halt();
}
-
- ncpus_probed++;
-
#ifdef CONFIG_SMP
if (cpuid >= NR_CPUS) {
printk(KERN_WARNING "Ignoring CPU %d which is "
@@ -450,79 +401,142 @@ void __init of_fill_in_cpu_data(void)
cpuid, NR_CPUS);
continue;
}
-#else
- /* On uniprocessor we only want the values for the
- * real physical cpu the kernel booted onto, however
- * cpu_data() only has one entry at index 0.
- */
- if (cpuid != real_hard_smp_processor_id())
- continue;
- cpuid = 0;
#endif
+ ret = func(dp, cpuid, arg);
+ if (ret)
+ return ret;
+ }
+ return NULL;
+}
- cpu_data(cpuid).clock_tick =
- of_getintprop_default(dp, "clock-frequency", 0);
-
- if (portid_parent) {
- cpu_data(cpuid).dcache_size =
- of_getintprop_default(dp, "l1-dcache-size",
- 16 * 1024);
- cpu_data(cpuid).dcache_line_size =
- of_getintprop_default(dp, "l1-dcache-line-size",
- 32);
- cpu_data(cpuid).icache_size =
- of_getintprop_default(dp, "l1-icache-size",
- 8 * 1024);
- cpu_data(cpuid).icache_line_size =
- of_getintprop_default(dp, "l1-icache-line-size",
- 32);
- cpu_data(cpuid).ecache_size =
- of_getintprop_default(dp, "l2-cache-size", 0);
- cpu_data(cpuid).ecache_line_size =
- of_getintprop_default(dp, "l2-cache-line-size", 0);
- if (!cpu_data(cpuid).ecache_size ||
- !cpu_data(cpuid).ecache_line_size) {
- cpu_data(cpuid).ecache_size =
- of_getintprop_default(portid_parent,
- "l2-cache-size",
- (4 * 1024 * 1024));
- cpu_data(cpuid).ecache_line_size =
- of_getintprop_default(portid_parent,
- "l2-cache-line-size", 64);
- }
-
- cpu_data(cpuid).core_id = portid + 1;
- cpu_data(cpuid).proc_id = portid;
+static void *check_cpu_node(struct device_node *dp, int cpuid, int id)
+{
+ if (id == cpuid)
+ return dp;
+ return NULL;
+}
+
+struct device_node *of_find_node_by_cpuid(int cpuid)
+{
+ return of_iterate_over_cpus(check_cpu_node, cpuid);
+}
+
+static void *record_one_cpu(struct device_node *dp, int cpuid, int arg)
+{
+ ncpus_probed++;
#ifdef CONFIG_SMP
- sparc64_multi_core = 1;
+ set_cpu_present(cpuid, true);
+ set_cpu_possible(cpuid, true);
#endif
- } else {
- cpu_data(cpuid).dcache_size =
- of_getintprop_default(dp, "dcache-size", 16 * 1024);
- cpu_data(cpuid).dcache_line_size =
- of_getintprop_default(dp, "dcache-line-size", 32);
+ return NULL;
+}
- cpu_data(cpuid).icache_size =
- of_getintprop_default(dp, "icache-size", 16 * 1024);
- cpu_data(cpuid).icache_line_size =
- of_getintprop_default(dp, "icache-line-size", 32);
+void __init of_populate_present_mask(void)
+{
+ if (tlb_type == hypervisor)
+ return;
+
+ ncpus_probed = 0;
+ of_iterate_over_cpus(record_one_cpu, 0);
+}
+static void *fill_in_one_cpu(struct device_node *dp, int cpuid, int arg)
+{
+ struct device_node *portid_parent = NULL;
+ int portid = -1;
+
+ if (of_find_property(dp, "cpuid", NULL)) {
+ int limit = 2;
+
+ portid_parent = dp;
+ while (limit--) {
+ portid_parent = portid_parent->parent;
+ if (!portid_parent)
+ break;
+ portid = of_getintprop_default(portid_parent,
+ "portid", -1);
+ if (portid >= 0)
+ break;
+ }
+ }
+
+#ifndef CONFIG_SMP
+ /* On uniprocessor we only want the values for the
+ * real physical cpu the kernel booted onto, however
+ * cpu_data() only has one entry at index 0.
+ */
+ if (cpuid != real_hard_smp_processor_id())
+ return NULL;
+ cpuid = 0;
+#endif
+
+ cpu_data(cpuid).clock_tick =
+ of_getintprop_default(dp, "clock-frequency", 0);
+
+ if (portid_parent) {
+ cpu_data(cpuid).dcache_size =
+ of_getintprop_default(dp, "l1-dcache-size",
+ 16 * 1024);
+ cpu_data(cpuid).dcache_line_size =
+ of_getintprop_default(dp, "l1-dcache-line-size",
+ 32);
+ cpu_data(cpuid).icache_size =
+ of_getintprop_default(dp, "l1-icache-size",
+ 8 * 1024);
+ cpu_data(cpuid).icache_line_size =
+ of_getintprop_default(dp, "l1-icache-line-size",
+ 32);
+ cpu_data(cpuid).ecache_size =
+ of_getintprop_default(dp, "l2-cache-size", 0);
+ cpu_data(cpuid).ecache_line_size =
+ of_getintprop_default(dp, "l2-cache-line-size", 0);
+ if (!cpu_data(cpuid).ecache_size ||
+ !cpu_data(cpuid).ecache_line_size) {
cpu_data(cpuid).ecache_size =
- of_getintprop_default(dp, "ecache-size",
+ of_getintprop_default(portid_parent,
+ "l2-cache-size",
(4 * 1024 * 1024));
cpu_data(cpuid).ecache_line_size =
- of_getintprop_default(dp, "ecache-line-size", 64);
-
- cpu_data(cpuid).core_id = 0;
- cpu_data(cpuid).proc_id = -1;
+ of_getintprop_default(portid_parent,
+ "l2-cache-line-size", 64);
}
+ cpu_data(cpuid).core_id = portid + 1;
+ cpu_data(cpuid).proc_id = portid;
#ifdef CONFIG_SMP
- set_cpu_present(cpuid, true);
- set_cpu_possible(cpuid, true);
+ sparc64_multi_core = 1;
#endif
+ } else {
+ cpu_data(cpuid).dcache_size =
+ of_getintprop_default(dp, "dcache-size", 16 * 1024);
+ cpu_data(cpuid).dcache_line_size =
+ of_getintprop_default(dp, "dcache-line-size", 32);
+
+ cpu_data(cpuid).icache_size =
+ of_getintprop_default(dp, "icache-size", 16 * 1024);
+ cpu_data(cpuid).icache_line_size =
+ of_getintprop_default(dp, "icache-line-size", 32);
+
+ cpu_data(cpuid).ecache_size =
+ of_getintprop_default(dp, "ecache-size",
+ (4 * 1024 * 1024));
+ cpu_data(cpuid).ecache_line_size =
+ of_getintprop_default(dp, "ecache-line-size", 64);
+
+ cpu_data(cpuid).core_id = 0;
+ cpu_data(cpuid).proc_id = -1;
}
+ return NULL;
+}
+
+void __init of_fill_in_cpu_data(void)
+{
+ if (tlb_type == hypervisor)
+ return;
+
+ of_iterate_over_cpus(fill_in_one_cpu, 0);
+
smp_fill_in_sib_core_maps();
}
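
The refactor above folds three near-identical loops over firmware cpu nodes into one iterator plus callbacks: a callback stops the walk by returning a non-NULL pointer (as check_cpu_node() does) and keeps it going by returning NULL (as record_one_cpu() and fill_in_one_cpu() do). A hypothetical extra user, shown only to make that convention explicit:

/* Not part of the patch: find the first cpu node clocked at or above min_hz. */
static void *find_fast_cpu_node(struct device_node *dp, int cpuid, int min_hz)
{
        int hz = of_getintprop_default(dp, "clock-frequency", 0);

        if (hz >= min_hz)
                return dp;      /* non-NULL: stop and hand this node back */
        return NULL;            /* NULL: keep scanning the remaining nodes */
}

/* usage sketch: dp = of_iterate_over_cpus(find_fast_cpu_node, 1400000000); */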
diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
index ff7b591c894..0fb5789d43c 100644
--- a/arch/sparc/kernel/prom_common.c
+++ b/arch/sparc/kernel/prom_common.c
@@ -313,6 +313,4 @@ void __init prom_build_devicetree(void)
printk("PROM: Built device tree with %u bytes of memory.\n",
prom_early_allocated);
-
- of_fill_in_cpu_data();
}
diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c
index 8ce6285a06d..7e3dfd9bb97 100644
--- a/arch/sparc/kernel/ptrace_32.c
+++ b/arch/sparc/kernel/ptrace_32.c
@@ -16,7 +16,6 @@
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/regset.h>
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index a941c610e7c..4ae91dc2feb 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -17,7 +17,6 @@
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/security.h>
#include <linux/seccomp.h>
#include <linux/audit.h>
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index f7642e5a94d..3691907a43b 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -20,7 +20,8 @@
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
-#include <linux/lmb.h>
+#include <linux/bootmem.h>
+#include <linux/vmalloc.h>
#include <linux/cpu.h>
#include <asm/head.h>
@@ -47,6 +48,8 @@
#include <asm/ldc.h>
#include <asm/hypervisor.h>
+#include "cpumap.h"
+
int sparc64_multi_core __read_mostly;
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
@@ -278,7 +281,7 @@ static unsigned long kimage_addr_to_ra(void *p)
return kern_base + (val - KERNBASE);
}
-static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
+static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg, void **descrp)
{
extern unsigned long sparc64_ttable_tl0;
extern unsigned long kern_locked_tte_data;
@@ -298,12 +301,12 @@ static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread
"hvtramp_descr.\n");
return;
}
+ *descrp = hdesc;
hdesc->cpu = cpu;
hdesc->num_mappings = num_kernel_image_mappings;
tb = &trap_block[cpu];
- tb->hdesc = hdesc;
hdesc->fault_info_va = (unsigned long) &tb->fault_info;
hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);
@@ -341,12 +344,12 @@ static struct thread_info *cpu_new_thread = NULL;
static int __cpuinit smp_boot_one_cpu(unsigned int cpu)
{
- struct trap_per_cpu *tb = &trap_block[cpu];
unsigned long entry =
(unsigned long)(&sparc64_cpu_startup);
unsigned long cookie =
(unsigned long)(&cpu_new_thread);
struct task_struct *p;
+ void *descr = NULL;
int timeout, ret;
p = fork_idle(cpu);
@@ -359,7 +362,8 @@ static int __cpuinit smp_boot_one_cpu(unsigned int cpu)
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
if (ldom_domaining_enabled)
ldom_startcpu_cpuid(cpu,
- (unsigned long) cpu_new_thread);
+ (unsigned long) cpu_new_thread,
+ &descr);
else
#endif
prom_startcpu_cpuid(cpu, entry, cookie);
@@ -383,10 +387,7 @@ static int __cpuinit smp_boot_one_cpu(unsigned int cpu)
}
cpu_new_thread = NULL;
- if (tb->hdesc) {
- kfree(tb->hdesc);
- tb->hdesc = NULL;
- }
+ kfree(descr);
return ret;
}
@@ -1315,6 +1316,8 @@ int __cpu_disable(void)
cpu_clear(cpu, cpu_online_map);
ipi_call_unlock();
+ cpu_map_rebuild();
+
return 0;
}
@@ -1373,36 +1376,171 @@ void smp_send_stop(void)
{
}
-unsigned long __per_cpu_base __read_mostly;
-unsigned long __per_cpu_shift __read_mostly;
+/**
+ * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
+ * @cpu: cpu to allocate for
+ * @size: size allocation in bytes
+ * @align: alignment
+ *
+ * Allocate @size bytes aligned at @align for cpu @cpu. This wrapper
+ * does the right thing for NUMA regardless of the current
+ * configuration.
+ *
+ * RETURNS:
+ * Pointer to the allocated area on success, NULL on failure.
+ */
+static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
+ unsigned long align)
+{
+ const unsigned long goal = __pa(MAX_DMA_ADDRESS);
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+ int node = cpu_to_node(cpu);
+ void *ptr;
+
+ if (!node_online(node) || !NODE_DATA(node)) {
+ ptr = __alloc_bootmem(size, align, goal);
+ pr_info("cpu %d has no node %d or node-local memory\n",
+ cpu, node);
+ pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
+ cpu, size, __pa(ptr));
+ } else {
+ ptr = __alloc_bootmem_node(NODE_DATA(node),
+ size, align, goal);
+ pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
+ "%016lx\n", cpu, size, node, __pa(ptr));
+ }
+ return ptr;
+#else
+ return __alloc_bootmem(size, align, goal);
+#endif
+}
-EXPORT_SYMBOL(__per_cpu_base);
-EXPORT_SYMBOL(__per_cpu_shift);
+static size_t pcpur_size __initdata;
+static void **pcpur_ptrs __initdata;
-void __init real_setup_per_cpu_areas(void)
+static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
{
- unsigned long paddr, goal, size, i;
- char *ptr;
+ size_t off = (size_t)pageno << PAGE_SHIFT;
- /* Copy section for each CPU (we discard the original) */
- goal = PERCPU_ENOUGH_ROOM;
+ if (off >= pcpur_size)
+ return NULL;
- __per_cpu_shift = PAGE_SHIFT;
- for (size = PAGE_SIZE; size < goal; size <<= 1UL)
- __per_cpu_shift++;
+ return virt_to_page(pcpur_ptrs[cpu] + off);
+}
+
+#define PCPU_CHUNK_SIZE (4UL * 1024UL * 1024UL)
+
+static void __init pcpu_map_range(unsigned long start, unsigned long end,
+ struct page *page)
+{
+ unsigned long pfn = page_to_pfn(page);
+ unsigned long pte_base;
+
+ BUG_ON((pfn<<PAGE_SHIFT)&(PCPU_CHUNK_SIZE - 1UL));
+
+ pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
+ _PAGE_CP_4U | _PAGE_CV_4U |
+ _PAGE_P_4U | _PAGE_W_4U);
+ if (tlb_type == hypervisor)
+ pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
+ _PAGE_CP_4V | _PAGE_CV_4V |
+ _PAGE_P_4V | _PAGE_W_4V);
+
+ while (start < end) {
+ pgd_t *pgd = pgd_offset_k(start);
+ unsigned long this_end;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pud = pud_offset(pgd, start);
+ if (pud_none(*pud)) {
+ pmd_t *new;
+
+ new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ pud_populate(&init_mm, pud, new);
+ }
+
+ pmd = pmd_offset(pud, start);
+ if (!pmd_present(*pmd)) {
+ pte_t *new;
+
+ new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ pmd_populate_kernel(&init_mm, pmd, new);
+ }
- paddr = lmb_alloc(size * NR_CPUS, PAGE_SIZE);
- if (!paddr) {
- prom_printf("Cannot allocate per-cpu memory.\n");
- prom_halt();
+ pte = pte_offset_kernel(pmd, start);
+ this_end = (start + PMD_SIZE) & PMD_MASK;
+ if (this_end > end)
+ this_end = end;
+
+ while (start < this_end) {
+ unsigned long paddr = pfn << PAGE_SHIFT;
+
+ pte_val(*pte) = (paddr | pte_base);
+
+ start += PAGE_SIZE;
+ pte++;
+ pfn++;
+ }
+ }
+}
+
+void __init setup_per_cpu_areas(void)
+{
+ size_t dyn_size, static_size = __per_cpu_end - __per_cpu_start;
+ static struct vm_struct vm;
+ unsigned long delta, cpu;
+ size_t pcpu_unit_size;
+ size_t ptrs_size;
+
+ pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
+ PERCPU_DYNAMIC_RESERVE);
+ dyn_size = pcpur_size - static_size - PERCPU_MODULE_RESERVE;
+
+
+ ptrs_size = PFN_ALIGN(nr_cpu_ids * sizeof(pcpur_ptrs[0]));
+ pcpur_ptrs = alloc_bootmem(ptrs_size);
+
+ for_each_possible_cpu(cpu) {
+ pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PCPU_CHUNK_SIZE,
+ PCPU_CHUNK_SIZE);
+
+ free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
+ PCPU_CHUNK_SIZE - pcpur_size);
+
+ memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
}
- ptr = __va(paddr);
- __per_cpu_base = ptr - __per_cpu_start;
+ /* allocate address and map */
+ vm.flags = VM_ALLOC;
+ vm.size = nr_cpu_ids * PCPU_CHUNK_SIZE;
+ vm_area_register_early(&vm, PCPU_CHUNK_SIZE);
+
+ for_each_possible_cpu(cpu) {
+ unsigned long start = (unsigned long) vm.addr;
+ unsigned long end;
+
+ start += cpu * PCPU_CHUNK_SIZE;
+ end = start + PCPU_CHUNK_SIZE;
+ pcpu_map_range(start, end, virt_to_page(pcpur_ptrs[cpu]));
+ }
- for (i = 0; i < NR_CPUS; i++, ptr += size)
- memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+ pcpu_unit_size = pcpu_setup_first_chunk(pcpur_get_page, static_size,
+ PERCPU_MODULE_RESERVE, dyn_size,
+ PCPU_CHUNK_SIZE, vm.addr, NULL);
+
+ free_bootmem(__pa(pcpur_ptrs), ptrs_size);
+
+ delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+ for_each_possible_cpu(cpu) {
+ __per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
+ }
/* Setup %g5 for the boot cpu. */
__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
+
+ of_fill_in_cpu_data();
+ if (tlb_type == hypervisor)
+ mdesc_fill_in_cpu_data(cpu_all_mask);
}
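
After the first chunk is set up, every cpu's static per-cpu data sits a fixed distance from the linked image: delta relocates an address from the .data.percpu section into the remapped chunk, and each further cpu lives one pcpu_unit_size (here one 4MB chunk) later. A small sketch of how an address resolves under that scheme; sketch_per_cpu_ptr() is illustrative, not a kernel API, while __per_cpu_offset() is the same macro the patch itself populates.

/* addr_on_cpu(var, N) = &var + delta + N * pcpu_unit_size
 *                     = &var - __per_cpu_start + pcpu_base_addr
 *                       + N * pcpu_unit_size
 */
static void *sketch_per_cpu_ptr(void *percpu_var, unsigned int cpu)
{
        return (char *)percpu_var + __per_cpu_offset(cpu);
}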
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index 54fb02468f0..68791cad7b7 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -162,9 +162,6 @@ extern void cpu_panic(void);
*/
extern struct linux_prom_registers smp_penguin_ctable;
-extern unsigned long trapbase_cpu1[];
-extern unsigned long trapbase_cpu2[];
-extern unsigned long trapbase_cpu3[];
void __init smp4d_boot_cpus(void)
{
@@ -235,25 +232,6 @@ void __init smp4d_smp_done(void)
*prev = first;
local_flush_cache_all();
- /* Free unneeded trap tables */
- ClearPageReserved(virt_to_page(trapbase_cpu1));
- init_page_count(virt_to_page(trapbase_cpu1));
- free_page((unsigned long)trapbase_cpu1);
- totalram_pages++;
- num_physpages++;
-
- ClearPageReserved(virt_to_page(trapbase_cpu2));
- init_page_count(virt_to_page(trapbase_cpu2));
- free_page((unsigned long)trapbase_cpu2);
- totalram_pages++;
- num_physpages++;
-
- ClearPageReserved(virt_to_page(trapbase_cpu3));
- init_page_count(virt_to_page(trapbase_cpu3));
- free_page((unsigned long)trapbase_cpu3);
- totalram_pages++;
- num_physpages++;
-
/* Ok, they are spinning and ready to go. */
smp_processors_ready = 1;
sun4d_distribute_irqs();
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index 960b113d000..762d6eedd94 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -121,9 +121,6 @@ void __cpuinit smp4m_callin(void)
*/
extern struct linux_prom_registers smp_penguin_ctable;
-extern unsigned long trapbase_cpu1[];
-extern unsigned long trapbase_cpu2[];
-extern unsigned long trapbase_cpu3[];
void __init smp4m_boot_cpus(void)
{
@@ -193,29 +190,6 @@ void __init smp4m_smp_done(void)
*prev = first;
local_flush_cache_all();
- /* Free unneeded trap tables */
- if (!cpu_isset(1, cpu_present_map)) {
- ClearPageReserved(virt_to_page(trapbase_cpu1));
- init_page_count(virt_to_page(trapbase_cpu1));
- free_page((unsigned long)trapbase_cpu1);
- totalram_pages++;
- num_physpages++;
- }
- if (!cpu_isset(2, cpu_present_map)) {
- ClearPageReserved(virt_to_page(trapbase_cpu2));
- init_page_count(virt_to_page(trapbase_cpu2));
- free_page((unsigned long)trapbase_cpu2);
- totalram_pages++;
- num_physpages++;
- }
- if (!cpu_isset(3, cpu_present_map)) {
- ClearPageReserved(virt_to_page(trapbase_cpu3));
- init_page_count(virt_to_page(trapbase_cpu3));
- free_page((unsigned long)trapbase_cpu3);
- totalram_pages++;
- num_physpages++;
- }
-
/* Ok, they are spinning and ready to go. */
}
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
index f061c4dda9e..aed94869ad6 100644
--- a/arch/sparc/kernel/sys32.S
+++ b/arch/sparc/kernel/sys32.S
@@ -134,10 +134,12 @@ SIGN1(sys32_getpeername, sys_getpeername, %o0)
SIGN1(sys32_getsockname, sys_getsockname, %o0)
SIGN2(sys32_ioprio_get, sys_ioprio_get, %o0, %o1)
SIGN3(sys32_ioprio_set, sys_ioprio_set, %o0, %o1, %o2)
-SIGN2(sys32_splice, sys_splice, %o0, %o1)
+SIGN2(sys32_splice, sys_splice, %o0, %o2)
SIGN2(sys32_sync_file_range, compat_sync_file_range, %o0, %o5)
SIGN2(sys32_tee, sys_tee, %o0, %o1)
SIGN1(sys32_vmsplice, compat_sys_vmsplice, %o0)
+SIGN1(sys32_truncate, sys_truncate, %o1)
+SIGN1(sys32_ftruncate, sys_ftruncate, %o1)
.globl sys32_mmap2
sys32_mmap2:
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 00ec3b15f38..69090165729 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -81,4 +81,6 @@ sys_call_table:
/*305*/ .long sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait
/*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
/*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
-/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv, sys_pwritev
+/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
+/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo
+
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 82b5bf85b9d..2ee7250ba7a 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -43,8 +43,8 @@ sys_call_table32:
/*110*/ .word sys_setresgid, sys_getresgid, sys_setregid, sys_nis_syscall, sys_nis_syscall
.word sys32_getgroups, compat_sys_gettimeofday, sys32_getrusage, sys_nis_syscall, sys_getcwd
/*120*/ .word compat_sys_readv, compat_sys_writev, compat_sys_settimeofday, sys_fchown16, sys_fchmod
- .word sys_nis_syscall, sys_setreuid16, sys_setregid16, sys_rename, sys_truncate
-/*130*/ .word sys_ftruncate, sys_flock, compat_sys_lstat64, sys_nis_syscall, sys_nis_syscall
+ .word sys_nis_syscall, sys_setreuid16, sys_setregid16, sys_rename, sys32_truncate
+/*130*/ .word sys32_ftruncate, sys_flock, compat_sys_lstat64, sys_nis_syscall, sys_nis_syscall
.word sys_nis_syscall, sys32_mkdir, sys_rmdir, compat_sys_utimes, compat_sys_stat64
/*140*/ .word sys32_sendfile64, sys_nis_syscall, sys32_futex, sys_gettid, compat_sys_getrlimit
.word compat_sys_setrlimit, sys_pivot_root, sys32_prctl, sys_pciconfig_read, sys_pciconfig_write
@@ -82,7 +82,8 @@ sys_call_table32:
.word compat_sys_set_mempolicy, compat_sys_kexec_load, compat_sys_move_pages, sys_getcpu, compat_sys_epoll_pwait
/*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate
.word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1
-/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv, compat_sys_pwritev
+/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
+ .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo
#endif /* CONFIG_COMPAT */
@@ -156,4 +157,5 @@ sys_call_table:
.word sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait
/*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
.word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
-/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv, sys_pwritev
+/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
+ .word sys_pwritev, sys_rt_tgsigqueueinfo
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 5c12e79b4bd..da1218e8ee8 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -11,7 +11,6 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
-#include <linux/smp_lock.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
index 358283341b4..c0490c7bbde 100644
--- a/arch/sparc/kernel/traps_32.c
+++ b/arch/sparc/kernel/traps_32.c
@@ -13,7 +13,6 @@
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/kdebug.h>
#include <asm/delay.h>
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index d809c4ebb48..10f7bb9fc14 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2509,6 +2509,7 @@ void do_getpsr(struct pt_regs *regs)
}
struct trap_per_cpu trap_block[NR_CPUS];
+EXPORT_SYMBOL(trap_block);
/* This can get invoked before sched_init() so play it super safe
* and use hard_smp_processor_id().
@@ -2530,84 +2531,97 @@ extern void tsb_config_offsets_are_bolixed_dave(void);
void __init trap_init(void)
{
/* Compile time sanity check. */
- if (TI_TASK != offsetof(struct thread_info, task) ||
- TI_FLAGS != offsetof(struct thread_info, flags) ||
- TI_CPU != offsetof(struct thread_info, cpu) ||
- TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
- TI_KSP != offsetof(struct thread_info, ksp) ||
- TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
- TI_KREGS != offsetof(struct thread_info, kregs) ||
- TI_UTRAPS != offsetof(struct thread_info, utraps) ||
- TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
- TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
- TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
- TI_GSR != offsetof(struct thread_info, gsr) ||
- TI_XFSR != offsetof(struct thread_info, xfsr) ||
- TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
- TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
- TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
- TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
- TI_PCR != offsetof(struct thread_info, pcr_reg) ||
- TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
- TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
- TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
- TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) ||
- TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) ||
- TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) ||
- TI_FPREGS != offsetof(struct thread_info, fpregs) ||
- (TI_FPREGS & (64 - 1)))
- thread_info_offsets_are_bolixed_dave();
-
- if (TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) ||
- (TRAP_PER_CPU_PGD_PADDR !=
- offsetof(struct trap_per_cpu, pgd_paddr)) ||
- (TRAP_PER_CPU_CPU_MONDO_PA !=
- offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
- (TRAP_PER_CPU_DEV_MONDO_PA !=
- offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
- (TRAP_PER_CPU_RESUM_MONDO_PA !=
- offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
- (TRAP_PER_CPU_RESUM_KBUF_PA !=
- offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
- (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
- offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
- (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
- offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
- (TRAP_PER_CPU_FAULT_INFO !=
- offsetof(struct trap_per_cpu, fault_info)) ||
- (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
- offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
- (TRAP_PER_CPU_CPU_LIST_PA !=
- offsetof(struct trap_per_cpu, cpu_list_pa)) ||
- (TRAP_PER_CPU_TSB_HUGE !=
- offsetof(struct trap_per_cpu, tsb_huge)) ||
- (TRAP_PER_CPU_TSB_HUGE_TEMP !=
- offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
- (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
- offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
- (TRAP_PER_CPU_CPU_MONDO_QMASK !=
- offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
- (TRAP_PER_CPU_DEV_MONDO_QMASK !=
- offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
- (TRAP_PER_CPU_RESUM_QMASK !=
- offsetof(struct trap_per_cpu, resum_qmask)) ||
- (TRAP_PER_CPU_NONRESUM_QMASK !=
- offsetof(struct trap_per_cpu, nonresum_qmask)))
- trap_per_cpu_offsets_are_bolixed_dave();
-
- if ((TSB_CONFIG_TSB !=
- offsetof(struct tsb_config, tsb)) ||
- (TSB_CONFIG_RSS_LIMIT !=
- offsetof(struct tsb_config, tsb_rss_limit)) ||
- (TSB_CONFIG_NENTRIES !=
- offsetof(struct tsb_config, tsb_nentries)) ||
- (TSB_CONFIG_REG_VAL !=
- offsetof(struct tsb_config, tsb_reg_val)) ||
- (TSB_CONFIG_MAP_VADDR !=
- offsetof(struct tsb_config, tsb_map_vaddr)) ||
- (TSB_CONFIG_MAP_PTE !=
- offsetof(struct tsb_config, tsb_map_pte)))
- tsb_config_offsets_are_bolixed_dave();
+ BUILD_BUG_ON(TI_TASK != offsetof(struct thread_info, task) ||
+ TI_FLAGS != offsetof(struct thread_info, flags) ||
+ TI_CPU != offsetof(struct thread_info, cpu) ||
+ TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
+ TI_KSP != offsetof(struct thread_info, ksp) ||
+ TI_FAULT_ADDR != offsetof(struct thread_info,
+ fault_address) ||
+ TI_KREGS != offsetof(struct thread_info, kregs) ||
+ TI_UTRAPS != offsetof(struct thread_info, utraps) ||
+ TI_EXEC_DOMAIN != offsetof(struct thread_info,
+ exec_domain) ||
+ TI_REG_WINDOW != offsetof(struct thread_info,
+ reg_window) ||
+ TI_RWIN_SPTRS != offsetof(struct thread_info,
+ rwbuf_stkptrs) ||
+ TI_GSR != offsetof(struct thread_info, gsr) ||
+ TI_XFSR != offsetof(struct thread_info, xfsr) ||
+ TI_USER_CNTD0 != offsetof(struct thread_info,
+ user_cntd0) ||
+ TI_USER_CNTD1 != offsetof(struct thread_info,
+ user_cntd1) ||
+ TI_KERN_CNTD0 != offsetof(struct thread_info,
+ kernel_cntd0) ||
+ TI_KERN_CNTD1 != offsetof(struct thread_info,
+ kernel_cntd1) ||
+ TI_PCR != offsetof(struct thread_info, pcr_reg) ||
+ TI_PRE_COUNT != offsetof(struct thread_info,
+ preempt_count) ||
+ TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
+ TI_SYS_NOERROR != offsetof(struct thread_info,
+ syscall_noerror) ||
+ TI_RESTART_BLOCK != offsetof(struct thread_info,
+ restart_block) ||
+ TI_KUNA_REGS != offsetof(struct thread_info,
+ kern_una_regs) ||
+ TI_KUNA_INSN != offsetof(struct thread_info,
+ kern_una_insn) ||
+ TI_FPREGS != offsetof(struct thread_info, fpregs) ||
+ (TI_FPREGS & (64 - 1)));
+
+ BUILD_BUG_ON(TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu,
+ thread) ||
+ (TRAP_PER_CPU_PGD_PADDR !=
+ offsetof(struct trap_per_cpu, pgd_paddr)) ||
+ (TRAP_PER_CPU_CPU_MONDO_PA !=
+ offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
+ (TRAP_PER_CPU_DEV_MONDO_PA !=
+ offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
+ (TRAP_PER_CPU_RESUM_MONDO_PA !=
+ offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
+ (TRAP_PER_CPU_RESUM_KBUF_PA !=
+ offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
+ (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
+ offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
+ (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
+ offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
+ (TRAP_PER_CPU_FAULT_INFO !=
+ offsetof(struct trap_per_cpu, fault_info)) ||
+ (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
+ offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
+ (TRAP_PER_CPU_CPU_LIST_PA !=
+ offsetof(struct trap_per_cpu, cpu_list_pa)) ||
+ (TRAP_PER_CPU_TSB_HUGE !=
+ offsetof(struct trap_per_cpu, tsb_huge)) ||
+ (TRAP_PER_CPU_TSB_HUGE_TEMP !=
+ offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
+ (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
+ offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
+ (TRAP_PER_CPU_CPU_MONDO_QMASK !=
+ offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
+ (TRAP_PER_CPU_DEV_MONDO_QMASK !=
+ offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
+ (TRAP_PER_CPU_RESUM_QMASK !=
+ offsetof(struct trap_per_cpu, resum_qmask)) ||
+ (TRAP_PER_CPU_NONRESUM_QMASK !=
+ offsetof(struct trap_per_cpu, nonresum_qmask)) ||
+ (TRAP_PER_CPU_PER_CPU_BASE !=
+ offsetof(struct trap_per_cpu, __per_cpu_base)));
+
+ BUILD_BUG_ON((TSB_CONFIG_TSB !=
+ offsetof(struct tsb_config, tsb)) ||
+ (TSB_CONFIG_RSS_LIMIT !=
+ offsetof(struct tsb_config, tsb_rss_limit)) ||
+ (TSB_CONFIG_NENTRIES !=
+ offsetof(struct tsb_config, tsb_nentries)) ||
+ (TSB_CONFIG_REG_VAL !=
+ offsetof(struct tsb_config, tsb_reg_val)) ||
+ (TSB_CONFIG_MAP_VADDR !=
+ offsetof(struct tsb_config, tsb_map_vaddr)) ||
+ (TSB_CONFIG_MAP_PTE !=
+ offsetof(struct tsb_config, tsb_map_pte)));
/* Attach to the address space of init_task. On SMP we
* do this in smp.c:smp_callin for other cpus.
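
The runtime calls to the *_offsets_are_bolixed_dave() helpers, which only caught mismatches by failing the link when the compiler could not discard the call, are replaced by BUILD_BUG_ON(), which fails the compile outright when an offset constant drifts from the structure layout. A minimal sketch of the idiom with a hypothetical structure:

struct example {
        unsigned long a;
        unsigned long b;
};
#define EXAMPLE_B       0x08    /* asm-visible constant for offsetof(struct example, b) */

static inline void example_offsets_check(void)
{
        /* Compile-time failure if the constant and the layout disagree. */
        BUILD_BUG_ON(EXAMPLE_B != offsetof(struct example, b));
}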
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
index 753d128ed15..c28c71449a6 100644
--- a/arch/sparc/kernel/vio.c
+++ b/arch/sparc/kernel/vio.c
@@ -224,7 +224,12 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
if (!strcmp(type, "domain-services-port"))
bus_id_name = "ds";
- if (strlen(bus_id_name) >= BUS_ID_SIZE - 4) {
+ /*
+ * 20 char is the old driver-core name size limit, which is no more.
+ * This check can probably be removed after review and possible
+ * adaptation of the vio users' name length handling.
+ */
+ if (strlen(bus_id_name) >= 20 - 4) {
printk(KERN_ERR "VIO: bus_id_name [%s] is too long.\n",
bus_id_name);
return NULL;
diff --git a/arch/sparc/mm/extable.c b/arch/sparc/mm/extable.c
index 16cc28935e3..a61c349448e 100644
--- a/arch/sparc/mm/extable.c
+++ b/arch/sparc/mm/extable.c
@@ -28,6 +28,10 @@ search_extable(const struct exception_table_entry *start,
* word 3: last insn address + 4 bytes
* word 4: fixup code address
*
+ * Deleted entries are encoded as:
+ * word 1: unused
+ * word 2: -1
+ *
* See asm/uaccess.h for more details.
*/
@@ -39,6 +43,10 @@ search_extable(const struct exception_table_entry *start,
continue;
}
+ /* A deleted entry; see trim_init_extable */
+ if (walk->fixup == -1)
+ continue;
+
if (walk->insn == value)
return walk;
}
@@ -57,6 +65,27 @@ search_extable(const struct exception_table_entry *start,
return NULL;
}
+#ifdef CONFIG_MODULES
+/* We could memmove them around; easier to mark the trimmed ones. */
+void trim_init_extable(struct module *m)
+{
+ unsigned int i;
+ bool range;
+
+ for (i = 0; i < m->num_exentries; i += range ? 2 : 1) {
+ range = m->extable[i].fixup == 0;
+
+ if (within_module_init(m->extable[i].insn, m)) {
+ m->extable[i].fixup = -1;
+ if (range)
+ m->extable[i+1].fixup = -1;
+ }
+ if (range)
+ i++;
+ }
+}
+#endif /* CONFIG_MODULES */
+
/* Special extable search, which handles ranges. Returns fixup */
unsigned long search_extables_range(unsigned long addr, unsigned long *g2)
{
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 12e447fc854..b99f81c4906 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -241,7 +241,7 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, write);
+ fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
@@ -319,9 +319,10 @@ no_context:
*/
out_of_memory:
up_read(&mm->mmap_sem);
- printk("VM: killing process %s\n", tsk->comm);
- if (from_user)
- do_group_exit(SIGKILL);
+ if (from_user) {
+ pagefault_out_of_memory();
+ return;
+ }
goto no_context;
do_sigbus:
@@ -484,7 +485,7 @@ good_area:
if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
}
- switch (handle_mm_fault(mm, vma, address, write)) {
+ switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
case VM_FAULT_SIGBUS:
case VM_FAULT_OOM:
goto do_sigbus;
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 4ab8993b086..43b0da96a4f 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -398,7 +398,7 @@ good_area:
goto bad_area;
}
- fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE));
+ fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
@@ -447,9 +447,10 @@ handle_kernel_fault:
out_of_memory:
insn = get_fault_insn(regs, insn);
up_read(&mm->mmap_sem);
- printk("VM: killing process %s\n", current->comm);
- if (!(regs->tstate & TSTATE_PRIV))
- do_group_exit(SIGKILL);
+ if (!(regs->tstate & TSTATE_PRIV)) {
+ pagefault_out_of_memory();
+ return;
+ }
goto handle_kernel_fault;
intr_or_no_mm:
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index cbb282dab5a..26bb3919ff1 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -358,6 +358,7 @@ void __init paging_init(void)
protection_map[15] = PAGE_SHARED;
btfixup();
prom_build_devicetree();
+ of_fill_in_cpu_data();
device_scan();
}
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index f26a352c08a..a70a5e1904d 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -145,7 +145,8 @@ static void __init read_obp_memory(const char *property,
cmp_p64, NULL);
}
-unsigned long *sparc64_valid_addr_bitmap __read_mostly;
+unsigned long sparc64_valid_addr_bitmap[VALID_ADDR_BITMAP_BYTES /
+ sizeof(unsigned long)];
EXPORT_SYMBOL(sparc64_valid_addr_bitmap);
/* Kernel physical address base and size in bytes. */
@@ -1679,11 +1680,6 @@ pgd_t swapper_pg_dir[2048];
static void sun4u_pgprot_init(void);
static void sun4v_pgprot_init(void);
-/* Dummy function */
-void __init setup_per_cpu_areas(void)
-{
-}
-
void __init paging_init(void)
{
unsigned long end_pfn, shift, phys_base;
@@ -1799,16 +1795,19 @@ void __init paging_init(void)
if (tlb_type == hypervisor)
sun4v_ktsb_register();
- /* We must setup the per-cpu areas before we pull in the
- * PROM and the MDESC. The code there fills in cpu and
- * other information into per-cpu data structures.
- */
- real_setup_per_cpu_areas();
-
prom_build_devicetree();
+ of_populate_present_mask();
+#ifndef CONFIG_SMP
+ of_fill_in_cpu_data();
+#endif
- if (tlb_type == hypervisor)
+ if (tlb_type == hypervisor) {
sun4v_mdesc_init();
+ mdesc_populate_present_mask(cpu_all_mask);
+#ifndef CONFIG_SMP
+ mdesc_fill_in_cpu_data(cpu_all_mask);
+#endif
+ }
/* Once the OF device tree and MDESC have been setup, we know
* the list of possible cpus. Therefore we can allocate the
@@ -1876,7 +1875,7 @@ static int pavail_rescan_ents __initdata;
* memory list again, and make sure it provides at least as much
* memory as 'pavail' does.
*/
-static void __init setup_valid_addr_bitmap_from_pavail(void)
+static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap)
{
int i;
@@ -1899,8 +1898,7 @@ static void __init setup_valid_addr_bitmap_from_pavail(void)
if (new_start <= old_start &&
new_end >= (old_start + PAGE_SIZE)) {
- set_bit(old_start >> 22,
- sparc64_valid_addr_bitmap);
+ set_bit(old_start >> 22, bitmap);
goto do_next_page;
}
}
@@ -1921,20 +1919,21 @@ static void __init setup_valid_addr_bitmap_from_pavail(void)
}
}
+static void __init patch_tlb_miss_handler_bitmap(void)
+{
+ extern unsigned int valid_addr_bitmap_insn[];
+ extern unsigned int valid_addr_bitmap_patch[];
+
+ valid_addr_bitmap_insn[1] = valid_addr_bitmap_patch[1];
+ mb();
+ valid_addr_bitmap_insn[0] = valid_addr_bitmap_patch[0];
+ flushi(&valid_addr_bitmap_insn[0]);
+}
+
void __init mem_init(void)
{
unsigned long codepages, datapages, initpages;
unsigned long addr, last;
- int i;
-
- i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
- i += 1;
- sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3);
- if (sparc64_valid_addr_bitmap == NULL) {
- prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
- prom_halt();
- }
- memset(sparc64_valid_addr_bitmap, 0, i << 3);
addr = PAGE_OFFSET + kern_base;
last = PAGE_ALIGN(kern_size) + addr;
@@ -1943,15 +1942,19 @@ void __init mem_init(void)
addr += PAGE_SIZE;
}
- setup_valid_addr_bitmap_from_pavail();
+ setup_valid_addr_bitmap_from_pavail(sparc64_valid_addr_bitmap);
+ patch_tlb_miss_handler_bitmap();
high_memory = __va(last_valid_pfn << PAGE_SHIFT);
#ifdef CONFIG_NEED_MULTIPLE_NODES
- for_each_online_node(i) {
- if (NODE_DATA(i)->node_spanned_pages != 0) {
- totalram_pages +=
- free_all_bootmem_node(NODE_DATA(i));
+ {
+ int i;
+ for_each_online_node(i) {
+ if (NODE_DATA(i)->node_spanned_pages != 0) {
+ totalram_pages +=
+ free_all_bootmem_node(NODE_DATA(i));
+ }
}
}
#else
diff --git a/arch/sparc/mm/init_64.h b/arch/sparc/mm/init_64.h
index 16063870a48..c2f772dbd55 100644
--- a/arch/sparc/mm/init_64.h
+++ b/arch/sparc/mm/init_64.h
@@ -5,10 +5,13 @@
* marked non-static so that assembler code can get at them.
*/
-#define MAX_PHYS_ADDRESS (1UL << 42UL)
-#define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL)
+#define MAX_PHYS_ADDRESS (1UL << 41UL)
+#define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL)
#define KPTE_BITMAP_BYTES \
((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 8)
+#define VALID_ADDR_BITMAP_CHUNK_SZ (4UL * 1024UL * 1024UL)
+#define VALID_ADDR_BITMAP_BYTES \
+ ((MAX_PHYS_ADDRESS / VALID_ADDR_BITMAP_CHUNK_SZ) / 8)
extern unsigned long kern_linear_pte_xor[2];
extern unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
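
A quick check of the sizes these macros produce (pure arithmetic, nothing assumed beyond the definitions above):

  MAX_PHYS_ADDRESS            = 1UL << 41             (2 TB)
  VALID_ADDR_BITMAP_CHUNK_SZ  = 4 MB                  (1UL << 22)
  bits needed                 = 2^41 / 2^22           = 2^19
  VALID_ADDR_BITMAP_BYTES     = 2^19 / 8              = 65536 bytes (64 KB)
  sparc64_valid_addr_bitmap[] = 65536 / sizeof(long)  = 8192 unsigned longs

That 64 KB bound is what lets mem_init() drop the boot-time alloc_bootmem() in favour of the fixed array added to init_64.c; the kpte bitmap stays small as well (2^41 / 256 MB / 8 = 1024 bytes).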
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 06c9a7d9820..ade4eb373bd 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -19,6 +19,7 @@
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kdebug.h>
+#include <linux/log2.h>
#include <asm/bitext.h>
#include <asm/page.h>
@@ -349,7 +350,7 @@ static void srmmu_free_nocache(unsigned long vaddr, int size)
vaddr, srmmu_nocache_end);
BUG();
}
- if (size & (size-1)) {
+ if (!is_power_of_2(size)) {
printk("Size 0x%x is not a power of 2\n", size);
BUG();
}
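
The srmmu change is a readability swap with one subtle difference: for size > 0, the old expression size & (size - 1) is zero exactly when a single bit is set, which is what is_power_of_2() tests, but is_power_of_2(0) is false, so a zero size now also trips the BUG(). A standalone sketch of the equivalence:

#include <stdio.h>

static int is_pow2(unsigned int x)
{
        return x != 0 && (x & (x - 1)) == 0;    /* same test as is_power_of_2() */
}

int main(void)
{
        unsigned int v[] = { 0, 1, 96, 128, 4096, 4100 };
        unsigned int i;

        for (i = 0; i < sizeof(v) / sizeof(v[0]); i++)
                printf("%u -> %d\n", v[i], is_pow2(v[i]));
        return 0;       /* prints 0, 1, 0, 1, 1, 0 */
}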