path: root/arch/x86/platform
author		Thomas Gleixner <tglx@linutronix.de>	2010-10-23 11:23:37 +0200
committer	Thomas Gleixner <tglx@linutronix.de>	2010-10-27 14:30:02 +0200
commit		329b84e42e3ee348b114fd0bfe4b2421e6139257 (patch)
tree		95fefceaf95025c4f06b0bce21bb49c4b34b3f6e	/arch/x86/platform
parent		9694d4afc1ebe1e46cacfb78b107cd8f9fb550ba (diff)

x86: Move uv to platform

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Mike Travis <travis@sgi.com>
Diffstat (limited to 'arch/x86/platform')
-rw-r--r--	arch/x86/platform/Makefile	1
-rw-r--r--	arch/x86/platform/uv/Makefile	1
-rw-r--r--	arch/x86/platform/uv/bios_uv.c	215
-rw-r--r--	arch/x86/platform/uv/tlb_uv.c	1661
-rw-r--r--	arch/x86/platform/uv/uv_irq.c	285
-rw-r--r--	arch/x86/platform/uv/uv_sysfs.c	76
-rw-r--r--	arch/x86/platform/uv/uv_time.c	423
7 files changed, 2662 insertions, 0 deletions
diff --git a/arch/x86/platform/Makefile b/arch/x86/platform/Makefile
index 06761ed5374..8519b01f1ac 100644
--- a/arch/x86/platform/Makefile
+++ b/arch/x86/platform/Makefile
@@ -4,3 +4,4 @@ obj-y += mrst/
obj-y += scx200/
obj-y += sfi/
obj-y += visws/
+obj-y += uv/
diff --git a/arch/x86/platform/uv/Makefile b/arch/x86/platform/uv/Makefile
new file mode 100644
index 00000000000..6c40995fefb
--- /dev/null
+++ b/arch/x86/platform/uv/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_X86_UV) += tlb_uv.o bios_uv.o uv_irq.o uv_sysfs.o uv_time.o
diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c
new file mode 100644
index 00000000000..8bc57baaa9a
--- /dev/null
+++ b/arch/x86/platform/uv/bios_uv.c
@@ -0,0 +1,215 @@
+/*
+ * BIOS run time interface routines.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) Russ Anderson <rja@sgi.com>
+ */
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+#include <linux/io.h>
+#include <asm/uv/bios.h>
+#include <asm/uv/uv_hub.h>
+
+static struct uv_systab uv_systab;
+
+s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
+{
+ struct uv_systab *tab = &uv_systab;
+ s64 ret;
+
+ if (!tab->function)
+ /*
+ * BIOS does not support UV systab
+ */
+ return BIOS_STATUS_UNIMPLEMENTED;
+
+ ret = efi_call6((void *)__va(tab->function), (u64)which,
+ a1, a2, a3, a4, a5);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(uv_bios_call);
+
+s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
+ u64 a4, u64 a5)
+{
+ unsigned long bios_flags;
+ s64 ret;
+
+ local_irq_save(bios_flags);
+ ret = uv_bios_call(which, a1, a2, a3, a4, a5);
+ local_irq_restore(bios_flags);
+
+ return ret;
+}
+
+s64 uv_bios_call_reentrant(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
+ u64 a4, u64 a5)
+{
+ s64 ret;
+
+ preempt_disable();
+ ret = uv_bios_call(which, a1, a2, a3, a4, a5);
+ preempt_enable();
+
+ return ret;
+}
+
+
+long sn_partition_id;
+EXPORT_SYMBOL_GPL(sn_partition_id);
+long sn_coherency_id;
+EXPORT_SYMBOL_GPL(sn_coherency_id);
+long sn_region_size;
+EXPORT_SYMBOL_GPL(sn_region_size);
+long system_serial_number;
+EXPORT_SYMBOL_GPL(system_serial_number);
+int uv_type;
+EXPORT_SYMBOL_GPL(uv_type);
+
+
+s64 uv_bios_get_sn_info(int fc, int *uvtype, long *partid, long *coher,
+ long *region, long *ssn)
+{
+ s64 ret;
+ u64 v0, v1;
+ union partition_info_u part;
+
+ ret = uv_bios_call_irqsave(UV_BIOS_GET_SN_INFO, fc,
+ (u64)(&v0), (u64)(&v1), 0, 0);
+ if (ret != BIOS_STATUS_SUCCESS)
+ return ret;
+
+ part.val = v0;
+ if (uvtype)
+ *uvtype = part.hub_version;
+ if (partid)
+ *partid = part.partition_id;
+ if (coher)
+ *coher = part.coherence_id;
+ if (region)
+ *region = part.region_size;
+ if (ssn)
+ *ssn = v1;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(uv_bios_get_sn_info);
+
+int
+uv_bios_mq_watchlist_alloc(unsigned long addr, unsigned int mq_size,
+ unsigned long *intr_mmr_offset)
+{
+ u64 watchlist;
+ s64 ret;
+
+ /*
+ * bios returns watchlist number or negative error number.
+ */
+ ret = (int)uv_bios_call_irqsave(UV_BIOS_WATCHLIST_ALLOC, addr,
+ mq_size, (u64)intr_mmr_offset,
+ (u64)&watchlist, 0);
+ if (ret < BIOS_STATUS_SUCCESS)
+ return ret;
+
+ return watchlist;
+}
+EXPORT_SYMBOL_GPL(uv_bios_mq_watchlist_alloc);
+
+int
+uv_bios_mq_watchlist_free(int blade, int watchlist_num)
+{
+ return (int)uv_bios_call_irqsave(UV_BIOS_WATCHLIST_FREE,
+ blade, watchlist_num, 0, 0, 0);
+}
+EXPORT_SYMBOL_GPL(uv_bios_mq_watchlist_free);
+
+s64
+uv_bios_change_memprotect(u64 paddr, u64 len, enum uv_memprotect perms)
+{
+ return uv_bios_call_irqsave(UV_BIOS_MEMPROTECT, paddr, len,
+ perms, 0, 0);
+}
+EXPORT_SYMBOL_GPL(uv_bios_change_memprotect);
+
+s64
+uv_bios_reserved_page_pa(u64 buf, u64 *cookie, u64 *addr, u64 *len)
+{
+ s64 ret;
+
+ ret = uv_bios_call_irqsave(UV_BIOS_GET_PARTITION_ADDR, (u64)cookie,
+ (u64)addr, buf, (u64)len, 0);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(uv_bios_reserved_page_pa);
+
+s64 uv_bios_freq_base(u64 clock_type, u64 *ticks_per_second)
+{
+ return uv_bios_call(UV_BIOS_FREQ_BASE, clock_type,
+ (u64)ticks_per_second, 0, 0, 0);
+}
+EXPORT_SYMBOL_GPL(uv_bios_freq_base);
+
+/*
+ * uv_bios_set_legacy_vga_target - Set Legacy VGA I/O Target
+ * @decode: true to enable target, false to disable target
+ * @domain: PCI domain number
+ * @bus: PCI bus number
+ *
+ * Returns:
+ * 0: Success
+ * -EINVAL: Invalid domain or bus number
+ * -ENOSYS: Capability not available
+ * -EBUSY: Legacy VGA I/O cannot be retargeted at this time
+ */
+int uv_bios_set_legacy_vga_target(bool decode, int domain, int bus)
+{
+ return uv_bios_call(UV_BIOS_SET_LEGACY_VGA_TARGET,
+ (u64)decode, (u64)domain, (u64)bus, 0, 0);
+}
+EXPORT_SYMBOL_GPL(uv_bios_set_legacy_vga_target);
+
+
+#ifdef CONFIG_EFI
+void uv_bios_init(void)
+{
+ struct uv_systab *tab;
+
+ if ((efi.uv_systab == EFI_INVALID_TABLE_ADDR) ||
+ (efi.uv_systab == (unsigned long)NULL)) {
+ printk(KERN_CRIT "No EFI UV System Table.\n");
+ uv_systab.function = (unsigned long)NULL;
+ return;
+ }
+
+ tab = (struct uv_systab *)ioremap(efi.uv_systab,
+ sizeof(struct uv_systab));
+ if (strncmp(tab->signature, "UVST", 4) != 0)
+ printk(KERN_ERR "bad signature in UV system table!");
+
+ /*
+ * Copy table to permanent spot for later use.
+ */
+ memcpy(&uv_systab, tab, sizeof(struct uv_systab));
+ iounmap(tab);
+
+ printk(KERN_INFO "EFI UV System Table Revision %d\n",
+ uv_systab.revision);
+}
+#else /* !CONFIG_EFI */
+
+void uv_bios_init(void) { }
+#endif
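
A hedged illustration of how the helpers exported above might be consumed (uv_time.c, added by this same patch, is a real user of uv_bios_freq_base). The initcall hook, the BIOS_FREQ_BASE_REALTIME_CLOCK clock type and the -ENODEV mapping are assumptions for the sketch, not part of this commit; the usual <asm/uv/bios.h> and <asm/uv/uv.h> declarations are assumed to be in scope.

/* Hypothetical consumer sketch -- not part of this patch. */
static int __init example_uv_bios_query(void)
{
	long partid, coher, region, ssn;
	u64 ticks_per_sec;
	int uvtype;
	s64 ret;

	if (!is_uv_system())
		return 0;

	/* Ask the UV BIOS for partition/coherence information. */
	ret = uv_bios_get_sn_info(0, &uvtype, &partid, &coher, &region, &ssn);
	if (ret != BIOS_STATUS_SUCCESS)
		return -ENODEV;

	/* Query the real-time clock base frequency (assumed clock type). */
	ret = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK, &ticks_per_sec);
	if (ret != BIOS_STATUS_SUCCESS)
		return -ENODEV;

	printk(KERN_INFO "UV partition %ld: %llu RTC ticks/sec\n",
	       partid, (unsigned long long)ticks_per_sec);
	return 0;
}
late_initcall(example_uv_bios_query);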
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
new file mode 100644
index 00000000000..20ea20a39e2
--- /dev/null
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -0,0 +1,1661 @@
+/*
+ * SGI UltraViolet TLB flush routines.
+ *
+ * (c) 2008-2010 Cliff Wickman <cpw@sgi.com>, SGI.
+ *
+ * This code is released under the GNU General Public License version 2 or
+ * later.
+ */
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include <asm/mmu_context.h>
+#include <asm/uv/uv.h>
+#include <asm/uv/uv_mmrs.h>
+#include <asm/uv/uv_hub.h>
+#include <asm/uv/uv_bau.h>
+#include <asm/apic.h>
+#include <asm/idle.h>
+#include <asm/tsc.h>
+#include <asm/irq_vectors.h>
+#include <asm/timer.h>
+
+/* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
+static int timeout_base_ns[] = {
+ 20,
+ 160,
+ 1280,
+ 10240,
+ 81920,
+ 655360,
+ 5242880,
+ 167772160
+};
+static int timeout_us;
+static int nobau;
+static int baudisabled;
+static spinlock_t disable_lock;
+static cycles_t congested_cycles;
+
+/* tunables: */
+static int max_bau_concurrent = MAX_BAU_CONCURRENT;
+static int max_bau_concurrent_constant = MAX_BAU_CONCURRENT;
+static int plugged_delay = PLUGGED_DELAY;
+static int plugsb4reset = PLUGSB4RESET;
+static int timeoutsb4reset = TIMEOUTSB4RESET;
+static int ipi_reset_limit = IPI_RESET_LIMIT;
+static int complete_threshold = COMPLETE_THRESHOLD;
+static int congested_response_us = CONGESTED_RESPONSE_US;
+static int congested_reps = CONGESTED_REPS;
+static int congested_period = CONGESTED_PERIOD;
+static struct dentry *tunables_dir;
+static struct dentry *tunables_file;
+
+static int __init setup_nobau(char *arg)
+{
+ nobau = 1;
+ return 0;
+}
+early_param("nobau", setup_nobau);
+
+/* base pnode in this partition */
+static int uv_partition_base_pnode __read_mostly;
+/* position of pnode (which is nasid>>1): */
+static int uv_nshift __read_mostly;
+static unsigned long uv_mmask __read_mostly;
+
+static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
+static DEFINE_PER_CPU(struct bau_control, bau_control);
+static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
+
+/*
+ * Determine the first node on a uvhub. 'Nodes' are used for kernel
+ * memory allocation.
+ */
+static int __init uvhub_to_first_node(int uvhub)
+{
+ int node, b;
+
+ for_each_online_node(node) {
+ b = uv_node_to_blade_id(node);
+ if (uvhub == b)
+ return node;
+ }
+ return -1;
+}
+
+/*
+ * Determine the apicid of the first cpu on a uvhub.
+ */
+static int __init uvhub_to_first_apicid(int uvhub)
+{
+ int cpu;
+
+ for_each_present_cpu(cpu)
+ if (uvhub == uv_cpu_to_blade_id(cpu))
+ return per_cpu(x86_cpu_to_apicid, cpu);
+ return -1;
+}
+
+/*
+ * Free a software acknowledge hardware resource by clearing its Pending
+ * bit. This will return a reply to the sender.
+ * If the message has timed out, a reply has already been sent by the
+ * hardware but the resource has not been released. In that case our
+ * clear of the Timeout bit (as well) will free the resource. No reply will
+ * be sent (the hardware will only do one reply per message).
+ */
+static inline void uv_reply_to_message(struct msg_desc *mdp,
+ struct bau_control *bcp)
+{
+ unsigned long dw;
+ struct bau_payload_queue_entry *msg;
+
+ msg = mdp->msg;
+ if (!msg->canceled) {
+ dw = (msg->sw_ack_vector << UV_SW_ACK_NPENDING) |
+ msg->sw_ack_vector;
+ uv_write_local_mmr(
+ UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw);
+ }
+ msg->replied_to = 1;
+ msg->sw_ack_vector = 0;
+}
+
+/*
+ * Process the receipt of a RETRY message
+ */
+static inline void uv_bau_process_retry_msg(struct msg_desc *mdp,
+ struct bau_control *bcp)
+{
+ int i;
+ int cancel_count = 0;
+ int slot2;
+ unsigned long msg_res;
+ unsigned long mmr = 0;
+ struct bau_payload_queue_entry *msg;
+ struct bau_payload_queue_entry *msg2;
+ struct ptc_stats *stat;
+
+ msg = mdp->msg;
+ stat = bcp->statp;
+ stat->d_retries++;
+ /*
+ * cancel any message from msg+1 to the retry itself
+ */
+ for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
+ if (msg2 > mdp->va_queue_last)
+ msg2 = mdp->va_queue_first;
+ if (msg2 == msg)
+ break;
+
+ /* same conditions for cancellation as uv_do_reset */
+ if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
+ (msg2->sw_ack_vector) && ((msg2->sw_ack_vector &
+ msg->sw_ack_vector) == 0) &&
+ (msg2->sending_cpu == msg->sending_cpu) &&
+ (msg2->msg_type != MSG_NOOP)) {
+ slot2 = msg2 - mdp->va_queue_first;
+ mmr = uv_read_local_mmr
+ (UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
+ msg_res = msg2->sw_ack_vector;
+ /*
+ * This is a message retry; clear the resources held
+ * by the previous message only if they timed out.
+ * If it has not timed out we have an unexpected
+ * situation to report.
+ */
+ if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
+ /*
+ * is the resource timed out?
+ * make everyone ignore the cancelled message.
+ */
+ msg2->canceled = 1;
+ stat->d_canceled++;
+ cancel_count++;
+ uv_write_local_mmr(
+ UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
+ (msg_res << UV_SW_ACK_NPENDING) |
+ msg_res);
+ }
+ }
+ }
+ if (!cancel_count)
+ stat->d_nocanceled++;
+}
+
+/*
+ * Do all the things a cpu should do for a TLB shootdown message.
+ * Other cpu's may come here at the same time for this message.
+ */
+static void uv_bau_process_message(struct msg_desc *mdp,
+ struct bau_control *bcp)
+{
+ int msg_ack_count;
+ short socket_ack_count = 0;
+ struct ptc_stats *stat;
+ struct bau_payload_queue_entry *msg;
+ struct bau_control *smaster = bcp->socket_master;
+
+ /*
+ * This must be a normal message, or retry of a normal message
+ */
+ msg = mdp->msg;
+ stat = bcp->statp;
+ if (msg->address == TLB_FLUSH_ALL) {
+ local_flush_tlb();
+ stat->d_alltlb++;
+ } else {
+ __flush_tlb_one(msg->address);
+ stat->d_onetlb++;
+ }
+ stat->d_requestee++;
+
+ /*
+ * One cpu on each uvhub has the additional job on a RETRY
+ * of releasing the resource held by the message that is
+ * being retried. That message is identified by sending
+ * cpu number.
+ */
+ if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
+ uv_bau_process_retry_msg(mdp, bcp);
+
+ /*
+ * This is a sw_ack message, so we have to reply to it.
+ * Count each responding cpu on the socket. This avoids
+ * pinging the count's cache line back and forth between
+ * the sockets.
+ */
+ socket_ack_count = atomic_add_short_return(1, (struct atomic_short *)
+ &smaster->socket_acknowledge_count[mdp->msg_slot]);
+ if (socket_ack_count == bcp->cpus_in_socket) {
+ /*
+ * Both sockets dump their completed count total into
+ * the message's count.
+ */
+ smaster->socket_acknowledge_count[mdp->msg_slot] = 0;
+ msg_ack_count = atomic_add_short_return(socket_ack_count,
+ (struct atomic_short *)&msg->acknowledge_count);
+
+ if (msg_ack_count == bcp->cpus_in_uvhub) {
+ /*
+ * All cpus in uvhub saw it; reply
+ */
+ uv_reply_to_message(mdp, bcp);
+ }
+ }
+
+ return;
+}
+
+/*
+ * Determine the first cpu on a uvhub.
+ */
+static int uvhub_to_first_cpu(int uvhub)
+{
+ int cpu;
+ for_each_present_cpu(cpu)
+ if (uvhub == uv_cpu_to_blade_id(cpu))
+ return cpu;
+ return -1;
+}
+
+/*
+ * Last resort when we get a large number of destination timeouts is
+ * to clear resources held by a given cpu.
+ * Do this with IPI so that all messages in the BAU message queue
+ * can be identified by their nonzero sw_ack_vector field.
+ *
+ * This is entered for a single cpu on the uvhub.
+ * The sender wants this uvhub to free a specific message's
+ * sw_ack resources.
+ */
+static void
+uv_do_reset(void *ptr)
+{
+ int i;
+ int slot;
+ int count = 0;
+ unsigned long mmr;
+ unsigned long msg_res;
+ struct bau_control *bcp;
+ struct reset_args *rap;
+ struct bau_payload_queue_entry *msg;
+ struct ptc_stats *stat;
+
+ bcp = &per_cpu(bau_control, smp_processor_id());
+ rap = (struct reset_args *)ptr;
+ stat = bcp->statp;
+ stat->d_resets++;
+
+ /*
+ * We're looking for the given sender, and
+ * will free its sw_ack resource.
+ * If all cpu's finally responded after the timeout, its
+ * message 'replied_to' was set.
+ */
+ for (msg = bcp->va_queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
+ /* uv_do_reset: same conditions for cancellation as
+ uv_bau_process_retry_msg() */
+ if ((msg->replied_to == 0) &&
+ (msg->canceled == 0) &&
+ (msg->sending_cpu == rap->sender) &&
+ (msg->sw_ack_vector) &&
+ (msg->msg_type != MSG_NOOP)) {
+ /*
+ * make everyone else ignore this message
+ */
+ msg->canceled = 1;
+ slot = msg - bcp->va_queue_first;
+ count++;
+ /*
+ * only reset the resource if it is still pending
+ */
+ mmr = uv_read_local_mmr
+ (UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
+ msg_res = msg->sw_ack_vector;
+ if (mmr & msg_res) {
+ stat->d_rcanceled++;
+ uv_write_local_mmr(
+ UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
+ (msg_res << UV_SW_ACK_NPENDING) |
+ msg_res);
+ }
+ }
+ }
+ return;
+}
+
+/*
+ * Use IPI to get all target uvhubs to release resources held by
+ * a given sending cpu number.
+ */
+static void uv_reset_with_ipi(struct bau_target_uvhubmask *distribution,
+ int sender)
+{
+ int uvhub;
+ int cpu;
+ cpumask_t mask;
+ struct reset_args reset_args;
+
+ reset_args.sender = sender;
+
+ cpus_clear(mask);
+ /* find a single cpu for each uvhub in this distribution mask */
+ for (uvhub = 0;
+ uvhub < sizeof(struct bau_target_uvhubmask) * BITSPERBYTE;
+ uvhub++) {
+ if (!bau_uvhub_isset(uvhub, distribution))
+ continue;
+ /* find a cpu for this uvhub */
+ cpu = uvhub_to_first_cpu(uvhub);
+ cpu_set(cpu, mask);
+ }
+ /* IPI all cpus; Preemption is already disabled */
+ smp_call_function_many(&mask, uv_do_reset, (void *)&reset_args, 1);
+ return;
+}
+
+static inline unsigned long
+cycles_2_us(unsigned long long cyc)
+{
+ unsigned long long ns;
+ unsigned long us;
+ ns = (cyc * per_cpu(cyc2ns, smp_processor_id()))
+ >> CYC2NS_SCALE_FACTOR;
+ us = ns / 1000;
+ return us;
+}
+
+/*
+ * wait for all cpus on this hub to finish their sends and go quiet
+ * leaves uvhub_quiesce set so that no new broadcasts are started by
+ * bau_flush_send_and_wait()
+ */
+static inline void
+quiesce_local_uvhub(struct bau_control *hmaster)
+{
+ atomic_add_short_return(1, (struct atomic_short *)
+ &hmaster->uvhub_quiesce);
+}
+
+/*
+ * mark this quiet-requestor as done
+ */
+static inline void
+end_uvhub_quiesce(struct bau_control *hmaster)
+{
+ atomic_add_short_return(-1, (struct atomic_short *)
+ &hmaster->uvhub_quiesce);
+}
+
+/*
+ * Wait for completion of a broadcast software ack message
+ * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
+ */
+static int uv_wait_completion(struct bau_desc *bau_desc,
+ unsigned long mmr_offset, int right_shift, int this_cpu,
+ struct bau_control *bcp, struct bau_control *smaster, long try)
+{
+ unsigned long descriptor_status;
+ cycles_t ttime;
+ struct ptc_stats *stat = bcp->statp;
+ struct bau_control *hmaster;
+
+ hmaster = bcp->uvhub_master;
+
+ /* spin on the status MMR, waiting for it to go idle */
+ while ((descriptor_status = (((unsigned long)
+ uv_read_local_mmr(mmr_offset) >>
+ right_shift) & UV_ACT_STATUS_MASK)) !=
+ DESC_STATUS_IDLE) {
+ /*
+ * Our software ack messages may be blocked because there are
+ * no swack resources available. As long as none of them
+ * has timed out hardware will NACK our message and its
+ * state will stay IDLE.
+ */
+ if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) {
+ stat->s_stimeout++;
+ return FLUSH_GIVEUP;
+ } else if (descriptor_status ==
+ DESC_STATUS_DESTINATION_TIMEOUT) {
+ stat->s_dtimeout++;
+ ttime = get_cycles();
+
+ /*
+ * Our retries may be blocked by all destination
+ * swack resources being consumed, and a timeout
+ * pending. In that case hardware returns the
+ * ERROR that looks like a destination timeout.
+ */
+ if (cycles_2_us(ttime - bcp->send_message) <
+ timeout_us) {
+ bcp->conseccompletes = 0;
+ return FLUSH_RETRY_PLUGGED;
+ }
+
+ bcp->conseccompletes = 0;
+ return FLUSH_RETRY_TIMEOUT;
+ } else {
+ /*
+ * descriptor_status is still BUSY
+ */
+ cpu_relax();
+ }
+ }
+ bcp->conseccompletes++;
+ return FLUSH_COMPLETE;
+}
+
+static inline cycles_t
+sec_2_cycles(unsigned long sec)
+{
+ unsigned long ns;
+ cycles_t cyc;
+
+ ns = sec * 1000000000;
+ cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
+ return cyc;
+}
+
+/*
+ * conditionally add 1 to *v, unless *v is >= u
+ * return 0 if we cannot add 1 to *v because it is >= u
+ * return 1 if we can add 1 to *v because it is < u
+ * the add is atomic
+ *
+ * This is close to atomic_add_unless(), but this allows the 'u' value
+ * to be lowered below the current 'v'. atomic_add_unless can only stop
+ * on equal.
+ */
+static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
+{
+ spin_lock(lock);
+ if (atomic_read(v) >= u) {
+ spin_unlock(lock);
+ return 0;
+ }
+ atomic_inc(v);
+ spin_unlock(lock);
+ return 1;
+}
+
+/*
+ * Our retries are blocked by all destination swack resources being
+ * in use, and a timeout is pending. In that case hardware immediately
+ * returns the ERROR that looks like a destination timeout.
+ */
+static void
+destination_plugged(struct bau_desc *bau_desc, struct bau_control *bcp,
+ struct bau_control *hmaster, struct ptc_stats *stat)
+{
+ udelay(bcp->plugged_delay);
+ bcp->plugged_tries++;
+ if (bcp->plugged_tries >= bcp->plugsb4reset) {
+ bcp->plugged_tries = 0;
+ quiesce_local_uvhub(hmaster);
+ spin_lock(&hmaster->queue_lock);
+ uv_reset_with_ipi(&bau_desc->distribution, bcp->cpu);
+ spin_unlock(&hmaster->queue_lock);
+ end_uvhub_quiesce(hmaster);
+ bcp->ipi_attempts++;
+ stat->s_resets_plug++;
+ }
+}
+
+static void
+destination_timeout(struct bau_desc *bau_desc, struct bau_control *bcp,
+ struct bau_control *hmaster, struct ptc_stats *stat)
+{
+ hmaster->max_bau_concurrent = 1;
+ bcp->timeout_tries++;
+ if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
+ bcp->timeout_tries = 0;
+ quiesce_local_uvhub(hmaster);
+ spin_lock(&hmaster->queue_lock);
+ uv_reset_with_ipi(&bau_desc->distribution, bcp->cpu);
+ spin_unlock(&hmaster->queue_lock);
+ end_uvhub_quiesce(hmaster);
+ bcp->ipi_attempts++;
+ stat->s_resets_timeout++;
+ }
+}
+
+/*
+ * Completions are taking a very long time due to a congested numalink
+ * network.
+ */
+static void
+disable_for_congestion(struct bau_control *bcp, struct ptc_stats *stat)
+{
+ int tcpu;
+ struct bau_control *tbcp;
+
+ /* let only one cpu do this disabling */
+ spin_lock(&disable_lock);
+ if (!baudisabled && bcp->period_requests &&
+ ((bcp->period_time / bcp->period_requests) > congested_cycles)) {
+ /* it becomes this cpu's job to turn on the use of the
+ BAU again */
+ baudisabled = 1;
+ bcp->set_bau_off = 1;
+ bcp->set_bau_on_time = get_cycles() +
+ sec_2_cycles(bcp->congested_period);
+ stat->s_bau_disabled++;
+ for_each_present_cpu(tcpu) {
+ tbcp = &per_cpu(bau_control, tcpu);
+ tbcp->baudisabled = 1;
+ }
+ }
+ spin_unlock(&disable_lock);
+}
+
+/**
+ * uv_flush_send_and_wait
+ *
+ * Send a broadcast and wait for it to complete.
+ *
+ * The flush_mask contains the cpus the broadcast is to be sent to including
+ * cpus that are on the local uvhub.
+ *
+ * Returns 0 if all flushing represented in the mask was done.
+ * Returns 1 if it gives up entirely and the original cpu mask is to be
+ * returned to the kernel.
+ */
+int uv_flush_send_and_wait(struct bau_desc *bau_desc,
+ struct cpumask *flush_mask, struct bau_control *bcp)
+{
+ int right_shift;
+ int completion_status = 0;
+ int seq_number = 0;
+ long try = 0;
+ int cpu = bcp->uvhub_cpu;
+ int this_cpu = bcp->cpu;
+ unsigned long mmr_offset;
+ unsigned long index;
+ cycles_t time1;
+ cycles_t time2;
+ cycles_t elapsed;
+ struct ptc_stats *stat = bcp->statp;
+ struct bau_control *smaster = bcp->socket_master;
+ struct bau_control *hmaster = bcp->uvhub_master;
+
+ if (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
+ &hmaster->active_descriptor_count,
+ hmaster->max_bau_concurrent)) {
+ stat->s_throttles++;
+ do {
+ cpu_relax();
+ } while (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
+ &hmaster->active_descriptor_count,
+ hmaster->max_bau_concurrent));
+ }
+ while (hmaster->uvhub_quiesce)
+ cpu_relax();
+
+ if (cpu < UV_CPUS_PER_ACT_STATUS) {
+ mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
+ right_shift = cpu * UV_ACT_STATUS_SIZE;
+ } else {
+ mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
+ right_shift =
+ ((cpu - UV_CPUS_PER_ACT_STATUS) * UV_ACT_STATUS_SIZE);
+ }
+ time1 = get_cycles();
+ do {
+ if (try == 0) {
+ bau_desc->header.msg_type = MSG_REGULAR;
+ seq_number = bcp->message_number++;
+ } else {
+ bau_desc->header.msg_type = MSG_RETRY;
+ stat->s_retry_messages++;
+ }
+ bau_desc->header.sequence = seq_number;
+ index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) |
+ bcp->uvhub_cpu;
+ bcp->send_message = get_cycles();
+ uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
+ try++;
+ completion_status = uv_wait_completion(bau_desc, mmr_offset,
+ right_shift, this_cpu, bcp, smaster, try);
+
+ if (completion_status == FLUSH_RETRY_PLUGGED) {
+ destination_plugged(bau_desc, bcp, hmaster, stat);
+ } else if (completion_status == FLUSH_RETRY_TIMEOUT) {
+ destination_timeout(bau_desc, bcp, hmaster, stat);
+ }
+ if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
+ bcp->ipi_attempts = 0;
+ completion_status = FLUSH_GIVEUP;
+ break;
+ }
+ cpu_relax();
+ } while ((completion_status == FLUSH_RETRY_PLUGGED) ||
+ (completion_status == FLUSH_RETRY_TIMEOUT));
+ time2 = get_cycles();
+ bcp->plugged_tries = 0;
+ bcp->timeout_tries = 0;
+ if ((completion_status == FLUSH_COMPLETE) &&
+ (bcp->conseccompletes > bcp->complete_threshold) &&
+ (hmaster->max_bau_concurrent <
+ hmaster->max_bau_concurrent_constant))
+ hmaster->max_bau_concurrent++;
+ while (hmaster->uvhub_quiesce)
+ cpu_relax();
+ atomic_dec(&hmaster->active_descriptor_count);
+ if (time2 > time1) {
+ elapsed = time2 - time1;
+ stat->s_time += elapsed;
+ if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
+ bcp->period_requests++;
+ bcp->period_time += elapsed;
+ if ((elapsed > congested_cycles) &&
+ (bcp->period_requests > bcp->congested_reps)) {
+ disable_for_congestion(bcp, stat);
+ }
+ }
+ } else
+ stat->s_requestor--;
+ if (completion_status == FLUSH_COMPLETE && try > 1)
+ stat->s_retriesok++;
+ else if (completion_status == FLUSH_GIVEUP) {
+ stat->s_giveup++;
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * uv_flush_tlb_others - globally purge translation cache of a virtual
+ * address or all TLB's
+ * @cpumask: mask of all cpu's in which the address is to be removed
+ * @mm: mm_struct containing virtual address range
+ * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu)
+ * @cpu: the current cpu
+ *
+ * This is the entry point for initiating any UV global TLB shootdown.
+ *
+ * Purges the translation caches of all specified processors of the given
+ * virtual address, or purges all TLB's on specified processors.
+ *
+ * The caller has derived the cpumask from the mm_struct. This function
+ * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
+ *
+ * The cpumask is converted into a uvhubmask of the uvhubs containing
+ * those cpus.
+ *
+ * Note that this function should be called with preemption disabled.
+ *
+ * Returns NULL if all remote flushing was done.
+ * Returns pointer to cpumask if some remote flushing remains to be
+ * done. The returned pointer is valid till preemption is re-enabled.
+ */
+const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
+ struct mm_struct *mm,
+ unsigned long va, unsigned int cpu)
+{
+ int tcpu;
+ int uvhub;
+ int locals = 0;
+ int remotes = 0;
+ int hubs = 0;
+ struct bau_desc *bau_desc;
+ struct cpumask *flush_mask;
+ struct ptc_stats *stat;
+ struct bau_control *bcp;
+ struct bau_control *tbcp;
+
+ /* kernel was booted 'nobau' */
+ if (nobau)
+ return cpumask;
+
+ bcp = &per_cpu(bau_control, cpu);
+ stat = bcp->statp;
+
+ /* bau was disabled due to slow response */
+ if (bcp->baudisabled) {
+ /* the cpu that disabled it must re-enable it */
+ if (bcp->set_bau_off) {
+ if (get_cycles() >= bcp->set_bau_on_time) {
+ stat->s_bau_reenabled++;
+ baudisabled = 0;
+ for_each_present_cpu(tcpu) {
+ tbcp = &per_cpu(bau_control, tcpu);
+ tbcp->baudisabled = 0;
+ tbcp->period_requests = 0;
+ tbcp->period_time = 0;
+ }
+ }
+ }
+ return cpumask;
+ }
+
+ /*
+ * Each sending cpu has a per-cpu mask which it fills from the caller's
+ * cpu mask. All cpus are converted to uvhubs and copied to the
+ * activation descriptor.
+ */
+ flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
+ /* don't actually do a shootdown of the local cpu */
+ cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
+ if (cpu_isset(cpu, *cpumask))
+ stat->s_ntargself++;
+
+ bau_desc = bcp->descriptor_base;
+ bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu;
+ bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
+
+ /* cpu statistics */
+ for_each_cpu(tcpu, flush_mask) {
+ uvhub = uv_cpu_to_blade_id(tcpu);
+ bau_uvhub_set(uvhub, &bau_desc->distribution);
+ if (uvhub == bcp->uvhub)
+ locals++;
+ else
+ remotes++;
+ }
+ if ((locals + remotes) == 0)
+ return NULL;
+ stat->s_requestor++;
+ stat->s_ntargcpu += remotes + locals;
+ stat->s_ntargremotes += remotes;
+ stat->s_ntarglocals += locals;
+ remotes = bau_uvhub_weight(&bau_desc->distribution);
+
+ /* uvhub statistics */
+ hubs = bau_uvhub_weight(&bau_desc->distribution);
+ if (locals) {
+ stat->s_ntarglocaluvhub++;
+ stat->s_ntargremoteuvhub += (hubs - 1);
+ } else
+ stat->s_ntargremoteuvhub += hubs;
+ stat->s_ntarguvhub += hubs;
+ if (hubs >= 16)
+ stat->s_ntarguvhub16++;
+ else if (hubs >= 8)
+ stat->s_ntarguvhub8++;
+ else if (hubs >= 4)
+ stat->s_ntarguvhub4++;
+ else if (hubs >= 2)
+ stat->s_ntarguvhub2++;
+ else
+ stat->s_ntarguvhub1++;
+
+ bau_desc->payload.address = va;
+ bau_desc->payload.sending_cpu = cpu;
+
+ /*
+ * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
+ * or 1 if it gave up and the original cpumask should be returned.
+ */
+ if (!uv_flush_send_and_wait(bau_desc, flush_mask, bcp))
+ return NULL;
+ else
+ return cpumask;
+}
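/*
 * Caller sketch (illustrative, not part of this patch): the generic x86
 * flush path is expected to use the return value to decide whether a
 * conventional IPI flush is still needed for the remaining cpus, roughly:
 *
 *	cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu);
 *	if (cpumask)
 *		flush_the_rest_by_ipi(cpumask, mm, va);
 *
 * where flush_the_rest_by_ipi() is a stand-in name for the non-BAU
 * fallback in the generic flush code.
 */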
+
+/*
+ * The BAU message interrupt comes here. (registered by set_intr_gate)
+ * See entry_64.S
+ *
+ * We received a broadcast assist message.
+ *
+ * Interrupts are disabled; this interrupt could represent
+ * the receipt of several messages.
+ *
+ * All cores/threads on this hub get this interrupt.
+ * The last one to see it does the software ack.
+ * (the resource will not be freed until noninterruptible cpus see this
+ * interrupt; hardware may timeout the s/w ack and reply ERROR)
+ */
+void uv_bau_message_interrupt(struct pt_regs *regs)
+{
+ int count = 0;
+ cycles_t time_start;
+ struct bau_payload_queue_entry *msg;
+ struct bau_control *bcp;
+ struct ptc_stats *stat;
+ struct msg_desc msgdesc;
+
+ time_start = get_cycles();
+ bcp = &per_cpu(bau_control, smp_processor_id());
+ stat = bcp->statp;
+ msgdesc.va_queue_first = bcp->va_queue_first;
+ msgdesc.va_queue_last = bcp->va_queue_last;
+ msg = bcp->bau_msg_head;
+ while (msg->sw_ack_vector) {
+ count++;
+ msgdesc.msg_slot = msg - msgdesc.va_queue_first;
+ msgdesc.sw_ack_slot = ffs(msg->sw_ack_vector) - 1;
+ msgdesc.msg = msg;
+ uv_bau_process_message(&msgdesc, bcp);
+ msg++;
+ if (msg > msgdesc.va_queue_last)
+ msg = msgdesc.va_queue_first;
+ bcp->bau_msg_head = msg;
+ }
+ stat->d_time += (get_cycles() - time_start);
+ if (!count)
+ stat->d_nomsg++;
+ else if (count > 1)
+ stat->d_multmsg++;
+ ack_APIC_irq();
+}
+
+/*
+ * uv_enable_timeouts
+ *
+ * Each target uvhub (i.e. a uvhub that has no cpu's) needs to have
+ * shootdown message timeouts enabled. The timeout does not cause
+ * an interrupt, but causes an error message to be returned to
+ * the sender.
+ */
+static void uv_enable_timeouts(void)
+{
+ int uvhub;
+ int nuvhubs;
+ int pnode;
+ unsigned long mmr_image;
+
+ nuvhubs = uv_num_possible_blades();
+
+ for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
+ if (!uv_blade_nr_possible_cpus(uvhub))
+ continue;
+
+ pnode = uv_blade_to_pnode(uvhub);
+ mmr_image =
+ uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL);
+ /*
+ * Set the timeout period and then lock it in, in three
+ * steps; captures and locks in the period.
+ *
+ * To program the period, the SOFT_ACK_MODE must be off.
+ */
+ mmr_image &= ~((unsigned long)1 <<
+ UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT);
+ uv_write_global_mmr64
+ (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
+ /*
+ * Set the 4-bit period.
+ */
+ mmr_image &= ~((unsigned long)0xf <<
+ UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT);
+ mmr_image |= (UV_INTD_SOFT_ACK_TIMEOUT_PERIOD <<
+ UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT);
+ uv_write_global_mmr64
+ (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
+ /*
+ * Subsequent reversals of the timebase bit (3) cause an
+ * immediate timeout of one or all INTD resources as
+ * indicated in bits 2:0 (7 causes all of them to timeout).
+ */
+ mmr_image |= ((unsigned long)1 <<
+ UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT);
+ uv_write_global_mmr64
+ (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
+ }
+}
+
+static void *uv_ptc_seq_start(struct seq_file *file, loff_t *offset)
+{
+ if (*offset < num_possible_cpus())
+ return offset;
+ return NULL;
+}
+
+static void *uv_ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
+{
+ (*offset)++;
+ if (*offset < num_possible_cpus())
+ return offset;
+ return NULL;
+}
+
+static void uv_ptc_seq_stop(struct seq_file *file, void *data)
+{
+}
+
+static inline unsigned long long
+microsec_2_cycles(unsigned long microsec)
+{
+ unsigned long ns;
+ unsigned long long cyc;
+
+ ns = microsec * 1000;
+ cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
+ return cyc;
+}
+
+/*
+ * Display the statistics thru /proc.
+ * 'data' points to the cpu number
+ */
+static int uv_ptc_seq_show(struct seq_file *file, void *data)
+{
+ struct ptc_stats *stat;
+ int cpu;
+
+ cpu = *(loff_t *)data;
+
+ if (!cpu) {
+ seq_printf(file,
+ "# cpu sent stime self locals remotes ncpus localhub ");
+ seq_printf(file,
+ "remotehub numuvhubs numuvhubs16 numuvhubs8 ");
+ seq_printf(file,
+ "numuvhubs4 numuvhubs2 numuvhubs1 dto ");
+ seq_printf(file,
+ "retries rok resetp resett giveup sto bz throt ");
+ seq_printf(file,
+ "sw_ack recv rtime all ");
+ seq_printf(file,
+ "one mult none retry canc nocan reset rcan ");
+ seq_printf(file,
+ "disable enable\n");
+ }
+ if (cpu < num_possible_cpus() && cpu_online(cpu)) {
+ stat = &per_cpu(ptcstats, cpu);
+ /* source side statistics */
+ seq_printf(file,
+ "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
+ cpu, stat->s_requestor, cycles_2_us(stat->s_time),
+ stat->s_ntargself, stat->s_ntarglocals,
+ stat->s_ntargremotes, stat->s_ntargcpu,
+ stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
+ stat->s_ntarguvhub, stat->s_ntarguvhub16);
+ seq_printf(file, "%ld %ld %ld %ld %ld ",
+ stat->s_ntarguvhub8, stat->s_ntarguvhub4,
+ stat->s_ntarguvhub2, stat->s_ntarguvhub1,
+ stat->s_dtimeout);
+ seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
+ stat->s_retry_messages, stat->s_retriesok,
+ stat->s_resets_plug, stat->s_resets_timeout,
+ stat->s_giveup, stat->s_stimeout,
+ stat->s_busy, stat->s_throttles);
+
+ /* destination side statistics */
+ seq_printf(file,
+ "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
+ uv_read_global_mmr64(uv_cpu_to_pnode(cpu),
+ UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
+ stat->d_requestee, cycles_2_us(stat->d_time),
+ stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
+ stat->d_nomsg, stat->d_retries, stat->d_canceled,
+ stat->d_nocanceled, stat->d_resets,
+ stat->d_rcanceled);
+ seq_printf(file, "%ld %ld\n",
+ stat->s_bau_disabled, stat->s_bau_reenabled);
+ }
+
+ return 0;
+}
+
+/*
+ * Display the tunables thru debugfs
+ */
+static ssize_t tunables_read(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ char *buf;
+ int ret;
+
+ buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n",
+ "max_bau_concurrent plugged_delay plugsb4reset",
+ "timeoutsb4reset ipi_reset_limit complete_threshold",
+ "congested_response_us congested_reps congested_period",
+ max_bau_concurrent, plugged_delay, plugsb4reset,
+ timeoutsb4reset, ipi_reset_limit, complete_threshold,
+ congested_response_us, congested_reps, congested_period);
+
+ if (!buf)
+ return -ENOMEM;
+
+ ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
+ kfree(buf);
+ return ret;
+}
+
+/*
+ * -1: reset the statistics
+ * 0: display meaning of the statistics
+ */
+static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
+ size_t count, loff_t *data)
+{
+ int cpu;
+ long input_arg;
+ char optstr[64];
+ struct ptc_stats *stat;
+
+ if (count == 0 || count > sizeof(optstr))
+ return -EINVAL;
+ if (copy_from_user(optstr, user, count))
+ return -EFAULT;
+ optstr[count - 1] = '\0';
+ if (strict_strtol(optstr, 10, &input_arg) < 0) {
+ printk(KERN_DEBUG "%s is invalid\n", optstr);
+ return -EINVAL;
+ }
+
+ if (input_arg == 0) {
+ printk(KERN_DEBUG "# cpu: cpu number\n");
+ printk(KERN_DEBUG "Sender statistics:\n");
+ printk(KERN_DEBUG
+ "sent: number of shootdown messages sent\n");
+ printk(KERN_DEBUG
+ "stime: time spent sending messages\n");
+ printk(KERN_DEBUG
+ "numuvhubs: number of hubs targeted with shootdown\n");
+ printk(KERN_DEBUG
+ "numuvhubs16: number times 16 or more hubs targeted\n");
+ printk(KERN_DEBUG
+ "numuvhubs8: number times 8 or more hubs targeted\n");
+ printk(KERN_DEBUG
+ "numuvhubs4: number times 4 or more hubs targeted\n");
+ printk(KERN_DEBUG
+ "numuvhubs2: number times 2 or more hubs targeted\n");
+ printk(KERN_DEBUG
+ "numuvhubs1: number times 1 hub targeted\n");
+ printk(KERN_DEBUG
+ "numcpus: number of cpus targeted with shootdown\n");
+ printk(KERN_DEBUG
+ "dto: number of destination timeouts\n");
+ printk(KERN_DEBUG
+ "retries: destination timeout retries sent\n");
+ printk(KERN_DEBUG
+ "rok: : destination timeouts successfully retried\n");
+ printk(KERN_DEBUG
+ "resetp: ipi-style resource resets for plugs\n");
+ printk(KERN_DEBUG
+ "resett: ipi-style resource resets for timeouts\n");
+ printk(KERN_DEBUG
+ "giveup: fall-backs to ipi-style shootdowns\n");
+ printk(KERN_DEBUG
+ "sto: number of source timeouts\n");
+ printk(KERN_DEBUG
+ "bz: number of stay-busy's\n");
+ printk(KERN_DEBUG
+ "throt: number times spun in throttle\n");
+ printk(KERN_DEBUG "Destination side statistics:\n");
+ printk(KERN_DEBUG
+ "sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n");
+ printk(KERN_DEBUG
+ "recv: shootdown messages received\n");
+ printk(KERN_DEBUG
+ "rtime: time spent processing messages\n");
+ printk(KERN_DEBUG
+ "all: shootdown all-tlb messages\n");
+ printk(KERN_DEBUG
+ "one: shootdown one-tlb messages\n");
+ printk(KERN_DEBUG
+ "mult: interrupts that found multiple messages\n");
+ printk(KERN_DEBUG
+ "none: interrupts that found no messages\n");
+ printk(KERN_DEBUG
+ "retry: number of retry messages processed\n");
+ printk(KERN_DEBUG
+ "canc: number messages canceled by retries\n");
+ printk(KERN_DEBUG
+ "nocan: number retries that found nothing to cancel\n");
+ printk(KERN_DEBUG
+ "reset: number of ipi-style reset requests processed\n");
+ printk(KERN_DEBUG
+ "rcan: number messages canceled by reset requests\n");
+ printk(KERN_DEBUG
+ "disable: number times use of the BAU was disabled\n");
+ printk(KERN_DEBUG
+ "enable: number times use of the BAU was re-enabled\n");
+ } else if (input_arg == -1) {
+ for_each_present_cpu(cpu) {
+ stat = &per_cpu(ptcstats, cpu);
+ memset(stat, 0, sizeof(struct ptc_stats));
+ }
+ }
+
+ return count;
+}
+
+static int local_atoi(const char *name)
+{
+ int val = 0;
+
+ for (;; name++) {
+ switch (*name) {
+ case '0' ... '9':
+ val = 10*val+(*name-'0');
+ break;
+ default:
+ return val;
+ }
+ }
+}
+
+/*
+ * set the tunables
+ * 0 values reset them to defaults
+ */
+static ssize_t tunables_write(struct file *file, const char __user *user,
+ size_t count, loff_t *data)
+{
+ int cpu;
+ int cnt = 0;
+ int val;
+ char *p;
+ char *q;
+ char instr[64];
+ struct bau_control *bcp;
+
+ if (count == 0 || count > sizeof(instr)-1)
+ return -EINVAL;
+ if (copy_from_user(instr, user, count))
+ return -EFAULT;
+
+ instr[count] = '\0';
+ /* count the fields */
+ p = instr + strspn(instr, WHITESPACE);
+ q = p;
+ for (; *p; p = q + strspn(q, WHITESPACE)) {
+ q = p + strcspn(p, WHITESPACE);
+ cnt++;
+ if (q == p)
+ break;
+ }
+ if (cnt != 9) {
+ printk(KERN_INFO "bau tunable error: should be 9 numbers\n");
+ return -EINVAL;
+ }
+
+ p = instr + strspn(instr, WHITESPACE);
+ q = p;
+ for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) {
+ q = p + strcspn(p, WHITESPACE);
+ val = local_atoi(p);
+ switch (cnt) {
+ case 0:
+ if (val == 0) {
+ max_bau_concurrent = MAX_BAU_CONCURRENT;
+ max_bau_concurrent_constant =
+ MAX_BAU_CONCURRENT;
+ continue;
+ }
+ bcp = &per_cpu(bau_control, smp_processor_id());
+ if (val < 1 || val > bcp->cpus_in_uvhub) {
+ printk(KERN_DEBUG
+ "Error: BAU max concurrent %d is invalid\n",
+ val);
+ return -EINVAL;
+ }
+ max_bau_concurrent = val;
+ max_bau_concurrent_constant = val;
+ continue;
+ case 1:
+ if (val == 0)
+ plugged_delay = PLUGGED_DELAY;
+ else
+ plugged_delay = val;
+ continue;
+ case 2:
+ if (val == 0)
+ plugsb4reset = PLUGSB4RESET;
+ else
+ plugsb4reset = val;
+ continue;
+ case 3:
+ if (val == 0)
+ timeoutsb4reset = TIMEOUTSB4RESET;
+ else
+ timeoutsb4reset = val;
+ continue;
+ case 4:
+ if (val == 0)
+ ipi_reset_limit = IPI_RESET_LIMIT;
+ else
+ ipi_reset_limit = val;
+ continue;
+ case 5:
+ if (val == 0)
+ complete_threshold = COMPLETE_THRESHOLD;
+ else
+ complete_threshold = val;
+ continue;
+ case 6:
+ if (val == 0)
+ congested_response_us = CONGESTED_RESPONSE_US;
+ else
+ congested_response_us = val;
+ continue;
+ case 7:
+ if (val == 0)
+ congested_reps = CONGESTED_REPS;
+ else
+ congested_reps = val;
+ continue;
+ case 8:
+ if (val == 0)
+ congested_period = CONGESTED_PERIOD;
+ else
+ congested_period = val;
+ continue;
+ }
+ if (q == p)
+ break;
+ }
+ for_each_present_cpu(cpu) {
+ bcp = &per_cpu(bau_control, cpu);
+ bcp->max_bau_concurrent = max_bau_concurrent;
+ bcp->max_bau_concurrent_constant = max_bau_concurrent;
+ bcp->plugged_delay = plugged_delay;
+ bcp->plugsb4reset = plugsb4reset;
+ bcp->timeoutsb4reset = timeoutsb4reset;
+ bcp->ipi_reset_limit = ipi_reset_limit;
+ bcp->complete_threshold = complete_threshold;
+ bcp->congested_response_us = congested_response_us;
+ bcp->congested_reps = congested_reps;
+ bcp->congested_period = congested_period;
+ }
+ return count;
+}
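/*
 * Usage illustration (not part of this patch): the debugfs tunables file
 * takes nine whitespace-separated integers in the order printed by
 * tunables_read() above, e.g. a write of
 *
 *	2 10 100 3 5 5 1000 10 30
 *
 * would set max_bau_concurrent=2, plugged_delay=10 and so on, while a
 * write of "0 0 0 0 0 0 0 0 0" restores every tunable to its
 * compile-time default (the val == 0 branches above).
 */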
+
+static const struct seq_operations uv_ptc_seq_ops = {
+ .start = uv_ptc_seq_start,
+ .next = uv_ptc_seq_next,
+ .stop = uv_ptc_seq_stop,
+ .show = uv_ptc_seq_show
+};
+
+static int uv_ptc_proc_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &uv_ptc_seq_ops);
+}
+
+static int tunables_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static const struct file_operations proc_uv_ptc_operations = {
+ .open = uv_ptc_proc_open,
+ .read = seq_read,
+ .write = uv_ptc_proc_write,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static const struct file_operations tunables_fops = {
+ .open = tunables_open,
+ .read = tunables_read,
+ .write = tunables_write,
+ .llseek = default_llseek,
+};
+
+static int __init uv_ptc_init(void)
+{
+ struct proc_dir_entry *proc_uv_ptc;
+
+ if (!is_uv_system())
+ return 0;
+
+ proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL,
+ &proc_uv_ptc_operations);
+ if (!proc_uv_ptc) {
+ printk(KERN_ERR "unable to create %s proc entry\n",
+ UV_PTC_BASENAME);
+ return -EINVAL;
+ }
+
+ tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL);
+ if (!tunables_dir) {
+ printk(KERN_ERR "unable to create debugfs directory %s\n",
+ UV_BAU_TUNABLES_DIR);
+ return -EINVAL;
+ }
+ tunables_file = debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600,
+ tunables_dir, NULL, &tunables_fops);
+ if (!tunables_file) {
+ printk(KERN_ERR "unable to create debugfs file %s\n",
+ UV_BAU_TUNABLES_FILE);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * initialize the sending side's sending buffers
+ */
+static void
+uv_activation_descriptor_init(int node, int pnode)
+{
+ int i;
+ int cpu;
+ unsigned long pa;
+ unsigned long m;
+ unsigned long n;
+ struct bau_desc *bau_desc;
+ struct bau_desc *bd2;
+ struct bau_control *bcp;
+
+ /*
+ * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR)
+ * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per uvhub
+ */
+ bau_desc = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)*
+ UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
+ BUG_ON(!bau_desc);
+
+ pa = uv_gpa(bau_desc); /* need the real nasid*/
+ n = pa >> uv_nshift;
+ m = pa & uv_mmask;
+
+ uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE,
+ (n << UV_DESC_BASE_PNODE_SHIFT | m));
+
+ /*
+ * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each
+ * cpu even though we only use the first one; one descriptor can
+ * describe a broadcast to 256 uv hubs.
+ */
+ for (i = 0, bd2 = bau_desc; i < (UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR);
+ i++, bd2++) {
+ memset(bd2, 0, sizeof(struct bau_desc));
+ bd2->header.sw_ack_flag = 1;
+ /*
+ * base_dest_nodeid is the nasid (pnode<<1) of the first uvhub
+ * in the partition. The bit map will indicate uvhub numbers,
+ * which are 0-N in a partition. Pnodes are unique system-wide.
+ */
+ bd2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
+ bd2->header.dest_subnodeid = 0x10; /* the LB */
+ bd2->header.command = UV_NET_ENDPOINT_INTD;
+ bd2->header.int_both = 1;
+ /*
+ * all others need to be set to zero:
+ * fairness chaining multilevel count replied_to
+ */
+ }
+ for_each_present_cpu(cpu) {
+ if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
+ continue;
+ bcp = &per_cpu(bau_control, cpu);
+ bcp->descriptor_base = bau_desc;
+ }
+}
+
+/*
+ * initialize the destination side's receiving buffers
+ * entered for each uvhub in the partition
+ * - node is first node (kernel memory notion) on the uvhub
+ * - pnode is the uvhub's physical identifier
+ */
+static void
+uv_payload_queue_init(int node, int pnode)
+{
+ int pn;
+ int cpu;
+ char *cp;
+ unsigned long pa;
+ struct bau_payload_queue_entry *pqp;
+ struct bau_payload_queue_entry *pqp_malloc;
+ struct bau_control *bcp;
+
+ pqp = (struct bau_payload_queue_entry *) kmalloc_node(
+ (DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry),
+ GFP_KERNEL, node);
+ BUG_ON(!pqp);
+ pqp_malloc = pqp;
+
+ cp = (char *)pqp + 31;
+ pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);
+
+ for_each_present_cpu(cpu) {
+ if (pnode != uv_cpu_to_pnode(cpu))
+ continue;
+ /* for every cpu on this pnode: */
+ bcp = &per_cpu(bau_control, cpu);
+ bcp->va_queue_first = pqp;
+ bcp->bau_msg_head = pqp;
+ bcp->va_queue_last = pqp + (DEST_Q_SIZE - 1);
+ }
+ /*
+ * need the pnode of where the memory was really allocated
+ */
+ pa = uv_gpa(pqp);
+ pn = pa >> uv_nshift;
+ uv_write_global_mmr64(pnode,
+ UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
+ ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) |
+ uv_physnodeaddr(pqp));
+ uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
+ uv_physnodeaddr(pqp));
+ uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST,
+ (unsigned long)
+ uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1)));
+ /* in effect, all msg_type's are set to MSG_NOOP */
+ memset(pqp, 0, sizeof(struct bau_payload_queue_entry) * DEST_Q_SIZE);
+}
+
+/*
+ * Initialization of each UV hub's structures
+ */
+static void __init uv_init_uvhub(int uvhub, int vector)
+{
+ int node;
+ int pnode;
+ unsigned long apicid;
+
+ node = uvhub_to_first_node(uvhub);
+ pnode = uv_blade_to_pnode(uvhub);
+ uv_activation_descriptor_init(node, pnode);
+ uv_payload_queue_init(node, pnode);
+ /*
+ * the below initialization can't be in firmware because the
+ * messaging IRQ will be determined by the OS
+ */
+ apicid = uvhub_to_first_apicid(uvhub);
+ uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
+ ((apicid << 32) | vector));
+}
+
+/*
+ * We will set BAU_MISC_CONTROL with a timeout period.
+ * But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT.
+ * So the destination timeout period has to be calculated from them.
+ */
+static int
+calculate_destination_timeout(void)
+{
+ unsigned long mmr_image;
+ int mult1;
+ int mult2;
+ int index;
+ int base;
+ int ret;
+ unsigned long ts_ns;
+
+ mult1 = UV_INTD_SOFT_ACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
+ mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
+ index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
+ mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
+ mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
+ base = timeout_base_ns[index];
+ ts_ns = base * mult1 * mult2;
+ ret = ts_ns / 1000;
+ return ret;
+}
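/*
 * Worked example with hypothetical register contents (illustration only):
 * if the urgency7 field of UVH_AGING_PRESCALE_SEL selects index 3, the
 * base is timeout_base_ns[3] = 10240 ns; with mult1 masking to 2 and
 * mult2 reading back as 10, ts_ns = 10240 * 2 * 10 = 204800, so
 * timeout_us becomes 204.
 */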
+
+/*
+ * initialize the bau_control structure for each cpu
+ */
+static void __init uv_init_per_cpu(int nuvhubs)
+{
+ int i;
+ int cpu;
+ int pnode;
+ int uvhub;
+ int have_hmaster;
+ short socket = 0;
+ unsigned short socket_mask;
+ unsigned char *uvhub_mask;
+ struct bau_control *bcp;
+ struct uvhub_desc *bdp;
+ struct socket_desc *sdp;
+ struct bau_control *hmaster = NULL;
+ struct bau_control *smaster = NULL;
+ struct socket_desc {
+ short num_cpus;
+ short cpu_number[16];
+ };
+ struct uvhub_desc {
+ unsigned short socket_mask;
+ short num_cpus;
+ short uvhub;
+ short pnode;
+ struct socket_desc socket[2];
+ };
+ struct uvhub_desc *uvhub_descs;
+
+ timeout_us = calculate_destination_timeout();
+
+ uvhub_descs = (struct uvhub_desc *)
+ kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
+ memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
+ uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
+ for_each_present_cpu(cpu) {
+ bcp = &per_cpu(bau_control, cpu);
+ memset(bcp, 0, sizeof(struct bau_control));
+ pnode = uv_cpu_hub_info(cpu)->pnode;
+ uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
+ *(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8));
+ bdp = &uvhub_descs[uvhub];
+ bdp->num_cpus++;
+ bdp->uvhub = uvhub;
+ bdp->pnode = pnode;
+ /* kludge: 'assuming' one node per socket, and assuming that
+ disabling a socket just leaves a gap in node numbers */
+ socket = (cpu_to_node(cpu) & 1);
+ bdp->socket_mask |= (1 << socket);
+ sdp = &bdp->socket[socket];
+ sdp->cpu_number[sdp->num_cpus] = cpu;
+ sdp->num_cpus++;
+ }
+ for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
+ if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8))))
+ continue;
+ have_hmaster = 0;
+ bdp = &uvhub_descs[uvhub];
+ socket_mask = bdp->socket_mask;
+ socket = 0;
+ while (socket_mask) {
+ if (!(socket_mask & 1))
+ goto nextsocket;
+ sdp = &bdp->socket[socket];
+ for (i = 0; i < sdp->num_cpus; i++) {
+ cpu = sdp->cpu_number[i];
+ bcp = &per_cpu(bau_control, cpu);
+ bcp->cpu = cpu;
+ if (i == 0) {
+ smaster = bcp;
+ if (!have_hmaster) {
+ have_hmaster++;
+ hmaster = bcp;
+ }
+ }
+ bcp->cpus_in_uvhub = bdp->num_cpus;
+ bcp->cpus_in_socket = sdp->num_cpus;
+ bcp->socket_master = smaster;
+ bcp->uvhub = bdp->uvhub;
+ bcp->uvhub_master = hmaster;
+ bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->
+ blade_processor_id;
+ }
+nextsocket:
+ socket++;
+ socket_mask = (socket_mask >> 1);
+ }
+ }
+ kfree(uvhub_descs);
+ kfree(uvhub_mask);
+ for_each_present_cpu(cpu) {
+ bcp = &per_cpu(bau_control, cpu);
+ bcp->baudisabled = 0;
+ bcp->statp = &per_cpu(ptcstats, cpu);
+ /* time interval to catch a hardware stay-busy bug */
+ bcp->timeout_interval = microsec_2_cycles(2*timeout_us);
+ bcp->max_bau_concurrent = max_bau_concurrent;
+ bcp->max_bau_concurrent_constant = max_bau_concurrent;
+ bcp->plugged_delay = plugged_delay;
+ bcp->plugsb4reset = plugsb4reset;
+ bcp->timeoutsb4reset = timeoutsb4reset;
+ bcp->ipi_reset_limit = ipi_reset_limit;
+ bcp->complete_threshold = complete_threshold;
+ bcp->congested_response_us = congested_response_us;
+ bcp->congested_reps = congested_reps;
+ bcp->congested_period = congested_period;
+ }
+}
+
+/*
+ * Initialization of BAU-related structures
+ */
+static int __init uv_bau_init(void)
+{
+ int uvhub;
+ int pnode;
+ int nuvhubs;
+ int cur_cpu;
+ int vector;
+ unsigned long mmr;
+
+ if (!is_uv_system())
+ return 0;
+
+ if (nobau)
+ return 0;
+
+ for_each_possible_cpu(cur_cpu)
+ zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
+ GFP_KERNEL, cpu_to_node(cur_cpu));
+
+ uv_nshift = uv_hub_info->m_val;
+ uv_mmask = (1UL << uv_hub_info->m_val) - 1;
+ nuvhubs = uv_num_possible_blades();
+ spin_lock_init(&disable_lock);
+ congested_cycles = microsec_2_cycles(congested_response_us);
+
+ uv_init_per_cpu(nuvhubs);
+
+ uv_partition_base_pnode = 0x7fffffff;
+ for (uvhub = 0; uvhub < nuvhubs; uvhub++)
+ if (uv_blade_nr_possible_cpus(uvhub) &&
+ (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode))
+ uv_partition_base_pnode = uv_blade_to_pnode(uvhub);
+
+ vector = UV_BAU_MESSAGE;
+ for_each_possible_blade(uvhub)
+ if (uv_blade_nr_possible_cpus(uvhub))
+ uv_init_uvhub(uvhub, vector);
+
+ uv_enable_timeouts();
+ alloc_intr_gate(vector, uv_bau_message_intr1);
+
+ for_each_possible_blade(uvhub) {
+ if (uv_blade_nr_possible_cpus(uvhub)) {
+ pnode = uv_blade_to_pnode(uvhub);
+ /* INIT the bau */
+ uv_write_global_mmr64(pnode,
+ UVH_LB_BAU_SB_ACTIVATION_CONTROL,
+ ((unsigned long)1 << 63));
+ mmr = 1; /* should be 1 to broadcast to both sockets */
+ uv_write_global_mmr64(pnode, UVH_BAU_DATA_BROADCAST,
+ mmr);
+ }
+ }
+
+ return 0;
+}
+core_initcall(uv_bau_init);
+fs_initcall(uv_ptc_init);
diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
new file mode 100644
index 00000000000..7b24460917d
--- /dev/null
+++ b/arch/x86/platform/uv/uv_irq.c
@@ -0,0 +1,285 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SGI UV IRQ functions
+ *
+ * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+
+#include <asm/apic.h>
+#include <asm/uv/uv_irq.h>
+#include <asm/uv/uv_hub.h>
+
+/* MMR offset and pnode of hub sourcing interrupts for a given irq */
+struct uv_irq_2_mmr_pnode{
+ struct rb_node list;
+ unsigned long offset;
+ int pnode;
+ int irq;
+};
+
+static spinlock_t uv_irq_lock;
+static struct rb_root uv_irq_root;
+
+static int uv_set_irq_affinity(struct irq_data *, const struct cpumask *, bool);
+
+static void uv_noop(struct irq_data *data) { }
+
+static void uv_ack_apic(struct irq_data *data)
+{
+ ack_APIC_irq();
+}
+
+static struct irq_chip uv_irq_chip = {
+ .name = "UV-CORE",
+ .irq_mask = uv_noop,
+ .irq_unmask = uv_noop,
+ .irq_eoi = uv_ack_apic,
+ .irq_set_affinity = uv_set_irq_affinity,
+};
+
+/*
+ * Add offset and pnode information of the hub sourcing interrupts to the
+ * rb tree for a specific irq.
+ */
+static int uv_set_irq_2_mmr_info(int irq, unsigned long offset, unsigned blade)
+{
+ struct rb_node **link = &uv_irq_root.rb_node;
+ struct rb_node *parent = NULL;
+ struct uv_irq_2_mmr_pnode *n;
+ struct uv_irq_2_mmr_pnode *e;
+ unsigned long irqflags;
+
+ n = kmalloc_node(sizeof(struct uv_irq_2_mmr_pnode), GFP_KERNEL,
+ uv_blade_to_memory_nid(blade));
+ if (!n)
+ return -ENOMEM;
+
+ n->irq = irq;
+ n->offset = offset;
+ n->pnode = uv_blade_to_pnode(blade);
+ spin_lock_irqsave(&uv_irq_lock, irqflags);
+ /* Find the right place in the rbtree: */
+ while (*link) {
+ parent = *link;
+ e = rb_entry(parent, struct uv_irq_2_mmr_pnode, list);
+
+ if (unlikely(irq == e->irq)) {
+ /* irq entry exists */
+ e->pnode = uv_blade_to_pnode(blade);
+ e->offset = offset;
+ spin_unlock_irqrestore(&uv_irq_lock, irqflags);
+ kfree(n);
+ return 0;
+ }
+
+ if (irq < e->irq)
+ link = &(*link)->rb_left;
+ else
+ link = &(*link)->rb_right;
+ }
+
+ /* Insert the node into the rbtree. */
+ rb_link_node(&n->list, parent, link);
+ rb_insert_color(&n->list, &uv_irq_root);
+
+ spin_unlock_irqrestore(&uv_irq_lock, irqflags);
+ return 0;
+}
+
+/* Retrieve offset and pnode information from the rb tree for a specific irq */
+int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode)
+{
+ struct uv_irq_2_mmr_pnode *e;
+ struct rb_node *n;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&uv_irq_lock, irqflags);
+ n = uv_irq_root.rb_node;
+ while (n) {
+ e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);
+
+ if (e->irq == irq) {
+ *offset = e->offset;
+ *pnode = e->pnode;
+ spin_unlock_irqrestore(&uv_irq_lock, irqflags);
+ return 0;
+ }
+
+ if (irq < e->irq)
+ n = n->rb_left;
+ else
+ n = n->rb_right;
+ }
+ spin_unlock_irqrestore(&uv_irq_lock, irqflags);
+ return -1;
+}
+
+/*
+ * Re-target the irq to the specified CPU and enable the specified MMR located
+ * on the specified blade to allow the sending of MSIs to the specified CPU.
+ */
+static int
+arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
+ unsigned long mmr_offset, int limit)
+{
+ const struct cpumask *eligible_cpu = cpumask_of(cpu);
+ struct irq_cfg *cfg = get_irq_chip_data(irq);
+ unsigned long mmr_value;
+ struct uv_IO_APIC_route_entry *entry;
+ int mmr_pnode, err;
+
+ BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
+ sizeof(unsigned long));
+
+ err = assign_irq_vector(irq, cfg, eligible_cpu);
+ if (err != 0)
+ return err;
+
+ if (limit == UV_AFFINITY_CPU)
+ irq_set_status_flags(irq, IRQ_NO_BALANCING);
+ else
+ irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
+
+ set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
+ irq_name);
+
+ mmr_value = 0;
+ entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
+ entry->vector = cfg->vector;
+ entry->delivery_mode = apic->irq_delivery_mode;
+ entry->dest_mode = apic->irq_dest_mode;
+ entry->polarity = 0;
+ entry->trigger = 0;
+ entry->mask = 0;
+ entry->dest = apic->cpu_mask_to_apicid(eligible_cpu);
+
+ mmr_pnode = uv_blade_to_pnode(mmr_blade);
+ uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
+
+ if (cfg->move_in_progress)
+ send_cleanup_vector(cfg);
+
+ return irq;
+}
+
+/*
+ * Disable the specified MMR located on the specified blade so that MSIs are
+ * no longer allowed to be sent.
+ */
+static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset)
+{
+ unsigned long mmr_value;
+ struct uv_IO_APIC_route_entry *entry;
+
+ BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
+ sizeof(unsigned long));
+
+ mmr_value = 0;
+ entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
+ entry->mask = 1;
+
+ uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
+}
+
+static int
+uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
+ bool force)
+{
+ struct irq_cfg *cfg = data->chip_data;
+ unsigned int dest;
+ unsigned long mmr_value, mmr_offset;
+ struct uv_IO_APIC_route_entry *entry;
+ int mmr_pnode;
+
+ if (__ioapic_set_affinity(data, mask, &dest))
+ return -1;
+
+ mmr_value = 0;
+ entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
+
+ entry->vector = cfg->vector;
+ entry->delivery_mode = apic->irq_delivery_mode;
+ entry->dest_mode = apic->irq_dest_mode;
+ entry->polarity = 0;
+ entry->trigger = 0;
+ entry->mask = 0;
+ entry->dest = dest;
+
+ /* Get previously stored MMR and pnode of hub sourcing interrupts */
+ if (uv_irq_2_mmr_info(data->irq, &mmr_offset, &mmr_pnode))
+ return -1;
+
+ uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
+
+ if (cfg->move_in_progress)
+ send_cleanup_vector(cfg);
+
+ return 0;
+}
+
+/*
+ * Set up a mapping of an available irq and vector, and enable the specified
+ * MMR that defines the MSI that is to be sent to the specified CPU when an
+ * interrupt is raised.
+ */
+int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
+ unsigned long mmr_offset, int limit)
+{
+ int irq, ret;
+
+ irq = create_irq_nr(NR_IRQS_LEGACY, uv_blade_to_memory_nid(mmr_blade));
+
+ if (irq <= 0)
+ return -EBUSY;
+
+ ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset,
+ limit);
+ if (ret == irq)
+ uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade);
+ else
+ destroy_irq(irq);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(uv_setup_irq);
+
+/*
+ * Tear down a mapping of an irq and vector, and disable the specified MMR that
+ * defined the MSI that was to be sent to the specified CPU when an interrupt
+ * was raised.
+ *
+ * The MMR offset and pnode are looked up from the rb tree entry that was
+ * recorded by uv_setup_irq().
+ */
+void uv_teardown_irq(unsigned int irq)
+{
+ struct uv_irq_2_mmr_pnode *e;
+ struct rb_node *n;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&uv_irq_lock, irqflags);
+ n = uv_irq_root.rb_node;
+ while (n) {
+ e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);
+ if (e->irq == irq) {
+ arch_disable_uv_irq(e->pnode, e->offset);
+ rb_erase(n, &uv_irq_root);
+ kfree(e);
+ break;
+ }
+ if (irq < e->irq)
+ n = n->rb_left;
+ else
+ n = n->rb_right;
+ }
+ spin_unlock_irqrestore(&uv_irq_lock, irqflags);
+ destroy_irq(irq);
+}
+EXPORT_SYMBOL_GPL(uv_teardown_irq);
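+
+/*
+ * Illustrative usage (hypothetical caller, not part of this file): a driver
+ * would typically pair the two exported entry points, e.g.
+ *
+ *	irq = uv_setup_irq("my-uv-device", cpu, mmr_blade, mmr_offset, limit);
+ *	if (irq <= 0)
+ *		return irq;
+ *	...
+ *	uv_teardown_irq(irq);
+ *
+ * where the name, cpu, blade, MMR offset and limit are caller-specific.
+ */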
diff --git a/arch/x86/platform/uv/uv_sysfs.c b/arch/x86/platform/uv/uv_sysfs.c
new file mode 100644
index 00000000000..309c70fb775
--- /dev/null
+++ b/arch/x86/platform/uv/uv_sysfs.c
@@ -0,0 +1,76 @@
+/*
+ * This file supports the /sys/firmware/sgi_uv interfaces for SGI UV.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) Russ Anderson
+ */
+
+#include <linux/sysdev.h>
+#include <asm/uv/bios.h>
+#include <asm/uv/uv.h>
+
+struct kobject *sgi_uv_kobj;
+
+static ssize_t partition_id_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%ld\n", sn_partition_id);
+}
+
+static ssize_t coherence_id_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%ld\n", partition_coherence_id());
+}
+
+static struct kobj_attribute partition_id_attr =
+ __ATTR(partition_id, S_IRUGO, partition_id_show, NULL);
+
+static struct kobj_attribute coherence_id_attr =
+ __ATTR(coherence_id, S_IRUGO, coherence_id_show, NULL);
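+
+/*
+ * These attributes appear as the read-only files
+ * /sys/firmware/sgi_uv/partition_id and /sys/firmware/sgi_uv/coherence_id.
+ */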
+
+
+static int __init sgi_uv_sysfs_init(void)
+{
+ int ret;
+
+ if (!is_uv_system())
+ return -ENODEV;
+
+ if (!sgi_uv_kobj)
+ sgi_uv_kobj = kobject_create_and_add("sgi_uv", firmware_kobj);
+ if (!sgi_uv_kobj) {
+ printk(KERN_WARNING "kobject_create_and_add sgi_uv failed\n");
+ return -EINVAL;
+ }
+
+ ret = sysfs_create_file(sgi_uv_kobj, &partition_id_attr.attr);
+ if (ret) {
+ printk(KERN_WARNING "sysfs_create_file partition_id failed\n");
+ return ret;
+ }
+
+ ret = sysfs_create_file(sgi_uv_kobj, &coherence_id_attr.attr);
+ if (ret) {
+ printk(KERN_WARNING "sysfs_create_file coherence_id failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+device_initcall(sgi_uv_sysfs_init);
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
new file mode 100644
index 00000000000..56e421bc379
--- /dev/null
+++ b/arch/x86/platform/uv/uv_time.c
@@ -0,0 +1,423 @@
+/*
+ * SGI RTC clock/timer routines.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Copyright (c) 2009 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) Dimitri Sivanich
+ */
+#include <linux/clockchips.h>
+#include <linux/slab.h>
+
+#include <asm/uv/uv_mmrs.h>
+#include <asm/uv/uv_hub.h>
+#include <asm/uv/bios.h>
+#include <asm/uv/uv.h>
+#include <asm/apic.h>
+#include <asm/cpu.h>
+
+#define RTC_NAME "sgi_rtc"
+
+static cycle_t uv_read_rtc(struct clocksource *cs);
+static int uv_rtc_next_event(unsigned long, struct clock_event_device *);
+static void uv_rtc_timer_setup(enum clock_event_mode,
+ struct clock_event_device *);
+
+static struct clocksource clocksource_uv = {
+ .name = RTC_NAME,
+ .rating = 400,
+ .read = uv_read_rtc,
+ .mask = (cycle_t)UVH_RTC_REAL_TIME_CLOCK_MASK,
+ .shift = 10,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static struct clock_event_device clock_event_device_uv = {
+ .name = RTC_NAME,
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .shift = 20,
+ .rating = 400,
+ .irq = -1,
+ .set_next_event = uv_rtc_next_event,
+ .set_mode = uv_rtc_timer_setup,
+ .event_handler = NULL,
+};
+
+static DEFINE_PER_CPU(struct clock_event_device, cpu_ced);
+
+/* There is one of these allocated per blade (indexed by blade id) */
+struct uv_rtc_timer_head {
+ spinlock_t lock;
+ /* next cpu waiting for timer, local node relative: */
+ int next_cpu;
+ /* number of cpus on this node: */
+ int ncpus;
+ struct {
+ int lcpu; /* systemwide logical cpu number */
+ u64 expires; /* next timer expiration for this cpu */
+ } cpu[1];
+};
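+/*
+ * The cpu[] array above is over-allocated at runtime so that it holds one
+ * entry per possible cpu on the blade (see uv_rtc_allocate_timers()).
+ */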
+
+/*
+ * Access to uv_rtc_timer_head via blade id.
+ */
+static struct uv_rtc_timer_head **blade_info __read_mostly;
+
+static int uv_rtc_evt_enable;
+
+/*
+ * Hardware interface routines
+ */
+
+/* Send an RTC IPI to a cpu, possibly on another node */
+static void uv_rtc_send_IPI(int cpu)
+{
+ unsigned long apicid, val;
+ int pnode;
+
+ apicid = cpu_physical_id(cpu);
+ pnode = uv_apicid_to_pnode(apicid);
+ val = (1UL << UVH_IPI_INT_SEND_SHFT) |
+ (apicid << UVH_IPI_INT_APIC_ID_SHFT) |
+ (X86_PLATFORM_IPI_VECTOR << UVH_IPI_INT_VECTOR_SHFT);
+
+ uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
+}
+
+/* Check for an RTC interrupt pending */
+static int uv_intr_pending(int pnode)
+{
+ return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED0) &
+ UVH_EVENT_OCCURRED0_RTC1_MASK;
+}
+
+/* Set up the interrupt; return non-zero if the timer expired before setup completed. */
+static int uv_setup_intr(int cpu, u64 expires)
+{
+ u64 val;
+ int pnode = uv_cpu_to_pnode(cpu);
+
+ uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
+ UVH_RTC1_INT_CONFIG_M_MASK);
+ uv_write_global_mmr64(pnode, UVH_INT_CMPB, -1L);
+
+ uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED0_ALIAS,
+ UVH_EVENT_OCCURRED0_RTC1_MASK);
+
+ val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) |
+ ((u64)cpu_physical_id(cpu) << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT);
+
+ /* Set configuration */
+ uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG, val);
+ /* Initialize comparator value */
+ uv_write_global_mmr64(pnode, UVH_INT_CMPB, expires);
+
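+ /*
+ * If the comparator time has already passed, report early expiration
+ * unless the interrupt is already pending.
+ */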
+ if (uv_read_rtc(NULL) <= expires)
+ return 0;
+
+ return !uv_intr_pending(pnode);
+}
+
+/*
+ * Per-cpu timer tracking routines
+ */
+
+static __init void uv_rtc_deallocate_timers(void)
+{
+ int bid;
+
+ for_each_possible_blade(bid) {
+ kfree(blade_info[bid]);
+ }
+ kfree(blade_info);
+}
+
+/* Allocate per-node list of cpu timer expiration times. */
+static __init int uv_rtc_allocate_timers(void)
+{
+ int cpu;
+
+ blade_info = kcalloc(uv_possible_blades, sizeof(void *), GFP_KERNEL);
+ if (!blade_info)
+ return -ENOMEM;
+
+ for_each_present_cpu(cpu) {
+ int nid = cpu_to_node(cpu);
+ int bid = uv_cpu_to_blade_id(cpu);
+ int bcpu = uv_cpu_hub_info(cpu)->blade_processor_id;
+ struct uv_rtc_timer_head *head = blade_info[bid];
+
+ if (!head) {
+ head = kmalloc_node(sizeof(struct uv_rtc_timer_head) +
+ (uv_blade_nr_possible_cpus(bid) *
+ 2 * sizeof(u64)),
+ GFP_KERNEL, nid);
+ if (!head) {
+ uv_rtc_deallocate_timers();
+ return -ENOMEM;
+ }
+ spin_lock_init(&head->lock);
+ head->ncpus = uv_blade_nr_possible_cpus(bid);
+ head->next_cpu = -1;
+ blade_info[bid] = head;
+ }
+
+ head->cpu[bcpu].lcpu = cpu;
+ head->cpu[bcpu].expires = ULLONG_MAX;
+ }
+
+ return 0;
+}
+
+/* Find and set the next expiring timer. */
+static void uv_rtc_find_next_timer(struct uv_rtc_timer_head *head, int pnode)
+{
+ u64 lowest = ULLONG_MAX;
+ int c, bcpu = -1;
+
+ head->next_cpu = -1;
+ for (c = 0; c < head->ncpus; c++) {
+ u64 exp = head->cpu[c].expires;
+ if (exp < lowest) {
+ bcpu = c;
+ lowest = exp;
+ }
+ }
+ if (bcpu >= 0) {
+ head->next_cpu = bcpu;
+ c = head->cpu[bcpu].lcpu;
+ if (uv_setup_intr(c, lowest))
+ /* If we didn't set it up in time, trigger */
+ uv_rtc_send_IPI(c);
+ } else {
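+ /* No timer pending on this blade: mask the RTC interrupt */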
+ uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
+ UVH_RTC1_INT_CONFIG_M_MASK);
+ }
+}
+
+/*
+ * Set expiration time for current cpu.
+ *
+ * Returns -ETIME if we missed the expiration time.
+ */
+static int uv_rtc_set_timer(int cpu, u64 expires)
+{
+ int pnode = uv_cpu_to_pnode(cpu);
+ int bid = uv_cpu_to_blade_id(cpu);
+ struct uv_rtc_timer_head *head = blade_info[bid];
+ int bcpu = uv_cpu_hub_info(cpu)->blade_processor_id;
+ u64 *t = &head->cpu[bcpu].expires;
+ unsigned long flags;
+ int next_cpu;
+
+ spin_lock_irqsave(&head->lock, flags);
+
+ next_cpu = head->next_cpu;
+ *t = expires;
+
+ /* Will this one be next to go off? */
+ if (next_cpu < 0 || bcpu == next_cpu ||
+ expires < head->cpu[next_cpu].expires) {
+ head->next_cpu = bcpu;
+ if (uv_setup_intr(cpu, expires)) {
+ *t = ULLONG_MAX;
+ uv_rtc_find_next_timer(head, pnode);
+ spin_unlock_irqrestore(&head->lock, flags);
+ return -ETIME;
+ }
+ }
+
+ spin_unlock_irqrestore(&head->lock, flags);
+ return 0;
+}
+
+/*
+ * Unset expiration time for current cpu.
+ *
+ * Returns 1 if this timer was pending.
+ */
+static int uv_rtc_unset_timer(int cpu, int force)
+{
+ int pnode = uv_cpu_to_pnode(cpu);
+ int bid = uv_cpu_to_blade_id(cpu);
+ struct uv_rtc_timer_head *head = blade_info[bid];
+ int bcpu = uv_cpu_hub_info(cpu)->blade_processor_id;
+ u64 *t = &head->cpu[bcpu].expires;
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(&head->lock, flags);
+
+ if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)
+ rc = 1;
+
+ if (rc) {
+ *t = ULLONG_MAX;
+ /* Was the hardware setup for this timer? */
+ if (head->next_cpu == bcpu)
+ uv_rtc_find_next_timer(head, pnode);
+ }
+
+ spin_unlock_irqrestore(&head->lock, flags);
+
+ return rc;
+}
+
+
+/*
+ * Kernel interface routines.
+ */
+
+/*
+ * Read the RTC.
+ *
+ * Starting with HUB rev 2.0, the UV RTC register is replicated across all
+ * cachelines of its own page. This allows faster simultaneous reads
+ * from a given socket.
+ */
+static cycle_t uv_read_rtc(struct clocksource *cs)
+{
+ unsigned long offset;
+
+ if (uv_get_min_hub_revision_id() == 1)
+ offset = 0;
+ else
+ offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;
+
+ return (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
+}
+
+/*
+ * Program the next event, relative to now
+ */
+static int uv_rtc_next_event(unsigned long delta,
+ struct clock_event_device *ced)
+{
+ int ced_cpu = cpumask_first(ced->cpumask);
+
+ return uv_rtc_set_timer(ced_cpu, delta + uv_read_rtc(NULL));
+}
+
+/*
+ * Set up or shut down the RTC timer for the requested clockevent mode
+ */
+static void uv_rtc_timer_setup(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ int ced_cpu = cpumask_first(evt->cpumask);
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ case CLOCK_EVT_MODE_ONESHOT:
+ case CLOCK_EVT_MODE_RESUME:
+ /* Nothing to do here yet */
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ uv_rtc_unset_timer(ced_cpu, 1);
+ break;
+ }
+}
+
+static void uv_rtc_interrupt(void)
+{
+ int cpu = smp_processor_id();
+ struct clock_event_device *ced = &per_cpu(cpu_ced, cpu);
+
+ if (!ced || !ced->event_handler)
+ return;
+
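+ /* Only forward the event if this cpu's timer had actually expired */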
+ if (uv_rtc_unset_timer(cpu, 0) != 1)
+ return;
+
+ ced->event_handler(ced);
+}
+
+static int __init uv_enable_evt_rtc(char *str)
+{
+ uv_rtc_evt_enable = 1;
+
+ return 1;
+}
+__setup("uvrtcevt", uv_enable_evt_rtc);
+
+static __init void uv_rtc_register_clockevents(struct work_struct *dummy)
+{
+ struct clock_event_device *ced = &__get_cpu_var(cpu_ced);
+
+ *ced = clock_event_device_uv;
+ ced->cpumask = cpumask_of(smp_processor_id());
+ clockevents_register_device(ced);
+}
+
+static __init int uv_rtc_setup_clock(void)
+{
+ int rc;
+
+ if (!is_uv_system())
+ return -ENODEV;
+
+ clocksource_uv.mult = clocksource_hz2mult(sn_rtc_cycles_per_second,
+ clocksource_uv.shift);
+
+ /* If single blade, prefer tsc */
+ if (uv_num_possible_blades() == 1)
+ clocksource_uv.rating = 250;
+
+ rc = clocksource_register(&clocksource_uv);
+ if (rc)
+ printk(KERN_INFO "UV RTC clocksource failed rc %d\n", rc);
+ else
+ printk(KERN_INFO "UV RTC clocksource registered freq %lu MHz\n",
+ sn_rtc_cycles_per_second/(unsigned long)1E6);
+
+ if (rc || !uv_rtc_evt_enable || x86_platform_ipi_callback)
+ return rc;
+
+ /* Setup and register clockevents */
+ rc = uv_rtc_allocate_timers();
+ if (rc)
+ goto error;
+
+ x86_platform_ipi_callback = uv_rtc_interrupt;
+
+ clock_event_device_uv.mult = div_sc(sn_rtc_cycles_per_second,
+ NSEC_PER_SEC, clock_event_device_uv.shift);
+
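+ /*
+ * The smallest programmable delta is one RTC cycle, the largest is
+ * the full RTC mask, both converted to nanoseconds here.
+ */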
+ clock_event_device_uv.min_delta_ns = NSEC_PER_SEC /
+ sn_rtc_cycles_per_second;
+
+ clock_event_device_uv.max_delta_ns = clocksource_uv.mask *
+ (NSEC_PER_SEC / sn_rtc_cycles_per_second);
+
+ rc = schedule_on_each_cpu(uv_rtc_register_clockevents);
+ if (rc) {
+ x86_platform_ipi_callback = NULL;
+ uv_rtc_deallocate_timers();
+ goto error;
+ }
+
+ printk(KERN_INFO "UV RTC clockevents registered\n");
+
+ return 0;
+
+error:
+ clocksource_unregister(&clocksource_uv);
+ printk(KERN_INFO "UV RTC clockevents failed rc %d\n", rc);
+
+ return rc;
+}
+arch_initcall(uv_rtc_setup_clock);