Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--  arch/ia64/kernel/crash.c                    2
-rw-r--r--  arch/ia64/kernel/efi.c                     32
-rw-r--r--  arch/ia64/kernel/fsys.S                    28
-rw-r--r--  arch/ia64/kernel/iosapic.c                 28
-rw-r--r--  arch/ia64/kernel/irq_ia64.c               142
-rw-r--r--  arch/ia64/kernel/kprobes.c                  7
-rw-r--r--  arch/ia64/kernel/mca.c                     73
-rw-r--r--  arch/ia64/kernel/module.c                  22
-rw-r--r--  arch/ia64/kernel/msi_ia64.c                 3
-rw-r--r--  arch/ia64/kernel/patch.c                    8
-rw-r--r--  arch/ia64/kernel/perfmon.c                  4
-rw-r--r--  arch/ia64/kernel/perfmon_default_smpl.c     4
-rw-r--r--  arch/ia64/kernel/ptrace.c                 327
-rw-r--r--  arch/ia64/kernel/sal.c                      7
-rw-r--r--  arch/ia64/kernel/setup.c                    8
-rw-r--r--  arch/ia64/kernel/signal.c                  36
-rw-r--r--  arch/ia64/kernel/unaligned.c                6
-rw-r--r--  arch/ia64/kernel/unwind.c                 102
18 files changed, 384 insertions, 455 deletions
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index f1cf2df97a2..fbe742ad2fd 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -155,7 +155,7 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
if (val == DIE_INIT_MONARCH_LEAVE)
ia64_mca_printk(KERN_NOTICE
"%s: kdump not configured\n",
- __FUNCTION__);
+ __func__);
return NOTIFY_DONE;
}
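
The one-line change above recurs throughout this patch: __FUNCTION__ is the older GCC-specific spelling, while __func__ is the predefined identifier standardized by C99. A minimal standalone illustration of the replacement:

#include <stdio.h>

/* __func__ expands to the name of the enclosing function, exactly as
 * the GCC-only __FUNCTION__ did, but it is standard C99. */
static void report_status(void)
{
	printf("%s: ready\n", __func__);	/* "report_status: ready" */
}

int main(void)
{
	report_status();
	return 0;
}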
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 919070a9aed..728d7247a1a 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -379,8 +379,8 @@ efi_get_pal_addr (void)
* a dedicated ITR for the PAL code.
*/
if ((vaddr & mask) == (KERNEL_START & mask)) {
- printk(KERN_INFO "%s: no need to install ITR for "
- "PAL code\n", __FUNCTION__);
+ printk(KERN_INFO "%s: no need to install ITR for PAL code\n",
+ __func__);
continue;
}
@@ -399,7 +399,7 @@ efi_get_pal_addr (void)
return __va(md->phys_addr);
}
printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n",
- __FUNCTION__);
+ __func__);
return NULL;
}
@@ -543,12 +543,30 @@ efi_init (void)
for (i = 0, p = efi_map_start; p < efi_map_end;
++i, p += efi_desc_size)
{
+ const char *unit;
+ unsigned long size;
+
md = p;
- printk("mem%02u: type=%u, attr=0x%lx, "
- "range=[0x%016lx-0x%016lx) (%luMB)\n",
+ size = md->num_pages << EFI_PAGE_SHIFT;
+
+ if ((size >> 40) > 0) {
+ size >>= 40;
+ unit = "TB";
+ } else if ((size >> 30) > 0) {
+ size >>= 30;
+ unit = "GB";
+ } else if ((size >> 20) > 0) {
+ size >>= 20;
+ unit = "MB";
+ } else {
+ size >>= 10;
+ unit = "KB";
+ }
+
+ printk("mem%02d: type=%2u, attr=0x%016lx, "
+ "range=[0x%016lx-0x%016lx) (%4lu%s)\n",
i, md->type, md->attribute, md->phys_addr,
- md->phys_addr + efi_md_size(md),
- md->num_pages >> (20 - EFI_PAGE_SHIFT));
+ md->phys_addr + efi_md_size(md), size, unit);
}
}
#endif
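
The efi.c hunk above replaces the fixed MB readout with a scaler that shifts the byte count down by 40/30/20/10 bits and picks the first unit yielding a non-zero value. A self-contained sketch of the same logic (assumes a 64-bit unsigned long, as on ia64):

#include <stdio.h>

static void print_human_size(unsigned long bytes)
{
	const char *unit;
	unsigned long size = bytes;

	if (size >> 40) {		/* terabytes */
		size >>= 40; unit = "TB";
	} else if (size >> 30) {	/* gigabytes */
		size >>= 30; unit = "GB";
	} else if (size >> 20) {	/* megabytes */
		size >>= 20; unit = "MB";
	} else {			/* fall back to kilobytes */
		size >>= 10; unit = "KB";
	}
	printf("%4lu%s\n", size, unit);
}

int main(void)
{
	print_human_size(512UL << 20);	/* " 512MB" */
	print_human_size(3UL << 30);	/* "   3GB" */
	return 0;
}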
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index c932d86e2d8..357b7e2adc6 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -210,27 +210,25 @@ ENTRY(fsys_gettimeofday)
// Note that instructions are optimized for McKinley. McKinley can
// process two bundles simultaneously and therefore we continuously
// try to feed the CPU two bundles and then a stop.
- //
- // Additional note that code has changed a lot. Optimization is TBD.
- // Comments begin with "?" are maybe outdated.
- tnat.nz p6,p0 = r31 // ? branch deferred to fit later bundle
- mov pr = r30,0xc000 // Set predicates according to function
+
add r2 = TI_FLAGS+IA64_TASK_SIZE,r16
+ tnat.nz p6,p0 = r31 // guard against Nat argument
+(p6) br.cond.spnt.few .fail_einval
movl r20 = fsyscall_gtod_data // load fsyscall gettimeofday data address
;;
+ ld4 r2 = [r2] // process work pending flags
movl r29 = itc_jitter_data // itc_jitter
add r22 = IA64_GTOD_WALL_TIME_OFFSET,r20 // wall_time
- ld4 r2 = [r2] // process work pending flags
- ;;
-(p15) add r22 = IA64_GTOD_MONO_TIME_OFFSET,r20 // monotonic_time
add r21 = IA64_CLKSRC_MMIO_OFFSET,r20
- add r19 = IA64_ITC_LASTCYCLE_OFFSET,r29
+ mov pr = r30,0xc000 // Set predicates according to function
+ ;;
and r2 = TIF_ALLWORK_MASK,r2
-(p6) br.cond.spnt.few .fail_einval // ? deferred branch
+ add r19 = IA64_ITC_LASTCYCLE_OFFSET,r29
+(p15) add r22 = IA64_GTOD_MONO_TIME_OFFSET,r20 // monotonic_time
;;
- add r26 = IA64_CLKSRC_CYCLE_LAST_OFFSET,r20 // clksrc_cycle_last
+ add r26 = IA64_CLKSRC_CYCLE_LAST_OFFSET,r20 // clksrc_cycle_last
cmp.ne p6, p0 = 0, r2 // Fallback if work is scheduled
-(p6) br.cond.spnt.many fsys_fallback_syscall
+(p6) br.cond.spnt.many fsys_fallback_syscall
;;
// Begin critical section
.time_redo:
@@ -258,7 +256,6 @@ ENTRY(fsys_gettimeofday)
(p8) mov r2 = ar.itc // CPU_TIMER. 36 clocks latency!!!
(p9) ld8 r2 = [r30] // MMIO_TIMER. Could also have latency issues..
(p13) ld8 r25 = [r19] // get itc_lastcycle value
- ;; // ? could be removed by moving the last add upward
ld8 r9 = [r22],IA64_TIMESPEC_TV_NSEC_OFFSET // tv_sec
;;
ld8 r8 = [r22],-IA64_TIMESPEC_TV_NSEC_OFFSET // tv_nsec
@@ -285,13 +282,12 @@ ENTRY(fsys_gettimeofday)
EX(.fail_efault, probe.w.fault r31, 3)
xmpy.l f8 = f8,f7 // nsec_per_cyc*(counter-last_counter)
;;
- // ? simulate tbit.nz.or p7,p0 = r28,0
getf.sig r2 = f8
mf
;;
ld4 r10 = [r20] // gtod_lock.sequence
shr.u r2 = r2,r23 // shift by factor
- ;; // ? overloaded 3 bundles!
+ ;;
add r8 = r8,r2 // Add xtime.nsecs
cmp4.ne p7,p0 = r28,r10
(p7) br.cond.dpnt.few .time_redo // sequence number changed, redo
@@ -319,9 +315,9 @@ EX(.fail_efault, probe.w.fault r31, 3)
EX(.fail_efault, probe.w.fault r23, 3) // This also costs 5 cycles
(p14) xmpy.hu f8 = f8, f7 // xmpy has 5 cycles latency so use it
;;
- mov r8 = r0
(p14) getf.sig r2 = f8
;;
+ mov r8 = r0
(p14) shr.u r21 = r2, 4
;;
EX(.fail_efault, st8 [r31] = r9)
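
The rescheduled fsys_gettimeofday code above is a sequence-lock read loop: gtod_lock.sequence is sampled before the time data is read, and .time_redo restarts the reads if the sequence changed underneath them. A rough C model of that loop, with hypothetical stand-in names and the cycle-counter scaling omitted:

#include <stdio.h>

static volatile unsigned int gtod_seq;		/* gtod_lock.sequence */
static volatile long wall_sec, wall_nsec;	/* wall_time */

static void read_wall_time(long *sec, long *nsec)
{
	unsigned int seq;

	do {
		seq = gtod_seq;		/* sample the sequence counter */
		*sec = wall_sec;	/* read the protected data */
		*nsec = wall_nsec;
	} while (seq != gtod_seq);	/* a writer ran concurrently: redo */
}

int main(void)
{
	long sec, nsec;

	read_wall_time(&sec, &nsec);
	printf("%ld.%09ld\n", sec, nsec);
	return 0;
}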
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 398e2fd1cd2..082c31dcfd9 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -345,7 +345,7 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
if (cpus_empty(mask))
return;
- if (reassign_irq_vector(irq, first_cpu(mask)))
+ if (irq_prepare_move(irq, first_cpu(mask)))
return;
dest = cpu_physical_id(first_cpu(mask));
@@ -397,6 +397,7 @@ iosapic_end_level_irq (unsigned int irq)
struct iosapic_rte_info *rte;
int do_unmask_irq = 0;
+ irq_complete_move(irq);
if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
do_unmask_irq = 1;
mask_irq(irq);
@@ -450,6 +451,7 @@ iosapic_ack_edge_irq (unsigned int irq)
{
irq_desc_t *idesc = irq_desc + irq;
+ irq_complete_move(irq);
move_native_irq(irq);
/*
* Once we have recorded IRQ_PENDING already, we can mask the
@@ -532,7 +534,7 @@ iosapic_reassign_vector (int irq)
if (iosapic_intr_info[irq].count) {
new_irq = create_irq();
if (new_irq < 0)
- panic("%s: out of interrupt vectors!\n", __FUNCTION__);
+ panic("%s: out of interrupt vectors!\n", __func__);
printk(KERN_INFO "Reassigning vector %d to %d\n",
irq_to_vector(irq), irq_to_vector(new_irq));
memcpy(&iosapic_intr_info[new_irq], &iosapic_intr_info[irq],
@@ -597,7 +599,7 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
index = find_iosapic(gsi);
if (index < 0) {
printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n",
- __FUNCTION__, gsi);
+ __func__, gsi);
return -ENODEV;
}
@@ -606,7 +608,7 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
rte = iosapic_alloc_rte();
if (!rte) {
printk(KERN_WARNING "%s: cannot allocate memory\n",
- __FUNCTION__);
+ __func__);
return -ENOMEM;
}
@@ -623,7 +625,7 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
(info->trigger != trigger || info->polarity != polarity)){
printk (KERN_WARNING
"%s: cannot override the interrupt\n",
- __FUNCTION__);
+ __func__);
return -EINVAL;
}
rte->refcnt++;
@@ -645,7 +647,7 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
if (idesc->chip != &no_irq_type)
printk(KERN_WARNING
"%s: changing vector %d from %s to %s\n",
- __FUNCTION__, irq_to_vector(irq),
+ __func__, irq_to_vector(irq),
idesc->chip->name, irq_type->name);
idesc->chip = irq_type;
}
@@ -918,7 +920,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
case ACPI_INTERRUPT_INIT:
irq = create_irq();
if (irq < 0)
- panic("%s: out of interrupt vectors!\n", __FUNCTION__);
+ panic("%s: out of interrupt vectors!\n", __func__);
vector = irq_to_vector(irq);
delivery = IOSAPIC_INIT;
break;
@@ -929,7 +931,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
mask = 1;
break;
default:
- printk(KERN_ERR "%s: invalid int type 0x%x\n", __FUNCTION__,
+ printk(KERN_ERR "%s: invalid int type 0x%x\n", __func__,
int_type);
return -1;
}
@@ -994,7 +996,7 @@ iosapic_system_init (int system_pcat_compat)
*/
printk(KERN_INFO
"%s: Disabling PC-AT compatible 8259 interrupts\n",
- __FUNCTION__);
+ __func__);
outb(0xff, 0xA1);
outb(0xff, 0x21);
}
@@ -1009,7 +1011,7 @@ iosapic_alloc (void)
if (!iosapic_lists[index].addr)
return index;
- printk(KERN_WARNING "%s: failed to allocate iosapic\n", __FUNCTION__);
+ printk(KERN_WARNING "%s: failed to allocate iosapic\n", __func__);
return -1;
}
@@ -1107,14 +1109,14 @@ iosapic_remove (unsigned int gsi_base)
index = find_iosapic(gsi_base);
if (index < 0) {
printk(KERN_WARNING "%s: No IOSAPIC for GSI base %u\n",
- __FUNCTION__, gsi_base);
+ __func__, gsi_base);
goto out;
}
if (iosapic_lists[index].rtes_inuse) {
err = -EBUSY;
printk(KERN_WARNING "%s: IOSAPIC for GSI base %u is busy\n",
- __FUNCTION__, gsi_base);
+ __func__, gsi_base);
goto out;
}
@@ -1135,7 +1137,7 @@ map_iosapic_to_node(unsigned int gsi_base, int node)
index = find_iosapic(gsi_base);
if (index < 0) {
printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n",
- __FUNCTION__, gsi_base);
+ __func__, gsi_base);
return;
}
iosapic_lists[index].node = node;
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 0b52f19ed04..d8be23fbe6b 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -260,6 +260,8 @@ void __setup_vector_irq(int cpu)
}
#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
+#define IA64_IRQ_MOVE_VECTOR IA64_DEF_FIRST_DEVICE_VECTOR
+
static enum vector_domain_type {
VECTOR_DOMAIN_NONE,
VECTOR_DOMAIN_PERCPU
@@ -272,6 +274,101 @@ static cpumask_t vector_allocation_domain(int cpu)
return CPU_MASK_ALL;
}
+static int __irq_prepare_move(int irq, int cpu)
+{
+ struct irq_cfg *cfg = &irq_cfg[irq];
+ int vector;
+ cpumask_t domain;
+
+ if (cfg->move_in_progress || cfg->move_cleanup_count)
+ return -EBUSY;
+ if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
+ return -EINVAL;
+ if (cpu_isset(cpu, cfg->domain))
+ return 0;
+ domain = vector_allocation_domain(cpu);
+ vector = find_unassigned_vector(domain);
+ if (vector < 0)
+ return -ENOSPC;
+ cfg->move_in_progress = 1;
+ cfg->old_domain = cfg->domain;
+ cfg->vector = IRQ_VECTOR_UNASSIGNED;
+ cfg->domain = CPU_MASK_NONE;
+ BUG_ON(__bind_irq_vector(irq, vector, domain));
+ return 0;
+}
+
+int irq_prepare_move(int irq, int cpu)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&vector_lock, flags);
+ ret = __irq_prepare_move(irq, cpu);
+ spin_unlock_irqrestore(&vector_lock, flags);
+ return ret;
+}
+
+void irq_complete_move(unsigned irq)
+{
+ struct irq_cfg *cfg = &irq_cfg[irq];
+ cpumask_t cleanup_mask;
+ int i;
+
+ if (likely(!cfg->move_in_progress))
+ return;
+
+ if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain)))
+ return;
+
+ cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
+ cfg->move_cleanup_count = cpus_weight(cleanup_mask);
+ for_each_cpu_mask(i, cleanup_mask)
+ platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
+ cfg->move_in_progress = 0;
+}
+
+static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
+{
+ int me = smp_processor_id();
+ ia64_vector vector;
+ unsigned long flags;
+
+ for (vector = IA64_FIRST_DEVICE_VECTOR;
+ vector < IA64_LAST_DEVICE_VECTOR; vector++) {
+ int irq;
+ struct irq_desc *desc;
+ struct irq_cfg *cfg;
+ irq = __get_cpu_var(vector_irq)[vector];
+ if (irq < 0)
+ continue;
+
+ desc = irq_desc + irq;
+ cfg = irq_cfg + irq;
+ spin_lock(&desc->lock);
+ if (!cfg->move_cleanup_count)
+ goto unlock;
+
+ if (!cpu_isset(me, cfg->old_domain))
+ goto unlock;
+
+ spin_lock_irqsave(&vector_lock, flags);
+ __get_cpu_var(vector_irq)[vector] = -1;
+ cpu_clear(me, vector_table[vector]);
+ spin_unlock_irqrestore(&vector_lock, flags);
+ cfg->move_cleanup_count--;
+ unlock:
+ spin_unlock(&desc->lock);
+ }
+ return IRQ_HANDLED;
+}
+
+static struct irqaction irq_move_irqaction = {
+ .handler = smp_irq_move_cleanup_interrupt,
+ .flags = IRQF_DISABLED,
+ .name = "irq_move"
+};
+
static int __init parse_vector_domain(char *arg)
{
if (!arg)
@@ -303,36 +400,6 @@ void destroy_and_reserve_irq(unsigned int irq)
spin_unlock_irqrestore(&vector_lock, flags);
}
-static int __reassign_irq_vector(int irq, int cpu)
-{
- struct irq_cfg *cfg = &irq_cfg[irq];
- int vector;
- cpumask_t domain;
-
- if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
- return -EINVAL;
- if (cpu_isset(cpu, cfg->domain))
- return 0;
- domain = vector_allocation_domain(cpu);
- vector = find_unassigned_vector(domain);
- if (vector < 0)
- return -ENOSPC;
- __clear_irq_vector(irq);
- BUG_ON(__bind_irq_vector(irq, vector, domain));
- return 0;
-}
-
-int reassign_irq_vector(int irq, int cpu)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&vector_lock, flags);
- ret = __reassign_irq_vector(irq, cpu);
- spin_unlock_irqrestore(&vector_lock, flags);
- return ret;
-}
-
/*
* Dynamic irq allocate and deallocation for MSI
*/
@@ -440,7 +507,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
if (unlikely(irq < 0)) {
printk(KERN_ERR "%s: Unexpected interrupt "
"vector %d on CPU %d is not mapped "
- "to any IRQ!\n", __FUNCTION__, vector,
+ "to any IRQ!\n", __func__, vector,
smp_processor_id());
} else
generic_handle_irq(irq);
@@ -505,7 +572,7 @@ void ia64_process_pending_intr(void)
if (unlikely(irq < 0)) {
printk(KERN_ERR "%s: Unexpected interrupt "
"vector %d on CPU %d not being mapped "
- "to any IRQ!!\n", __FUNCTION__, vector,
+ "to any IRQ!!\n", __func__, vector,
smp_processor_id());
} else {
vectors_in_migration[irq]=0;
@@ -578,6 +645,13 @@ init_IRQ (void)
register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction);
+#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)
+ if (vector_domain_type != VECTOR_DOMAIN_NONE) {
+ BUG_ON(IA64_FIRST_DEVICE_VECTOR != IA64_IRQ_MOVE_VECTOR);
+ IA64_FIRST_DEVICE_VECTOR++;
+ register_percpu_irq(IA64_IRQ_MOVE_VECTOR, &irq_move_irqaction);
+ }
+#endif
#endif
#ifdef CONFIG_PERFMON
pfm_init_percpu();
@@ -592,11 +666,7 @@ ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
unsigned long ipi_data;
unsigned long phys_cpu_id;
-#ifdef CONFIG_SMP
phys_cpu_id = cpu_physical_id(cpu);
-#else
- phys_cpu_id = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
-#endif
/*
* cpu number is in 8bit ID and 8bit EID
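
The core of this patch is the two-phase vector migration added above: irq_prepare_move() reserves a vector in the target CPU's domain while the old binding stays live, and irq_complete_move(), invoked from the first interrupt taken after the move, IPIs the CPUs of the old domain so smp_irq_move_cleanup_interrupt() can release the stale vector. A toy sketch of how the two hooks pair up in their callers (stub bodies and hypothetical caller names, not the kernel code):

#include <stdio.h>

static int irq_prepare_move(int irq, int cpu)
{
	printf("irq %d: reserve vector in cpu %d's domain\n", irq, cpu);
	return 0;			/* -EBUSY if a move is already in flight */
}

static void irq_complete_move(int irq)
{
	printf("irq %d: IPI the old domain for vector cleanup\n", irq);
}

static void example_set_affinity(int irq, int cpu)
{
	if (irq_prepare_move(irq, cpu))
		return;			/* previous move not finished yet */
	/* ...reprogram the RTE / MSI message toward the new CPU... */
}

static void example_ack_irq(int irq)
{
	irq_complete_move(irq);		/* runs on the new CPU */
	/* ...acknowledge the interrupt as before... */
}

int main(void)
{
	example_set_affinity(10, 2);
	example_ack_irq(10);
	return 0;
}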
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index b618487cdc8..8d9a446a0d1 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -838,7 +838,7 @@ out:
return 1;
}
-int __kprobes kprobes_fault_handler(struct pt_regs *regs, int trapnr)
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -1001,6 +1001,11 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
return 1;
}
+/* ia64 does not need this */
+void __kprobes jprobe_return(void)
+{
+}
+
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 6e17aed5313..6c18221dba3 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -413,8 +413,8 @@ ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe)
IA64_LOG_INDEX_INC(sal_info_type);
IA64_LOG_UNLOCK(sal_info_type);
if (irq_safe) {
- IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. "
- "Record length = %ld\n", __FUNCTION__, sal_info_type, total_len);
+ IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. Record length = %ld\n",
+ __func__, sal_info_type, total_len);
}
*buffer = (u8 *) log_buffer;
return total_len;
@@ -518,7 +518,7 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg)
static DEFINE_SPINLOCK(cpe_history_lock);
IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
- __FUNCTION__, cpe_irq, smp_processor_id());
+ __func__, cpe_irq, smp_processor_id());
/* SAL spec states this should run w/ interrupts enabled */
local_irq_enable();
@@ -594,7 +594,7 @@ ia64_mca_register_cpev (int cpev)
}
IA64_MCA_DEBUG("%s: corrected platform error "
- "vector %#x registered\n", __FUNCTION__, cpev);
+ "vector %#x registered\n", __func__, cpev);
}
#endif /* CONFIG_ACPI */
@@ -621,12 +621,11 @@ ia64_mca_cmc_vector_setup (void)
cmcv.cmcv_vector = IA64_CMC_VECTOR;
ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
- IA64_MCA_DEBUG("%s: CPU %d corrected "
- "machine check vector %#x registered.\n",
- __FUNCTION__, smp_processor_id(), IA64_CMC_VECTOR);
+ IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x registered.\n",
+ __func__, smp_processor_id(), IA64_CMC_VECTOR);
IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
- __FUNCTION__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
+ __func__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
}
/*
@@ -651,9 +650,8 @@ ia64_mca_cmc_vector_disable (void *dummy)
cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
- IA64_MCA_DEBUG("%s: CPU %d corrected "
- "machine check vector %#x disabled.\n",
- __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
+ IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x disabled.\n",
+ __func__, smp_processor_id(), cmcv.cmcv_vector);
}
/*
@@ -678,9 +676,8 @@ ia64_mca_cmc_vector_enable (void *dummy)
cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
- IA64_MCA_DEBUG("%s: CPU %d corrected "
- "machine check vector %#x enabled.\n",
- __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
+ IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x enabled.\n",
+ __func__, smp_processor_id(), cmcv.cmcv_vector);
}
/*
@@ -767,7 +764,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
local_irq_save(flags);
if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", get_irq_regs(),
(long)&nd, 0, 0) == NOTIFY_STOP)
- ia64_mca_spin(__FUNCTION__);
+ ia64_mca_spin(__func__);
ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
/* Register with the SAL monarch that the slave has
@@ -777,7 +774,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", get_irq_regs(),
(long)&nd, 0, 0) == NOTIFY_STOP)
- ia64_mca_spin(__FUNCTION__);
+ ia64_mca_spin(__func__);
/* Wait for the monarch cpu to exit. */
while (monarch_cpu != -1)
@@ -785,7 +782,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", get_irq_regs(),
(long)&nd, 0, 0) == NOTIFY_STOP)
- ia64_mca_spin(__FUNCTION__);
+ ia64_mca_spin(__func__);
ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
/* Enable all interrupts */
@@ -1230,7 +1227,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0)
== NOTIFY_STOP)
- ia64_mca_spin(__FUNCTION__);
+ ia64_mca_spin(__func__);
ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
if (sos->monarch) {
@@ -1246,7 +1243,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
ia64_mca_wakeup_all();
if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, (long)&nd, 0, 0)
== NOTIFY_STOP)
- ia64_mca_spin(__FUNCTION__);
+ ia64_mca_spin(__func__);
} else {
while (cpu_isset(cpu, mca_cpu))
cpu_relax(); /* spin until monarch wakes us */
@@ -1276,7 +1273,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
}
if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
== NOTIFY_STOP)
- ia64_mca_spin(__FUNCTION__);
+ ia64_mca_spin(__func__);
if (atomic_dec_return(&mca_count) > 0) {
@@ -1328,7 +1325,7 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg)
static DEFINE_SPINLOCK(cmc_history_lock);
IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
- __FUNCTION__, cmc_irq, smp_processor_id());
+ __func__, cmc_irq, smp_processor_id());
/* SAL spec states this should run w/ interrupts enabled */
local_irq_enable();
@@ -1614,7 +1611,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
*/
if (!sos->monarch && atomic_add_return(1, &slaves) == num_online_cpus()) {
mprintk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n",
- __FUNCTION__, cpu);
+ __func__, cpu);
atomic_dec(&slaves);
sos->monarch = 1;
}
@@ -1626,7 +1623,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
*/
if (sos->monarch && atomic_add_return(1, &monarchs) > 1) {
mprintk(KERN_WARNING "%s: Demoting cpu %d to slave.\n",
- __FUNCTION__, cpu);
+ __func__, cpu);
atomic_dec(&monarchs);
sos->monarch = 0;
}
@@ -1637,15 +1634,15 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
cpu_relax(); /* spin until monarch enters */
if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, (long)&nd, 0, 0)
== NOTIFY_STOP)
- ia64_mca_spin(__FUNCTION__);
+ ia64_mca_spin(__func__);
if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, (long)&nd, 0, 0)
== NOTIFY_STOP)
- ia64_mca_spin(__FUNCTION__);
+ ia64_mca_spin(__func__);
while (monarch_cpu != -1)
cpu_relax(); /* spin until monarch leaves */
if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, (long)&nd, 0, 0)
== NOTIFY_STOP)
- ia64_mca_spin(__FUNCTION__);
+ ia64_mca_spin(__func__);
mprintk("Slave on cpu %d returning to normal service.\n", cpu);
set_curr_task(cpu, previous_current);
ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
@@ -1656,7 +1653,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
monarch_cpu = cpu;
if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, (long)&nd, 0, 0)
== NOTIFY_STOP)
- ia64_mca_spin(__FUNCTION__);
+ ia64_mca_spin(__func__);
/*
* Wait for a bit. On some machines (e.g., HP's zx2000 and zx6000, INIT can be
@@ -1673,10 +1670,10 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
*/
if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, (long)&nd, 0, 0)
== NOTIFY_STOP)
- ia64_mca_spin(__FUNCTION__);
+ ia64_mca_spin(__func__);
if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, (long)&nd, 0, 0)
== NOTIFY_STOP)
- ia64_mca_spin(__FUNCTION__);
+ ia64_mca_spin(__func__);
mprintk("\nINIT dump complete. Monarch on cpu %d returning to normal service.\n", cpu);
atomic_dec(&monarchs);
set_curr_task(cpu, previous_current);
@@ -1884,7 +1881,7 @@ ia64_mca_init(void)
.priority = 0/* we need to notified last */
};
- IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);
+ IA64_MCA_DEBUG("%s: begin\n", __func__);
/* Clear the Rendez checkin flag for all cpus */
for(i = 0 ; i < NR_CPUS; i++)
@@ -1928,7 +1925,7 @@ ia64_mca_init(void)
return;
}
- IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __FUNCTION__);
+ IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __func__);
ia64_mc_info.imi_mca_handler = ia64_tpa(mca_hldlr_ptr->fp);
/*
@@ -1949,7 +1946,7 @@ ia64_mca_init(void)
return;
}
- IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __FUNCTION__,
+ IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __func__,
ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp));
/*
@@ -1961,7 +1958,7 @@ ia64_mca_init(void)
ia64_mc_info.imi_slave_init_handler = ia64_tpa(init_hldlr_ptr_slave->fp);
ia64_mc_info.imi_slave_init_handler_size = 0;
- IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__,
+ IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __func__,
ia64_mc_info.imi_monarch_init_handler);
/* Register the os init handler with SAL */
@@ -1982,7 +1979,7 @@ ia64_mca_init(void)
return;
}
- IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __FUNCTION__);
+ IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __func__);
/*
* Configure the CMCI/P vector and handler. Interrupts for CMC are
@@ -2042,7 +2039,7 @@ ia64_mca_late_init(void)
cmc_polling_enabled = 0;
schedule_work(&cmc_enable_work);
- IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __FUNCTION__);
+ IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __func__);
#ifdef CONFIG_ACPI
/* Setup the CPEI/P vector and handler */
@@ -2065,17 +2062,17 @@ ia64_mca_late_init(void)
ia64_cpe_irq = irq;
ia64_mca_register_cpev(cpe_vector);
IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n",
- __FUNCTION__);
+ __func__);
return 0;
}
printk(KERN_ERR "%s: Failed to find irq for CPE "
"interrupt handler, vector %d\n",
- __FUNCTION__, cpe_vector);
+ __func__, cpe_vector);
}
/* If platform doesn't support CPEI, get the timer going. */
if (cpe_poll_enabled) {
ia64_mca_cpe_poll(0UL);
- IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __FUNCTION__);
+ IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __func__);
}
}
#endif
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index e58f4367cf1..e83e2ea3b3e 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -493,7 +493,7 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
mod->arch.opd->sh_addralign = 8;
mod->arch.opd->sh_size = fdescs * sizeof(struct fdesc);
DEBUGP("%s: core.plt=%lx, init.plt=%lx, got=%lx, fdesc=%lx\n",
- __FUNCTION__, mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size,
+ __func__, mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size,
mod->arch.got->sh_size, mod->arch.opd->sh_size);
return 0;
}
@@ -585,7 +585,7 @@ get_plt (struct module *mod, const struct insn *insn, uint64_t value, int *okp)
#if ARCH_MODULE_DEBUG
if (plt_target(plt) != target_ip) {
printk("%s: mistargeted PLT: wanted %lx, got %lx\n",
- __FUNCTION__, target_ip, plt_target(plt));
+ __func__, target_ip, plt_target(plt));
*okp = 0;
return 0;
}
@@ -703,7 +703,7 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
if (r_type == R_IA64_PCREL21BI) {
if (!is_internal(mod, val)) {
printk(KERN_ERR "%s: %s reloc against non-local symbol (%lx)\n",
- __FUNCTION__, reloc_name[r_type], val);
+ __func__, reloc_name[r_type], val);
return -ENOEXEC;
}
format = RF_INSN21B;
@@ -737,7 +737,7 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
case R_IA64_LDXMOV:
if (gp_addressable(mod, val)) {
/* turn "ld8" into "mov": */
- DEBUGP("%s: patching ld8 at %p to mov\n", __FUNCTION__, location);
+ DEBUGP("%s: patching ld8 at %p to mov\n", __func__, location);
ia64_patch((u64) location, 0x1fff80fe000UL, 0x10000000000UL);
}
return 0;
@@ -771,7 +771,7 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
if (!ok)
return -ENOEXEC;
- DEBUGP("%s: [%p]<-%016lx = %s(%lx)\n", __FUNCTION__, location, val,
+ DEBUGP("%s: [%p]<-%016lx = %s(%lx)\n", __func__, location, val,
reloc_name[r_type] ? reloc_name[r_type] : "?", sym->st_value + addend);
switch (format) {
@@ -807,7 +807,7 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
Elf64_Shdr *target_sec;
int ret;
- DEBUGP("%s: applying section %u (%u relocs) to %u\n", __FUNCTION__,
+ DEBUGP("%s: applying section %u (%u relocs) to %u\n", __func__,
relsec, n, sechdrs[relsec].sh_info);
target_sec = sechdrs + sechdrs[relsec].sh_info;
@@ -835,7 +835,7 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
gp = mod->core_size / 2;
gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
mod->arch.gp = gp;
- DEBUGP("%s: placing gp at 0x%lx\n", __FUNCTION__, gp);
+ DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
}
for (i = 0; i < n; i++) {
@@ -903,7 +903,7 @@ register_unwind_table (struct module *mod)
init = start + num_core;
}
- DEBUGP("%s: name=%s, gp=%lx, num_init=%lu, num_core=%lu\n", __FUNCTION__,
+ DEBUGP("%s: name=%s, gp=%lx, num_init=%lu, num_core=%lu\n", __func__,
mod->name, mod->arch.gp, num_init, num_core);
/*
@@ -912,13 +912,13 @@ register_unwind_table (struct module *mod)
if (num_core > 0) {
mod->arch.core_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
core, core + num_core);
- DEBUGP("%s: core: handle=%p [%p-%p)\n", __FUNCTION__,
+ DEBUGP("%s: core: handle=%p [%p-%p)\n", __func__,
mod->arch.core_unw_table, core, core + num_core);
}
if (num_init > 0) {
mod->arch.init_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
init, init + num_init);
- DEBUGP("%s: init: handle=%p [%p-%p)\n", __FUNCTION__,
+ DEBUGP("%s: init: handle=%p [%p-%p)\n", __func__,
mod->arch.init_unw_table, init, init + num_init);
}
}
@@ -926,7 +926,7 @@ register_unwind_table (struct module *mod)
int
module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod)
{
- DEBUGP("%s: init: entry=%p\n", __FUNCTION__, mod->init);
+ DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
if (mod->arch.unwind)
register_unwind_table(mod);
return 0;
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index e86d0295979..60c6ef67ebb 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -57,7 +57,7 @@ static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
if (!cpu_online(cpu))
return;
- if (reassign_irq_vector(irq, cpu))
+ if (irq_prepare_move(irq, cpu))
return;
read_msi_msg(irq, &msg);
@@ -119,6 +119,7 @@ void ia64_teardown_msi_irq(unsigned int irq)
static void ia64_ack_msi_irq(unsigned int irq)
{
+ irq_complete_move(irq);
move_native_irq(irq);
ia64_eoi();
}
diff --git a/arch/ia64/kernel/patch.c b/arch/ia64/kernel/patch.c
index 2cb9425e042..e0dca8743db 100644
--- a/arch/ia64/kernel/patch.c
+++ b/arch/ia64/kernel/patch.c
@@ -135,10 +135,10 @@ ia64_patch_mckinley_e9 (unsigned long start, unsigned long end)
while (offp < (s32 *) end) {
wp = (u64 *) ia64_imva((char *) offp + *offp);
- wp[0] = 0x0000000100000000UL; /* nop.m 0; nop.i 0; nop.i 0 */
- wp[1] = 0x0004000000000200UL;
- wp[2] = 0x0000000100000011UL; /* nop.m 0; nop.i 0; br.ret.sptk.many b6 */
- wp[3] = 0x0084006880000200UL;
+ wp[0] = 0x0000000100000011UL; /* nop.m 0; nop.i 0; br.ret.sptk.many b6 */
+ wp[1] = 0x0084006880000200UL;
+ wp[2] = 0x0000000100000000UL; /* nop.m 0; nop.i 0; nop.i 0 */
+ wp[3] = 0x0004000000000200UL;
ia64_fc(wp); ia64_fc(wp + 2);
++offp;
}
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index f6b99719f10..a2aabfdc80d 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -227,12 +227,12 @@
#ifdef PFM_DEBUGGING
#define DPRINT(a) \
do { \
- if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
+ if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
} while (0)
#define DPRINT_ovfl(a) \
do { \
- if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
+ if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
} while (0)
#endif
diff --git a/arch/ia64/kernel/perfmon_default_smpl.c b/arch/ia64/kernel/perfmon_default_smpl.c
index a7af1cb419f..5f637bbfccc 100644
--- a/arch/ia64/kernel/perfmon_default_smpl.c
+++ b/arch/ia64/kernel/perfmon_default_smpl.c
@@ -24,12 +24,12 @@ MODULE_LICENSE("GPL");
#ifdef DEFAULT_DEBUG
#define DPRINT(a) \
do { \
- if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \
+ if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d ", __func__, __LINE__, smp_processor_id()); printk a; } \
} while (0)
#define DPRINT_ovfl(a) \
do { \
- if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \
+ if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d ", __func__, __LINE__, smp_processor_id()); printk a; } \
} while (0)
#else
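
Both perfmon files share the DPRINT pattern touched above: a sysctl-guarded debug flag plus an automatic "function.line" prefix built from __func__ and __LINE__, followed by a second printk that expands the extra-parenthesized argument list. A minimal userspace rendition, with a plain global flag standing in for the sysctl:

#include <stdio.h>

static int debug = 1;			/* stand-in for pfm_sysctl.debug */

#define DPRINT(a)						\
	do {							\
		if (debug > 0) {				\
			printf("%s.%d: ", __func__, __LINE__);	\
			printf a;				\
		}						\
	} while (0)

int main(void)
{
	/* note the doubled parentheses at the call site */
	DPRINT(("buffer size=%d\n", 4096));
	return 0;
}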
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 331d6768b5d..ab784ec4319 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -698,52 +698,6 @@ thread_matches (struct task_struct *thread, unsigned long addr)
}
/*
- * GDB apparently wants to be able to read the register-backing store
- * of any thread when attached to a given process. If we are peeking
- * or poking an address that happens to reside in the kernel-backing
- * store of another thread, we need to attach to that thread, because
- * otherwise we end up accessing stale data.
- *
- * task_list_lock must be read-locked before calling this routine!
- */
-static struct task_struct *
-find_thread_for_addr (struct task_struct *child, unsigned long addr)
-{
- struct task_struct *p;
- struct mm_struct *mm;
- struct list_head *this, *next;
- int mm_users;
-
- if (!(mm = get_task_mm(child)))
- return child;
-
- /* -1 because of our get_task_mm(): */
- mm_users = atomic_read(&mm->mm_users) - 1;
- if (mm_users <= 1)
- goto out; /* not multi-threaded */
-
- /*
- * Traverse the current process' children list. Every task that
- * one attaches to becomes a child. And it is only attached children
- * of the debugger that are of interest (ptrace_check_attach checks
- * for this).
- */
- list_for_each_safe(this, next, &current->children) {
- p = list_entry(this, struct task_struct, sibling);
- if (p->tgid != child->tgid)
- continue;
- if (thread_matches(p, addr)) {
- child = p;
- goto out;
- }
- }
-
- out:
- mmput(mm);
- return child;
-}
-
-/*
* Write f32-f127 back to task->thread.fph if it has been modified.
*/
inline void
@@ -826,14 +780,14 @@ convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt,
if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
< IA64_PT_REGS_SIZE) {
dprintk("ptrace.%s: ran off the top of the kernel "
- "stack\n", __FUNCTION__);
+ "stack\n", __func__);
return;
}
if (unw_get_pr (&prev_info, &pr) < 0) {
unw_get_rp(&prev_info, &ip);
dprintk("ptrace.%s: failed to read "
"predicate register (ip=0x%lx)\n",
- __FUNCTION__, ip);
+ __func__, ip);
return;
}
if (unw_is_intr_frame(&info)
@@ -908,7 +862,7 @@ static int
access_uarea (struct task_struct *child, unsigned long addr,
unsigned long *data, int write_access)
{
- unsigned long *ptr, regnum, urbs_end, rnat_addr, cfm;
+ unsigned long *ptr, regnum, urbs_end, cfm;
struct switch_stack *sw;
struct pt_regs *pt;
# define pt_reg_addr(pt, reg) ((void *) \
@@ -1011,14 +965,9 @@ access_uarea (struct task_struct *child, unsigned long addr,
* the kernel was entered.
*
* Furthermore, when changing the contents of
- * PT_AR_BSP (or PT_CFM) we MUST copy any
- * users-level stacked registers that are
- * stored on the kernel stack back to
- * user-space because otherwise, we might end
- * up clobbering kernel stacked registers.
- * Also, if this happens while the task is
- * blocked in a system call, which convert the
- * state such that the non-system-call exit
+ * PT_AR_BSP (or PT_CFM) while the task is
+ * blocked in a system call, convert the state
+ * so that the non-system-call exit
* path is used. This ensures that the proper
* state will be picked up when resuming
* execution. However, it *also* means that
@@ -1035,10 +984,6 @@ access_uarea (struct task_struct *child, unsigned long addr,
urbs_end = ia64_get_user_rbs_end(child, pt, &cfm);
if (write_access) {
if (*data != urbs_end) {
- if (ia64_sync_user_rbs(child, sw,
- pt->ar_bspstore,
- urbs_end) < 0)
- return -1;
if (in_syscall(pt))
convert_to_non_syscall(child,
pt,
@@ -1058,10 +1003,6 @@ access_uarea (struct task_struct *child, unsigned long addr,
urbs_end = ia64_get_user_rbs_end(child, pt, &cfm);
if (write_access) {
if (((cfm ^ *data) & PFM_MASK) != 0) {
- if (ia64_sync_user_rbs(child, sw,
- pt->ar_bspstore,
- urbs_end) < 0)
- return -1;
if (in_syscall(pt))
convert_to_non_syscall(child,
pt,
@@ -1093,16 +1034,8 @@ access_uarea (struct task_struct *child, unsigned long addr,
return 0;
case PT_AR_RNAT:
- urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
- rnat_addr = (long) ia64_rse_rnat_addr((long *)
- urbs_end);
- if (write_access)
- return ia64_poke(child, sw, urbs_end,
- rnat_addr, *data);
- else
- return ia64_peek(child, sw, urbs_end,
- rnat_addr, data);
-
+ ptr = pt_reg_addr(pt, ar_rnat);
+ break;
case PT_R1:
ptr = pt_reg_addr(pt, r1);
break;
@@ -1521,215 +1454,97 @@ ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
return ret;
}
-/*
- * Called by kernel/ptrace.c when detaching..
- *
- * Make sure the single step bit is not set.
- */
void
-ptrace_disable (struct task_struct *child)
+user_enable_single_step (struct task_struct *child)
{
struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
- /* make sure the single step/taken-branch trap bits are not set: */
- clear_tsk_thread_flag(child, TIF_SINGLESTEP);
- child_psr->ss = 0;
- child_psr->tb = 0;
+ set_tsk_thread_flag(child, TIF_SINGLESTEP);
+ child_psr->ss = 1;
}
-asmlinkage long
-sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
+void
+user_enable_block_step (struct task_struct *child)
{
- struct pt_regs *pt;
- unsigned long urbs_end, peek_or_poke;
- struct task_struct *child;
- struct switch_stack *sw;
- long ret;
- struct unw_frame_info info;
+ struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
- lock_kernel();
- ret = -EPERM;
- if (request == PTRACE_TRACEME) {
- ret = ptrace_traceme();
- goto out;
- }
+ set_tsk_thread_flag(child, TIF_SINGLESTEP);
+ child_psr->tb = 1;
+}
- peek_or_poke = (request == PTRACE_PEEKTEXT
- || request == PTRACE_PEEKDATA
- || request == PTRACE_POKETEXT
- || request == PTRACE_POKEDATA);
- ret = -ESRCH;
- read_lock(&tasklist_lock);
- {
- child = find_task_by_pid(pid);
- if (child) {
- if (peek_or_poke)
- child = find_thread_for_addr(child, addr);
- get_task_struct(child);
- }
- }
- read_unlock(&tasklist_lock);
- if (!child)
- goto out;
- ret = -EPERM;
- if (pid == 1) /* no messing around with init! */
- goto out_tsk;
-
- if (request == PTRACE_ATTACH) {
- ret = ptrace_attach(child);
- if (!ret)
- arch_ptrace_attach(child);
- goto out_tsk;
- }
+void
+user_disable_single_step (struct task_struct *child)
+{
+ struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
- ret = ptrace_check_attach(child, request == PTRACE_KILL);
- if (ret < 0)
- goto out_tsk;
+ /* make sure the single step/taken-branch trap bits are not set: */
+ clear_tsk_thread_flag(child, TIF_SINGLESTEP);
+ child_psr->ss = 0;
+ child_psr->tb = 0;
+}
- pt = task_pt_regs(child);
- sw = (struct switch_stack *) (child->thread.ksp + 16);
+/*
+ * Called by kernel/ptrace.c when detaching..
+ *
+ * Make sure the single step bit is not set.
+ */
+void
+ptrace_disable (struct task_struct *child)
+{
+ user_disable_single_step(child);
+}
+long
+arch_ptrace (struct task_struct *child, long request, long addr, long data)
+{
switch (request) {
- case PTRACE_PEEKTEXT:
- case PTRACE_PEEKDATA:
+ case PTRACE_PEEKTEXT:
+ case PTRACE_PEEKDATA:
/* read word at location addr */
- urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
- ret = ia64_peek(child, sw, urbs_end, addr, &data);
- if (ret == 0) {
- ret = data;
- /* ensure "ret" is not mistaken as an error code: */
- force_successful_syscall_return();
- }
- goto out_tsk;
-
- case PTRACE_POKETEXT:
- case PTRACE_POKEDATA:
- /* write the word at location addr */
- urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
- ret = ia64_poke(child, sw, urbs_end, addr, data);
-
- /* Make sure user RBS has the latest data */
- unw_init_from_blocked_task(&info, child);
- do_sync_rbs(&info, ia64_sync_user_rbs);
+ if (access_process_vm(child, addr, &data, sizeof(data), 0)
+ != sizeof(data))
+ return -EIO;
+ /* ensure return value is not mistaken for error code */
+ force_successful_syscall_return();
+ return data;
- goto out_tsk;
+ /* PTRACE_POKETEXT and PTRACE_POKEDATA is handled
+ * by the generic ptrace_request().
+ */
- case PTRACE_PEEKUSR:
+ case PTRACE_PEEKUSR:
/* read the word at addr in the USER area */
- if (access_uarea(child, addr, &data, 0) < 0) {
- ret = -EIO;
- goto out_tsk;
- }
- ret = data;
- /* ensure "ret" is not mistaken as an error code */
+ if (access_uarea(child, addr, &data, 0) < 0)
+ return -EIO;
+ /* ensure return value is not mistaken for error code */
force_successful_syscall_return();
- goto out_tsk;
+ return data;
- case PTRACE_POKEUSR:
+ case PTRACE_POKEUSR:
/* write the word at addr in the USER area */
- if (access_uarea(child, addr, &data, 1) < 0) {
- ret = -EIO;
- goto out_tsk;
- }
- ret = 0;
- goto out_tsk;
+ if (access_uarea(child, addr, &data, 1) < 0)
+ return -EIO;
+ return 0;
- case PTRACE_OLD_GETSIGINFO:
+ case PTRACE_OLD_GETSIGINFO:
/* for backwards-compatibility */
- ret = ptrace_request(child, PTRACE_GETSIGINFO, addr, data);
- goto out_tsk;
+ return ptrace_request(child, PTRACE_GETSIGINFO, addr, data);
- case PTRACE_OLD_SETSIGINFO:
+ case PTRACE_OLD_SETSIGINFO:
/* for backwards-compatibility */
- ret = ptrace_request(child, PTRACE_SETSIGINFO, addr, data);
- goto out_tsk;
-
- case PTRACE_SYSCALL:
- /* continue and stop at next (return from) syscall */
- case PTRACE_CONT:
- /* restart after signal. */
- ret = -EIO;
- if (!valid_signal(data))
- goto out_tsk;
- if (request == PTRACE_SYSCALL)
- set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- else
- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- child->exit_code = data;
+ return ptrace_request(child, PTRACE_SETSIGINFO, addr, data);
- /*
- * Make sure the single step/taken-branch trap bits
- * are not set:
- */
- clear_tsk_thread_flag(child, TIF_SINGLESTEP);
- ia64_psr(pt)->ss = 0;
- ia64_psr(pt)->tb = 0;
+ case PTRACE_GETREGS:
+ return ptrace_getregs(child,
+ (struct pt_all_user_regs __user *) data);
- wake_up_process(child);
- ret = 0;
- goto out_tsk;
+ case PTRACE_SETREGS:
+ return ptrace_setregs(child,
+ (struct pt_all_user_regs __user *) data);
- case PTRACE_KILL:
- /*
- * Make the child exit. Best I can do is send it a
- * sigkill. Perhaps it should be put in the status
- * that it wants to exit.
- */
- if (child->exit_state == EXIT_ZOMBIE)
- /* already dead */
- goto out_tsk;
- child->exit_code = SIGKILL;
-
- ptrace_disable(child);
- wake_up_process(child);
- ret = 0;
- goto out_tsk;
-
- case PTRACE_SINGLESTEP:
- /* let child execute for one instruction */
- case PTRACE_SINGLEBLOCK:
- ret = -EIO;
- if (!valid_signal(data))
- goto out_tsk;
-
- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- set_tsk_thread_flag(child, TIF_SINGLESTEP);
- if (request == PTRACE_SINGLESTEP) {
- ia64_psr(pt)->ss = 1;
- } else {
- ia64_psr(pt)->tb = 1;
- }
- child->exit_code = data;
-
- /* give it a chance to run. */
- wake_up_process(child);
- ret = 0;
- goto out_tsk;
-
- case PTRACE_DETACH:
- /* detach a process that was attached. */
- ret = ptrace_detach(child, data);
- goto out_tsk;
-
- case PTRACE_GETREGS:
- ret = ptrace_getregs(child,
- (struct pt_all_user_regs __user *) data);
- goto out_tsk;
-
- case PTRACE_SETREGS:
- ret = ptrace_setregs(child,
- (struct pt_all_user_regs __user *) data);
- goto out_tsk;
-
- default:
- ret = ptrace_request(child, request, addr, data);
- goto out_tsk;
+ default:
+ return ptrace_request(child, request, addr, data);
}
- out_tsk:
- put_task_struct(child);
- out:
- unlock_kernel();
- return ret;
}
diff --git a/arch/ia64/kernel/sal.c b/arch/ia64/kernel/sal.c
index f44fe841216..a3022dc48ef 100644
--- a/arch/ia64/kernel/sal.c
+++ b/arch/ia64/kernel/sal.c
@@ -109,6 +109,13 @@ check_versions (struct ia64_sal_systab *systab)
sal_revision = SAL_VERSION_CODE(2, 8);
sal_version = SAL_VERSION_CODE(0, 0);
}
+
+ if (ia64_platform_is("sn2") && (sal_revision == SAL_VERSION_CODE(2, 9)))
+ /*
+ * SGI Altix has hard-coded version 2.9 in their prom
+ * but they actually implement 3.2, so let's fix it here.
+ */
+ sal_revision = SAL_VERSION_CODE(3, 2);
}
static void __init
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index ebd1a09f320..4aa9eaea76c 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -690,7 +690,7 @@ get_model_name(__u8 family, __u8 model)
if (overflow++ == 0)
printk(KERN_ERR
"%s: Table overflow. Some processor model information will be missing\n",
- __FUNCTION__);
+ __func__);
return "Unknown";
}
@@ -785,7 +785,7 @@ get_max_cacheline_size (void)
status = ia64_pal_cache_summary(&levels, &unique_caches);
if (status != 0) {
printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
- __FUNCTION__, status);
+ __func__, status);
max = SMP_CACHE_BYTES;
/* Safest setup for "flush_icache_range()" */
ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
@@ -798,7 +798,7 @@ get_max_cacheline_size (void)
if (status != 0) {
printk(KERN_ERR
"%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n",
- __FUNCTION__, l, status);
+ __func__, l, status);
max = SMP_CACHE_BYTES;
/* The safest setup for "flush_icache_range()" */
cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
@@ -814,7 +814,7 @@ get_max_cacheline_size (void)
if (status != 0) {
printk(KERN_ERR
"%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n",
- __FUNCTION__, l, status);
+ __func__, l, status);
/* The safest setup for "flush_icache_range()" */
cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
}
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index 309da3567bc..5740296c35a 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -342,15 +342,33 @@ setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set,
new_sp = scr->pt.r12;
tramp_addr = (unsigned long) __kernel_sigtramp;
- if ((ka->sa.sa_flags & SA_ONSTACK) && sas_ss_flags(new_sp) == 0) {
- new_sp = current->sas_ss_sp + current->sas_ss_size;
- /*
- * We need to check for the register stack being on the signal stack
- * separately, because it's switched separately (memory stack is switched
- * in the kernel, register stack is switched in the signal trampoline).
- */
- if (!rbs_on_sig_stack(scr->pt.ar_bspstore))
- new_rbs = (current->sas_ss_sp + sizeof(long) - 1) & ~(sizeof(long) - 1);
+ if (ka->sa.sa_flags & SA_ONSTACK) {
+ int onstack = sas_ss_flags(new_sp);
+
+ if (onstack == 0) {
+ new_sp = current->sas_ss_sp + current->sas_ss_size;
+ /*
+ * We need to check for the register stack being on the
+ * signal stack separately, because it's switched
+ * separately (memory stack is switched in the kernel,
+ * register stack is switched in the signal trampoline).
+ */
+ if (!rbs_on_sig_stack(scr->pt.ar_bspstore))
+ new_rbs = ALIGN(current->sas_ss_sp,
+ sizeof(long));
+ } else if (onstack == SS_ONSTACK) {
+ unsigned long check_sp;
+
+ /*
+ * If we are on the alternate signal stack and would
+ * overflow it, don't. Return an always-bogus address
+ * instead so we will die with SIGSEGV.
+ */
+ check_sp = (new_sp - sizeof(*frame)) & -STACK_ALIGN;
+ if (!likely(on_sig_stack(check_sp)))
+ return force_sigsegv_info(sig, (void __user *)
+ check_sp);
+ }
}
frame = (void __user *) ((new_sp - sizeof(*frame)) & -STACK_ALIGN);
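
The overflow check added above rounds the candidate frame address down with "& -STACK_ALIGN", which clears the low bits whenever the alignment is a power of two, before testing that the frame still lies on the alternate stack. A quick demonstration of the align-down idiom:

#include <stdio.h>

#define STACK_ALIGN 16UL	/* must be a power of two */

int main(void)
{
	unsigned long sp = 0x1007;

	/* -16UL == 0xffff...fff0, so the AND clears the low four bits */
	printf("0x%lx -> 0x%lx\n", sp, sp & -STACK_ALIGN);	/* 0x1007 -> 0x1000 */
	return 0;
}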
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c
index 52f70bbc192..6903361d11a 100644
--- a/arch/ia64/kernel/unaligned.c
+++ b/arch/ia64/kernel/unaligned.c
@@ -28,7 +28,7 @@ extern int die_if_kernel(char *str, struct pt_regs *regs, long err);
#undef DEBUG_UNALIGNED_TRAP
#ifdef DEBUG_UNALIGNED_TRAP
-# define DPRINT(a...) do { printk("%s %u: ", __FUNCTION__, __LINE__); printk (a); } while (0)
+# define DPRINT(a...) do { printk("%s %u: ", __func__, __LINE__); printk (a); } while (0)
# define DDUMP(str,vp,len) dump(str, vp, len)
static void
@@ -674,7 +674,7 @@ emulate_load_updates (update_t type, load_store_t ld, struct pt_regs *regs, unsi
* just in case.
*/
if (ld.x6_op == 1 || ld.x6_op == 3) {
- printk(KERN_ERR "%s: register update on speculative load, error\n", __FUNCTION__);
+ printk(KERN_ERR "%s: register update on speculative load, error\n", __func__);
if (die_if_kernel("unaligned reference on speculative load with register update\n",
regs, 30))
return;
@@ -1104,7 +1104,7 @@ emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs
*/
if (ld.x6_op == 1 || ld.x6_op == 3)
printk(KERN_ERR "%s: register update on speculative load pair, error\n",
- __FUNCTION__);
+ __func__);
setreg(ld.r3, ifa, 0, regs);
}
diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c
index c1bdb513181..67810b77d99 100644
--- a/arch/ia64/kernel/unwind.c
+++ b/arch/ia64/kernel/unwind.c
@@ -257,7 +257,7 @@ pt_regs_off (unsigned long reg)
off = unw.pt_regs_offsets[reg];
if (off < 0) {
- UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __FUNCTION__, reg);
+ UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __func__, reg);
off = 0;
}
return (unsigned long) off;
@@ -268,13 +268,13 @@ get_scratch_regs (struct unw_frame_info *info)
{
if (!info->pt) {
/* This should not happen with valid unwind info. */
- UNW_DPRINT(0, "unwind.%s: bad unwind info: resetting info->pt\n", __FUNCTION__);
+ UNW_DPRINT(0, "unwind.%s: bad unwind info: resetting info->pt\n", __func__);
if (info->flags & UNW_FLAG_INTERRUPT_FRAME)
info->pt = (unsigned long) ((struct pt_regs *) info->psp - 1);
else
info->pt = info->sp - 16;
}
- UNW_DPRINT(3, "unwind.%s: sp 0x%lx pt 0x%lx\n", __FUNCTION__, info->sp, info->pt);
+ UNW_DPRINT(3, "unwind.%s: sp 0x%lx pt 0x%lx\n", __func__, info->sp, info->pt);
return (struct pt_regs *) info->pt;
}
@@ -294,7 +294,7 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char
return 0;
}
UNW_DPRINT(0, "unwind.%s: trying to access non-existent r%u\n",
- __FUNCTION__, regnum);
+ __func__, regnum);
return -1;
}
@@ -341,7 +341,7 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char
{
UNW_DPRINT(0, "unwind.%s: %p outside of regstk "
"[0x%lx-0x%lx)\n",
- __FUNCTION__, (void *) addr,
+ __func__, (void *) addr,
info->regstk.limit,
info->regstk.top);
return -1;
@@ -374,7 +374,7 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char
|| (unsigned long) addr >= info->regstk.top)
{
UNW_DPRINT(0, "unwind.%s: ignoring attempt to access register outside "
- "of rbs\n", __FUNCTION__);
+ "of rbs\n", __func__);
return -1;
}
if ((unsigned long) nat_addr >= info->regstk.top)
@@ -385,7 +385,7 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char
if (write) {
if (read_only(addr)) {
UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
- __FUNCTION__);
+ __func__);
} else {
*addr = *val;
if (*nat)
@@ -427,13 +427,13 @@ unw_access_br (struct unw_frame_info *info, int regnum, unsigned long *val, int
default:
UNW_DPRINT(0, "unwind.%s: trying to access non-existent b%u\n",
- __FUNCTION__, regnum);
+ __func__, regnum);
return -1;
}
if (write)
if (read_only(addr)) {
UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
- __FUNCTION__);
+ __func__);
} else
*addr = *val;
else
@@ -450,7 +450,7 @@ unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val,
if ((unsigned) (regnum - 2) >= 126) {
UNW_DPRINT(0, "unwind.%s: trying to access non-existent f%u\n",
- __FUNCTION__, regnum);
+ __func__, regnum);
return -1;
}
@@ -482,7 +482,7 @@ unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val,
if (write)
if (read_only(addr)) {
UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
- __FUNCTION__);
+ __func__);
} else
*addr = *val;
else
@@ -572,14 +572,14 @@ unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int
default:
UNW_DPRINT(0, "unwind.%s: trying to access non-existent ar%u\n",
- __FUNCTION__, regnum);
+ __func__, regnum);
return -1;
}
if (write) {
if (read_only(addr)) {
UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
- __FUNCTION__);
+ __func__);
} else
*addr = *val;
} else
@@ -600,7 +600,7 @@ unw_access_pr (struct unw_frame_info *info, unsigned long *val, int write)
if (write) {
if (read_only(addr)) {
UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
- __FUNCTION__);
+ __func__);
} else
*addr = *val;
} else
@@ -699,7 +699,7 @@ decode_abreg (unsigned char abreg, int memory)
default:
break;
}
- UNW_DPRINT(0, "unwind.%s: bad abreg=0x%x\n", __FUNCTION__, abreg);
+ UNW_DPRINT(0, "unwind.%s: bad abreg=0x%x\n", __func__, abreg);
return UNW_REG_LC;
}
@@ -739,7 +739,7 @@ spill_next_when (struct unw_reg_info **regp, struct unw_reg_info *lim, unw_word
return;
}
}
- UNW_DPRINT(0, "unwind.%s: excess spill!\n", __FUNCTION__);
+ UNW_DPRINT(0, "unwind.%s: excess spill!\n", __func__);
}
static inline void
@@ -855,11 +855,11 @@ desc_abi (unsigned char abi, unsigned char context, struct unw_state_record *sr)
{
if (abi == 3 && context == 'i') {
sr->flags |= UNW_FLAG_INTERRUPT_FRAME;
- UNW_DPRINT(3, "unwind.%s: interrupt frame\n", __FUNCTION__);
+ UNW_DPRINT(3, "unwind.%s: interrupt frame\n", __func__);
}
else
UNW_DPRINT(0, "unwind%s: ignoring unwabi(abi=0x%x,context=0x%x)\n",
- __FUNCTION__, abi, context);
+ __func__, abi, context);
}
static inline void
@@ -1347,7 +1347,7 @@ script_emit (struct unw_script *script, struct unw_insn insn)
{
if (script->count >= UNW_MAX_SCRIPT_LEN) {
UNW_DPRINT(0, "unwind.%s: script exceeds maximum size of %u instructions!\n",
- __FUNCTION__, UNW_MAX_SCRIPT_LEN);
+ __func__, UNW_MAX_SCRIPT_LEN);
return;
}
script->insn[script->count++] = insn;
@@ -1389,7 +1389,7 @@ emit_nat_info (struct unw_state_record *sr, int i, struct unw_script *script)
default:
UNW_DPRINT(0, "unwind.%s: don't know how to emit nat info for where = %u\n",
- __FUNCTION__, r->where);
+ __func__, r->where);
return;
}
insn.opc = opc;
@@ -1446,7 +1446,7 @@ compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
val = offsetof(struct pt_regs, f6) + 16*(rval - 6);
else
UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n",
- __FUNCTION__, rval);
+ __func__, rval);
}
break;
@@ -1474,7 +1474,7 @@ compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
default:
UNW_DPRINT(0, "unwind%s: register %u has unexpected `where' value of %u\n",
- __FUNCTION__, i, r->where);
+ __func__, i, r->where);
break;
}
insn.opc = opc;
@@ -1547,10 +1547,10 @@ build_script (struct unw_frame_info *info)
r->when = UNW_WHEN_NEVER;
sr.pr_val = info->pr;
- UNW_DPRINT(3, "unwind.%s: ip 0x%lx\n", __FUNCTION__, ip);
+ UNW_DPRINT(3, "unwind.%s: ip 0x%lx\n", __func__, ip);
script = script_new(ip);
if (!script) {
- UNW_DPRINT(0, "unwind.%s: failed to create unwind script\n", __FUNCTION__);
+ UNW_DPRINT(0, "unwind.%s: failed to create unwind script\n", __func__);
STAT(unw.stat.script.build_time += ia64_get_itc() - start);
return NULL;
}
@@ -1569,7 +1569,7 @@ build_script (struct unw_frame_info *info)
if (!e) {
/* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */
UNW_DPRINT(1, "unwind.%s: no unwind info for ip=0x%lx (prev ip=0x%lx)\n",
- __FUNCTION__, ip, unw.cache[info->prev_script].ip);
+ __func__, ip, unw.cache[info->prev_script].ip);
sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
sr.curr.reg[UNW_REG_RP].when = -1;
sr.curr.reg[UNW_REG_RP].val = 0;
@@ -1618,13 +1618,13 @@ build_script (struct unw_frame_info *info)
sr.curr.reg[UNW_REG_RP].when = -1;
sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg;
UNW_DPRINT(1, "unwind.%s: using default for rp at ip=0x%lx where=%d val=0x%lx\n",
- __FUNCTION__, ip, sr.curr.reg[UNW_REG_RP].where,
+ __func__, ip, sr.curr.reg[UNW_REG_RP].where,
sr.curr.reg[UNW_REG_RP].val);
}
#ifdef UNW_DEBUG
UNW_DPRINT(1, "unwind.%s: state record for func 0x%lx, t=%u:\n",
- __FUNCTION__, table->segment_base + e->start_offset, sr.when_target);
+ __func__, table->segment_base + e->start_offset, sr.when_target);
for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) {
if (r->where != UNW_WHERE_NONE || r->when != UNW_WHEN_NEVER) {
UNW_DPRINT(1, " %s <- ", unw.preg_name[r - sr.curr.reg]);
@@ -1746,7 +1746,7 @@ run_script (struct unw_script *script, struct unw_frame_info *state)
} else {
s[dst] = 0;
UNW_DPRINT(0, "unwind.%s: no state->pt, dst=%ld, val=%ld\n",
- __FUNCTION__, dst, val);
+ __func__, dst, val);
}
break;
@@ -1756,7 +1756,7 @@ run_script (struct unw_script *script, struct unw_frame_info *state)
else {
s[dst] = 0;
UNW_DPRINT(0, "unwind.%s: UNW_INSN_MOVE_CONST bad val=%ld\n",
- __FUNCTION__, val);
+ __func__, val);
}
break;
@@ -1791,7 +1791,7 @@ run_script (struct unw_script *script, struct unw_frame_info *state)
|| s[val] < TASK_SIZE)
{
UNW_DPRINT(0, "unwind.%s: rejecting bad psp=0x%lx\n",
- __FUNCTION__, s[val]);
+ __func__, s[val]);
break;
}
#endif
@@ -1825,7 +1825,7 @@ find_save_locs (struct unw_frame_info *info)
if ((info->ip & (local_cpu_data->unimpl_va_mask | 0xf)) || info->ip < TASK_SIZE) {
/* don't let obviously bad addresses pollute the cache */
/* FIXME: should really be level 0 but it occurs too often. KAO */
- UNW_DPRINT(1, "unwind.%s: rejecting bad ip=0x%lx\n", __FUNCTION__, info->ip);
+ UNW_DPRINT(1, "unwind.%s: rejecting bad ip=0x%lx\n", __func__, info->ip);
info->rp_loc = NULL;
return -1;
}
@@ -1838,7 +1838,7 @@ find_save_locs (struct unw_frame_info *info)
spin_unlock_irqrestore(&unw.lock, flags);
UNW_DPRINT(0,
"unwind.%s: failed to locate/build unwind script for ip %lx\n",
- __FUNCTION__, info->ip);
+ __func__, info->ip);
return -1;
}
have_write_lock = 1;
@@ -1882,21 +1882,21 @@ unw_unwind (struct unw_frame_info *info)
if (!unw_valid(info, info->rp_loc)) {
/* FIXME: should really be level 0 but it occurs too often. KAO */
UNW_DPRINT(1, "unwind.%s: failed to locate return link (ip=0x%lx)!\n",
- __FUNCTION__, info->ip);
+ __func__, info->ip);
STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
return -1;
}
/* restore the ip */
ip = info->ip = *info->rp_loc;
if (ip < GATE_ADDR) {
- UNW_DPRINT(2, "unwind.%s: reached user-space (ip=0x%lx)\n", __FUNCTION__, ip);
+ UNW_DPRINT(2, "unwind.%s: reached user-space (ip=0x%lx)\n", __func__, ip);
STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
return -1;
}
/* validate the previous stack frame pointer */
if (!unw_valid(info, info->pfs_loc)) {
- UNW_DPRINT(0, "unwind.%s: failed to locate ar.pfs!\n", __FUNCTION__);
+ UNW_DPRINT(0, "unwind.%s: failed to locate ar.pfs!\n", __func__);
STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
return -1;
}
@@ -1912,13 +1912,13 @@ unw_unwind (struct unw_frame_info *info)
num_regs = *info->cfm_loc & 0x7f; /* size of frame */
info->pfs_loc =
(unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs));
- UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __FUNCTION__, info->pt);
+ UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __func__, info->pt);
} else
num_regs = (*info->cfm_loc >> 7) & 0x7f; /* size of locals */
info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs);
if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) {
UNW_DPRINT(0, "unwind.%s: bsp (0x%lx) out of range [0x%lx-0x%lx]\n",
- __FUNCTION__, info->bsp, info->regstk.limit, info->regstk.top);
+ __func__, info->bsp, info->regstk.limit, info->regstk.top);
STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
return -1;
}
@@ -1927,14 +1927,14 @@ unw_unwind (struct unw_frame_info *info)
info->sp = info->psp;
if (info->sp < info->memstk.top || info->sp > info->memstk.limit) {
UNW_DPRINT(0, "unwind.%s: sp (0x%lx) out of range [0x%lx-0x%lx]\n",
- __FUNCTION__, info->sp, info->memstk.top, info->memstk.limit);
+ __func__, info->sp, info->memstk.top, info->memstk.limit);
STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
return -1;
}
if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) {
UNW_DPRINT(0, "unwind.%s: ip, sp, bsp unchanged; stopping here (ip=0x%lx)\n",
- __FUNCTION__, ip);
+ __func__, ip);
STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
return -1;
}
@@ -1961,7 +1961,7 @@ unw_unwind_to_user (struct unw_frame_info *info)
if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp)
< IA64_PT_REGS_SIZE) {
UNW_DPRINT(0, "unwind.%s: ran off the top of the kernel stack\n",
- __FUNCTION__);
+ __func__);
break;
}
if (unw_is_intr_frame(info) &&
@@ -1971,13 +1971,13 @@ unw_unwind_to_user (struct unw_frame_info *info)
unw_get_rp(info, &ip);
UNW_DPRINT(0, "unwind.%s: failed to read "
"predicate register (ip=0x%lx)\n",
- __FUNCTION__, ip);
+ __func__, ip);
return -1;
}
} while (unw_unwind(info) >= 0);
unw_get_ip(info, &ip);
UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n",
- __FUNCTION__, ip);
+ __func__, ip);
return -1;
}
EXPORT_SYMBOL(unw_unwind_to_user);
@@ -2028,7 +2028,7 @@ init_frame_info (struct unw_frame_info *info, struct task_struct *t,
" pr 0x%lx\n"
" sw 0x%lx\n"
" sp 0x%lx\n",
- __FUNCTION__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit,
+ __func__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit,
info->pr, (unsigned long) info->sw, info->sp);
STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags));
}
@@ -2047,7 +2047,7 @@ unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct
" bsp 0x%lx\n"
" sol 0x%lx\n"
" ip 0x%lx\n",
- __FUNCTION__, info->bsp, sol, info->ip);
+ __func__, info->bsp, sol, info->ip);
find_save_locs(info);
}
@@ -2058,7 +2058,7 @@ unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t)
{
struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16);
- UNW_DPRINT(1, "unwind.%s\n", __FUNCTION__);
+ UNW_DPRINT(1, "unwind.%s\n", __func__);
unw_init_frame_info(info, t, sw);
}
EXPORT_SYMBOL(unw_init_from_blocked_task);
@@ -2088,7 +2088,7 @@ unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned lon
if (end - start <= 0) {
UNW_DPRINT(0, "unwind.%s: ignoring attempt to insert empty unwind table\n",
- __FUNCTION__);
+ __func__);
return NULL;
}
@@ -2119,14 +2119,14 @@ unw_remove_unwind_table (void *handle)
if (!handle) {
UNW_DPRINT(0, "unwind.%s: ignoring attempt to remove non-existent unwind table\n",
- __FUNCTION__);
+ __func__);
return;
}
table = handle;
if (table == &unw.kernel_table) {
UNW_DPRINT(0, "unwind.%s: sorry, freeing the kernel's unwind table is a "
- "no-can-do!\n", __FUNCTION__);
+ "no-can-do!\n", __func__);
return;
}
@@ -2139,7 +2139,7 @@ unw_remove_unwind_table (void *handle)
break;
if (!prev) {
UNW_DPRINT(0, "unwind.%s: failed to find unwind table %p\n",
- __FUNCTION__, (void *) table);
+ __func__, (void *) table);
spin_unlock_irqrestore(&unw.lock, flags);
return;
}
@@ -2185,7 +2185,7 @@ create_gate_table (void)
}
if (!punw) {
- printk("%s: failed to find gate DSO's unwind table!\n", __FUNCTION__);
+ printk("%s: failed to find gate DSO's unwind table!\n", __func__);
return 0;
}
@@ -2202,7 +2202,7 @@ create_gate_table (void)
unw.gate_table = kmalloc(size, GFP_KERNEL);
if (!unw.gate_table) {
unw.gate_table_size = 0;
- printk(KERN_ERR "%s: unable to create unwind data for gate page!\n", __FUNCTION__);
+ printk(KERN_ERR "%s: unable to create unwind data for gate page!\n", __func__);
return 0;
}
unw.gate_table_size = size;